xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_dataset.c (revision 1c8564a7573482b45fcc6f9bc0c2de70f92c193c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/dmu_objset.h>
27 #include <sys/dsl_dataset.h>
28 #include <sys/dsl_dir.h>
29 #include <sys/dsl_prop.h>
30 #include <sys/dsl_synctask.h>
31 #include <sys/dmu_traverse.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/arc.h>
34 #include <sys/zio.h>
35 #include <sys/zap.h>
36 #include <sys/unique.h>
37 #include <sys/zfs_context.h>
38 #include <sys/zfs_ioctl.h>
39 #include <sys/spa.h>
40 #include <sys/zfs_znode.h>
41 #include <sys/sunddi.h>
42 
43 static char *dsl_reaper = "the grim reaper";
44 
45 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
46 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
47 static dsl_checkfunc_t dsl_dataset_rollback_check;
48 static dsl_syncfunc_t dsl_dataset_rollback_sync;
49 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
50 
51 #define	DS_REF_MAX	(1ULL << 62)
52 
53 #define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE
54 
55 #define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
56 
57 
58 /*
59  * Figure out how much of this delta should be propagated to the dsl_dir
60  * layer.  If there's a refreservation, that space has already been
61  * partially accounted for in our ancestors.
62  */
63 static int64_t
64 parent_delta(dsl_dataset_t *ds, int64_t delta)
65 {
66 	uint64_t old_bytes, new_bytes;
67 
68 	if (ds->ds_reserved == 0)
69 		return (delta);
70 
71 	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
72 	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
73 
74 	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
75 	return (new_bytes - old_bytes);
76 }
77 
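/*
 * Worked example (illustrative, not part of the original source):
 * with ds_reserved = 10M and ds_unique_bytes = 4M, a delta of +2M
 * leaves both the old and new unique sizes below the reservation, so
 * old_bytes == new_bytes == 10M and the parent sees a delta of 0;
 * that space is already covered by the refreservation.  A delta of
 * +8M takes unique to 12M, so only the 2M beyond the reservation
 * propagates upward.
 */
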
78 void
79 dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
80 {
81 	int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
82 	int compressed = BP_GET_PSIZE(bp);
83 	int uncompressed = BP_GET_UCSIZE(bp);
84 	int64_t delta;
85 
86 	dprintf_bp(bp, "born, ds=%p\n", ds);
87 
88 	ASSERT(dmu_tx_is_syncing(tx));
89 	/* It could have been compressed away to nothing */
90 	if (BP_IS_HOLE(bp))
91 		return;
92 	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
93 	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
94 	if (ds == NULL) {
95 		/*
96 		 * Account for the meta-objset space in its placeholder
97 		 * dsl_dir.
98 		 */
99 		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
100 		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir,
101 		    used, compressed, uncompressed, tx);
102 		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
103 		return;
104 	}
105 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
106 	mutex_enter(&ds->ds_lock);
107 	delta = parent_delta(ds, used);
108 	ds->ds_phys->ds_used_bytes += used;
109 	ds->ds_phys->ds_compressed_bytes += compressed;
110 	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
111 	ds->ds_phys->ds_unique_bytes += used;
112 	mutex_exit(&ds->ds_lock);
113 	dsl_dir_diduse_space(ds->ds_dir, delta, compressed, uncompressed, tx);
114 }
115 
116 int
117 dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio,
118     dmu_tx_t *tx)
119 {
120 	int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
121 	int compressed = BP_GET_PSIZE(bp);
122 	int uncompressed = BP_GET_UCSIZE(bp);
123 
124 	ASSERT(dmu_tx_is_syncing(tx));
125 	/* No block pointer => nothing to free */
126 	if (BP_IS_HOLE(bp))
127 		return (0);
128 
129 	ASSERT(used > 0);
130 	if (ds == NULL) {
131 		int err;
132 		/*
133 		 * Account for the meta-objset space in its placeholder
134 		 * dsl_dir.
135 		 */
136 		err = dsl_free(pio, tx->tx_pool,
137 		    tx->tx_txg, bp, NULL, NULL, pio ? ARC_NOWAIT : ARC_WAIT);
138 		ASSERT(err == 0);
139 
140 		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir,
141 		    -used, -compressed, -uncompressed, tx);
142 		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
143 		return (used);
144 	}
145 	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
146 
147 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
148 
149 	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
150 		int err;
151 		int64_t delta;
152 
153 		dprintf_bp(bp, "freeing: %s", "");
154 		err = dsl_free(pio, tx->tx_pool,
155 		    tx->tx_txg, bp, NULL, NULL, pio ? ARC_NOWAIT : ARC_WAIT);
156 		ASSERT(err == 0);
157 
158 		mutex_enter(&ds->ds_lock);
159 		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
160 		    !DS_UNIQUE_IS_ACCURATE(ds));
161 		delta = parent_delta(ds, -used);
162 		ds->ds_phys->ds_unique_bytes -= used;
163 		mutex_exit(&ds->ds_lock);
164 		dsl_dir_diduse_space(ds->ds_dir,
165 		    delta, -compressed, -uncompressed, tx);
166 	} else {
167 		dprintf_bp(bp, "putting on dead list: %s", "");
168 		VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
169 		ASSERT3U(ds->ds_prev->ds_object, ==,
170 		    ds->ds_phys->ds_prev_snap_obj);
171 		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
172 		/* blocks born after prev's prev snapshot are now unique to prev */
173 		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
174 		    ds->ds_object && bp->blk_birth >
175 		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
176 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
177 			mutex_enter(&ds->ds_prev->ds_lock);
178 			ds->ds_prev->ds_phys->ds_unique_bytes += used;
179 			mutex_exit(&ds->ds_prev->ds_lock);
180 		}
181 	}
182 	mutex_enter(&ds->ds_lock);
183 	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
184 	ds->ds_phys->ds_used_bytes -= used;
185 	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
186 	ds->ds_phys->ds_compressed_bytes -= compressed;
187 	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
188 	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
189 	mutex_exit(&ds->ds_lock);
190 
191 	return (used);
192 }
193 
194 uint64_t
195 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
196 {
197 	uint64_t trysnap = 0;
198 
199 	if (ds == NULL)
200 		return (0);
201 	/*
202 	 * The snapshot creation could fail, but that would cause an
203 	 * incorrect FALSE return, which would only result in an
204 	 * overestimation of the amount of space that an operation would
205 	 * consume, which is OK.
206 	 *
207 	 * There's also a small window where we could miss a pending
208 	 * snapshot, because we could set the sync task in the quiescing
209 	 * phase.  So this should only be used as a guess.
210 	 */
211 	if (ds->ds_trysnap_txg >
212 	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
213 		trysnap = ds->ds_trysnap_txg;
214 	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
215 }
216 
217 int
218 dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth)
219 {
220 	return (blk_birth > dsl_dataset_prev_snap_txg(ds));
221 }
222 
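/*
 * Example (illustrative): if the most recent snapshot was taken in
 * txg 150, a block born in txg 100 is still referenced by that
 * snapshot and is not freeable; killing it from the head only moves
 * it to the deadlist.  A block born in txg 200 is unique to the head
 * and can be freed outright.
 */
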
223 /* ARGSUSED */
224 static void
225 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
226 {
227 	dsl_dataset_t *ds = dsv;
228 
229 	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
230 
231 	dprintf_ds(ds, "evicting %s\n", "");
232 
233 	unique_remove(ds->ds_fsid_guid);
234 
235 	if (ds->ds_user_ptr != NULL)
236 		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
237 
238 	if (ds->ds_prev) {
239 		dsl_dataset_drop_ref(ds->ds_prev, ds);
240 		ds->ds_prev = NULL;
241 	}
242 
243 	bplist_close(&ds->ds_deadlist);
244 	if (ds->ds_dir)
245 		dsl_dir_close(ds->ds_dir, ds);
246 
247 	ASSERT(!list_link_active(&ds->ds_synced_link));
248 
249 	mutex_destroy(&ds->ds_lock);
250 	mutex_destroy(&ds->ds_opening_lock);
251 	mutex_destroy(&ds->ds_deadlist.bpl_lock);
252 	rw_destroy(&ds->ds_rwlock);
253 	cv_destroy(&ds->ds_exclusive_cv);
254 
255 	kmem_free(ds, sizeof (dsl_dataset_t));
256 }
257 
258 static int
259 dsl_dataset_get_snapname(dsl_dataset_t *ds)
260 {
261 	dsl_dataset_phys_t *headphys;
262 	int err;
263 	dmu_buf_t *headdbuf;
264 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
265 	objset_t *mos = dp->dp_meta_objset;
266 
267 	if (ds->ds_snapname[0])
268 		return (0);
269 	if (ds->ds_phys->ds_next_snap_obj == 0)
270 		return (0);
271 
272 	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
273 	    FTAG, &headdbuf);
274 	if (err)
275 		return (err);
276 	headphys = headdbuf->db_data;
277 	err = zap_value_search(dp->dp_meta_objset,
278 	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
279 	dmu_buf_rele(headdbuf, FTAG);
280 	return (err);
281 }
282 
283 static int
284 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
285 {
286 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
287 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
288 	matchtype_t mt;
289 	int err;
290 
291 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
292 		mt = MT_FIRST;
293 	else
294 		mt = MT_EXACT;
295 
296 	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
297 	    value, mt, NULL, 0, NULL);
298 	if (err == ENOTSUP && mt == MT_FIRST)
299 		err = zap_lookup(mos, snapobj, name, 8, 1, value);
300 	return (err);
301 }
302 
303 static int
304 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
305 {
306 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
307 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
308 	matchtype_t mt;
309 	int err;
310 
311 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
312 		mt = MT_FIRST;
313 	else
314 		mt = MT_EXACT;
315 
316 	err = zap_remove_norm(mos, snapobj, name, mt, tx);
317 	if (err == ENOTSUP && mt == MT_FIRST)
318 		err = zap_remove(mos, snapobj, name, tx);
319 	return (err);
320 }
321 
322 static int
323 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
324     dsl_dataset_t **dsp)
325 {
326 	objset_t *mos = dp->dp_meta_objset;
327 	dmu_buf_t *dbuf;
328 	dsl_dataset_t *ds;
329 	int err;
330 
331 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
332 	    dsl_pool_sync_context(dp));
333 
334 	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
335 	if (err)
336 		return (err);
337 	ds = dmu_buf_get_user(dbuf);
338 	if (ds == NULL) {
339 		dsl_dataset_t *winner;
340 
341 		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
342 		ds->ds_dbuf = dbuf;
343 		ds->ds_object = dsobj;
344 		ds->ds_phys = dbuf->db_data;
345 
346 		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
347 		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
348 		mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT,
349 		    NULL);
350 		rw_init(&ds->ds_rwlock, 0, 0, 0);
351 		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
352 
353 		err = bplist_open(&ds->ds_deadlist,
354 		    mos, ds->ds_phys->ds_deadlist_obj);
355 		if (err == 0) {
356 			err = dsl_dir_open_obj(dp,
357 			    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
358 		}
359 		if (err) {
360 			/*
361 			 * we don't really need to close the bplist if we
362 			 * just opened it.
363 			 */
364 			mutex_destroy(&ds->ds_lock);
365 			mutex_destroy(&ds->ds_opening_lock);
366 			mutex_destroy(&ds->ds_deadlist.bpl_lock);
367 			rw_destroy(&ds->ds_rwlock);
368 			cv_destroy(&ds->ds_exclusive_cv);
369 			kmem_free(ds, sizeof (dsl_dataset_t));
370 			dmu_buf_rele(dbuf, tag);
371 			return (err);
372 		}
373 
374 		if (ds->ds_dir->dd_phys->dd_head_dataset_obj == dsobj) {
375 			ds->ds_snapname[0] = '\0';
376 			if (ds->ds_phys->ds_prev_snap_obj) {
377 				err = dsl_dataset_get_ref(dp,
378 				    ds->ds_phys->ds_prev_snap_obj,
379 				    ds, &ds->ds_prev);
380 			}
381 		} else if (zfs_flags & ZFS_DEBUG_SNAPNAMES) {
382 			err = dsl_dataset_get_snapname(ds);
383 		}
384 
385 		if (!dsl_dataset_is_snapshot(ds)) {
386 			/*
387 			 * In sync context, we're called with either no lock
388 			 * or with the write lock.  If we're not syncing,
389 			 * we're always called with the read lock held.
390 			 */
391 			boolean_t need_lock =
392 			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
393 			    dsl_pool_sync_context(dp);
394 
395 			if (need_lock)
396 				rw_enter(&dp->dp_config_rwlock, RW_READER);
397 
398 			err = dsl_prop_get_ds(ds,
399 			    "refreservation", sizeof (uint64_t), 1,
400 			    &ds->ds_reserved, NULL);
401 			if (err == 0) {
402 				err = dsl_prop_get_ds(ds,
403 				    "refquota", sizeof (uint64_t), 1,
404 				    &ds->ds_quota, NULL);
405 			}
406 
407 			if (need_lock)
408 				rw_exit(&dp->dp_config_rwlock);
409 		} else {
410 			ds->ds_reserved = ds->ds_quota = 0;
411 		}
412 
413 		if (err == 0) {
414 			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
415 			    dsl_dataset_evict);
416 		}
417 		if (err || winner) {
418 			bplist_close(&ds->ds_deadlist);
419 			if (ds->ds_prev)
420 				dsl_dataset_drop_ref(ds->ds_prev, ds);
421 			dsl_dir_close(ds->ds_dir, ds);
422 			mutex_destroy(&ds->ds_lock);
423 			mutex_destroy(&ds->ds_opening_lock);
424 			mutex_destroy(&ds->ds_deadlist.bpl_lock);
425 			rw_destroy(&ds->ds_rwlock);
426 			cv_destroy(&ds->ds_exclusive_cv);
427 			kmem_free(ds, sizeof (dsl_dataset_t));
428 			if (err) {
429 				dmu_buf_rele(dbuf, tag);
430 				return (err);
431 			}
432 			ds = winner;
433 		} else {
434 			ds->ds_fsid_guid =
435 			    unique_insert(ds->ds_phys->ds_fsid_guid);
436 		}
437 	}
438 	ASSERT3P(ds->ds_dbuf, ==, dbuf);
439 	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
440 	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
441 	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
442 	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
443 	mutex_enter(&ds->ds_lock);
444 	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
445 		mutex_exit(&ds->ds_lock);
446 		dmu_buf_rele(ds->ds_dbuf, tag);
447 		return (ENOENT);
448 	}
449 	mutex_exit(&ds->ds_lock);
450 	*dsp = ds;
451 	return (0);
452 }
453 
454 static int
455 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
456 {
457 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
458 
459 	/*
460 	 * In syncing context we don't want the rwlock: there
461 	 * may be an existing writer waiting for sync phase to
462 	 * finish.  We don't need to worry about such writers, since
463 	 * sync phase is single-threaded, so the writer can't be
464 	 * doing anything while we are active.
465 	 */
466 	if (dsl_pool_sync_context(dp)) {
467 		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
468 		return (0);
469 	}
470 
471 	/*
472 	 * Normal users will hold the ds_rwlock as a READER until they
473 	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
474 	 * drop their READER lock after they set the ds_owner field.
475 	 *
476 	 * If the dataset is being destroyed, the destroy thread will
477 	 * obtain a WRITER lock for exclusive access after it's done its
478 	 * open-context work and then change the ds_owner to
479 	 * dsl_reaper once destruction is assured.  So threads
480 	 * may block here temporarily, until the "destructibility" of
481 	 * the dataset is determined.
482 	 */
483 	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
484 	mutex_enter(&ds->ds_lock);
485 	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
486 		rw_exit(&dp->dp_config_rwlock);
487 		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
488 		if (DSL_DATASET_IS_DESTROYED(ds)) {
489 			mutex_exit(&ds->ds_lock);
490 			dsl_dataset_drop_ref(ds, tag);
491 			rw_enter(&dp->dp_config_rwlock, RW_READER);
492 			return (ENOENT);
493 		}
494 		rw_enter(&dp->dp_config_rwlock, RW_READER);
495 	}
496 	mutex_exit(&ds->ds_lock);
497 	return (0);
498 }
499 
500 int
501 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
502     dsl_dataset_t **dsp)
503 {
504 	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
505 
506 	if (err)
507 		return (err);
508 	return (dsl_dataset_hold_ref(*dsp, tag));
509 }
510 
511 int
512 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, int flags, void *owner,
513     dsl_dataset_t **dsp)
514 {
515 	int err = dsl_dataset_hold_obj(dp, dsobj, owner, dsp);
516 
517 	ASSERT(DS_MODE_TYPE(flags) != DS_MODE_USER);
518 
519 	if (err)
520 		return (err);
521 	if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
522 		dsl_dataset_rele(*dsp, owner);
523 		return (EBUSY);
524 	}
525 	return (0);
526 }
527 
528 int
529 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
530 {
531 	dsl_dir_t *dd;
532 	dsl_pool_t *dp;
533 	const char *snapname;
534 	uint64_t obj;
535 	int err = 0;
536 
537 	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
538 	if (err)
539 		return (err);
540 
541 	dp = dd->dd_pool;
542 	obj = dd->dd_phys->dd_head_dataset_obj;
543 	rw_enter(&dp->dp_config_rwlock, RW_READER);
544 	if (obj)
545 		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
546 	else
547 		err = ENOENT;
548 	if (err)
549 		goto out;
550 
551 	err = dsl_dataset_hold_ref(*dsp, tag);
552 
553 	/* we may be looking for a snapshot */
554 	if (err == 0 && snapname != NULL) {
555 		dsl_dataset_t *ds = NULL;
556 
557 		if (*snapname++ != '@') {
558 			dsl_dataset_rele(*dsp, tag);
559 			err = ENOENT;
560 			goto out;
561 		}
562 
563 		dprintf("looking for snapshot '%s'\n", snapname);
564 		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
565 		if (err == 0)
566 			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
567 		dsl_dataset_rele(*dsp, tag);
568 
569 		ASSERT3U((err == 0), ==, (ds != NULL));
570 
571 		if (ds) {
572 			mutex_enter(&ds->ds_lock);
573 			if (ds->ds_snapname[0] == 0)
574 				(void) strlcpy(ds->ds_snapname, snapname,
575 				    sizeof (ds->ds_snapname));
576 			mutex_exit(&ds->ds_lock);
577 			err = dsl_dataset_hold_ref(ds, tag);
578 			*dsp = err ? NULL : ds;
579 		}
580 	}
581 out:
582 	rw_exit(&dp->dp_config_rwlock);
583 	dsl_dir_close(dd, FTAG);
584 	return (err);
585 }
586 
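/*
 * Typical open-context usage (sketch, illustrative only; the dataset
 * name is hypothetical): hold a dataset or snapshot by name with a
 * tag, then release it with the same tag:
 *
 *	dsl_dataset_t *ds;
 *	int err = dsl_dataset_hold("tank/fs@yesterday", FTAG, &ds);
 *	if (err == 0) {
 *		dprintf_ds(ds, "held %s\n", "");
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 */
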
587 int
588 dsl_dataset_own(const char *name, int flags, void *owner, dsl_dataset_t **dsp)
589 {
590 	int err = dsl_dataset_hold(name, owner, dsp);
591 	if (err)
592 		return (err);
593 	if ((*dsp)->ds_phys->ds_num_children > 0 &&
594 	    !DS_MODE_IS_READONLY(flags)) {
595 		dsl_dataset_rele(*dsp, owner);
596 		return (EROFS);
597 	}
598 	if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
599 		dsl_dataset_rele(*dsp, owner);
600 		return (EBUSY);
601 	}
602 	return (0);
603 }
604 
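/*
 * Owning is the stronger form of holding (sketch, illustrative
 * only); pair dsl_dataset_own() with dsl_dataset_disown(), passing
 * the same owner pointer to both:
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_own("tank/fs", DS_MODE_READONLY, FTAG, &ds) == 0) {
 *		... work that requires ownership ...
 *		dsl_dataset_disown(ds, FTAG);
 *	}
 */
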
605 void
606 dsl_dataset_name(dsl_dataset_t *ds, char *name)
607 {
608 	if (ds == NULL) {
609 		(void) strcpy(name, "mos");
610 	} else {
611 		dsl_dir_name(ds->ds_dir, name);
612 		VERIFY(0 == dsl_dataset_get_snapname(ds));
613 		if (ds->ds_snapname[0]) {
614 			(void) strcat(name, "@");
615 			/*
616 			 * We use a "recursive" mutex so that we
617 			 * can call dprintf_ds() with ds_lock held.
618 			 */
619 			if (!MUTEX_HELD(&ds->ds_lock)) {
620 				mutex_enter(&ds->ds_lock);
621 				(void) strcat(name, ds->ds_snapname);
622 				mutex_exit(&ds->ds_lock);
623 			} else {
624 				(void) strcat(name, ds->ds_snapname);
625 			}
626 		}
627 	}
628 }
629 
630 static int
631 dsl_dataset_namelen(dsl_dataset_t *ds)
632 {
633 	int result;
634 
635 	if (ds == NULL) {
636 		result = 3;	/* "mos" */
637 	} else {
638 		result = dsl_dir_namelen(ds->ds_dir);
639 		VERIFY(0 == dsl_dataset_get_snapname(ds));
640 		if (ds->ds_snapname[0]) {
641 			++result;	/* adding one for the @-sign */
642 			if (!MUTEX_HELD(&ds->ds_lock)) {
643 				mutex_enter(&ds->ds_lock);
644 				result += strlen(ds->ds_snapname);
645 				mutex_exit(&ds->ds_lock);
646 			} else {
647 				result += strlen(ds->ds_snapname);
648 			}
649 		}
650 	}
651 
652 	return (result);
653 }
654 
655 void
656 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
657 {
658 	dmu_buf_rele(ds->ds_dbuf, tag);
659 }
660 
661 void
662 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
663 {
664 	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
665 		rw_exit(&ds->ds_rwlock);
666 	}
667 	dsl_dataset_drop_ref(ds, tag);
668 }
669 
670 void
671 dsl_dataset_disown(dsl_dataset_t *ds, void *owner)
672 {
673 	ASSERT((ds->ds_owner == owner && ds->ds_dbuf) ||
674 	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
675 
676 	mutex_enter(&ds->ds_lock);
677 	ds->ds_owner = NULL;
678 	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
679 		rw_exit(&ds->ds_rwlock);
680 		cv_broadcast(&ds->ds_exclusive_cv);
681 	}
682 	mutex_exit(&ds->ds_lock);
683 	if (ds->ds_dbuf)
684 		dsl_dataset_drop_ref(ds, owner);
685 	else
686 		dsl_dataset_evict(ds->ds_dbuf, ds);
687 }
688 
689 boolean_t
690 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *owner)
691 {
692 	boolean_t gotit = FALSE;
693 
694 	mutex_enter(&ds->ds_lock);
695 	if (ds->ds_owner == NULL &&
696 	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
697 		ds->ds_owner = owner;
698 		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
699 			rw_exit(&ds->ds_rwlock);
700 		gotit = TRUE;
701 	}
702 	mutex_exit(&ds->ds_lock);
703 	return (gotit);
704 }
705 
706 void
707 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
708 {
709 	ASSERT3P(owner, ==, ds->ds_owner);
710 	if (!RW_WRITE_HELD(&ds->ds_rwlock))
711 		rw_enter(&ds->ds_rwlock, RW_WRITER);
712 }
713 
714 uint64_t
715 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
716     uint64_t flags, dmu_tx_t *tx)
717 {
718 	dsl_pool_t *dp = dd->dd_pool;
719 	dmu_buf_t *dbuf;
720 	dsl_dataset_phys_t *dsphys;
721 	uint64_t dsobj;
722 	objset_t *mos = dp->dp_meta_objset;
723 
724 	if (origin == NULL)
725 		origin = dp->dp_origin_snap;
726 
727 	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
728 	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
729 	ASSERT(dmu_tx_is_syncing(tx));
730 	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
731 
732 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
733 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
734 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
735 	dmu_buf_will_dirty(dbuf, tx);
736 	dsphys = dbuf->db_data;
737 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
738 	dsphys->ds_dir_obj = dd->dd_object;
739 	dsphys->ds_flags = flags;
740 	dsphys->ds_fsid_guid = unique_create();
741 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
742 	    sizeof (dsphys->ds_guid));
743 	dsphys->ds_snapnames_zapobj =
744 	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
745 	    DMU_OT_NONE, 0, tx);
746 	dsphys->ds_creation_time = gethrestime_sec();
747 	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
748 	dsphys->ds_deadlist_obj =
749 	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
750 
751 	if (origin) {
752 		dsphys->ds_prev_snap_obj = origin->ds_object;
753 		dsphys->ds_prev_snap_txg =
754 		    origin->ds_phys->ds_creation_txg;
755 		dsphys->ds_used_bytes =
756 		    origin->ds_phys->ds_used_bytes;
757 		dsphys->ds_compressed_bytes =
758 		    origin->ds_phys->ds_compressed_bytes;
759 		dsphys->ds_uncompressed_bytes =
760 		    origin->ds_phys->ds_uncompressed_bytes;
761 		dsphys->ds_bp = origin->ds_phys->ds_bp;
762 		dsphys->ds_flags |= origin->ds_phys->ds_flags;
763 
764 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
765 		origin->ds_phys->ds_num_children++;
766 
767 		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
768 			if (origin->ds_phys->ds_next_clones_obj == 0) {
769 				origin->ds_phys->ds_next_clones_obj =
770 				    zap_create(mos,
771 				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
772 			}
773 			VERIFY(0 == zap_add_int(mos,
774 			    origin->ds_phys->ds_next_clones_obj,
775 			    dsobj, tx));
776 		}
777 
778 		dmu_buf_will_dirty(dd->dd_dbuf, tx);
779 		dd->dd_phys->dd_origin_obj = origin->ds_object;
780 	}
781 
782 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
783 		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
784 
785 	dmu_buf_rele(dbuf, FTAG);
786 
787 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
788 	dd->dd_phys->dd_head_dataset_obj = dsobj;
789 
790 	return (dsobj);
791 }
792 
793 uint64_t
794 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
795     dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
796 {
797 	dsl_pool_t *dp = pdd->dd_pool;
798 	uint64_t dsobj, ddobj;
799 	dsl_dir_t *dd;
800 
801 	ASSERT(lastname[0] != '@');
802 
803 	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
804 	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
805 
806 	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
807 
808 	dsl_deleg_set_create_perms(dd, tx, cr);
809 
810 	dsl_dir_close(dd, FTAG);
811 
812 	return (dsobj);
813 }
814 
815 struct destroyarg {
816 	dsl_sync_task_group_t *dstg;
817 	char *snapname;
818 	char *failed;
819 };
820 
821 static int
822 dsl_snapshot_destroy_one(char *name, void *arg)
823 {
824 	struct destroyarg *da = arg;
825 	dsl_dataset_t *ds;
826 	char *cp;
827 	int err;
828 
829 	(void) strcat(name, "@");
830 	(void) strcat(name, da->snapname);
831 	err = dsl_dataset_own(name, DS_MODE_READONLY | DS_MODE_INCONSISTENT,
832 	    da->dstg, &ds);
833 	cp = strchr(name, '@');
834 	*cp = '\0';
835 	if (err == 0) {
836 		dsl_dataset_make_exclusive(ds, da->dstg);
837 		if (ds->ds_user_ptr) {
838 			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
839 			ds->ds_user_ptr = NULL;
840 		}
841 		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
842 		    dsl_dataset_destroy_sync, ds, da->dstg, 0);
843 	} else if (err == ENOENT) {
844 		err = 0;
845 	} else {
846 		(void) strcpy(da->failed, name);
847 	}
848 	return (err);
849 }
850 
851 /*
852  * Destroy 'snapname' in all descendants of 'fsname'.
853  */
854 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
855 int
856 dsl_snapshots_destroy(char *fsname, char *snapname)
857 {
858 	int err;
859 	struct destroyarg da;
860 	dsl_sync_task_t *dst;
861 	spa_t *spa;
862 
863 	err = spa_open(fsname, &spa, FTAG);
864 	if (err)
865 		return (err);
866 	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
867 	da.snapname = snapname;
868 	da.failed = fsname;
869 
870 	err = dmu_objset_find(fsname,
871 	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
872 
873 	if (err == 0)
874 		err = dsl_sync_task_group_wait(da.dstg);
875 
876 	for (dst = list_head(&da.dstg->dstg_tasks); dst;
877 	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
878 		dsl_dataset_t *ds = dst->dst_arg1;
879 		/*
880 		 * Return the file system name that triggered the error
881 		 */
882 		if (dst->dst_err) {
883 			dsl_dataset_name(ds, fsname);
884 			*strchr(fsname, '@') = '\0';
885 		}
886 		dsl_dataset_disown(ds, da.dstg);
887 	}
888 
889 	dsl_sync_task_group_destroy(da.dstg);
890 	spa_close(spa, FTAG);
891 	return (err);
892 }
893 
894 /*
895  * ds must be opened as OWNER.  On return (whether successful or not),
896  * ds will be closed and caller can no longer dereference it.
897  */
898 int
899 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag)
900 {
901 	int err;
902 	dsl_sync_task_group_t *dstg;
903 	objset_t *os;
904 	dsl_dir_t *dd;
905 	uint64_t obj;
906 
907 	if (dsl_dataset_is_snapshot(ds)) {
908 		/* Destroying a snapshot is simpler */
909 		dsl_dataset_make_exclusive(ds, tag);
910 
911 		if (ds->ds_user_ptr) {
912 			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
913 			ds->ds_user_ptr = NULL;
914 		}
915 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
916 		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
917 		    ds, tag, 0);
918 		goto out;
919 	}
920 
921 	dd = ds->ds_dir;
922 
923 	/*
924 	 * Check for errors and mark this ds as inconsistent, in
925 	 * case we crash while freeing the objects.
926 	 */
927 	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
928 	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
929 	if (err)
930 		goto out;
931 
932 	err = dmu_objset_open_ds(ds, DMU_OST_ANY, &os);
933 	if (err)
934 		goto out;
935 
936 	/*
937 	 * remove the objects in open context, so that we won't
938 	 * have too much to do in syncing context.
939 	 */
940 	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
941 	    ds->ds_phys->ds_prev_snap_txg)) {
942 		/*
943 		 * Ignore errors, if there is not enough disk space
944 		 * we will deal with it in dsl_dataset_destroy_sync().
945 		 */
946 		(void) dmu_free_object(os, obj);
947 	}
948 
949 	dmu_objset_close(os);
950 	if (err != ESRCH)
951 		goto out;
952 
953 	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
954 	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
955 	rw_exit(&dd->dd_pool->dp_config_rwlock);
956 
957 	if (err)
958 		goto out;
959 
960 	if (ds->ds_user_ptr) {
961 		/*
962 		 * We need to sync out all in-flight IO before we try
963 		 * to evict (the dataset evict func is trying to clear
964 		 * the cached entries for this dataset in the ARC).
965 		 */
966 		txg_wait_synced(dd->dd_pool, 0);
967 	}
968 
969 	/*
970 	 * Blow away the dsl_dir + head dataset.
971 	 */
972 	dsl_dataset_make_exclusive(ds, tag);
973 	if (ds->ds_user_ptr) {
974 		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
975 		ds->ds_user_ptr = NULL;
976 	}
977 	dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
978 	dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
979 	    dsl_dataset_destroy_sync, ds, tag, 0);
980 	dsl_sync_task_create(dstg, dsl_dir_destroy_check,
981 	    dsl_dir_destroy_sync, dd, FTAG, 0);
982 	err = dsl_sync_task_group_wait(dstg);
983 	dsl_sync_task_group_destroy(dstg);
984 	/* if it is successful, dsl_dir_destroy_sync will close the dd */
985 	if (err)
986 		dsl_dir_close(dd, FTAG);
987 out:
988 	dsl_dataset_disown(ds, tag);
989 	return (err);
990 }
991 
992 int
993 dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost)
994 {
995 	int err;
996 
997 	ASSERT(ds->ds_owner);
998 
999 	dsl_dataset_make_exclusive(ds, ds->ds_owner);
1000 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1001 	    dsl_dataset_rollback_check, dsl_dataset_rollback_sync,
1002 	    ds, &ost, 0);
1003 	/* drop exclusive access */
1004 	mutex_enter(&ds->ds_lock);
1005 	rw_exit(&ds->ds_rwlock);
1006 	cv_broadcast(&ds->ds_exclusive_cv);
1007 	mutex_exit(&ds->ds_lock);
1008 	return (err);
1009 }
1010 
1011 void *
1012 dsl_dataset_set_user_ptr(dsl_dataset_t *ds,
1013     void *p, dsl_dataset_evict_func_t func)
1014 {
1015 	void *old;
1016 
1017 	mutex_enter(&ds->ds_lock);
1018 	old = ds->ds_user_ptr;
1019 	if (old == NULL) {
1020 		ds->ds_user_ptr = p;
1021 		ds->ds_user_evict_func = func;
1022 	}
1023 	mutex_exit(&ds->ds_lock);
1024 	return (old);
1025 }
1026 
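/*
 * Note the set-once semantics (sketch, illustrative only):
 * dsl_dataset_set_user_ptr() returns the existing pointer if one is
 * already installed, in which case the new value was NOT set and the
 * caller should fall back to the returned value.  'os' and
 * my_evict_func below are hypothetical:
 *
 *	void *old = dsl_dataset_set_user_ptr(ds, os, my_evict_func);
 *	if (old != NULL) {
 *		... lost the race; 'os' was not installed ...
 *	}
 */
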
1027 void *
1028 dsl_dataset_get_user_ptr(dsl_dataset_t *ds)
1029 {
1030 	return (ds->ds_user_ptr);
1031 }
1032 
1033 
1034 blkptr_t *
1035 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1036 {
1037 	return (&ds->ds_phys->ds_bp);
1038 }
1039 
1040 void
1041 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1042 {
1043 	ASSERT(dmu_tx_is_syncing(tx));
1044 	/* If it's the meta-objset, set dp_meta_rootbp */
1045 	if (ds == NULL) {
1046 		tx->tx_pool->dp_meta_rootbp = *bp;
1047 	} else {
1048 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
1049 		ds->ds_phys->ds_bp = *bp;
1050 	}
1051 }
1052 
1053 spa_t *
1054 dsl_dataset_get_spa(dsl_dataset_t *ds)
1055 {
1056 	return (ds->ds_dir->dd_pool->dp_spa);
1057 }
1058 
1059 void
1060 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1061 {
1062 	dsl_pool_t *dp;
1063 
1064 	if (ds == NULL) /* this is the meta-objset */
1065 		return;
1066 
1067 	ASSERT(ds->ds_user_ptr != NULL);
1068 
1069 	if (ds->ds_phys->ds_next_snap_obj != 0)
1070 		panic("dirtying snapshot!");
1071 
1072 	dp = ds->ds_dir->dd_pool;
1073 
1074 	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1075 		/* up the hold count until we can be written out */
1076 		dmu_buf_add_ref(ds->ds_dbuf, ds);
1077 	}
1078 }
1079 
1080 /*
1081  * The unique space in the head dataset can be calculated by subtracting
1082  * the space used in the most recent snapshot, that is still being used
1083  * in this file system, from the space currently in use.  To figure out
1084  * the space in the most recent snapshot still in use, we need to take
1085  * the total space used in the snapshot and subtract out the space that
1086  * has been freed up since the snapshot was taken.
1087  */
1088 static void
1089 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1090 {
1091 	uint64_t mrs_used;
1092 	uint64_t dlused, dlcomp, dluncomp;
1093 
1094 	ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj);
1095 
1096 	if (ds->ds_phys->ds_prev_snap_obj != 0)
1097 		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1098 	else
1099 		mrs_used = 0;
1100 
1101 	VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
1102 	    &dluncomp));
1103 
1104 	ASSERT3U(dlused, <=, mrs_used);
1105 	ds->ds_phys->ds_unique_bytes =
1106 	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1107 
1108 	if (!DS_UNIQUE_IS_ACCURATE(ds) &&
1109 	    spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1110 	    SPA_VERSION_UNIQUE_ACCURATE)
1111 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1112 }
1113 
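/*
 * Worked example (illustrative): if the head references 10G
 * (ds_used_bytes), the most recent snapshot referenced 8G when it
 * was taken (mrs_used), and 3G of that has since been freed onto the
 * deadlist (dlused), then the snapshot still holds 8G - 3G = 5G of
 * live data, so the head's unique space is 10G - 5G = 5G.
 */
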
1114 static uint64_t
1115 dsl_dataset_unique(dsl_dataset_t *ds)
1116 {
1117 	if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds))
1118 		dsl_dataset_recalc_head_uniq(ds);
1119 
1120 	return (ds->ds_phys->ds_unique_bytes);
1121 }
1122 
1123 struct killarg {
1124 	int64_t *usedp;
1125 	int64_t *compressedp;
1126 	int64_t *uncompressedp;
1127 	zio_t *zio;
1128 	dmu_tx_t *tx;
1129 };
1130 
1131 static int
1132 kill_blkptr(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
1133 {
1134 	struct killarg *ka = arg;
1135 	blkptr_t *bp = &bc->bc_blkptr;
1136 
1137 	ASSERT3U(bc->bc_errno, ==, 0);
1138 
1139 	/*
1140 	 * Since this callback is not called concurrently, no lock is
1141 	 * needed on the accounting values.
1142 	 */
1143 	*ka->usedp += bp_get_dasize(spa, bp);
1144 	*ka->compressedp += BP_GET_PSIZE(bp);
1145 	*ka->uncompressedp += BP_GET_UCSIZE(bp);
1146 	/* XXX check for EIO? */
1147 	(void) dsl_free(ka->zio, spa_get_dsl(spa), ka->tx->tx_txg,
1148 	    bp, NULL, NULL, ARC_NOWAIT);
1149 	return (0);
1150 }
1151 
1152 /* ARGSUSED */
1153 static int
1154 dsl_dataset_rollback_check(void *arg1, void *arg2, dmu_tx_t *tx)
1155 {
1156 	dsl_dataset_t *ds = arg1;
1157 	dmu_objset_type_t *ost = arg2;
1158 
1159 	/*
1160 	 * We can only roll back to emptyness if it is a ZPL objset.
1161 	 * We can only roll back to emptiness if it is a ZPL objset.
1162 	if (*ost != DMU_OST_ZFS && ds->ds_phys->ds_prev_snap_txg == 0)
1163 		return (EINVAL);
1164 
1165 	/*
1166 	 * This must not be a snapshot.
1167 	 */
1168 	if (ds->ds_phys->ds_next_snap_obj != 0)
1169 		return (EINVAL);
1170 
1171 	/*
1172 	 * If we made changes this txg, traverse_dsl_dataset won't find
1173 	 * them.  Try again.
1174 	 */
1175 	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1176 		return (EAGAIN);
1177 
1178 	return (0);
1179 }
1180 
1181 /* ARGSUSED */
1182 static void
1183 dsl_dataset_rollback_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1184 {
1185 	dsl_dataset_t *ds = arg1;
1186 	dmu_objset_type_t *ost = arg2;
1187 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1188 
1189 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1190 
1191 	/*
1192 	 * Before the roll back destroy the zil.
1193 	 */
1194 	if (ds->ds_user_ptr != NULL) {
1195 		zil_rollback_destroy(
1196 		    ((objset_impl_t *)ds->ds_user_ptr)->os_zil, tx);
1197 
1198 		/*
1199 		 * We need to make sure that the objset_impl_t is reopened after
1200 		 * we do the rollback, otherwise it will have the wrong
1201 		 * objset_phys_t.  Normally this would happen when this
1202 		 * dataset-open is closed, thus causing the
1203 		 * dataset to be immediately evicted.  But when doing "zfs recv
1204 		 * -F", we reopen the objset before that, so that there is no
1205 		 * window where the dataset is closed and inconsistent.
1206 		 */
1207 		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
1208 		ds->ds_user_ptr = NULL;
1209 	}
1210 
1211 	/* Zero out the deadlist. */
1212 	bplist_close(&ds->ds_deadlist);
1213 	bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1214 	ds->ds_phys->ds_deadlist_obj =
1215 	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
1216 	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
1217 	    ds->ds_phys->ds_deadlist_obj));
1218 
1219 	{
1220 		/* Free blkptrs that we gave birth to */
1221 		zio_t *zio;
1222 		int64_t used = 0, compressed = 0, uncompressed = 0;
1223 		struct killarg ka;
1224 		int64_t delta;
1225 
1226 		zio = zio_root(tx->tx_pool->dp_spa, NULL, NULL,
1227 		    ZIO_FLAG_MUSTSUCCEED);
1228 		ka.usedp = &used;
1229 		ka.compressedp = &compressed;
1230 		ka.uncompressedp = &uncompressed;
1231 		ka.zio = zio;
1232 		ka.tx = tx;
1233 		(void) traverse_dsl_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1234 		    ADVANCE_POST, kill_blkptr, &ka);
1235 		(void) zio_wait(zio);
1236 
1237 		/* only deduct space beyond any refreservation */
1238 		delta = parent_delta(ds, -used);
1239 		dsl_dir_diduse_space(ds->ds_dir,
1240 		    delta, -compressed, -uncompressed, tx);
1241 	}
1242 
1243 	if (ds->ds_prev && ds->ds_prev != ds->ds_dir->dd_pool->dp_origin_snap) {
1244 		/* Change our contents to that of the prev snapshot */
1245 		ASSERT3U(ds->ds_prev->ds_object, ==,
1246 		    ds->ds_phys->ds_prev_snap_obj);
1247 		ds->ds_phys->ds_bp = ds->ds_prev->ds_phys->ds_bp;
1248 		ds->ds_phys->ds_used_bytes =
1249 		    ds->ds_prev->ds_phys->ds_used_bytes;
1250 		ds->ds_phys->ds_compressed_bytes =
1251 		    ds->ds_prev->ds_phys->ds_compressed_bytes;
1252 		ds->ds_phys->ds_uncompressed_bytes =
1253 		    ds->ds_prev->ds_phys->ds_uncompressed_bytes;
1254 		ds->ds_phys->ds_flags = ds->ds_prev->ds_phys->ds_flags;
1255 		ds->ds_phys->ds_unique_bytes = 0;
1256 
1257 		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
1258 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1259 			ds->ds_prev->ds_phys->ds_unique_bytes = 0;
1260 		}
1261 	} else {
1262 		objset_impl_t *osi;
1263 
1264 		/* Zero out our contents, recreate objset */
1265 		bzero(&ds->ds_phys->ds_bp, sizeof (blkptr_t));
1266 		ds->ds_phys->ds_used_bytes = 0;
1267 		ds->ds_phys->ds_compressed_bytes = 0;
1268 		ds->ds_phys->ds_uncompressed_bytes = 0;
1269 		ds->ds_phys->ds_flags = 0;
1270 		ds->ds_phys->ds_unique_bytes = 0;
1271 		osi = dmu_objset_create_impl(ds->ds_dir->dd_pool->dp_spa, ds,
1272 		    &ds->ds_phys->ds_bp, *ost, tx);
1273 #ifdef _KERNEL
1274 		zfs_create_fs(&osi->os, kcred, NULL, tx);
1275 #endif
1276 	}
1277 
1278 	spa_history_internal_log(LOG_DS_ROLLBACK, ds->ds_dir->dd_pool->dp_spa,
1279 	    tx, cr, "dataset = %llu", ds->ds_object);
1280 }
1281 
1282 /* ARGSUSED */
1283 static int
1284 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1285 {
1286 	dsl_dataset_t *ds = arg1;
1287 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1288 	uint64_t count;
1289 	int err;
1290 
1291 	/*
1292 	 * Can't delete a head dataset if there are snapshots of it.
1293 	 * (Except if the only snapshots are from the branch we cloned
1294 	 * from.)
1295 	 */
1296 	if (ds->ds_prev != NULL &&
1297 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1298 		return (EINVAL);
1299 
1300 	/*
1301 	 * This is really a dsl_dir thing, but check it here so that
1302 	 * we'll be less likely to leave this dataset inconsistent &
1303 	 * nearly destroyed.
1304 	 */
1305 	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1306 	if (err)
1307 		return (err);
1308 	if (count != 0)
1309 		return (EEXIST);
1310 
1311 	return (0);
1312 }
1313 
1314 /* ARGSUSED */
1315 static void
1316 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1317 {
1318 	dsl_dataset_t *ds = arg1;
1319 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1320 
1321 	/* Mark it as inconsistent on-disk, in case we crash */
1322 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1323 	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1324 
1325 	spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1326 	    cr, "dataset = %llu", ds->ds_object);
1327 }
1328 
1329 /* ARGSUSED */
1330 int
1331 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1332 {
1333 	dsl_dataset_t *ds = arg1;
1334 
1335 	/* we have an owner hold, so no one else can destroy us */
1336 	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1337 
1338 	/* Can't delete a branch point. */
1339 	if (ds->ds_phys->ds_num_children > 1)
1340 		return (EEXIST);
1341 
1342 	/*
1343 	 * Can't delete a head dataset if there are snapshots of it.
1344 	 * (Except if the only snapshots are from the branch we cloned
1345 	 * from.)
1346 	 */
1347 	if (ds->ds_prev != NULL &&
1348 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1349 		return (EINVAL);
1350 
1351 	/*
1352 	 * If we made changes this txg, traverse_dsl_dataset won't find
1353 	 * them.  Try again.
1354 	 */
1355 	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1356 		return (EAGAIN);
1357 
1358 	/* XXX we should do some i/o error checking... */
1359 	return (0);
1360 }
1361 
1362 struct refsarg {
1363 	kmutex_t lock;
1364 	boolean_t gone;
1365 	kcondvar_t cv;
1366 };
1367 
1368 /* ARGSUSED */
1369 static void
1370 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1371 {
1372 	struct refsarg *arg = argv;
1373 
1374 	mutex_enter(&arg->lock);
1375 	arg->gone = TRUE;
1376 	cv_signal(&arg->cv);
1377 	mutex_exit(&arg->lock);
1378 }
1379 
1380 static void
1381 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1382 {
1383 	struct refsarg arg;
1384 
1385 	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1386 	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1387 	arg.gone = FALSE;
1388 	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1389 	    dsl_dataset_refs_gone);
1390 	dmu_buf_rele(ds->ds_dbuf, tag);
1391 	mutex_enter(&arg.lock);
1392 	while (!arg.gone)
1393 		cv_wait(&arg.cv, &arg.lock);
1394 	ASSERT(arg.gone);
1395 	mutex_exit(&arg.lock);
1396 	ds->ds_dbuf = NULL;
1397 	ds->ds_phys = NULL;
1398 	mutex_destroy(&arg.lock);
1399 	cv_destroy(&arg.cv);
1400 }
1401 
1402 void
1403 dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
1404 {
1405 	dsl_dataset_t *ds = arg1;
1406 	int64_t used = 0, compressed = 0, uncompressed = 0;
1407 	zio_t *zio;
1408 	int err;
1409 	int after_branch_point = FALSE;
1410 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1411 	objset_t *mos = dp->dp_meta_objset;
1412 	dsl_dataset_t *ds_prev = NULL;
1413 	uint64_t obj;
1414 
1415 	ASSERT(ds->ds_owner);
1416 	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
1417 	ASSERT(ds->ds_prev == NULL ||
1418 	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1419 	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1420 
1421 	/* signal any waiters that this dataset is going away */
1422 	mutex_enter(&ds->ds_lock);
1423 	ds->ds_owner = dsl_reaper;
1424 	cv_broadcast(&ds->ds_exclusive_cv);
1425 	mutex_exit(&ds->ds_lock);
1426 
1427 	/* Remove our reservation */
1428 	if (ds->ds_reserved != 0) {
1429 		uint64_t val = 0;
1430 		dsl_dataset_set_reservation_sync(ds, &val, cr, tx);
1431 		ASSERT3U(ds->ds_reserved, ==, 0);
1432 	}
1433 
1434 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1435 
1436 	dsl_pool_ds_destroyed(ds, tx);
1437 
1438 	obj = ds->ds_object;
1439 
1440 	if (ds->ds_phys->ds_prev_snap_obj != 0) {
1441 		if (ds->ds_prev) {
1442 			ds_prev = ds->ds_prev;
1443 		} else {
1444 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1445 			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1446 		}
1447 		after_branch_point =
1448 		    (ds_prev->ds_phys->ds_next_snap_obj != obj);
1449 
1450 		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1451 		if (after_branch_point &&
1452 		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
1453 			VERIFY(0 == zap_remove_int(mos,
1454 			    ds_prev->ds_phys->ds_next_clones_obj, obj, tx));
1455 			if (ds->ds_phys->ds_next_snap_obj != 0) {
1456 				VERIFY(0 == zap_add_int(mos,
1457 				    ds_prev->ds_phys->ds_next_clones_obj,
1458 				    ds->ds_phys->ds_next_snap_obj, tx));
1459 			}
1460 		}
1461 		if (after_branch_point &&
1462 		    ds->ds_phys->ds_next_snap_obj == 0) {
1463 			/* This clone is toast. */
1464 			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1465 			ds_prev->ds_phys->ds_num_children--;
1466 		} else if (!after_branch_point) {
1467 			ds_prev->ds_phys->ds_next_snap_obj =
1468 			    ds->ds_phys->ds_next_snap_obj;
1469 		}
1470 	}
1471 
1472 	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1473 
1474 	if (ds->ds_phys->ds_next_snap_obj != 0) {
1475 		blkptr_t bp;
1476 		dsl_dataset_t *ds_next;
1477 		uint64_t itor = 0;
1478 		uint64_t old_unique;
1479 
1480 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1481 		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1482 		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1483 
1484 		old_unique = dsl_dataset_unique(ds_next);
1485 
1486 		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1487 		ds_next->ds_phys->ds_prev_snap_obj =
1488 		    ds->ds_phys->ds_prev_snap_obj;
1489 		ds_next->ds_phys->ds_prev_snap_txg =
1490 		    ds->ds_phys->ds_prev_snap_txg;
1491 		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1492 		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1493 
1494 		/*
1495 		 * Transfer to our deadlist (which will become next's
1496 		 * new deadlist) any entries from next's current
1497 		 * deadlist which were born before prev, and free the
1498 		 * other entries.
1499 		 *
1500 		 * XXX we're doing this long task with the config lock held
1501 		 */
1502 		while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
1503 			if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
1504 				VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
1505 				    &bp, tx));
1506 				if (ds_prev && !after_branch_point &&
1507 				    bp.blk_birth >
1508 				    ds_prev->ds_phys->ds_prev_snap_txg) {
1509 					ds_prev->ds_phys->ds_unique_bytes +=
1510 					    bp_get_dasize(dp->dp_spa, &bp);
1511 				}
1512 			} else {
1513 				used += bp_get_dasize(dp->dp_spa, &bp);
1514 				compressed += BP_GET_PSIZE(&bp);
1515 				uncompressed += BP_GET_UCSIZE(&bp);
1516 				/* XXX check return value? */
1517 				(void) dsl_free(zio, dp, tx->tx_txg,
1518 				    &bp, NULL, NULL, ARC_NOWAIT);
1519 			}
1520 		}
1521 
1522 		/* free next's deadlist */
1523 		bplist_close(&ds_next->ds_deadlist);
1524 		bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);
1525 
1526 		/* set next's deadlist to our deadlist */
1527 		bplist_close(&ds->ds_deadlist);
1528 		ds_next->ds_phys->ds_deadlist_obj =
1529 		    ds->ds_phys->ds_deadlist_obj;
1530 		VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
1531 		    ds_next->ds_phys->ds_deadlist_obj));
1532 		ds->ds_phys->ds_deadlist_obj = 0;
1533 
1534 		if (ds_next->ds_phys->ds_next_snap_obj != 0) {
1535 			/*
1536 			 * Update next's unique to include blocks which
1537 			 * were previously shared by only this snapshot
1538 			 * and it.  Those blocks will be born after the
1539 			 * prev snap and before this snap, and will have
1540 			 * died after the next snap and before the one
1541 	 * after that (i.e., be on the snap after next's
1542 			 * deadlist).
1543 			 *
1544 			 * XXX we're doing this long task with the
1545 			 * config lock held
1546 			 */
1547 			dsl_dataset_t *ds_after_next;
1548 
1549 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1550 			    ds_next->ds_phys->ds_next_snap_obj,
1551 			    FTAG, &ds_after_next));
1552 			itor = 0;
1553 			while (bplist_iterate(&ds_after_next->ds_deadlist,
1554 			    &itor, &bp) == 0) {
1555 				if (bp.blk_birth >
1556 				    ds->ds_phys->ds_prev_snap_txg &&
1557 				    bp.blk_birth <=
1558 				    ds->ds_phys->ds_creation_txg) {
1559 					ds_next->ds_phys->ds_unique_bytes +=
1560 					    bp_get_dasize(dp->dp_spa, &bp);
1561 				}
1562 			}
1563 
1564 			dsl_dataset_rele(ds_after_next, FTAG);
1565 			ASSERT3P(ds_next->ds_prev, ==, NULL);
1566 		} else {
1567 			ASSERT3P(ds_next->ds_prev, ==, ds);
1568 			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1569 			ds_next->ds_prev = NULL;
1570 			if (ds_prev) {
1571 				VERIFY(0 == dsl_dataset_get_ref(dp,
1572 				    ds->ds_phys->ds_prev_snap_obj,
1573 				    ds_next, &ds_next->ds_prev));
1574 			}
1575 
1576 			dsl_dataset_recalc_head_uniq(ds_next);
1577 
1578 			/*
1579 	 * Reduce the amount of our unconsumed refreservation
1580 			 * being charged to our parent by the amount of
1581 			 * new unique data we have gained.
1582 			 */
1583 			if (old_unique < ds_next->ds_reserved) {
1584 				int64_t mrsdelta;
1585 				uint64_t new_unique =
1586 				    ds_next->ds_phys->ds_unique_bytes;
1587 
1588 				ASSERT(old_unique <= new_unique);
1589 				mrsdelta = MIN(new_unique - old_unique,
1590 				    ds_next->ds_reserved - old_unique);
1591 				dsl_dir_diduse_space(ds->ds_dir, -mrsdelta,
1592 				    0, 0, tx);
1593 			}
1594 		}
1595 		dsl_dataset_rele(ds_next, FTAG);
1596 
1597 		/*
1598 		 * NB: unique_bytes might not be accurate for the head objset.
1599 		 * Before SPA_VERSION 9, we didn't update its value when we
1600 		 * deleted the most recent snapshot.
1601 		 */
1602 		ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
1603 	} else {
1604 		/*
1605 		 * There's no next snapshot, so this is a head dataset.
1606 		 * Destroy the deadlist.  Unless it's a clone, the
1607 		 * deadlist should be empty.  (If it's a clone, it's
1608 		 * safe to ignore the deadlist contents.)
1609 		 */
1610 		struct killarg ka;
1611 
1612 		ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
1613 		bplist_close(&ds->ds_deadlist);
1614 		bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1615 		ds->ds_phys->ds_deadlist_obj = 0;
1616 
1617 		/*
1618 		 * Free everything that we point to (that's born after
1619 		 * the previous snapshot, if we are a clone)
1620 		 *
1621 		 * XXX we're doing this long task with the config lock held
1622 		 */
1623 		ka.usedp = &used;
1624 		ka.compressedp = &compressed;
1625 		ka.uncompressedp = &uncompressed;
1626 		ka.zio = zio;
1627 		ka.tx = tx;
1628 		err = traverse_dsl_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1629 		    ADVANCE_POST, kill_blkptr, &ka);
1630 		ASSERT3U(err, ==, 0);
1631 		ASSERT(spa_version(dp->dp_spa) <
1632 		    SPA_VERSION_UNIQUE_ACCURATE ||
1633 		    used == ds->ds_phys->ds_unique_bytes);
1634 	}
1635 
1636 	err = zio_wait(zio);
1637 	ASSERT3U(err, ==, 0);
1638 
1639 	dsl_dir_diduse_space(ds->ds_dir, -used, -compressed, -uncompressed, tx);
1640 
1641 	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1642 		/* Erase the link in the dir */
1643 		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1644 		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1645 		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1646 		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1647 		ASSERT(err == 0);
1648 	} else {
1649 		/* remove from snapshot namespace */
1650 		dsl_dataset_t *ds_head;
1651 		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1652 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1653 		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1654 		VERIFY(0 == dsl_dataset_get_snapname(ds));
1655 #ifdef ZFS_DEBUG
1656 		{
1657 			uint64_t val;
1658 
1659 			err = dsl_dataset_snap_lookup(ds_head,
1660 			    ds->ds_snapname, &val);
1661 			ASSERT3U(err, ==, 0);
1662 			ASSERT3U(val, ==, obj);
1663 		}
1664 #endif
1665 		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1666 		ASSERT(err == 0);
1667 		dsl_dataset_rele(ds_head, FTAG);
1668 	}
1669 
1670 	if (ds_prev && ds->ds_prev != ds_prev)
1671 		dsl_dataset_rele(ds_prev, FTAG);
1672 
1673 	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1674 	spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx,
1675 	    cr, "dataset = %llu", ds->ds_object);
1676 
1677 	if (ds->ds_phys->ds_next_clones_obj != 0) {
1678 		uint64_t count;
1679 		ASSERT(0 == zap_count(mos,
1680 		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1681 		VERIFY(0 == dmu_object_free(mos,
1682 		    ds->ds_phys->ds_next_clones_obj, tx));
1683 	}
1684 	if (ds->ds_phys->ds_props_obj != 0) {
1685 		VERIFY(0 == zap_destroy(mos,
1686 		    ds->ds_phys->ds_props_obj, tx));
1687 	}
1688 	dsl_dir_close(ds->ds_dir, ds);
1689 	ds->ds_dir = NULL;
1690 	dsl_dataset_drain_refs(ds, tag);
1691 	VERIFY(0 == dmu_object_free(mos, obj, tx));
1692 }
1693 
1694 static int
1695 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1696 {
1697 	uint64_t asize;
1698 
1699 	if (!dmu_tx_is_syncing(tx))
1700 		return (0);
1701 
1702 	/*
1703 	 * If there's an fs-only reservation, any blocks that might become
1704 	 * owned by the snapshot dataset must be accommodated by space
1705 	 * outside of the reservation.
1706 	 */
1707 	asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
1708 	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
1709 		return (ENOSPC);
1710 
1711 	/*
1712 	 * Propagate any reserved space for this snapshot to other
1713 	 * snapshot checks in this sync group.
1714 	 */
1715 	if (asize > 0)
1716 		dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1717 
1718 	return (0);
1719 }
1720 
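/*
 * Example (illustrative): with refreservation = 10M and 4M of unique
 * data, taking a snapshot moves up to 4M of blocks out from under
 * the reservation, so 4M must be available outside the reservation
 * for the snapshot to succeed, and that amount is announced to the
 * other checks in this sync group via dsl_dir_willuse_space().
 */
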
1721 /* ARGSUSED */
1722 int
1723 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1724 {
1725 	dsl_dataset_t *ds = arg1;
1726 	const char *snapname = arg2;
1727 	int err;
1728 	uint64_t value;
1729 
1730 	/*
1731 	 * We don't allow multiple snapshots of the same txg.  If there
1732 	 * is already one, try again.
1733 	 */
1734 	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1735 		return (EAGAIN);
1736 
1737 	/*
1738 	 * Check for a conflicting snapshot name.
1739 	 */
1740 	err = dsl_dataset_snap_lookup(ds, snapname, &value);
1741 	if (err == 0)
1742 		return (EEXIST);
1743 	if (err != ENOENT)
1744 		return (err);
1745 
1746 	/*
1747 	 * Check that the snapshot's full name is not too long: the dataset
1748 	 * name's length + 1 for the @-sign + the snapshot name's length.
1749 	 */
1750 	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1751 		return (ENAMETOOLONG);
1752 
1753 	err = dsl_dataset_snapshot_reserve_space(ds, tx);
1754 	if (err)
1755 		return (err);
1756 
1757 	ds->ds_trysnap_txg = tx->tx_txg;
1758 	return (0);
1759 }
1760 
1761 void
1762 dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1763 {
1764 	dsl_dataset_t *ds = arg1;
1765 	const char *snapname = arg2;
1766 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1767 	dmu_buf_t *dbuf;
1768 	dsl_dataset_phys_t *dsphys;
1769 	uint64_t dsobj, crtxg;
1770 	objset_t *mos = dp->dp_meta_objset;
1771 	int err;
1772 
1773 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1774 
1775 	/*
1776 	 * The origin's ds_creation_txg has to be < TXG_INITIAL
1777 	 */
1778 	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
1779 		crtxg = 1;
1780 	else
1781 		crtxg = tx->tx_txg;
1782 
1783 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
1784 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
1785 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
1786 	dmu_buf_will_dirty(dbuf, tx);
1787 	dsphys = dbuf->db_data;
1788 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
1789 	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
1790 	dsphys->ds_fsid_guid = unique_create();
1791 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
1792 	    sizeof (dsphys->ds_guid));
1793 	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
1794 	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
1795 	dsphys->ds_next_snap_obj = ds->ds_object;
1796 	dsphys->ds_num_children = 1;
1797 	dsphys->ds_creation_time = gethrestime_sec();
1798 	dsphys->ds_creation_txg = crtxg;
1799 	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
1800 	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
1801 	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
1802 	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
1803 	dsphys->ds_flags = ds->ds_phys->ds_flags;
1804 	dsphys->ds_bp = ds->ds_phys->ds_bp;
1805 	dmu_buf_rele(dbuf, FTAG);
1806 
1807 	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
1808 	if (ds->ds_prev) {
1809 		uint64_t next_clones_obj =
1810 		    ds->ds_prev->ds_phys->ds_next_clones_obj;
1811 		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
1812 		    ds->ds_object ||
1813 		    ds->ds_prev->ds_phys->ds_num_children > 1);
1814 		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
1815 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1816 			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1817 			    ds->ds_prev->ds_phys->ds_creation_txg);
1818 			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
1819 		} else if (next_clones_obj != 0) {
1820 			VERIFY3U(0, ==, zap_remove_int(mos,
1821 			    next_clones_obj, dsphys->ds_next_snap_obj, tx));
1822 			VERIFY3U(0, ==, zap_add_int(mos,
1823 			    next_clones_obj, dsobj, tx));
1824 		}
1825 	}
1826 
1827 	/*
1828 	 * If we have a reference-reservation on this dataset, we will
1829 	 * need to increase the amount of refreservation being charged
1830 	 * since our unique space is going to zero.
1831 	 */
1832 	if (ds->ds_reserved) {
1833 		int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
1834 		dsl_dir_diduse_space(ds->ds_dir, add, 0, 0, tx);
1835 	}
1836 
1837 	bplist_close(&ds->ds_deadlist);
1838 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1839 	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
1840 	ds->ds_phys->ds_prev_snap_obj = dsobj;
1841 	ds->ds_phys->ds_prev_snap_txg = crtxg;
1842 	ds->ds_phys->ds_unique_bytes = 0;
1843 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
1844 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1845 	ds->ds_phys->ds_deadlist_obj =
1846 	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
1847 	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
1848 	    ds->ds_phys->ds_deadlist_obj));
1849 
1850 	dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
1851 	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
1852 	    snapname, 8, 1, &dsobj, tx);
1853 	ASSERT(err == 0);
1854 
1855 	if (ds->ds_prev)
1856 		dsl_dataset_drop_ref(ds->ds_prev, ds);
1857 	VERIFY(0 == dsl_dataset_get_ref(dp,
1858 	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
1859 
1860 	dsl_pool_ds_snapshotted(ds, tx);
1861 
1862 	spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr,
1863 	    "dataset = %llu", dsobj);
1864 }
1865 
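/*
 * A minimal caller sketch for the check/sync pair above; the function
 * name and the blocks-modified hint of 16 are hypothetical.  The
 * sync-task framework re-runs the task in a later txg when the check
 * returns EAGAIN (i.e. a snapshot was already taken in this txg).
 *
 *	int
 *	example_snapshot(dsl_dataset_t *ds, char *snapname)
 *	{
 *		return (dsl_sync_task_do(ds->ds_dir->dd_pool,
 *		    dsl_dataset_snapshot_check,
 *		    dsl_dataset_snapshot_sync, ds, snapname, 16));
 *	}
 */
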
1866 void
1867 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
1868 {
1869 	ASSERT(dmu_tx_is_syncing(tx));
1870 	ASSERT(ds->ds_user_ptr != NULL);
1871 	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
1872 
1873 	/*
1874 	 * In case we had to change ds_fsid_guid when we opened it,
1875 	 * sync it out now.
1876 	 */
1877 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1878 	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
1879 
1880 	dsl_dir_dirty(ds->ds_dir, tx);
1881 	dmu_objset_sync(ds->ds_user_ptr, zio, tx);
1882 }
1883 
1884 void
1885 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
1886 {
1887 	uint64_t refd, avail, uobjs, aobjs;
1888 
1889 	dsl_dir_stats(ds->ds_dir, nv);
1890 
1891 	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
1892 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
1893 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
1894 
1895 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
1896 	    ds->ds_phys->ds_creation_time);
1897 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
1898 	    ds->ds_phys->ds_creation_txg);
1899 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
1900 	    ds->ds_quota);
1901 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
1902 	    ds->ds_reserved);
1903 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
1904 	    ds->ds_phys->ds_guid);
1905 
1906 	if (ds->ds_phys->ds_next_snap_obj) {
1907 		/*
1908 		 * This is a snapshot; override the dd's space used with
1909 		 * our unique space and compression ratio.
1910 		 */
1911 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
1912 		    ds->ds_phys->ds_unique_bytes);
1913 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
1914 		    ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
1915 		    (ds->ds_phys->ds_uncompressed_bytes * 100 /
1916 		    ds->ds_phys->ds_compressed_bytes));
1917 	}
1918 }
1919 
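/*
 * The compressratio computed above is scaled by 100: e.g. a snapshot
 * referencing 300MB uncompressed via 100MB compressed reports 300,
 * which userland renders as "3.00x"; with no compressed bytes the
 * neutral ratio 100 (1.00x) is reported.  (Example sizes are
 * hypothetical.)
 */
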
1920 void
1921 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
1922 {
1923 	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
1924 	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
1925 	stat->dds_guid = ds->ds_phys->ds_guid;
1926 	if (ds->ds_phys->ds_next_snap_obj) {
1927 		stat->dds_is_snapshot = B_TRUE;
1928 		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
1929 	}
1930 
1931 	/* clone origin is really a dsl_dir thing... */
1932 	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
1933 	if (dsl_dir_is_clone(ds->ds_dir)) {
1934 		dsl_dataset_t *ods;
1935 
1936 		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
1937 		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
1938 		dsl_dataset_name(ods, stat->dds_origin);
1939 		dsl_dataset_drop_ref(ods, FTAG);
1940 	}
1941 	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
1942 }
1943 
1944 uint64_t
1945 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
1946 {
1947 	return (ds->ds_fsid_guid);
1948 }
1949 
1950 void
1951 dsl_dataset_space(dsl_dataset_t *ds,
1952     uint64_t *refdbytesp, uint64_t *availbytesp,
1953     uint64_t *usedobjsp, uint64_t *availobjsp)
1954 {
1955 	*refdbytesp = ds->ds_phys->ds_used_bytes;
1956 	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
1957 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
1958 		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
1959 	if (ds->ds_quota != 0) {
1960 		/*
1961 		 * Adjust available bytes according to refquota
1962 		 */
1963 		if (*refdbytesp < ds->ds_quota)
1964 			*availbytesp = MIN(*availbytesp,
1965 			    ds->ds_quota - *refdbytesp);
1966 		else
1967 			*availbytesp = 0;
1968 	}
1969 	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
1970 	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
1971 }
1972 
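/*
 * A worked example of the refquota clamp in dsl_dataset_space(), with
 * hypothetical sizes: given 8G available from the dsl_dir, refquota=10G
 * and 7G already referenced, *availbytesp is clamped to
 * MIN(8G, 10G - 7G) = 3G; once the referenced bytes reach the refquota,
 * *availbytesp drops to 0.
 */
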
1973 boolean_t
1974 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
1975 {
1976 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1977 
1978 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
1979 	    dsl_pool_sync_context(dp));
1980 	if (ds->ds_prev == NULL)
1981 		return (B_FALSE);
1982 	if (ds->ds_phys->ds_bp.blk_birth >
1983 	    ds->ds_prev->ds_phys->ds_creation_txg)
1984 		return (B_TRUE);
1985 	return (B_FALSE);
1986 }
1987 
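/*
 * dsl_dataset_modified_since_lastsnap() compares the head's block-birth
 * txg against its newest snapshot; dsl_dataset_clone_swap_check()
 * (below) uses it to refuse the swap with ETXTBSY unless the caller
 * passed 'force'.
 */
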
1988 /* ARGSUSED */
1989 static int
1990 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
1991 {
1992 	dsl_dataset_t *ds = arg1;
1993 	char *newsnapname = arg2;
1994 	dsl_dir_t *dd = ds->ds_dir;
1995 	dsl_dataset_t *hds;
1996 	uint64_t val;
1997 	int err;
1998 
1999 	err = dsl_dataset_hold_obj(dd->dd_pool,
2000 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2001 	if (err)
2002 		return (err);
2003 
2004 	/* new name better not be in use */
2005 	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2006 	dsl_dataset_rele(hds, FTAG);
2007 
2008 	if (err == 0)
2009 		err = EEXIST;
2010 	else if (err == ENOENT)
2011 		err = 0;
2012 
2013 	/* filesystem name + 1 for the "@" + the new snapshot name must fit */
2014 	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2015 		err = ENAMETOOLONG;
2016 
2017 	return (err);
2018 }
2019 
2020 static void
2021 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2,
2022     cred_t *cr, dmu_tx_t *tx)
2023 {
2024 	dsl_dataset_t *ds = arg1;
2025 	const char *newsnapname = arg2;
2026 	dsl_dir_t *dd = ds->ds_dir;
2027 	objset_t *mos = dd->dd_pool->dp_meta_objset;
2028 	dsl_dataset_t *hds;
2029 	int err;
2030 
2031 	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2032 
2033 	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2034 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2035 
2036 	VERIFY(0 == dsl_dataset_get_snapname(ds));
2037 	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2038 	ASSERT3U(err, ==, 0);
2039 	mutex_enter(&ds->ds_lock);
2040 	(void) strcpy(ds->ds_snapname, newsnapname);
2041 	mutex_exit(&ds->ds_lock);
2042 	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2043 	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2044 	ASSERT3U(err, ==, 0);
2045 
2046 	spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2047 	    cr, "dataset = %llu", ds->ds_object);
2048 	dsl_dataset_rele(hds, FTAG);
2049 }
2050 
2051 struct renamesnaparg {
2052 	dsl_sync_task_group_t *dstg;
2053 	char failed[MAXPATHLEN];
2054 	char *oldsnap;
2055 	char *newsnap;
2056 };
2057 
2058 static int
2059 dsl_snapshot_rename_one(char *name, void *arg)
2060 {
2061 	struct renamesnaparg *ra = arg;
2062 	dsl_dataset_t *ds = NULL;
2063 	char *cp;
2064 	int err;
2065 
2066 	cp = name + strlen(name);
2067 	*cp = '@';
2068 	(void) strcpy(cp + 1, ra->oldsnap);
2069 
2070 	/*
2071 	 * For recursive snapshot renames the parent won't be changing
2072 	 * so we just pass name for both the to and from arguments.
2073 	 */
2074 	err = zfs_secpolicy_rename_perms(name, name, CRED());
2075 	if (err == ENOENT) {
2076 		return (0);
2077 	} else if (err) {
2078 		(void) strcpy(ra->failed, name);
2079 		return (err);
2080 	}
2081 
2082 #ifdef _KERNEL
2083 	/*
2084 	 * Each snapshot being renamed must first be unmounted.
2085 	 */
2086 	(void) zfs_unmount_snap(name, NULL);
2087 #endif
2088 	err = dsl_dataset_hold(name, ra->dstg, &ds);
2089 	*cp = '\0';
2090 	if (err == ENOENT) {
2091 		return (0);
2092 	} else if (err) {
2093 		(void) strcpy(ra->failed, name);
2094 		return (err);
2095 	}
2096 
2097 	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2098 	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2099 
2100 	return (0);
2101 }
2102 
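/*
 * dsl_snapshot_rename_one() is invoked by dmu_objset_find() with a bare
 * filesystem name and appends "@<oldsnap>" in place, so a hypothetical
 * callback on "tank/home" with oldsnap "monday" operates on
 * "tank/home@monday"; the string is truncated back to the filesystem
 * name once the hold is taken.
 */
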
2103 static int
2104 dsl_recursive_rename(char *oldname, const char *newname)
2105 {
2106 	int err;
2107 	struct renamesnaparg *ra;
2108 	dsl_sync_task_t *dst;
2109 	spa_t *spa;
2110 	char *cp, *fsname = spa_strdup(oldname);
2111 	int len = strlen(oldname);
2112 
2113 	/* truncate the snapshot name to get the fsname */
2114 	cp = strchr(fsname, '@');
2115 	*cp = '\0';
2116 
2117 	err = spa_open(fsname, &spa, FTAG);
2118 	if (err) {
2119 		kmem_free(fsname, len + 1);
2120 		return (err);
2121 	}
2122 	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2123 	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2124 
2125 	ra->oldsnap = strchr(oldname, '@') + 1;
2126 	ra->newsnap = strchr(newname, '@') + 1;
2127 	*ra->failed = '\0';
2128 
2129 	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2130 	    DS_FIND_CHILDREN);
2131 	kmem_free(fsname, len + 1);
2132 
2133 	if (err == 0) {
2134 		err = dsl_sync_task_group_wait(ra->dstg);
2135 	}
2136 
2137 	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2138 	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2139 		dsl_dataset_t *ds = dst->dst_arg1;
2140 		if (dst->dst_err) {
2141 			dsl_dir_name(ds->ds_dir, ra->failed);
2142 			(void) strcat(ra->failed, "@");
2143 			(void) strcat(ra->failed, ra->newsnap);
2144 		}
2145 		dsl_dataset_rele(ds, ra->dstg);
2146 	}
2147 
2148 	if (err)
2149 		(void) strcpy(oldname, ra->failed);
2150 
2151 	dsl_sync_task_group_destroy(ra->dstg);
2152 	kmem_free(ra, sizeof (struct renamesnaparg));
2153 	spa_close(spa, FTAG);
2154 	return (err);
2155 }
2156 
2157 static int
2158 dsl_valid_rename(char *oldname, void *arg)
2159 {
2160 	int delta = *(int *)arg;
2161 
2162 	if (strlen(oldname) + delta >= MAXNAMELEN)
2163 		return (ENAMETOOLONG);
2164 
2165 	return (0);
2166 }
2167 
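/*
 * Example of the delta check, with hypothetical names: renaming
 * "tank/a" to "tank/abcd" gives delta = 3, so every child and snapshot
 * name visited by dmu_objset_find() must satisfy
 * strlen(name) + 3 < MAXNAMELEN for the rename to be allowed.
 */
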
2168 #pragma weak dmu_objset_rename = dsl_dataset_rename
2169 int
2170 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2171 {
2172 	dsl_dir_t *dd;
2173 	dsl_dataset_t *ds;
2174 	const char *tail;
2175 	int err;
2176 
2177 	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2178 	if (err)
2179 		return (err);
2180 	if (tail == NULL) {
2181 		int delta = strlen(newname) - strlen(oldname);
2182 
2183 		/* if we're growing, validate child name lengths */
2184 		if (delta > 0)
2185 			err = dmu_objset_find(oldname, dsl_valid_rename,
2186 			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2187 
2188 		if (!err)
2189 			err = dsl_dir_rename(dd, newname);
2190 		dsl_dir_close(dd, FTAG);
2191 		return (err);
2192 	}
2193 	if (tail[0] != '@') {
2194 		/* the name ended in a nonexistent component */
2195 		dsl_dir_close(dd, FTAG);
2196 		return (ENOENT);
2197 	}
2198 
2199 	dsl_dir_close(dd, FTAG);
2200 
2201 	/* new name must be snapshot in same filesystem */
2202 	tail = strchr(newname, '@');
2203 	if (tail == NULL)
2204 		return (EINVAL);
2205 	tail++;
2206 	if (strncmp(oldname, newname, tail - newname) != 0)
2207 		return (EXDEV);
2208 
2209 	if (recursive) {
2210 		err = dsl_recursive_rename(oldname, newname);
2211 	} else {
2212 		err = dsl_dataset_hold(oldname, FTAG, &ds);
2213 		if (err)
2214 			return (err);
2215 
2216 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2217 		    dsl_dataset_snapshot_rename_check,
2218 		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2219 
2220 		dsl_dataset_rele(ds, FTAG);
2221 	}
2222 
2223 	return (err);
2224 }
2225 
2226 struct promotenode {
2227 	list_node_t link;
2228 	dsl_dataset_t *ds;
2229 };
2230 
2231 struct promotearg {
2232 	list_t snap_list;
2233 	dsl_dataset_t *clone_origin, *old_head;
2234 	uint64_t used, comp, uncomp, unique;
2235 	uint64_t newnext_obj;
2236 };
2237 
2238 /* ARGSUSED */
2239 static int
2240 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2241 {
2242 	dsl_dataset_t *hds = arg1;
2243 	struct promotearg *pa = arg2;
2244 	struct promotenode *snap = list_head(&pa->snap_list);
2245 	dsl_pool_t *dp = hds->ds_dir->dd_pool;
2246 	dsl_dataset_t *origin_ds = snap->ds;
2247 	dsl_dataset_t *newnext_ds;
2248 	char *name;
2249 	uint64_t itor = 0;
2250 	blkptr_t bp;
2251 	int err;
2252 
2253 	/* Check that it is a real clone */
2254 	if (!dsl_dir_is_clone(hds->ds_dir))
2255 		return (EINVAL);
2256 
2257 	/* This check is expensive, so only do it when syncing */
2258 	if (!dmu_tx_is_syncing(tx))
2259 		return (0);
2260 
2261 	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2262 		return (EXDEV);
2263 
2264 	/* find origin's new next ds */
2265 	newnext_ds = hds;
2266 	while (newnext_ds->ds_phys->ds_prev_snap_obj != origin_ds->ds_object) {
2267 		dsl_dataset_t *prev;
2268 
2269 		err = dsl_dataset_hold_obj(dp,
2270 		    newnext_ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2271 		if (newnext_ds != hds)
2272 			dsl_dataset_rele(newnext_ds, FTAG);
2273 		if (err)
2274 			return (err);
2275 		newnext_ds = prev;
2276 	}
2277 	pa->newnext_obj = newnext_ds->ds_object;
2278 
2279 	/* compute origin's new unique space */
2280 	pa->unique = 0;
2281 	while ((err = bplist_iterate(&newnext_ds->ds_deadlist,
2282 	    &itor, &bp)) == 0) {
2283 		if (bp.blk_birth > origin_ds->ds_phys->ds_prev_snap_txg)
2284 			pa->unique += bp_get_dasize(dp->dp_spa, &bp);
2285 	}
2286 	if (newnext_ds != hds)
2287 		dsl_dataset_rele(newnext_ds, FTAG);
2288 	if (err != ENOENT)
2289 		return (err);
2290 
2291 	name = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2292 
2293 	/*
2294 	 * Walk the snapshots that we are moving
2295 	 *
2296 	 * Compute space to transfer.  Each snapshot gave birth to:
2297 	 * (my used) - (prev's used) + (deadlist's used)
2298 	 * So a sequence would look like:
2299 	 * uN - u(N-1) + dN + ... + u1 - u0 + d1 + u0 - 0 + d0
2300 	 * Which simplifies to:
2301 	 * uN + dN + ... + d1 + d0
2302 	 * Note, however, that if we stop before we reach the ORIGIN we get:
2303 	 * uN + dN + ... + dM - u(M-1)
2304 	 */
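	/*
	 * For example (hypothetical sizes): moving s0..s2 with u2 = 10M
	 * and deadlists d2 + d1 + d0 = 2M + 3M + 1M transfers
	 * 10M + 6M = 16M of used space to the promoted dataset.
	 */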
2305 	pa->used = origin_ds->ds_phys->ds_used_bytes;
2306 	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2307 	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2308 	do {
2309 		uint64_t val, dlused, dlcomp, dluncomp;
2310 		dsl_dataset_t *ds = snap->ds;
2311 
2312 		/* Check that the snapshot name does not conflict */
2313 		dsl_dataset_name(ds, name);
2314 		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2315 		if (err == 0)
2316 			err = EEXIST;
2317 		if (err != ENOENT)
2318 			break;
2319 		err = 0;
2320 
2321 		/* The very first snapshot does not have a deadlist */
2322 		if (ds->ds_phys->ds_prev_snap_obj != 0) {
2323 			if ((err = bplist_space(&ds->ds_deadlist,
2324 			    &dlused, &dlcomp, &dluncomp)) != 0)
2325 				break;
2326 			pa->used += dlused;
2327 			pa->comp += dlcomp;
2328 			pa->uncomp += dluncomp;
2329 		}
2330 	} while ((snap = list_next(&pa->snap_list, snap)) != NULL);
2331 
2332 	/*
2333 	 * If we are a clone of a clone then we never reached ORIGIN,
2334 	 * so we need to subtract out the clone origin's used space.
2335 	 */
2336 	if (pa->clone_origin) {
2337 		pa->used -= pa->clone_origin->ds_phys->ds_used_bytes;
2338 		pa->comp -= pa->clone_origin->ds_phys->ds_compressed_bytes;
2339 		pa->uncomp -= pa->clone_origin->ds_phys->ds_uncompressed_bytes;
2340 	}
2341 
2342 	kmem_free(name, MAXPATHLEN);
2343 
2344 	/* Check that there is enough space here */
2345 	if (err == 0) {
2346 		dsl_dir_t *odd = origin_ds->ds_dir;
2347 		err = dsl_dir_transfer_possible(odd, hds->ds_dir, pa->used);
2348 	}
2349 
2350 	return (err);
2351 }
2352 
2353 static void
2354 dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2355 {
2356 	dsl_dataset_t *hds = arg1;
2357 	struct promotearg *pa = arg2;
2358 	struct promotenode *snap = list_head(&pa->snap_list);
2359 	dsl_dataset_t *origin_ds = snap->ds;
2360 	dsl_dir_t *dd = hds->ds_dir;
2361 	dsl_pool_t *dp = hds->ds_dir->dd_pool;
2362 	dsl_dir_t *odd = NULL;
2363 	char *name;
2364 	uint64_t oldnext_obj;
2365 
2366 	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2367 
2368 	/*
2369 	 * We need to explicitly open odd, since origin_ds's dd will be
2370 	 * changing.
2371 	 */
2372 	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2373 	    NULL, FTAG, &odd));
2374 
2375 	/* change origin's next snap */
2376 	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2377 	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2378 	origin_ds->ds_phys->ds_next_snap_obj = pa->newnext_obj;
2379 
2380 	/* change the origin's next clone */
2381 	if (origin_ds->ds_phys->ds_next_clones_obj) {
2382 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2383 		    origin_ds->ds_phys->ds_next_clones_obj,
2384 		    pa->newnext_obj, tx));
2385 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2386 		    origin_ds->ds_phys->ds_next_clones_obj,
2387 		    oldnext_obj, tx));
2388 	}
2389 
2390 	/* change origin */
2391 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
2392 	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2393 	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2394 	dmu_buf_will_dirty(odd->dd_dbuf, tx);
2395 	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2396 
2397 	/* move snapshots to this dir */
2398 	name = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2399 	do {
2400 		dsl_dataset_t *ds = snap->ds;
2401 
2402 		/* unregister props as dsl_dir is changing */
2403 		if (ds->ds_user_ptr) {
2404 			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
2405 			ds->ds_user_ptr = NULL;
2406 		}
2407 		/* move snap name entry */
2408 		dsl_dataset_name(ds, name);
2409 		VERIFY(0 == dsl_dataset_snap_remove(pa->old_head,
2410 		    ds->ds_snapname, tx));
2411 		VERIFY(0 == zap_add(dp->dp_meta_objset,
2412 		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2413 		    8, 1, &ds->ds_object, tx));
2414 		/* change containing dsl_dir */
2415 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
2416 		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2417 		ds->ds_phys->ds_dir_obj = dd->dd_object;
2418 		ASSERT3P(ds->ds_dir, ==, odd);
2419 		dsl_dir_close(ds->ds_dir, ds);
2420 		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2421 		    NULL, ds, &ds->ds_dir));
2422 
2423 		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2424 	} while ((snap = list_next(&pa->snap_list, snap)) != NULL);
2425 
2426 	/* change space accounting */
2427 	dsl_dir_diduse_space(odd, -pa->used, -pa->comp, -pa->uncomp, tx);
2428 	dsl_dir_diduse_space(dd, pa->used, pa->comp, pa->uncomp, tx);
2429 	origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2430 
2431 	/* log history record */
2432 	spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2433 	    cr, "dataset = %llu", hds->ds_object);
2434 
2435 	dsl_dir_close(odd, FTAG);
2436 	kmem_free(name, MAXPATHLEN);
2437 }
2438 
2439 int
2440 dsl_dataset_promote(const char *name)
2441 {
2442 	dsl_dataset_t *ds;
2443 	dsl_dir_t *dd;
2444 	dsl_pool_t *dp;
2445 	dmu_object_info_t doi;
2446 	struct promotearg pa;
2447 	struct promotenode *snap;
2448 	uint64_t snap_obj;
2449 	uint64_t last_snap = 0;
2450 	int err;
2451 
2452 	err = dsl_dataset_hold(name, FTAG, &ds);
2453 	if (err)
2454 		return (err);
2455 	dd = ds->ds_dir;
2456 	dp = dd->dd_pool;
2457 
2458 	err = dmu_object_info(dp->dp_meta_objset,
2459 	    ds->ds_phys->ds_snapnames_zapobj, &doi);
2460 	if (err) {
2461 		dsl_dataset_rele(ds, FTAG);
2462 		return (err);
2463 	}
2464 
2465 	/*
2466 	 * We are going to inherit all the snapshots taken before our
2467 	 * origin (i.e., our new origin will be our parent's origin).
2468 	 * Take ownership of them so that we can rename them into our
2469 	 * namespace.
2470 	 */
2471 	pa.clone_origin = NULL;
2472 	list_create(&pa.snap_list,
2473 	    sizeof (struct promotenode), offsetof(struct promotenode, link));
2474 	rw_enter(&dp->dp_config_rwlock, RW_READER);
2475 	ASSERT(dd->dd_phys->dd_origin_obj != 0);
2476 	snap_obj = dd->dd_phys->dd_origin_obj;
2477 	while (snap_obj) {
2478 		dsl_dataset_t *snapds;
2479 
2480 		/*
2481 		 * NB: this would be handled by the check below for a
2482 		 * clone of a clone, but then we'd always own_obj() the
2483 		 * $ORIGIN, thus causing unnecessary EBUSYs.  We don't
2484 		 * need to set pa.clone_origin because the $ORIGIN has
2485 		 * no data to account for.
2486 		 */
2487 		if (dp->dp_origin_snap &&
2488 		    snap_obj == dp->dp_origin_snap->ds_object)
2489 			break;
2490 
2491 		err = dsl_dataset_own_obj(dp, snap_obj, 0, FTAG, &snapds);
2492 		if (err == ENOENT) {
2493 			/* lost race with snapshot destroy */
2494 			struct promotenode *last = list_tail(&pa.snap_list);
2495 			ASSERT(snap_obj != last->ds->ds_phys->ds_prev_snap_obj);
2496 			snap_obj = last->ds->ds_phys->ds_prev_snap_obj;
2497 			continue;
2498 		} else if (err) {
2499 			rw_exit(&dp->dp_config_rwlock);
2500 			goto out;
2501 		}
2502 
2503 		/*
2504 		 * We could be a clone of a clone.  If we reach our
2505 		 * parent's branch point, we're done.
2506 		 */
2507 		if (last_snap &&
2508 		    snapds->ds_phys->ds_next_snap_obj != last_snap) {
2509 			pa.clone_origin = snapds;
2510 			break;
2511 		}
2512 
2513 		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2514 		snap->ds = snapds;
2515 		list_insert_tail(&pa.snap_list, snap);
2516 		last_snap = snap_obj;
2517 		snap_obj = snap->ds->ds_phys->ds_prev_snap_obj;
2518 		dsl_dataset_make_exclusive(snapds, FTAG);
2519 	}
2520 	snap = list_head(&pa.snap_list);
2521 	ASSERT(snap != NULL);
2522 	err = dsl_dataset_hold_obj(dp,
2523 	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &pa.old_head);
2524 	rw_exit(&dp->dp_config_rwlock);
2525 
2526 	if (err)
2527 		goto out;
2528 
2529 	/*
2530 	 * Add in 128x the snapnames zapobj size, since we will be moving
2531 	 * a bunch of snapnames to the promoted ds, and dirtying their
2532 	 * bonus buffers.
2533 	 */
2534 	err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2535 	    dsl_dataset_promote_sync, ds, &pa, 2 + 2 * doi.doi_physical_blks);
2536 
2537 	dsl_dataset_rele(pa.old_head, FTAG);
2538 out:
2539 	while ((snap = list_tail(&pa.snap_list)) != NULL) {
2540 		list_remove(&pa.snap_list, snap);
2541 		dsl_dataset_disown(snap->ds, FTAG);
2542 		kmem_free(snap, sizeof (struct promotenode));
2543 	}
2544 	list_destroy(&pa.snap_list);
2545 	if (pa.clone_origin)
2546 		dsl_dataset_disown(pa.clone_origin, FTAG);
2547 	dsl_dataset_rele(ds, FTAG);
2548 	return (err);
2549 }
2550 
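/*
 * A minimal usage sketch (the dataset name is hypothetical):
 *
 *	if ((err = dsl_dataset_promote("tank/clone")) != 0)
 *		return (err);
 *
 * On success, snapshots older than the branch point move under
 * "tank/clone", and the former origin head becomes a clone of it.
 */
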
2551 struct cloneswaparg {
2552 	dsl_dataset_t *cds; /* clone dataset */
2553 	dsl_dataset_t *ohds; /* origin's head dataset */
2554 	boolean_t force;
2555 	int64_t unused_refres_delta; /* change in unconsumed refreservation */
2556 };
2557 
2558 /* ARGSUSED */
2559 static int
2560 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
2561 {
2562 	struct cloneswaparg *csa = arg1;
2563 
2564 	/* they should both be heads */
2565 	if (dsl_dataset_is_snapshot(csa->cds) ||
2566 	    dsl_dataset_is_snapshot(csa->ohds))
2567 		return (EINVAL);
2568 
2569 	/* the branch point should be just before them */
2570 	if (csa->cds->ds_prev != csa->ohds->ds_prev)
2571 		return (EINVAL);
2572 
2573 	/* cds should be the clone */
2574 	if (csa->cds->ds_prev->ds_phys->ds_next_snap_obj !=
2575 	    csa->ohds->ds_object)
2576 		return (EINVAL);
2577 
2578 	/* the clone should be a child of the origin */
2579 	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
2580 		return (EINVAL);
2581 
2582 	/* ohds shouldn't be modified unless 'force' */
2583 	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
2584 		return (ETXTBSY);
2585 
2586 	/* adjust amount of any unconsumed refreservation */
2587 	csa->unused_refres_delta =
2588 	    (int64_t)MIN(csa->ohds->ds_reserved,
2589 	    csa->ohds->ds_phys->ds_unique_bytes) -
2590 	    (int64_t)MIN(csa->ohds->ds_reserved,
2591 	    csa->cds->ds_phys->ds_unique_bytes);
2592 
2593 	if (csa->unused_refres_delta > 0 &&
2594 	    csa->unused_refres_delta >
2595 	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
2596 		return (ENOSPC);
2597 
2598 	return (0);
2599 }
2600 
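/*
 * A worked example of unused_refres_delta, with hypothetical sizes:
 * given refreservation=10M, 7M unique on the origin head and 2M unique
 * on the clone, the delta is MIN(10M, 7M) - MIN(10M, 2M) = +5M, so the
 * swap is refused with ENOSPC unless at least 5M is available.
 */
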
2601 /* ARGSUSED */
2602 static void
2603 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2604 {
2605 	struct cloneswaparg *csa = arg1;
2606 	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
2607 	uint64_t itor = 0;
2608 	blkptr_t bp;
2609 	uint64_t unique = 0;
2610 	int err;
2611 
2612 	ASSERT(csa->cds->ds_reserved == 0);
2613 	ASSERT(csa->cds->ds_quota == csa->ohds->ds_quota);
2614 
2615 	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
2616 	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
2617 	dmu_buf_will_dirty(csa->cds->ds_prev->ds_dbuf, tx);
2618 
2619 	if (csa->cds->ds_user_ptr != NULL) {
2620 		csa->cds->ds_user_evict_func(csa->cds, csa->cds->ds_user_ptr);
2621 		csa->cds->ds_user_ptr = NULL;
2622 	}
2623 
2624 	if (csa->ohds->ds_user_ptr != NULL) {
2625 		csa->ohds->ds_user_evict_func(csa->ohds,
2626 		    csa->ohds->ds_user_ptr);
2627 		csa->ohds->ds_user_ptr = NULL;
2628 	}
2629 
2630 	/* compute unique space */
2631 	while ((err = bplist_iterate(&csa->cds->ds_deadlist,
2632 	    &itor, &bp)) == 0) {
2633 		if (bp.blk_birth > csa->cds->ds_prev->ds_phys->ds_prev_snap_txg)
2634 			unique += bp_get_dasize(dp->dp_spa, &bp);
2635 	}
2636 	VERIFY(err == ENOENT);
2637 
2638 	/* reset origin's unique bytes */
2639 	csa->cds->ds_prev->ds_phys->ds_unique_bytes = unique;
2640 
2641 	/* swap blkptrs */
2642 	{
2643 		blkptr_t tmp;
2644 		tmp = csa->ohds->ds_phys->ds_bp;
2645 		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
2646 		csa->cds->ds_phys->ds_bp = tmp;
2647 	}
2648 
2649 	/* set dd_*_bytes */
2650 	{
2651 		int64_t dused, dcomp, duncomp;
2652 		uint64_t cdl_used, cdl_comp, cdl_uncomp;
2653 		uint64_t odl_used, odl_comp, odl_uncomp;
2654 
2655 		VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
2656 		    &cdl_comp, &cdl_uncomp));
2657 		VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
2658 		    &odl_comp, &odl_uncomp));
2659 		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
2660 		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
2661 		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
2662 		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
2663 		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
2664 		    cdl_uncomp -
2665 		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
2666 
2667 		dsl_dir_diduse_space(csa->ohds->ds_dir,
2668 		    dused, dcomp, duncomp, tx);
2669 		dsl_dir_diduse_space(csa->cds->ds_dir,
2670 		    -dused, -dcomp, -duncomp, tx);
2671 	}
2672 
2673 #define	SWITCH64(x, y) \
2674 	{ \
2675 		uint64_t __tmp = (x); \
2676 		(x) = (y); \
2677 		(y) = __tmp; \
2678 	}
2679 
2680 	/* swap ds_*_bytes */
2681 	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
2682 	    csa->cds->ds_phys->ds_used_bytes);
2683 	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
2684 	    csa->cds->ds_phys->ds_compressed_bytes);
2685 	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
2686 	    csa->cds->ds_phys->ds_uncompressed_bytes);
2687 	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
2688 	    csa->cds->ds_phys->ds_unique_bytes);
2689 
2690 	/* apply any parent delta for change in unconsumed refreservation */
2691 	dsl_dir_diduse_space(csa->ohds->ds_dir, csa->unused_refres_delta,
2692 	    0, 0, tx);
2693 
2694 	/* swap deadlists */
2695 	bplist_close(&csa->cds->ds_deadlist);
2696 	bplist_close(&csa->ohds->ds_deadlist);
2697 	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
2698 	    csa->cds->ds_phys->ds_deadlist_obj);
2699 	VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
2700 	    csa->cds->ds_phys->ds_deadlist_obj));
2701 	VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
2702 	    csa->ohds->ds_phys->ds_deadlist_obj));
2703 }
2704 
2705 /*
2706  * Swap 'clone' with its origin head file system.  Used at the end
2707  * of "online recv" to swizzle the file system to the new version.
2708  */
2709 int
2710 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
2711     boolean_t force)
2712 {
2713 	struct cloneswaparg csa;
2714 	int error;
2715 
2716 	ASSERT(clone->ds_owner);
2717 	ASSERT(origin_head->ds_owner);
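	/*
	 * Take both ds_rwlocks as writer without imposing a fixed
	 * ordering: block on one, try-lock the other, and on failure
	 * back off and swap roles so two concurrent swaps can't
	 * deadlock.
	 */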
2718 retry:
2719 	/* Need exclusive access for the swap */
2720 	rw_enter(&clone->ds_rwlock, RW_WRITER);
2721 	if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
2722 		rw_exit(&clone->ds_rwlock);
2723 		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
2724 		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
2725 			rw_exit(&origin_head->ds_rwlock);
2726 			goto retry;
2727 		}
2728 	}
2729 	csa.cds = clone;
2730 	csa.ohds = origin_head;
2731 	csa.force = force;
2732 	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
2733 	    dsl_dataset_clone_swap_check,
2734 	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
2735 	return (error);
2736 }
2737 
2738 /*
2739  * Given a pool name and a dataset object number in that pool,
2740  * return the name of that dataset.
2741  */
2742 int
2743 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
2744 {
2745 	spa_t *spa;
2746 	dsl_pool_t *dp;
2747 	dsl_dataset_t *ds;
2748 	int error;
2749 
2750 	if ((error = spa_open(pname, &spa, FTAG)) != 0)
2751 		return (error);
2752 	dp = spa_get_dsl(spa);
2753 	rw_enter(&dp->dp_config_rwlock, RW_READER);
2754 	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
2755 		dsl_dataset_name(ds, buf);
2756 		dsl_dataset_rele(ds, FTAG);
2757 	}
2758 	rw_exit(&dp->dp_config_rwlock);
2759 	spa_close(spa, FTAG);
2760 
2761 	return (error);
2762 }
2763 
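/*
 * A minimal usage sketch (pool name and object number are
 * hypothetical):
 *
 *	char buf[MAXNAMELEN];
 *
 *	if (dsl_dsobj_to_dsname("tank", 21, buf) == 0)
 *		dprintf("obj 21 is %s\n", buf);
 */
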
2764 int
2765 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
2766     uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
2767 {
2768 	int error = 0;
2769 
2770 	ASSERT3S(asize, >, 0);
2771 
2772 	/*
2773 	 * *ref_rsrv is the portion of asize that will come from any
2774 	 * unconsumed refreservation space.
2775 	 */
2776 	*ref_rsrv = 0;
2777 
2778 	mutex_enter(&ds->ds_lock);
2779 	/*
2780 	 * Make a space adjustment for reserved bytes.
2781 	 */
2782 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
2783 		ASSERT3U(*used, >=,
2784 		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2785 		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2786 		*ref_rsrv =
2787 		    asize - MIN(asize, parent_delta(ds, asize + inflight));
2788 	}
2789 
2790 	if (!check_quota || ds->ds_quota == 0) {
2791 		mutex_exit(&ds->ds_lock);
2792 		return (0);
2793 	}
2794 	/*
2795 	 * If they are requesting more space, and our current estimate
2796 	 * is over quota, they get to try again unless the actual
2797 	 * on-disk is over quota and there are no pending changes (which
2798 	 * may free up space for us).
2799 	 */
2800 	if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
2801 		if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
2802 			error = ERESTART;
2803 		else
2804 			error = EDQUOT;
2805 	}
2806 	mutex_exit(&ds->ds_lock);
2807 
2808 	return (error);
2809 }
2810 
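/*
 * A worked example for dsl_dataset_check_quota(), with hypothetical
 * sizes: given refquota=10G, 9G on disk and 2G in flight, the estimate
 * 9G + 2G exceeds the quota, so the write gets ERESTART and may retry
 * after pending changes sync; only when the on-disk usage itself
 * reaches the quota with nothing in flight does it become EDQUOT.
 */
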
2811 /* ARGSUSED */
2812 static int
2813 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
2814 {
2815 	dsl_dataset_t *ds = arg1;
2816 	uint64_t *quotap = arg2;
2817 	uint64_t new_quota = *quotap;
2818 
2819 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
2820 		return (ENOTSUP);
2821 
2822 	if (new_quota == 0)
2823 		return (0);
2824 
2825 	if (new_quota < ds->ds_phys->ds_used_bytes ||
2826 	    new_quota < ds->ds_reserved)
2827 		return (ENOSPC);
2828 
2829 	return (0);
2830 }
2831 
2832 /* ARGSUSED */
2833 void
2834 dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2835 {
2836 	dsl_dataset_t *ds = arg1;
2837 	uint64_t *quotap = arg2;
2838 	uint64_t new_quota = *quotap;
2839 
2840 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2841 
2842 	ds->ds_quota = new_quota;
2843 
2844 	dsl_prop_set_uint64_sync(ds->ds_dir, "refquota", new_quota, cr, tx);
2845 
2846 	spa_history_internal_log(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa,
2847 	    tx, cr, "%lld dataset = %llu ",
2848 	    (longlong_t)new_quota, ds->ds_object);
2849 }
2850 
2851 int
2852 dsl_dataset_set_quota(const char *dsname, uint64_t quota)
2853 {
2854 	dsl_dataset_t *ds;
2855 	int err;
2856 
2857 	err = dsl_dataset_hold(dsname, FTAG, &ds);
2858 	if (err)
2859 		return (err);
2860 
2861 	if (quota != ds->ds_quota) {
2862 		/*
2863 		 * If someone removes a file, then tries to set the quota, we
2864 		 * want to make sure the file freeing takes effect.
2865 		 */
2866 		txg_wait_open(ds->ds_dir->dd_pool, 0);
2867 
2868 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2869 		    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
2870 		    ds, &quota, 0);
2871 	}
2872 	dsl_dataset_rele(ds, FTAG);
2873 	return (err);
2874 }
2875 
2876 static int
2877 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
2878 {
2879 	dsl_dataset_t *ds = arg1;
2880 	uint64_t *reservationp = arg2;
2881 	uint64_t new_reservation = *reservationp;
2882 	int64_t delta;
2883 	uint64_t unique;
2884 
2885 	if (new_reservation > INT64_MAX)
2886 		return (EOVERFLOW);
2887 
2888 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
2889 	    SPA_VERSION_REFRESERVATION)
2890 		return (ENOTSUP);
2891 
2892 	if (dsl_dataset_is_snapshot(ds))
2893 		return (EINVAL);
2894 
2895 	/*
2896 	 * If we are doing the preliminary check in open context, the
2897 	 * space estimates may be inaccurate.
2898 	 */
2899 	if (!dmu_tx_is_syncing(tx))
2900 		return (0);
2901 
2902 	mutex_enter(&ds->ds_lock);
2903 	unique = dsl_dataset_unique(ds);
2904 	delta = MAX(unique, new_reservation) - MAX(unique, ds->ds_reserved);
2905 	mutex_exit(&ds->ds_lock);
2906 
2907 	if (delta > 0 &&
2908 	    delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2909 		return (ENOSPC);
2910 	if (delta > 0 && ds->ds_quota > 0 &&
2911 	    new_reservation > ds->ds_quota)
2912 		return (ENOSPC);
2913 
2914 	return (0);
2915 }
2916 
2917 /* ARGSUSED */
2918 static void
2919 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr,
2920     dmu_tx_t *tx)
2921 {
2922 	dsl_dataset_t *ds = arg1;
2923 	uint64_t *reservationp = arg2;
2924 	uint64_t new_reservation = *reservationp;
2925 	uint64_t unique;
2926 	int64_t delta;
2927 
2928 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2929 
2930 	mutex_enter(&ds->ds_lock);
2931 	unique = dsl_dataset_unique(ds);
2932 	delta = MAX(0, (int64_t)(new_reservation - unique)) -
2933 	    MAX(0, (int64_t)(ds->ds_reserved - unique));
2934 	ds->ds_reserved = new_reservation;
2935 	mutex_exit(&ds->ds_lock);
2936 
2937 	dsl_prop_set_uint64_sync(ds->ds_dir, "refreservation",
2938 	    new_reservation, cr, tx);
2939 
2940 	dsl_dir_diduse_space(ds->ds_dir, delta, 0, 0, tx);
2941 
2942 	spa_history_internal_log(LOG_DS_REFRESERV,
2943 	    ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu",
2944 	    (longlong_t)new_reservation,
2945 	    ds->ds_dir->dd_phys->dd_head_dataset_obj);
2946 }
2947 
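/*
 * Example of the reservation delta above, with hypothetical sizes:
 * with 5M unique, raising refreservation from 10M to 20M charges the
 * dsl_dir MAX(0, 20M - 5M) - MAX(0, 10M - 5M) = +10M, since the first
 * 5M of either reservation is already covered by unique space.
 */
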
2948 int
2949 dsl_dataset_set_reservation(const char *dsname, uint64_t reservation)
2950 {
2951 	dsl_dataset_t *ds;
2952 	int err;
2953 
2954 	err = dsl_dataset_hold(dsname, FTAG, &ds);
2955 	if (err)
2956 		return (err);
2957 
2958 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2959 	    dsl_dataset_set_reservation_check,
2960 	    dsl_dataset_set_reservation_sync, ds, &reservation, 0);
2961 	dsl_dataset_rele(ds, FTAG);
2962 	return (err);
2963 }
2964