xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_dataset.c (revision 2e2c135528b3edfe9aaf67d1f004dc0202fa1a54)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25  */
26 
27 #include <sys/dmu_objset.h>
28 #include <sys/dsl_dataset.h>
29 #include <sys/dsl_dir.h>
30 #include <sys/dsl_prop.h>
31 #include <sys/dsl_synctask.h>
32 #include <sys/dmu_traverse.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/arc.h>
36 #include <sys/zio.h>
37 #include <sys/zap.h>
38 #include <sys/zfeature.h>
39 #include <sys/unique.h>
40 #include <sys/zfs_context.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/spa.h>
43 #include <sys/zfs_znode.h>
44 #include <sys/zfs_onexit.h>
45 #include <sys/zvol.h>
46 #include <sys/dsl_scan.h>
47 #include <sys/dsl_deadlist.h>
48 
49 static char *dsl_reaper = "the grim reaper";
50 
51 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
52 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
53 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
54 
55 #define	SWITCH64(x, y) \
56 	{ \
57 		uint64_t __tmp = (x); \
58 		(x) = (y); \
59 		(y) = __tmp; \
60 	}
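
/*
 * SWITCH64 exchanges two uint64_t lvalues in place.  It is used below,
 * e.g. in process_old_deadlist() to swap the on-disk deadlist object
 * numbers of two datasets:
 *
 *	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
 *	    ds->ds_phys->ds_deadlist_obj);
 */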
61 
62 #define	DS_REF_MAX	(1ULL << 62)
63 
64 #define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE
65 
66 #define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
67 
68 
69 /*
70  * Figure out how much of this delta should be propagated to the dsl_dir
71  * layer.  If there's a refreservation, that space has already been
72  * partially accounted for in our ancestors.
73  */
74 static int64_t
75 parent_delta(dsl_dataset_t *ds, int64_t delta)
76 {
77 	uint64_t old_bytes, new_bytes;
78 
79 	if (ds->ds_reserved == 0)
80 		return (delta);
81 
82 	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
83 	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
84 
85 	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
86 	return (new_bytes - old_bytes);
87 }
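
/*
 * A worked example with made-up numbers: with ds_reserved = 10M and
 * ds_unique_bytes = 8M, a delta of +4M gives old_bytes = MAX(8M, 10M) =
 * 10M and new_bytes = MAX(12M, 10M) = 12M, so only 2M of the 4M delta
 * is propagated to the dsl_dir layer; the other 2M was already charged
 * to our ancestors by the refreservation.
 */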
88 
89 void
90 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
91 {
92 	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
93 	int compressed = BP_GET_PSIZE(bp);
94 	int uncompressed = BP_GET_UCSIZE(bp);
95 	int64_t delta;
96 
97 	dprintf_bp(bp, "ds=%p", ds);
98 
99 	ASSERT(dmu_tx_is_syncing(tx));
100 	/* It could have been compressed away to nothing */
101 	if (BP_IS_HOLE(bp))
102 		return;
103 	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
104 	ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
105 	if (ds == NULL) {
106 		dsl_pool_mos_diduse_space(tx->tx_pool,
107 		    used, compressed, uncompressed);
108 		return;
109 	}
110 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
111 
112 	mutex_enter(&ds->ds_dir->dd_lock);
113 	mutex_enter(&ds->ds_lock);
114 	delta = parent_delta(ds, used);
115 	ds->ds_phys->ds_referenced_bytes += used;
116 	ds->ds_phys->ds_compressed_bytes += compressed;
117 	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
118 	ds->ds_phys->ds_unique_bytes += used;
119 	mutex_exit(&ds->ds_lock);
120 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
121 	    compressed, uncompressed, tx);
122 	dsl_dir_transfer_space(ds->ds_dir, used - delta,
123 	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
124 	mutex_exit(&ds->ds_dir->dd_lock);
125 }
126 
127 int
128 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
129     boolean_t async)
130 {
131 	if (BP_IS_HOLE(bp))
132 		return (0);
133 
134 	ASSERT(dmu_tx_is_syncing(tx));
135 	ASSERT(bp->blk_birth <= tx->tx_txg);
136 
137 	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
138 	int compressed = BP_GET_PSIZE(bp);
139 	int uncompressed = BP_GET_UCSIZE(bp);
140 
141 	ASSERT(used > 0);
142 	if (ds == NULL) {
143 		dsl_free(tx->tx_pool, tx->tx_txg, bp);
144 		dsl_pool_mos_diduse_space(tx->tx_pool,
145 		    -used, -compressed, -uncompressed);
146 		return (used);
147 	}
148 	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
149 
150 	ASSERT(!dsl_dataset_is_snapshot(ds));
151 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
152 
153 	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
154 		int64_t delta;
155 
156 		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
157 		dsl_free(tx->tx_pool, tx->tx_txg, bp);
158 
159 		mutex_enter(&ds->ds_dir->dd_lock);
160 		mutex_enter(&ds->ds_lock);
161 		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
162 		    !DS_UNIQUE_IS_ACCURATE(ds));
163 		delta = parent_delta(ds, -used);
164 		ds->ds_phys->ds_unique_bytes -= used;
165 		mutex_exit(&ds->ds_lock);
166 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
167 		    delta, -compressed, -uncompressed, tx);
168 		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
169 		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
170 		mutex_exit(&ds->ds_dir->dd_lock);
171 	} else {
172 		dprintf_bp(bp, "putting on dead list: %s", "");
173 		if (async) {
174 			/*
175 			 * We are here as part of zio's write done callback,
176 			 * which means we're a zio interrupt thread.  We can't
177 			 * call dsl_deadlist_insert() now because it may block
178 			 * waiting for I/O.  Instead, put bp on the deferred
179 			 * queue and let dsl_pool_sync() finish the job.
180 			 */
181 			bplist_append(&ds->ds_pending_deadlist, bp);
182 		} else {
183 			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
184 		}
185 		ASSERT3U(ds->ds_prev->ds_object, ==,
186 		    ds->ds_phys->ds_prev_snap_obj);
187 		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
188 		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
189 		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
190 		    ds->ds_object && bp->blk_birth >
191 		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
192 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
193 			mutex_enter(&ds->ds_prev->ds_lock);
194 			ds->ds_prev->ds_phys->ds_unique_bytes += used;
195 			mutex_exit(&ds->ds_prev->ds_lock);
196 		}
197 		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
198 			dsl_dir_transfer_space(ds->ds_dir, used,
199 			    DD_USED_HEAD, DD_USED_SNAP, tx);
200 		}
201 	}
202 	mutex_enter(&ds->ds_lock);
203 	ASSERT3U(ds->ds_phys->ds_referenced_bytes, >=, used);
204 	ds->ds_phys->ds_referenced_bytes -= used;
205 	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
206 	ds->ds_phys->ds_compressed_bytes -= compressed;
207 	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
208 	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
209 	mutex_exit(&ds->ds_lock);
210 
211 	return (used);
212 }
213 
214 uint64_t
215 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
216 {
217 	uint64_t trysnap = 0;
218 
219 	if (ds == NULL)
220 		return (0);
221 	/*
222 	 * The snapshot creation could fail, but that would cause an
223 	 * incorrect FALSE return, which would only result in an
224 	 * overestimation of the amount of space that an operation would
225 	 * consume, which is OK.
226 	 *
227 	 * There's also a small window where we could miss a pending
228 	 * snapshot, because we could set the sync task in the quiescing
229 	 * phase.  So this should only be used as a guess.
230 	 */
231 	if (ds->ds_trysnap_txg >
232 	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
233 		trysnap = ds->ds_trysnap_txg;
234 	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
235 }
236 
237 boolean_t
238 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
239     uint64_t blk_birth)
240 {
241 	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
242 		return (B_FALSE);
243 
244 	ddt_prefetch(dsl_dataset_get_spa(ds), bp);
245 
246 	return (B_TRUE);
247 }
248 
249 /* ARGSUSED */
250 static void
251 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
252 {
253 	dsl_dataset_t *ds = dsv;
254 
255 	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
256 
257 	unique_remove(ds->ds_fsid_guid);
258 
259 	if (ds->ds_objset != NULL)
260 		dmu_objset_evict(ds->ds_objset);
261 
262 	if (ds->ds_prev) {
263 		dsl_dataset_drop_ref(ds->ds_prev, ds);
264 		ds->ds_prev = NULL;
265 	}
266 
267 	bplist_destroy(&ds->ds_pending_deadlist);
268 	if (db != NULL) {
269 		dsl_deadlist_close(&ds->ds_deadlist);
270 	} else {
271 		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
272 		ASSERT(!ds->ds_deadlist.dl_oldfmt);
273 	}
274 	if (ds->ds_dir)
275 		dsl_dir_close(ds->ds_dir, ds);
276 
277 	ASSERT(!list_link_active(&ds->ds_synced_link));
278 
279 	mutex_destroy(&ds->ds_lock);
280 	mutex_destroy(&ds->ds_recvlock);
281 	mutex_destroy(&ds->ds_opening_lock);
282 	rw_destroy(&ds->ds_rwlock);
283 	cv_destroy(&ds->ds_exclusive_cv);
284 
285 	kmem_free(ds, sizeof (dsl_dataset_t));
286 }
287 
288 static int
289 dsl_dataset_get_snapname(dsl_dataset_t *ds)
290 {
291 	dsl_dataset_phys_t *headphys;
292 	int err;
293 	dmu_buf_t *headdbuf;
294 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
295 	objset_t *mos = dp->dp_meta_objset;
296 
297 	if (ds->ds_snapname[0])
298 		return (0);
299 	if (ds->ds_phys->ds_next_snap_obj == 0)
300 		return (0);
301 
302 	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
303 	    FTAG, &headdbuf);
304 	if (err)
305 		return (err);
306 	headphys = headdbuf->db_data;
307 	err = zap_value_search(dp->dp_meta_objset,
308 	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
309 	dmu_buf_rele(headdbuf, FTAG);
310 	return (err);
311 }
312 
313 static int
314 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
315 {
316 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
317 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
318 	matchtype_t mt;
319 	int err;
320 
321 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
322 		mt = MT_FIRST;
323 	else
324 		mt = MT_EXACT;
325 
326 	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
327 	    value, mt, NULL, 0, NULL);
328 	if (err == ENOTSUP && mt == MT_FIRST)
329 		err = zap_lookup(mos, snapobj, name, 8, 1, value);
330 	return (err);
331 }
332 
333 static int
334 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
335 {
336 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
337 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
338 	matchtype_t mt;
339 	int err;
340 
341 	dsl_dir_snap_cmtime_update(ds->ds_dir);
342 
343 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
344 		mt = MT_FIRST;
345 	else
346 		mt = MT_EXACT;
347 
348 	err = zap_remove_norm(mos, snapobj, name, mt, tx);
349 	if (err == ENOTSUP && mt == MT_FIRST)
350 		err = zap_remove(mos, snapobj, name, tx);
351 	return (err);
352 }
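
/*
 * Both lookups above follow the same pattern: for a case-insensitive
 * dataset (DS_FLAG_CI_DATASET) we first try a normalizing match
 * (MT_FIRST); if the snapnames zap does not support normalized lookups
 * (ENOTSUP), we fall back to the plain exact-match interface, which
 * any zap can satisfy.
 */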
353 
354 static int
355 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
356     dsl_dataset_t **dsp)
357 {
358 	objset_t *mos = dp->dp_meta_objset;
359 	dmu_buf_t *dbuf;
360 	dsl_dataset_t *ds;
361 	int err;
362 	dmu_object_info_t doi;
363 
364 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
365 	    dsl_pool_sync_context(dp));
366 
367 	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
368 	if (err)
369 		return (err);
370 
371 	/* Make sure dsobj has the correct object type. */
372 	dmu_object_info_from_db(dbuf, &doi);
373 	if (doi.doi_type != DMU_OT_DSL_DATASET) {
		dmu_buf_rele(dbuf, tag);	/* don't leak the bonus hold */
		return (EINVAL);
	}
375 
376 	ds = dmu_buf_get_user(dbuf);
377 	if (ds == NULL) {
378 		dsl_dataset_t *winner;
379 
380 		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
381 		ds->ds_dbuf = dbuf;
382 		ds->ds_object = dsobj;
383 		ds->ds_phys = dbuf->db_data;
384 
385 		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
386 		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
387 		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
388 		mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
389 
390 		rw_init(&ds->ds_rwlock, 0, 0, 0);
391 		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
392 
393 		bplist_create(&ds->ds_pending_deadlist);
394 		dsl_deadlist_open(&ds->ds_deadlist,
395 		    mos, ds->ds_phys->ds_deadlist_obj);
396 
397 		list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
398 		    offsetof(dmu_sendarg_t, dsa_link));
399 
400 		if (err == 0) {
401 			err = dsl_dir_open_obj(dp,
402 			    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
403 		}
404 		if (err) {
405 			mutex_destroy(&ds->ds_lock);
406 			mutex_destroy(&ds->ds_recvlock);
407 			mutex_destroy(&ds->ds_opening_lock);
408 			rw_destroy(&ds->ds_rwlock);
409 			cv_destroy(&ds->ds_exclusive_cv);
410 			bplist_destroy(&ds->ds_pending_deadlist);
411 			dsl_deadlist_close(&ds->ds_deadlist);
412 			kmem_free(ds, sizeof (dsl_dataset_t));
413 			dmu_buf_rele(dbuf, tag);
414 			return (err);
415 		}
416 
417 		if (!dsl_dataset_is_snapshot(ds)) {
418 			ds->ds_snapname[0] = '\0';
419 			if (ds->ds_phys->ds_prev_snap_obj) {
420 				err = dsl_dataset_get_ref(dp,
421 				    ds->ds_phys->ds_prev_snap_obj,
422 				    ds, &ds->ds_prev);
423 			}
424 		} else {
425 			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
426 				err = dsl_dataset_get_snapname(ds);
427 			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
428 				err = zap_count(
429 				    ds->ds_dir->dd_pool->dp_meta_objset,
430 				    ds->ds_phys->ds_userrefs_obj,
431 				    &ds->ds_userrefs);
432 			}
433 		}
434 
435 		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
436 			/*
437 			 * In sync context, we're called with either no lock
438 			 * or with the write lock.  If we're not syncing,
439 			 * we're always called with the read lock held.
440 			 */
441 			boolean_t need_lock =
442 			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
443 			    dsl_pool_sync_context(dp);
444 
445 			if (need_lock)
446 				rw_enter(&dp->dp_config_rwlock, RW_READER);
447 
448 			err = dsl_prop_get_ds(ds,
449 			    "refreservation", sizeof (uint64_t), 1,
450 			    &ds->ds_reserved, NULL);
451 			if (err == 0) {
452 				err = dsl_prop_get_ds(ds,
453 				    "refquota", sizeof (uint64_t), 1,
454 				    &ds->ds_quota, NULL);
455 			}
456 
457 			if (need_lock)
458 				rw_exit(&dp->dp_config_rwlock);
459 		} else {
460 			ds->ds_reserved = ds->ds_quota = 0;
461 		}
462 
463 		if (err == 0) {
464 			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
465 			    dsl_dataset_evict);
466 		}
467 		if (err || winner) {
468 			bplist_destroy(&ds->ds_pending_deadlist);
469 			dsl_deadlist_close(&ds->ds_deadlist);
470 			if (ds->ds_prev)
471 				dsl_dataset_drop_ref(ds->ds_prev, ds);
472 			dsl_dir_close(ds->ds_dir, ds);
473 			mutex_destroy(&ds->ds_lock);
474 			mutex_destroy(&ds->ds_recvlock);
475 			mutex_destroy(&ds->ds_opening_lock);
476 			rw_destroy(&ds->ds_rwlock);
477 			cv_destroy(&ds->ds_exclusive_cv);
478 			kmem_free(ds, sizeof (dsl_dataset_t));
479 			if (err) {
480 				dmu_buf_rele(dbuf, tag);
481 				return (err);
482 			}
483 			ds = winner;
484 		} else {
485 			ds->ds_fsid_guid =
486 			    unique_insert(ds->ds_phys->ds_fsid_guid);
487 		}
488 	}
489 	ASSERT3P(ds->ds_dbuf, ==, dbuf);
490 	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
491 	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
492 	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
493 	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
494 	mutex_enter(&ds->ds_lock);
495 	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
496 		mutex_exit(&ds->ds_lock);
497 		dmu_buf_rele(ds->ds_dbuf, tag);
498 		return (ENOENT);
499 	}
500 	mutex_exit(&ds->ds_lock);
501 	*dsp = ds;
502 	return (0);
503 }
504 
505 static int
506 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
507 {
508 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
509 
510 	/*
511 	 * In syncing context we don't want the rwlock: there
512 	 * may be an existing writer waiting for sync phase to
513 	 * finish.  We don't need to worry about such writers, since
514 	 * sync phase is single-threaded, so the writer can't be
515 	 * doing anything while we are active.
516 	 */
517 	if (dsl_pool_sync_context(dp)) {
518 		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
519 		return (0);
520 	}
521 
522 	/*
523 	 * Normal users will hold the ds_rwlock as a READER until they
524 	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
525 	 * drop their READER lock after they set the ds_owner field.
526 	 *
527 	 * If the dataset is being destroyed, the destroy thread will
528 	 * obtain a WRITER lock for exclusive access after it's done its
529 	 * open-context work and then change the ds_owner to
530 	 * dsl_reaper once destruction is assured.  So threads
531 	 * may block here temporarily, until the "destructability" of
532 	 * the dataset is determined.
533 	 */
534 	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
535 	mutex_enter(&ds->ds_lock);
536 	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
537 		rw_exit(&dp->dp_config_rwlock);
538 		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
539 		if (DSL_DATASET_IS_DESTROYED(ds)) {
540 			mutex_exit(&ds->ds_lock);
541 			dsl_dataset_drop_ref(ds, tag);
542 			rw_enter(&dp->dp_config_rwlock, RW_READER);
543 			return (ENOENT);
544 		}
545 		/*
546 		 * The dp_config_rwlock lives above the ds_lock. And
547 		 * we need to check DSL_DATASET_IS_DESTROYED() while
548 		 * holding the ds_lock, so we have to drop and reacquire
549 		 * the ds_lock here.
550 		 */
551 		mutex_exit(&ds->ds_lock);
552 		rw_enter(&dp->dp_config_rwlock, RW_READER);
553 		mutex_enter(&ds->ds_lock);
554 	}
555 	mutex_exit(&ds->ds_lock);
556 	return (0);
557 }
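
/*
 * A sketch of the hold/own lifecycles described above (hypothetical
 * callers, not from this file).  A normal user:
 *
 *	dsl_dataset_hold(name, FTAG, &ds);	(ds_rwlock held as READER)
 *	... use ds ...
 *	dsl_dataset_rele(ds, FTAG);		(drops ds_rwlock and the ref)
 *
 * An owner, e.g. destroy:
 *
 *	dsl_dataset_own(name, B_FALSE, FTAG, &ds);
 *	... (ds_owner is set; the READER lock has been dropped) ...
 *	dsl_dataset_disown(ds, FTAG);
 */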
558 
559 int
560 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
561     dsl_dataset_t **dsp)
562 {
563 	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
564 
565 	if (err)
566 		return (err);
567 	return (dsl_dataset_hold_ref(*dsp, tag));
568 }
569 
570 int
571 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
572     void *tag, dsl_dataset_t **dsp)
573 {
574 	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
575 	if (err)
576 		return (err);
577 	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
578 		dsl_dataset_rele(*dsp, tag);
579 		*dsp = NULL;
580 		return (EBUSY);
581 	}
582 	return (0);
583 }
584 
585 int
586 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
587 {
588 	dsl_dir_t *dd;
589 	dsl_pool_t *dp;
590 	const char *snapname;
591 	uint64_t obj;
592 	int err = 0;
593 
594 	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
595 	if (err)
596 		return (err);
597 
598 	dp = dd->dd_pool;
599 	obj = dd->dd_phys->dd_head_dataset_obj;
600 	rw_enter(&dp->dp_config_rwlock, RW_READER);
601 	if (obj)
602 		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
603 	else
604 		err = ENOENT;
605 	if (err)
606 		goto out;
607 
608 	err = dsl_dataset_hold_ref(*dsp, tag);
609 
610 	/* we may be looking for a snapshot */
611 	if (err == 0 && snapname != NULL) {
612 		dsl_dataset_t *ds = NULL;
613 
614 		if (*snapname++ != '@') {
615 			dsl_dataset_rele(*dsp, tag);
616 			err = ENOENT;
617 			goto out;
618 		}
619 
620 		dprintf("looking for snapshot '%s'\n", snapname);
621 		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
622 		if (err == 0)
623 			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
624 		dsl_dataset_rele(*dsp, tag);
625 
626 		ASSERT3U((err == 0), ==, (ds != NULL));
627 
628 		if (ds) {
629 			mutex_enter(&ds->ds_lock);
630 			if (ds->ds_snapname[0] == 0)
631 				(void) strlcpy(ds->ds_snapname, snapname,
632 				    sizeof (ds->ds_snapname));
633 			mutex_exit(&ds->ds_lock);
634 			err = dsl_dataset_hold_ref(ds, tag);
635 			*dsp = err ? NULL : ds;
636 		}
637 	}
638 out:
639 	rw_exit(&dp->dp_config_rwlock);
640 	dsl_dir_close(dd, FTAG);
641 	return (err);
642 }
643 
644 int
645 dsl_dataset_own(const char *name, boolean_t inconsistentok,
646     void *tag, dsl_dataset_t **dsp)
647 {
648 	int err = dsl_dataset_hold(name, tag, dsp);
649 	if (err)
650 		return (err);
651 	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
652 		dsl_dataset_rele(*dsp, tag);
653 		return (EBUSY);
654 	}
655 	return (0);
656 }
657 
658 void
659 dsl_dataset_name(dsl_dataset_t *ds, char *name)
660 {
661 	if (ds == NULL) {
662 		(void) strcpy(name, "mos");
663 	} else {
664 		dsl_dir_name(ds->ds_dir, name);
665 		VERIFY(0 == dsl_dataset_get_snapname(ds));
666 		if (ds->ds_snapname[0]) {
667 			(void) strcat(name, "@");
668 			/*
669 			 * We use a "recursive" mutex so that we
670 			 * can call dprintf_ds() with ds_lock held.
671 			 */
672 			if (!MUTEX_HELD(&ds->ds_lock)) {
673 				mutex_enter(&ds->ds_lock);
674 				(void) strcat(name, ds->ds_snapname);
675 				mutex_exit(&ds->ds_lock);
676 			} else {
677 				(void) strcat(name, ds->ds_snapname);
678 			}
679 		}
680 	}
681 }
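
/*
 * Callers supply a buffer of at least ZFS_MAXNAMELEN bytes, as in
 * dmu_snapshots_destroy_nvl() below:
 *
 *	char name[ZFS_MAXNAMELEN];
 *	dsl_dataset_name(ds, name);
 */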
682 
683 static int
684 dsl_dataset_namelen(dsl_dataset_t *ds)
685 {
686 	int result;
687 
688 	if (ds == NULL) {
689 		result = 3;	/* "mos" */
690 	} else {
691 		result = dsl_dir_namelen(ds->ds_dir);
692 		VERIFY(0 == dsl_dataset_get_snapname(ds));
693 		if (ds->ds_snapname[0]) {
694 			++result;	/* adding one for the @-sign */
695 			if (!MUTEX_HELD(&ds->ds_lock)) {
696 				mutex_enter(&ds->ds_lock);
697 				result += strlen(ds->ds_snapname);
698 				mutex_exit(&ds->ds_lock);
699 			} else {
700 				result += strlen(ds->ds_snapname);
701 			}
702 		}
703 	}
704 
705 	return (result);
706 }
707 
708 void
709 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
710 {
711 	dmu_buf_rele(ds->ds_dbuf, tag);
712 }
713 
714 void
715 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
716 {
717 	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
718 		rw_exit(&ds->ds_rwlock);
719 	}
720 	dsl_dataset_drop_ref(ds, tag);
721 }
722 
723 void
724 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
725 {
726 	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
727 	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
728 
729 	mutex_enter(&ds->ds_lock);
730 	ds->ds_owner = NULL;
731 	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
732 		rw_exit(&ds->ds_rwlock);
733 		cv_broadcast(&ds->ds_exclusive_cv);
734 	}
735 	mutex_exit(&ds->ds_lock);
736 	if (ds->ds_dbuf)
737 		dsl_dataset_drop_ref(ds, tag);
738 	else
739 		dsl_dataset_evict(NULL, ds);
740 }
741 
742 boolean_t
743 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
744 {
745 	boolean_t gotit = FALSE;
746 
747 	mutex_enter(&ds->ds_lock);
748 	if (ds->ds_owner == NULL &&
749 	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
750 		ds->ds_owner = tag;
751 		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
752 			rw_exit(&ds->ds_rwlock);
753 		gotit = TRUE;
754 	}
755 	mutex_exit(&ds->ds_lock);
756 	return (gotit);
757 }
758 
759 void
760 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
761 {
762 	ASSERT3P(owner, ==, ds->ds_owner);
763 	if (!RW_WRITE_HELD(&ds->ds_rwlock))
764 		rw_enter(&ds->ds_rwlock, RW_WRITER);
765 }
766 
767 uint64_t
768 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
769     uint64_t flags, dmu_tx_t *tx)
770 {
771 	dsl_pool_t *dp = dd->dd_pool;
772 	dmu_buf_t *dbuf;
773 	dsl_dataset_phys_t *dsphys;
774 	uint64_t dsobj;
775 	objset_t *mos = dp->dp_meta_objset;
776 
777 	if (origin == NULL)
778 		origin = dp->dp_origin_snap;
779 
780 	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
781 	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
782 	ASSERT(dmu_tx_is_syncing(tx));
783 	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
784 
785 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
786 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
787 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
788 	dmu_buf_will_dirty(dbuf, tx);
789 	dsphys = dbuf->db_data;
790 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
791 	dsphys->ds_dir_obj = dd->dd_object;
792 	dsphys->ds_flags = flags;
793 	dsphys->ds_fsid_guid = unique_create();
794 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
795 	    sizeof (dsphys->ds_guid));
796 	dsphys->ds_snapnames_zapobj =
797 	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
798 	    DMU_OT_NONE, 0, tx);
799 	dsphys->ds_creation_time = gethrestime_sec();
800 	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
801 
802 	if (origin == NULL) {
803 		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
804 	} else {
805 		dsl_dataset_t *ohds;
806 
807 		dsphys->ds_prev_snap_obj = origin->ds_object;
808 		dsphys->ds_prev_snap_txg =
809 		    origin->ds_phys->ds_creation_txg;
810 		dsphys->ds_referenced_bytes =
811 		    origin->ds_phys->ds_referenced_bytes;
812 		dsphys->ds_compressed_bytes =
813 		    origin->ds_phys->ds_compressed_bytes;
814 		dsphys->ds_uncompressed_bytes =
815 		    origin->ds_phys->ds_uncompressed_bytes;
816 		dsphys->ds_bp = origin->ds_phys->ds_bp;
817 		dsphys->ds_flags |= origin->ds_phys->ds_flags;
818 
819 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
820 		origin->ds_phys->ds_num_children++;
821 
822 		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
823 		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
824 		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
825 		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
826 		dsl_dataset_rele(ohds, FTAG);
827 
828 		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
829 			if (origin->ds_phys->ds_next_clones_obj == 0) {
830 				origin->ds_phys->ds_next_clones_obj =
831 				    zap_create(mos,
832 				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
833 			}
834 			VERIFY(0 == zap_add_int(mos,
835 			    origin->ds_phys->ds_next_clones_obj,
836 			    dsobj, tx));
837 		}
838 
839 		dmu_buf_will_dirty(dd->dd_dbuf, tx);
840 		dd->dd_phys->dd_origin_obj = origin->ds_object;
841 		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
842 			if (origin->ds_dir->dd_phys->dd_clones == 0) {
843 				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
844 				origin->ds_dir->dd_phys->dd_clones =
845 				    zap_create(mos,
846 				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
847 			}
848 			VERIFY3U(0, ==, zap_add_int(mos,
849 			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
850 		}
851 	}
852 
853 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
854 		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
855 
856 	dmu_buf_rele(dbuf, FTAG);
857 
858 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
859 	dd->dd_phys->dd_head_dataset_obj = dsobj;
860 
861 	return (dsobj);
862 }
863 
864 uint64_t
865 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
866     dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
867 {
868 	dsl_pool_t *dp = pdd->dd_pool;
869 	uint64_t dsobj, ddobj;
870 	dsl_dir_t *dd;
871 
872 	ASSERT(lastname[0] != '@');
873 
874 	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
875 	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
876 
877 	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
878 
879 	dsl_deleg_set_create_perms(dd, tx, cr);
880 
881 	dsl_dir_close(dd, FTAG);
882 
883 	/*
884 	 * If we are creating a clone, make sure we zero out any stale
885 	 * data from the origin snapshot's zil header.
886 	 */
887 	if (origin != NULL) {
888 		dsl_dataset_t *ds;
889 		objset_t *os;
890 
891 		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
892 		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
893 		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
894 		dsl_dataset_dirty(ds, tx);
895 		dsl_dataset_rele(ds, FTAG);
896 	}
897 
898 	return (dsobj);
899 }
900 
901 /*
902  * The snapshots must all be in the same pool.
903  */
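/*
 * Usage sketch (hypothetical caller; only the nvpair names are
 * consulted, the values are ignored):
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "pool/fs@snap1");
 *	fnvlist_add_boolean(snaps, "pool/fs@snap2");
 *	err = dmu_snapshots_destroy_nvl(snaps, B_FALSE, errlist);
 *	fnvlist_free(snaps);
 */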
904 int
905 dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer,
906     nvlist_t *errlist)
907 {
908 	int err;
909 	dsl_sync_task_t *dst;
910 	spa_t *spa;
911 	nvpair_t *pair;
912 	dsl_sync_task_group_t *dstg;
913 
914 	pair = nvlist_next_nvpair(snaps, NULL);
915 	if (pair == NULL)
916 		return (0);
917 
918 	err = spa_open(nvpair_name(pair), &spa, FTAG);
919 	if (err)
920 		return (err);
921 	dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
922 
923 	for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
924 	    pair = nvlist_next_nvpair(snaps, pair)) {
925 		dsl_dataset_t *ds;
926 
927 		err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
928 		if (err == 0) {
929 			struct dsl_ds_destroyarg *dsda;
930 
931 			dsl_dataset_make_exclusive(ds, dstg);
932 			dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
933 			    KM_SLEEP);
934 			dsda->ds = ds;
935 			dsda->defer = defer;
936 			dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
937 			    dsl_dataset_destroy_sync, dsda, dstg, 0);
938 		} else if (err == ENOENT) {
939 			err = 0;
940 		} else {
941 			fnvlist_add_int32(errlist, nvpair_name(pair), err);
942 			break;
943 		}
944 	}
945 
946 	if (err == 0)
947 		err = dsl_sync_task_group_wait(dstg);
948 
949 	for (dst = list_head(&dstg->dstg_tasks); dst;
950 	    dst = list_next(&dstg->dstg_tasks, dst)) {
951 		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
952 		dsl_dataset_t *ds = dsda->ds;
953 
954 		/*
955 		 * Return the snapshots that triggered the error.
956 		 */
957 		if (dst->dst_err != 0) {
958 			char name[ZFS_MAXNAMELEN];
959 			dsl_dataset_name(ds, name);
960 			fnvlist_add_int32(errlist, name, dst->dst_err);
961 		}
962 		ASSERT3P(dsda->rm_origin, ==, NULL);
963 		dsl_dataset_disown(ds, dstg);
964 		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
965 	}
966 
967 	dsl_sync_task_group_destroy(dstg);
968 	spa_close(spa, FTAG);
969 	return (err);
970 
971 }
972 
973 static boolean_t
974 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
975 {
976 	boolean_t might_destroy = B_FALSE;
977 
978 	mutex_enter(&ds->ds_lock);
979 	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
980 	    DS_IS_DEFER_DESTROY(ds))
981 		might_destroy = B_TRUE;
982 	mutex_exit(&ds->ds_lock);
983 
984 	return (might_destroy);
985 }
986 
987 /*
988  * If we're removing a clone, and these three conditions are true:
989  *	1) the clone's origin has no other children
990  *	2) the clone's origin has no user references
991  *	3) the clone's origin has been marked for deferred destruction
992  * Then, prepare to remove the origin as part of this sync task group.
993  */
994 static int
995 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
996 {
997 	dsl_dataset_t *ds = dsda->ds;
998 	dsl_dataset_t *origin = ds->ds_prev;
999 
1000 	if (dsl_dataset_might_destroy_origin(origin)) {
1001 		char *name;
1002 		int namelen;
1003 		int error;
1004 
1005 		namelen = dsl_dataset_namelen(origin) + 1;
1006 		name = kmem_alloc(namelen, KM_SLEEP);
1007 		dsl_dataset_name(origin, name);
1008 #ifdef _KERNEL
1009 		error = zfs_unmount_snap(name, NULL);
1010 		if (error) {
1011 			kmem_free(name, namelen);
1012 			return (error);
1013 		}
1014 #endif
1015 		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1016 		kmem_free(name, namelen);
1017 		if (error)
1018 			return (error);
1019 		dsda->rm_origin = origin;
1020 		dsl_dataset_make_exclusive(origin, tag);
1021 	}
1022 
1023 	return (0);
1024 }
1025 
1026 /*
1027  * ds must be opened as OWNER.  On return (whether successful or not),
1028  * ds will be closed and caller can no longer dereference it.
1029  */
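/*
 * Typical open-context usage (hypothetical caller):
 *
 *	err = dsl_dataset_own(name, B_FALSE, FTAG, &ds);
 *	if (err == 0)
 *		err = dsl_dataset_destroy(ds, FTAG, B_FALSE);
 *
 * Note there is no matching disown: dsl_dataset_destroy() consumes the
 * hold whether or not it succeeds.
 */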
1030 int
1031 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1032 {
1033 	int err;
1034 	dsl_sync_task_group_t *dstg;
1035 	objset_t *os;
1036 	dsl_dir_t *dd;
1037 	uint64_t obj;
1038 	struct dsl_ds_destroyarg dsda = { 0 };
1039 
1040 	dsda.ds = ds;
1041 
1042 	if (dsl_dataset_is_snapshot(ds)) {
1043 		/* Destroying a snapshot is simpler */
1044 		dsl_dataset_make_exclusive(ds, tag);
1045 
1046 		dsda.defer = defer;
1047 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1048 		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1049 		    &dsda, tag, 0);
1050 		ASSERT3P(dsda.rm_origin, ==, NULL);
1051 		goto out;
1052 	} else if (defer) {
1053 		err = EINVAL;
1054 		goto out;
1055 	}
1056 
1057 	dd = ds->ds_dir;
1058 
1059 	if (!spa_feature_is_enabled(dsl_dataset_get_spa(ds),
1060 	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
1061 		/*
1062 		 * Check for errors and mark this ds as inconsistent, in
1063 		 * case we crash while freeing the objects.
1064 		 */
1065 		err = dsl_sync_task_do(dd->dd_pool,
1066 		    dsl_dataset_destroy_begin_check,
1067 		    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1068 		if (err)
1069 			goto out;
1070 
1071 		err = dmu_objset_from_ds(ds, &os);
1072 		if (err)
1073 			goto out;
1074 
1075 		/*
1076 		 * Remove all objects while in the open context so that
1077 		 * there is less work to do in the syncing context.
1078 		 */
1079 		for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1080 		    ds->ds_phys->ds_prev_snap_txg)) {
1081 			/*
1082 			 * Ignore errors, if there is not enough disk space
1083 			 * we will deal with it in dsl_dataset_destroy_sync().
1084 			 */
1085 			(void) dmu_free_object(os, obj);
1086 		}
1087 		if (err != ESRCH)
1088 			goto out;
1089 
1090 		/*
1091 		 * Sync out all in-flight IO.
1092 		 */
1093 		txg_wait_synced(dd->dd_pool, 0);
1094 
1095 		/*
1096 		 * If we managed to free all the objects in open
1097 		 * context, the user space accounting should be zero.
1098 		 */
1099 		if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1100 		    dmu_objset_userused_enabled(os)) {
1101 			uint64_t count;
1102 
1103 			ASSERT(zap_count(os, DMU_USERUSED_OBJECT,
1104 			    &count) != 0 || count == 0);
1105 			ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT,
1106 			    &count) != 0 || count == 0);
1107 		}
1108 	}
1109 
1110 	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1111 	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1112 	rw_exit(&dd->dd_pool->dp_config_rwlock);
1113 
1114 	if (err)
1115 		goto out;
1116 
1117 	/*
1118 	 * Blow away the dsl_dir + head dataset.
1119 	 */
1120 	dsl_dataset_make_exclusive(ds, tag);
1121 	/*
1122 	 * If we're removing a clone, we might also need to remove its
1123 	 * origin.
1124 	 */
1125 	do {
1126 		dsda.need_prep = B_FALSE;
1127 		if (dsl_dir_is_clone(dd)) {
1128 			err = dsl_dataset_origin_rm_prep(&dsda, tag);
1129 			if (err) {
1130 				dsl_dir_close(dd, FTAG);
1131 				goto out;
1132 			}
1133 		}
1134 
1135 		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1136 		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1137 		    dsl_dataset_destroy_sync, &dsda, tag, 0);
1138 		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1139 		    dsl_dir_destroy_sync, dd, FTAG, 0);
1140 		err = dsl_sync_task_group_wait(dstg);
1141 		dsl_sync_task_group_destroy(dstg);
1142 
1143 		/*
1144 		 * We could be racing against 'zfs release' or 'zfs destroy -d'
1145 		 * on the origin snap, in which case we can get EBUSY if we
1146 		 * needed to destroy the origin snap but were not ready to
1147 		 * do so.
1148 		 */
1149 		if (dsda.need_prep) {
1150 			ASSERT(err == EBUSY);
1151 			ASSERT(dsl_dir_is_clone(dd));
1152 			ASSERT(dsda.rm_origin == NULL);
1153 		}
1154 	} while (dsda.need_prep);
1155 
1156 	if (dsda.rm_origin != NULL)
1157 		dsl_dataset_disown(dsda.rm_origin, tag);
1158 
1159 	/* if it is successful, dsl_dir_destroy_sync will close the dd */
1160 	if (err)
1161 		dsl_dir_close(dd, FTAG);
1162 out:
1163 	dsl_dataset_disown(ds, tag);
1164 	return (err);
1165 }
1166 
1167 blkptr_t *
1168 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1169 {
1170 	return (&ds->ds_phys->ds_bp);
1171 }
1172 
1173 void
1174 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1175 {
1176 	ASSERT(dmu_tx_is_syncing(tx));
1177 	/* If it's the meta-objset, set dp_meta_rootbp */
1178 	if (ds == NULL) {
1179 		tx->tx_pool->dp_meta_rootbp = *bp;
1180 	} else {
1181 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
1182 		ds->ds_phys->ds_bp = *bp;
1183 	}
1184 }
1185 
1186 spa_t *
1187 dsl_dataset_get_spa(dsl_dataset_t *ds)
1188 {
1189 	return (ds->ds_dir->dd_pool->dp_spa);
1190 }
1191 
1192 void
1193 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1194 {
1195 	dsl_pool_t *dp;
1196 
1197 	if (ds == NULL) /* this is the meta-objset */
1198 		return;
1199 
1200 	ASSERT(ds->ds_objset != NULL);
1201 
1202 	if (ds->ds_phys->ds_next_snap_obj != 0)
1203 		panic("dirtying snapshot!");
1204 
1205 	dp = ds->ds_dir->dd_pool;
1206 
1207 	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1208 		/* up the hold count until we can be written out */
1209 		dmu_buf_add_ref(ds->ds_dbuf, ds);
1210 	}
1211 }
1212 
1213 boolean_t
1214 dsl_dataset_is_dirty(dsl_dataset_t *ds)
1215 {
1216 	for (int t = 0; t < TXG_SIZE; t++) {
1217 		if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
1218 		    ds, t))
1219 			return (B_TRUE);
1220 	}
1221 	return (B_FALSE);
1222 }
1223 
1224 /*
1225  * The unique space in the head dataset can be calculated by subtracting
1226  * the space used in the most recent snapshot, that is still being used
1227  * in this file system, from the space currently in use.  To figure out
1228  * the space in the most recent snapshot still in use, we need to take
1229  * the total space used in the snapshot and subtract out the space that
1230  * has been freed up since the snapshot was taken.
1231  */
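/*
 * For example (made-up numbers): if the head references 10G, the most
 * recent snapshot referenced 8G, and 3G of the snapshot's blocks have
 * since been freed from the head (and thus sit on its deadlist), then
 * 8G - 3G = 5G of the snapshot is still shared with the head, leaving
 * 10G - 5G = 5G unique to the head.
 */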
1232 static void
1233 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1234 {
1235 	uint64_t mrs_used;
1236 	uint64_t dlused, dlcomp, dluncomp;
1237 
1238 	ASSERT(!dsl_dataset_is_snapshot(ds));
1239 
1240 	if (ds->ds_phys->ds_prev_snap_obj != 0)
1241 		mrs_used = ds->ds_prev->ds_phys->ds_referenced_bytes;
1242 	else
1243 		mrs_used = 0;
1244 
1245 	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1246 
1247 	ASSERT3U(dlused, <=, mrs_used);
1248 	ds->ds_phys->ds_unique_bytes =
1249 	    ds->ds_phys->ds_referenced_bytes - (mrs_used - dlused);
1250 
1251 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1252 	    SPA_VERSION_UNIQUE_ACCURATE)
1253 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1254 }
1255 
1256 struct killarg {
1257 	dsl_dataset_t *ds;
1258 	dmu_tx_t *tx;
1259 };
1260 
1261 /* ARGSUSED */
1262 static int
1263 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1264     const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1265 {
1266 	struct killarg *ka = arg;
1267 	dmu_tx_t *tx = ka->tx;
1268 
1269 	if (bp == NULL)
1270 		return (0);
1271 
1272 	if (zb->zb_level == ZB_ZIL_LEVEL) {
1273 		ASSERT(zilog != NULL);
1274 		/*
1275 		 * It's a block in the intent log.  It has no
1276 		 * accounting, so just free it.
1277 		 */
1278 		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1279 	} else {
1280 		ASSERT(zilog == NULL);
1281 		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1282 		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1283 	}
1284 
1285 	return (0);
1286 }
1287 
1288 /* ARGSUSED */
1289 static int
1290 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1291 {
1292 	dsl_dataset_t *ds = arg1;
1293 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1294 	uint64_t count;
1295 	int err;
1296 
1297 	/*
1298 	 * Can't delete a head dataset if there are snapshots of it.
1299 	 * (Except if the only snapshots are from the branch we cloned
1300 	 * from.)
1301 	 */
1302 	if (ds->ds_prev != NULL &&
1303 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1304 		return (EBUSY);
1305 
1306 	/*
1307 	 * This is really a dsl_dir thing, but check it here so that
1308 	 * we'll be less likely to leave this dataset inconsistent &
1309 	 * nearly destroyed.
1310 	 */
1311 	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1312 	if (err)
1313 		return (err);
1314 	if (count != 0)
1315 		return (EEXIST);
1316 
1317 	return (0);
1318 }
1319 
1320 /* ARGSUSED */
1321 static void
1322 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1323 {
1324 	dsl_dataset_t *ds = arg1;
1325 
1326 	/* Mark it as inconsistent on-disk, in case we crash */
1327 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1328 	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1329 
1330 	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
1331 }
1332 
1333 static int
1334 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1335     dmu_tx_t *tx)
1336 {
1337 	dsl_dataset_t *ds = dsda->ds;
1338 	dsl_dataset_t *ds_prev = ds->ds_prev;
1339 
1340 	if (dsl_dataset_might_destroy_origin(ds_prev)) {
1341 		struct dsl_ds_destroyarg ndsda = {0};
1342 
1343 		/*
1344 		 * If we're not prepared to remove the origin, don't remove
1345 		 * the clone either.
1346 		 */
1347 		if (dsda->rm_origin == NULL) {
1348 			dsda->need_prep = B_TRUE;
1349 			return (EBUSY);
1350 		}
1351 
1352 		ndsda.ds = ds_prev;
1353 		ndsda.is_origin_rm = B_TRUE;
1354 		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1355 	}
1356 
1357 	/*
1358 	 * If we're not going to remove the origin after all,
1359 	 * undo the open context setup.
1360 	 */
1361 	if (dsda->rm_origin != NULL) {
1362 		dsl_dataset_disown(dsda->rm_origin, tag);
1363 		dsda->rm_origin = NULL;
1364 	}
1365 
1366 	return (0);
1367 }
1368 
1369 /*
1370  * If you add new checks here, you may need to add
1371  * additional checks to the "temporary" case in
1372  * snapshot_check() in dmu_objset.c.
1373  */
1374 /* ARGSUSED */
1375 int
1376 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1377 {
1378 	struct dsl_ds_destroyarg *dsda = arg1;
1379 	dsl_dataset_t *ds = dsda->ds;
1380 
1381 	/* we have an owner hold, so no one else can destroy us */
1382 	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1383 
1384 	/*
1385 	 * Only allow deferred destroy on pools that support it.
1386 	 * NOTE: deferred destroy is only supported on snapshots.
1387 	 */
1388 	if (dsda->defer) {
1389 		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1390 		    SPA_VERSION_USERREFS)
1391 			return (ENOTSUP);
1392 		ASSERT(dsl_dataset_is_snapshot(ds));
1393 		return (0);
1394 	}
1395 
1396 	/*
1397 	 * Can't delete a head dataset if there are snapshots of it.
1398 	 * (Except if the only snapshots are from the branch we cloned
1399 	 * from.)
1400 	 */
1401 	if (ds->ds_prev != NULL &&
1402 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1403 		return (EBUSY);
1404 
1405 	/*
1406 	 * If we made changes this txg, traverse_dsl_dataset won't find
1407 	 * them.  Try again.
1408 	 */
1409 	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1410 		return (EAGAIN);
1411 
1412 	if (dsl_dataset_is_snapshot(ds)) {
1413 		/*
1414 		 * If this snapshot has an elevated user reference count,
1415 		 * we can't destroy it yet.
1416 		 */
1417 		if (ds->ds_userrefs > 0 && !dsda->releasing)
1418 			return (EBUSY);
1419 
1420 		mutex_enter(&ds->ds_lock);
1421 		/*
1422 		 * Can't delete a branch point. However, if we're destroying
1423 		 * a clone and removing its origin due to it having a user
1424 		 * hold count of 0 and having been marked for deferred destroy,
1425 		 * it's OK for the origin to have a single clone.
1426 		 */
1427 		if (ds->ds_phys->ds_num_children >
1428 		    (dsda->is_origin_rm ? 2 : 1)) {
1429 			mutex_exit(&ds->ds_lock);
1430 			return (EEXIST);
1431 		}
1432 		mutex_exit(&ds->ds_lock);
1433 	} else if (dsl_dir_is_clone(ds->ds_dir)) {
1434 		return (dsl_dataset_origin_check(dsda, arg2, tx));
1435 	}
1436 
1437 	/* XXX we should do some i/o error checking... */
1438 	return (0);
1439 }
1440 
1441 struct refsarg {
1442 	kmutex_t lock;
1443 	boolean_t gone;
1444 	kcondvar_t cv;
1445 };
1446 
1447 /* ARGSUSED */
1448 static void
1449 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1450 {
1451 	struct refsarg *arg = argv;
1452 
1453 	mutex_enter(&arg->lock);
1454 	arg->gone = TRUE;
1455 	cv_signal(&arg->cv);
1456 	mutex_exit(&arg->lock);
1457 }
1458 
1459 static void
1460 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1461 {
1462 	struct refsarg arg;
1463 
1464 	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1465 	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1466 	arg.gone = FALSE;
1467 	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1468 	    dsl_dataset_refs_gone);
1469 	dmu_buf_rele(ds->ds_dbuf, tag);
1470 	mutex_enter(&arg.lock);
1471 	while (!arg.gone)
1472 		cv_wait(&arg.cv, &arg.lock);
1473 	ASSERT(arg.gone);
1474 	mutex_exit(&arg.lock);
1475 	ds->ds_dbuf = NULL;
1476 	ds->ds_phys = NULL;
1477 	mutex_destroy(&arg.lock);
1478 	cv_destroy(&arg.cv);
1479 }
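
/*
 * The pattern above: swap the dbuf's user callback from
 * dsl_dataset_evict() to dsl_dataset_refs_gone(), drop our own hold,
 * then sleep on arg.cv until eviction fires the callback, which proves
 * that every remaining hold on ds_dbuf has been released.
 */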
1480 
1481 static void
1482 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1483 {
1484 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1485 	uint64_t count;
1486 	int err;
1487 
1488 	ASSERT(ds->ds_phys->ds_num_children >= 2);
1489 	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1490 	/*
1491 	 * The err should not be ENOENT, but a bug in a previous version
1492 	 * of the code could cause upgrade_clones_cb() to not set
1493 	 * ds_next_snap_obj when it should, leading to a missing entry.
1494 	 * If we knew that the pool was created after
1495 	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1496 	 * ENOENT.  However, at least we can check that we don't have
1497 	 * too many entries in the next_clones_obj even after failing to
1498 	 * remove this one.
1499 	 */
1500 	if (err != ENOENT) {
1501 		VERIFY3U(err, ==, 0);
1502 	}
1503 	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1504 	    &count));
1505 	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1506 }
1507 
1508 static void
1509 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1510 {
1511 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1512 	zap_cursor_t zc;
1513 	zap_attribute_t za;
1514 
1515 	/*
1516 	 * If it is the old version, dd_clones doesn't exist so we can't
1517 	 * find the clones, but deadlist_remove_key() is a no-op so it
1518 	 * doesn't matter.
1519 	 */
1520 	if (ds->ds_dir->dd_phys->dd_clones == 0)
1521 		return;
1522 
1523 	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1524 	    zap_cursor_retrieve(&zc, &za) == 0;
1525 	    zap_cursor_advance(&zc)) {
1526 		dsl_dataset_t *clone;
1527 
1528 		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1529 		    za.za_first_integer, FTAG, &clone));
1530 		if (clone->ds_dir->dd_origin_txg > mintxg) {
1531 			dsl_deadlist_remove_key(&clone->ds_deadlist,
1532 			    mintxg, tx);
1533 			dsl_dataset_remove_clones_key(clone, mintxg, tx);
1534 		}
1535 		dsl_dataset_rele(clone, FTAG);
1536 	}
1537 	zap_cursor_fini(&zc);
1538 }
1539 
1540 struct process_old_arg {
1541 	dsl_dataset_t *ds;
1542 	dsl_dataset_t *ds_prev;
1543 	boolean_t after_branch_point;
1544 	zio_t *pio;
1545 	uint64_t used, comp, uncomp;
1546 };
1547 
1548 static int
1549 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1550 {
1551 	struct process_old_arg *poa = arg;
1552 	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1553 
1554 	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1555 		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1556 		if (poa->ds_prev && !poa->after_branch_point &&
1557 		    bp->blk_birth >
1558 		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1559 			poa->ds_prev->ds_phys->ds_unique_bytes +=
1560 			    bp_get_dsize_sync(dp->dp_spa, bp);
1561 		}
1562 	} else {
1563 		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1564 		poa->comp += BP_GET_PSIZE(bp);
1565 		poa->uncomp += BP_GET_UCSIZE(bp);
1566 		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1567 	}
1568 	return (0);
1569 }
1570 
1571 static void
1572 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1573     dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1574 {
1575 	struct process_old_arg poa = { 0 };
1576 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1577 	objset_t *mos = dp->dp_meta_objset;
1578 
1579 	ASSERT(ds->ds_deadlist.dl_oldfmt);
1580 	ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1581 
1582 	poa.ds = ds;
1583 	poa.ds_prev = ds_prev;
1584 	poa.after_branch_point = after_branch_point;
1585 	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1586 	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1587 	    process_old_cb, &poa, tx));
1588 	VERIFY3U(zio_wait(poa.pio), ==, 0);
1589 	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1590 
1591 	/* change snapused */
1592 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1593 	    -poa.used, -poa.comp, -poa.uncomp, tx);
1594 
1595 	/* swap next's deadlist to our deadlist */
1596 	dsl_deadlist_close(&ds->ds_deadlist);
1597 	dsl_deadlist_close(&ds_next->ds_deadlist);
1598 	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1599 	    ds->ds_phys->ds_deadlist_obj);
1600 	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1601 	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1602 	    ds_next->ds_phys->ds_deadlist_obj);
1603 }
1604 
1605 static int
1606 old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
1607 {
1608 	int err;
1609 	struct killarg ka;
1610 
1611 	/*
1612 	 * Free everything that we point to (that's born after
1613 	 * the previous snapshot, if we are a clone)
1614 	 *
1615 	 * NB: this should be very quick, because we already
1616 	 * freed all the objects in open context.
1617 	 */
1618 	ka.ds = ds;
1619 	ka.tx = tx;
1620 	err = traverse_dataset(ds,
1621 	    ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
1622 	    kill_blkptr, &ka);
1623 	ASSERT3U(err, ==, 0);
1624 	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
1625 
1626 	return (err);
1627 }
1628 
1629 void
1630 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1631 {
1632 	struct dsl_ds_destroyarg *dsda = arg1;
1633 	dsl_dataset_t *ds = dsda->ds;
1634 	int err;
1635 	int after_branch_point = FALSE;
1636 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1637 	objset_t *mos = dp->dp_meta_objset;
1638 	dsl_dataset_t *ds_prev = NULL;
1639 	boolean_t wont_destroy;
1640 	uint64_t obj;
1641 
1642 	wont_destroy = (dsda->defer &&
1643 	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1644 
1645 	ASSERT(ds->ds_owner || wont_destroy);
1646 	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1647 	ASSERT(ds->ds_prev == NULL ||
1648 	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1649 	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1650 
1651 	if (wont_destroy) {
1652 		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1653 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
1654 		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1655 		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
1656 		return;
1657 	}
1658 
1659 	/* We need to log before removing it from the namespace. */
1660 	spa_history_log_internal_ds(ds, "destroy", tx, "");
1661 
1662 	/* signal any waiters that this dataset is going away */
1663 	mutex_enter(&ds->ds_lock);
1664 	ds->ds_owner = dsl_reaper;
1665 	cv_broadcast(&ds->ds_exclusive_cv);
1666 	mutex_exit(&ds->ds_lock);
1667 
1668 	/* Remove our reservation */
1669 	if (ds->ds_reserved != 0) {
1670 		dsl_prop_setarg_t psa;
1671 		uint64_t value = 0;
1672 
1673 		dsl_prop_setarg_init_uint64(&psa, "refreservation",
1674 		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1675 		    &value);
1676 		psa.psa_effective_value = 0;	/* predict default value */
1677 
1678 		dsl_dataset_set_reservation_sync(ds, &psa, tx);
1679 		ASSERT3U(ds->ds_reserved, ==, 0);
1680 	}
1681 
1682 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1683 
1684 	dsl_scan_ds_destroyed(ds, tx);
1685 
1686 	obj = ds->ds_object;
1687 
1688 	if (ds->ds_phys->ds_prev_snap_obj != 0) {
1689 		if (ds->ds_prev) {
1690 			ds_prev = ds->ds_prev;
1691 		} else {
1692 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1693 			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1694 		}
1695 		after_branch_point =
1696 		    (ds_prev->ds_phys->ds_next_snap_obj != obj);
1697 
1698 		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1699 		if (after_branch_point &&
1700 		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
1701 			remove_from_next_clones(ds_prev, obj, tx);
1702 			if (ds->ds_phys->ds_next_snap_obj != 0) {
1703 				VERIFY(0 == zap_add_int(mos,
1704 				    ds_prev->ds_phys->ds_next_clones_obj,
1705 				    ds->ds_phys->ds_next_snap_obj, tx));
1706 			}
1707 		}
1708 		if (after_branch_point &&
1709 		    ds->ds_phys->ds_next_snap_obj == 0) {
1710 			/* This clone is toast. */
1711 			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1712 			ds_prev->ds_phys->ds_num_children--;
1713 
1714 			/*
1715 			 * If the clone's origin has no other clones, no
1716 			 * user holds, and has been marked for deferred
1717 			 * deletion, then we should have done the necessary
1718 			 * destroy setup for it.
1719 			 */
1720 			if (ds_prev->ds_phys->ds_num_children == 1 &&
1721 			    ds_prev->ds_userrefs == 0 &&
1722 			    DS_IS_DEFER_DESTROY(ds_prev)) {
1723 				ASSERT3P(dsda->rm_origin, !=, NULL);
1724 			} else {
1725 				ASSERT3P(dsda->rm_origin, ==, NULL);
1726 			}
1727 		} else if (!after_branch_point) {
1728 			ds_prev->ds_phys->ds_next_snap_obj =
1729 			    ds->ds_phys->ds_next_snap_obj;
1730 		}
1731 	}
1732 
1733 	if (dsl_dataset_is_snapshot(ds)) {
1734 		dsl_dataset_t *ds_next;
1735 		uint64_t old_unique;
1736 		uint64_t used = 0, comp = 0, uncomp = 0;
1737 
1738 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1739 		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1740 		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1741 
1742 		old_unique = ds_next->ds_phys->ds_unique_bytes;
1743 
1744 		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1745 		ds_next->ds_phys->ds_prev_snap_obj =
1746 		    ds->ds_phys->ds_prev_snap_obj;
1747 		ds_next->ds_phys->ds_prev_snap_txg =
1748 		    ds->ds_phys->ds_prev_snap_txg;
1749 		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1750 		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1751 
1752 
1753 		if (ds_next->ds_deadlist.dl_oldfmt) {
1754 			process_old_deadlist(ds, ds_prev, ds_next,
1755 			    after_branch_point, tx);
1756 		} else {
1757 			/* Adjust prev's unique space. */
1758 			if (ds_prev && !after_branch_point) {
1759 				dsl_deadlist_space_range(&ds_next->ds_deadlist,
1760 				    ds_prev->ds_phys->ds_prev_snap_txg,
1761 				    ds->ds_phys->ds_prev_snap_txg,
1762 				    &used, &comp, &uncomp);
1763 				ds_prev->ds_phys->ds_unique_bytes += used;
1764 			}
1765 
1766 			/* Adjust snapused. */
1767 			dsl_deadlist_space_range(&ds_next->ds_deadlist,
1768 			    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1769 			    &used, &comp, &uncomp);
1770 			dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1771 			    -used, -comp, -uncomp, tx);
1772 
1773 			/* Move blocks to be freed to pool's free list. */
1774 			dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1775 			    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1776 			    tx);
1777 			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1778 			    DD_USED_HEAD, used, comp, uncomp, tx);
1779 
1780 			/* Merge our deadlist into next's and free it. */
1781 			dsl_deadlist_merge(&ds_next->ds_deadlist,
1782 			    ds->ds_phys->ds_deadlist_obj, tx);
1783 		}
1784 		dsl_deadlist_close(&ds->ds_deadlist);
1785 		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1786 
1787 		/* Collapse range in clone heads */
1788 		dsl_dataset_remove_clones_key(ds,
1789 		    ds->ds_phys->ds_creation_txg, tx);
1790 
1791 		if (dsl_dataset_is_snapshot(ds_next)) {
1792 			dsl_dataset_t *ds_nextnext;
1793 
1794 			/*
1795 			 * Update next's unique to include blocks which
1796 			 * were previously shared only by this snapshot
1797 			 * and ds_next.  Those blocks will be born after the
1798 			 * prev snap and before this snap, and will have
1799 			 * died after the next snap and before the one
1800 			 * after that (ie. be on the snap after next's
1801 			 * deadlist).
1802 			 */
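			/*
			 * A concrete timeline (hypothetical txgs): prev
			 * snap at txg 100, ds at 200, next at 300, and
			 * the snap after next at 400.  A block born at
			 * txg 150 and freed at txg 350 sits on the
			 * snap-after-next's deadlist; once ds is gone
			 * it is unique to ds_next.
			 */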
1803 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1804 			    ds_next->ds_phys->ds_next_snap_obj,
1805 			    FTAG, &ds_nextnext));
1806 			dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1807 			    ds->ds_phys->ds_prev_snap_txg,
1808 			    ds->ds_phys->ds_creation_txg,
1809 			    &used, &comp, &uncomp);
1810 			ds_next->ds_phys->ds_unique_bytes += used;
1811 			dsl_dataset_rele(ds_nextnext, FTAG);
1812 			ASSERT3P(ds_next->ds_prev, ==, NULL);
1813 
1814 			/* Collapse range in this head. */
1815 			dsl_dataset_t *hds;
1816 			VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1817 			    ds->ds_dir->dd_phys->dd_head_dataset_obj,
1818 			    FTAG, &hds));
1819 			dsl_deadlist_remove_key(&hds->ds_deadlist,
1820 			    ds->ds_phys->ds_creation_txg, tx);
1821 			dsl_dataset_rele(hds, FTAG);
1822 
1823 		} else {
1824 			ASSERT3P(ds_next->ds_prev, ==, ds);
1825 			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1826 			ds_next->ds_prev = NULL;
1827 			if (ds_prev) {
1828 				VERIFY(0 == dsl_dataset_get_ref(dp,
1829 				    ds->ds_phys->ds_prev_snap_obj,
1830 				    ds_next, &ds_next->ds_prev));
1831 			}
1832 
1833 			dsl_dataset_recalc_head_uniq(ds_next);
1834 
1835 			/*
1836 			 * Reduce the amount of our unconsumed refreservation
1837 			 * being charged to our parent by the amount of
1838 			 * new unique data we have gained.
1839 			 */
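			/*
			 * Illustrative numbers: with a 5G refreservation,
			 * old_unique = 1G and new_unique = 3G give
			 * mrsdelta = MIN(2G, 4G) = 2G, i.e. 2G less
			 * refreservation now charged to the parent.
			 */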
1840 			if (old_unique < ds_next->ds_reserved) {
1841 				int64_t mrsdelta;
1842 				uint64_t new_unique =
1843 				    ds_next->ds_phys->ds_unique_bytes;
1844 
1845 				ASSERT(old_unique <= new_unique);
1846 				mrsdelta = MIN(new_unique - old_unique,
1847 				    ds_next->ds_reserved - old_unique);
1848 				dsl_dir_diduse_space(ds->ds_dir,
1849 				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1850 			}
1851 		}
1852 		dsl_dataset_rele(ds_next, FTAG);
1853 	} else {
1854 		zfeature_info_t *async_destroy =
1855 		    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];
1856 		objset_t *os;
1857 
1858 		/*
1859 		 * There's no next snapshot, so this is a head dataset.
1860 		 * Destroy the deadlist.  Unless it's a clone, the
1861 		 * deadlist should be empty.  (If it's a clone, it's
1862 		 * safe to ignore the deadlist contents.)
1863 		 */
1864 		dsl_deadlist_close(&ds->ds_deadlist);
1865 		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1866 		ds->ds_phys->ds_deadlist_obj = 0;
1867 
1868 		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
1869 
1870 		if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
1871 			err = old_synchronous_dataset_destroy(ds, tx);
1872 		} else {
1873 			/*
1874 			 * Add this dataset's blocks to the pool's bptree of
1875 			 * things to clean up, and update space accounting.
1876 			 */
1877 			uint64_t used, comp, uncomp;
1878 
1879 			zil_destroy_sync(dmu_objset_zil(os), tx);
1880 
1881 			if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
1882 				spa_feature_incr(dp->dp_spa, async_destroy, tx);
1883 				dp->dp_bptree_obj = bptree_alloc(mos, tx);
1884 				VERIFY(zap_add(mos,
1885 				    DMU_POOL_DIRECTORY_OBJECT,
1886 				    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
1887 				    &dp->dp_bptree_obj, tx) == 0);
1888 			}
1889 
1890 			used = ds->ds_dir->dd_phys->dd_used_bytes;
1891 			comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
1892 			uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;
1893 
1894 			ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1895 			    ds->ds_phys->ds_unique_bytes == used);
1896 
1897 			bptree_add(mos, dp->dp_bptree_obj,
1898 			    &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
1899 			    used, comp, uncomp, tx);
1900 			dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
1901 			    -used, -comp, -uncomp, tx);
1902 			dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
1903 			    used, comp, uncomp, tx);
1904 		}
1905 
1906 		if (ds->ds_prev != NULL) {
1907 			if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1908 				VERIFY3U(0, ==, zap_remove_int(mos,
1909 				    ds->ds_prev->ds_dir->dd_phys->dd_clones,
1910 				    ds->ds_object, tx));
1911 			}
1912 			dsl_dataset_rele(ds->ds_prev, ds);
1913 			ds->ds_prev = ds_prev = NULL;
1914 		}
1915 	}
1916 
1917 	/*
1918 	 * This must be done after the dsl_traverse(), because it will
1919 	 * re-open the objset.
1920 	 */
1921 	if (ds->ds_objset) {
1922 		dmu_objset_evict(ds->ds_objset);
1923 		ds->ds_objset = NULL;
1924 	}
1925 
1926 	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1927 		/* Erase the link in the dir */
1928 		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1929 		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1930 		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1931 		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1932 		ASSERT(err == 0);
1933 	} else {
1934 		/* remove from snapshot namespace */
1935 		dsl_dataset_t *ds_head;
1936 		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1937 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1938 		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1939 		VERIFY(0 == dsl_dataset_get_snapname(ds));
1940 #ifdef ZFS_DEBUG
1941 		{
1942 			uint64_t val;
1943 
1944 			err = dsl_dataset_snap_lookup(ds_head,
1945 			    ds->ds_snapname, &val);
1946 			ASSERT3U(err, ==, 0);
1947 			ASSERT3U(val, ==, obj);
1948 		}
1949 #endif
1950 		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1951 		ASSERT(err == 0);
1952 		dsl_dataset_rele(ds_head, FTAG);
1953 	}
1954 
1955 	if (ds_prev && ds->ds_prev != ds_prev)
1956 		dsl_dataset_rele(ds_prev, FTAG);
1957 
1958 	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1959 
1960 	if (ds->ds_phys->ds_next_clones_obj != 0) {
1961 		uint64_t count;
1962 		ASSERT(0 == zap_count(mos,
1963 		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1964 		VERIFY(0 == dmu_object_free(mos,
1965 		    ds->ds_phys->ds_next_clones_obj, tx));
1966 	}
1967 	if (ds->ds_phys->ds_props_obj != 0)
1968 		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1969 	if (ds->ds_phys->ds_userrefs_obj != 0)
1970 		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1971 	dsl_dir_close(ds->ds_dir, ds);
1972 	ds->ds_dir = NULL;
1973 	dsl_dataset_drain_refs(ds, tag);
1974 	VERIFY(0 == dmu_object_free(mos, obj, tx));
1975 
1976 	if (dsda->rm_origin) {
1977 		/*
1978 		 * Remove the origin of the clone we just destroyed.
1979 		 */
1980 		struct dsl_ds_destroyarg ndsda = {0};
1981 
1982 		ndsda.ds = dsda->rm_origin;
1983 		dsl_dataset_destroy_sync(&ndsda, tag, tx);
1984 	}
1985 }
1986 
1987 static int
1988 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1989 {
1990 	uint64_t asize;
1991 
1992 	if (!dmu_tx_is_syncing(tx))
1993 		return (0);
1994 
1995 	/*
1996 	 * If there's an fs-only reservation, any blocks that might become
1997 	 * owned by the snapshot dataset must be accommodated by space
1998 	 * outside of the reservation.
1999 	 */
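	/*
	 * E.g. (illustrative): with a 10G refreservation and 4G of
	 * unique data, taking a snapshot hands those 4G to the
	 * snapshot, so 4G must be available outside the reservation.
	 */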
2000 	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
2001 	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2002 	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2003 		return (ENOSPC);
2004 
2005 	/*
2006 	 * Propagate any reserved space for this snapshot to other
2007 	 * snapshot checks in this sync group.
2008 	 */
2009 	if (asize > 0)
2010 		dsl_dir_willuse_space(ds->ds_dir, asize, tx);
2011 
2012 	return (0);
2013 }
2014 
2015 int
2016 dsl_dataset_snapshot_check(dsl_dataset_t *ds, const char *snapname,
2017     dmu_tx_t *tx)
2018 {
2019 	int err;
2020 	uint64_t value;
2021 
2022 	/*
2023 	 * We don't allow multiple snapshots of the same txg.  If there
2024 	 * is already one, try again.
2025 	 */
2026 	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2027 		return (EAGAIN);
2028 
2029 	/*
2030 	 * Check for conflicting snapshot name.
2031 	 */
2032 	err = dsl_dataset_snap_lookup(ds, snapname, &value);
2033 	if (err == 0)
2034 		return (EEXIST);
2035 	if (err != ENOENT)
2036 		return (err);
2037 
2038 	/*
2039 	 * Check that the snapshot's full name is not too long: the dataset
2040 	 * name's length + 1 for the @-sign + the snapshot name's length.
2041 	 */
2042 	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2043 		return (ENAMETOOLONG);
2044 
2045 	err = dsl_dataset_snapshot_reserve_space(ds, tx);
2046 	if (err)
2047 		return (err);
2048 
2049 	ds->ds_trysnap_txg = tx->tx_txg;
2050 	return (0);
2051 }
2052 
2053 void
2054 dsl_dataset_snapshot_sync(dsl_dataset_t *ds, const char *snapname,
2055     dmu_tx_t *tx)
2056 {
2057 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2058 	dmu_buf_t *dbuf;
2059 	dsl_dataset_phys_t *dsphys;
2060 	uint64_t dsobj, crtxg;
2061 	objset_t *mos = dp->dp_meta_objset;
2062 	int err;
2063 
2064 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2065 
2066 	/*
2067 	 * The origin's ds_creation_txg has to be < TXG_INITIAL
2068 	 */
2069 	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2070 		crtxg = 1;
2071 	else
2072 		crtxg = tx->tx_txg;
2073 
2074 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2075 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2076 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2077 	dmu_buf_will_dirty(dbuf, tx);
2078 	dsphys = dbuf->db_data;
2079 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
2080 	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2081 	dsphys->ds_fsid_guid = unique_create();
2082 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2083 	    sizeof (dsphys->ds_guid));
2084 	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2085 	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2086 	dsphys->ds_next_snap_obj = ds->ds_object;
2087 	dsphys->ds_num_children = 1;
2088 	dsphys->ds_creation_time = gethrestime_sec();
2089 	dsphys->ds_creation_txg = crtxg;
2090 	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2091 	dsphys->ds_referenced_bytes = ds->ds_phys->ds_referenced_bytes;
2092 	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2093 	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2094 	dsphys->ds_flags = ds->ds_phys->ds_flags;
2095 	dsphys->ds_bp = ds->ds_phys->ds_bp;
2096 	dmu_buf_rele(dbuf, FTAG);
2097 
2098 	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2099 	if (ds->ds_prev) {
2100 		uint64_t next_clones_obj =
2101 		    ds->ds_prev->ds_phys->ds_next_clones_obj;
2102 		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2103 		    ds->ds_object ||
2104 		    ds->ds_prev->ds_phys->ds_num_children > 1);
2105 		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2106 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2107 			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2108 			    ds->ds_prev->ds_phys->ds_creation_txg);
2109 			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2110 		} else if (next_clones_obj != 0) {
2111 			remove_from_next_clones(ds->ds_prev,
2112 			    dsphys->ds_next_snap_obj, tx);
2113 			VERIFY3U(0, ==, zap_add_int(mos,
2114 			    next_clones_obj, dsobj, tx));
2115 		}
2116 	}
2117 
2118 	/*
2119 	 * If we have a reference-reservation on this dataset, we will
2120 	 * need to increase the amount of refreservation being charged
2121 	 * since our unique space is going to zero.
2122 	 */
2123 	if (ds->ds_reserved) {
2124 		int64_t delta;
2125 		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2126 		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2127 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2128 		    delta, 0, 0, tx);
2129 	}
2130 
2131 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2132 	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2133 	    ds->ds_dir->dd_myname, snapname, dsobj,
2134 	    ds->ds_phys->ds_prev_snap_txg);
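	/*
	 * The new snapshot took over the existing deadlist object
	 * (assigned to dsphys above); give the head a fresh clone of
	 * it and add a key at the previous snapshot's txg, so the
	 * head's future frees remain bucketed per snapshot range.
	 */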
2135 	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2136 	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2137 	dsl_deadlist_close(&ds->ds_deadlist);
2138 	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2139 	dsl_deadlist_add_key(&ds->ds_deadlist,
2140 	    ds->ds_phys->ds_prev_snap_txg, tx);
2141 
2142 	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2143 	ds->ds_phys->ds_prev_snap_obj = dsobj;
2144 	ds->ds_phys->ds_prev_snap_txg = crtxg;
2145 	ds->ds_phys->ds_unique_bytes = 0;
2146 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2147 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2148 
2149 	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2150 	    snapname, 8, 1, &dsobj, tx);
2151 	ASSERT(err == 0);
2152 
2153 	if (ds->ds_prev)
2154 		dsl_dataset_drop_ref(ds->ds_prev, ds);
2155 	VERIFY(0 == dsl_dataset_get_ref(dp,
2156 	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2157 
2158 	dsl_scan_ds_snapshotted(ds, tx);
2159 
2160 	dsl_dir_snap_cmtime_update(ds->ds_dir);
2161 
2162 	spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, "");
2163 }
2164 
2165 void
2166 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2167 {
2168 	ASSERT(dmu_tx_is_syncing(tx));
2169 	ASSERT(ds->ds_objset != NULL);
2170 	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2171 
2172 	/*
2173 	 * In case we had to change ds_fsid_guid when we opened it,
2174 	 * sync it out now.
2175 	 */
2176 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2177 	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2178 
2179 	dmu_objset_sync(ds->ds_objset, zio, tx);
2180 }
2181 
2182 static void
2183 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2184 {
2185 	uint64_t count = 0;
2186 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2187 	zap_cursor_t zc;
2188 	zap_attribute_t za;
2189 	nvlist_t *propval;
2190 	nvlist_t *val;
2191 
2192 	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2193 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2194 	VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2195 
2196 	/*
2197 	 * There may be missing entries in ds_next_clones_obj
2198 	 * due to a bug in a previous version of the code.
2199 	 * Only trust it if it has the right number of entries.
2200 	 */
2201 	if (ds->ds_phys->ds_next_clones_obj != 0) {
2202 		VERIFY3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
2203 		    &count));	/* side effect must run on non-DEBUG builds */
2204 	}
2205 	if (count != ds->ds_phys->ds_num_children - 1) {
2206 		goto fail;
2207 	}
2208 	for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2209 	    zap_cursor_retrieve(&zc, &za) == 0;
2210 	    zap_cursor_advance(&zc)) {
2211 		dsl_dataset_t *clone;
2212 		char buf[ZFS_MAXNAMELEN];
2213 		/*
2214 		 * Even though we hold the dp_config_rwlock, the dataset
2215 		 * may fail to open, returning ENOENT.  If there is a
2216 		 * thread concurrently attempting to destroy this
2217 		 * dataset, it will have the ds_rwlock held for
2218 		 * RW_WRITER.  Our call to dsl_dataset_hold_obj() ->
2219 		 * dsl_dataset_hold_ref() will fail its
2220 		 * rw_tryenter(&ds->ds_rwlock, RW_READER), drop the
2221 		 * dp_config_rwlock, and wait for the destroy to progress
2222 		 * and signal ds_exclusive_cv.  If the destroy was
2223 		 * successful, we will see that
2224 		 * DSL_DATASET_IS_DESTROYED() is true, and return ENOENT.
2225 		 */
2226 		if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2227 		    za.za_first_integer, FTAG, &clone) != 0)
2228 			continue;
2229 		dsl_dir_name(clone->ds_dir, buf);
2230 		VERIFY(nvlist_add_boolean(val, buf) == 0);
2231 		dsl_dataset_rele(clone, FTAG);
2232 	}
2233 	zap_cursor_fini(&zc);
2234 	VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2235 	VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2236 	    propval) == 0);
2237 fail:
2238 	nvlist_free(val);
2239 	nvlist_free(propval);
2240 	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2241 }
2242 
2243 void
2244 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2245 {
2246 	uint64_t refd, avail, uobjs, aobjs, ratio;
2247 
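	/*
	 * The ratio is expressed as a percentage, e.g. 3G of data
	 * compressing to 1G yields 300, which userland renders as
	 * "3.00x".
	 */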
2248 	ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2249 	    (ds->ds_phys->ds_uncompressed_bytes * 100 /
2250 	    ds->ds_phys->ds_compressed_bytes);
2251 
2252 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
2253 
2254 	if (dsl_dataset_is_snapshot(ds)) {
2255 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2256 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2257 		    ds->ds_phys->ds_unique_bytes);
2258 		get_clones_stat(ds, nv);
2259 	} else {
2260 		dsl_dir_stats(ds->ds_dir, nv);
2261 	}
2262 
2263 	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2264 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2265 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2266 
2267 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2268 	    ds->ds_phys->ds_creation_time);
2269 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2270 	    ds->ds_phys->ds_creation_txg);
2271 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2272 	    ds->ds_quota);
2273 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2274 	    ds->ds_reserved);
2275 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2276 	    ds->ds_phys->ds_guid);
2277 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2278 	    ds->ds_phys->ds_unique_bytes);
2279 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2280 	    ds->ds_object);
2281 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2282 	    ds->ds_userrefs);
2283 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2284 	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2285 
2286 	if (ds->ds_phys->ds_prev_snap_obj != 0) {
2287 		uint64_t written, comp, uncomp;
2288 		dsl_pool_t *dp = ds->ds_dir->dd_pool;
2289 		dsl_dataset_t *prev;
2290 
2291 		rw_enter(&dp->dp_config_rwlock, RW_READER);
2292 		int err = dsl_dataset_hold_obj(dp,
2293 		    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2294 		rw_exit(&dp->dp_config_rwlock);
2295 		if (err == 0) {
2296 			err = dsl_dataset_space_written(prev, ds, &written,
2297 			    &comp, &uncomp);
2298 			dsl_dataset_rele(prev, FTAG);
2299 			if (err == 0) {
2300 				dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2301 				    written);
2302 			}
2303 		}
2304 	}
2305 
2306 }
2307 
2308 void
2309 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2310 {
2311 	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2312 	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2313 	stat->dds_guid = ds->ds_phys->ds_guid;
2314 	stat->dds_origin[0] = '\0';
2315 	if (dsl_dataset_is_snapshot(ds)) {
2316 		stat->dds_is_snapshot = B_TRUE;
2317 		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2318 	} else {
2319 		stat->dds_is_snapshot = B_FALSE;
2320 		stat->dds_num_clones = 0;
2321 
2322 		rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2323 		if (dsl_dir_is_clone(ds->ds_dir)) {
2324 			dsl_dataset_t *ods;
2325 
2326 			VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2327 			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2328 			dsl_dataset_name(ods, stat->dds_origin);
2329 			dsl_dataset_drop_ref(ods, FTAG);
2330 		}
2331 		rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2332 	}
2333 }
2334 
2335 uint64_t
2336 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2337 {
2338 	return (ds->ds_fsid_guid);
2339 }
2340 
2341 void
2342 dsl_dataset_space(dsl_dataset_t *ds,
2343     uint64_t *refdbytesp, uint64_t *availbytesp,
2344     uint64_t *usedobjsp, uint64_t *availobjsp)
2345 {
2346 	*refdbytesp = ds->ds_phys->ds_referenced_bytes;
2347 	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2348 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2349 		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2350 	if (ds->ds_quota != 0) {
2351 		/*
2352 		 * Adjust available bytes according to refquota
2353 		 */
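		/*
		 * E.g. (illustrative): with a 10G refquota and 8G
		 * referenced, at most 2G remains available; once
		 * referenced reaches the quota, available drops to 0.
		 */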
2354 		if (*refdbytesp < ds->ds_quota)
2355 			*availbytesp = MIN(*availbytesp,
2356 			    ds->ds_quota - *refdbytesp);
2357 		else
2358 			*availbytesp = 0;
2359 	}
2360 	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2361 	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
2362 }
2363 
2364 boolean_t
2365 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2366 {
2367 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2368 
2369 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2370 	    dsl_pool_sync_context(dp));
2371 	if (ds->ds_prev == NULL)
2372 		return (B_FALSE);
2373 	if (ds->ds_phys->ds_bp.blk_birth >
2374 	    ds->ds_prev->ds_phys->ds_creation_txg) {
2375 		objset_t *os, *os_prev;
2376 		/*
2377 		 * It may be that only the ZIL differs, because it was
2378 		 * reset in the head.  Don't count that as being
2379 		 * modified.
2380 		 */
2381 		if (dmu_objset_from_ds(ds, &os) != 0)
2382 			return (B_TRUE);
2383 		if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2384 			return (B_TRUE);
2385 		return (bcmp(&os->os_phys->os_meta_dnode,
2386 		    &os_prev->os_phys->os_meta_dnode,
2387 		    sizeof (os->os_phys->os_meta_dnode)) != 0);
2388 	}
2389 	return (B_FALSE);
2390 }
2391 
2392 /* ARGSUSED */
2393 static int
2394 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2395 {
2396 	dsl_dataset_t *ds = arg1;
2397 	char *newsnapname = arg2;
2398 	dsl_dir_t *dd = ds->ds_dir;
2399 	dsl_dataset_t *hds;
2400 	uint64_t val;
2401 	int err;
2402 
2403 	err = dsl_dataset_hold_obj(dd->dd_pool,
2404 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2405 	if (err)
2406 		return (err);
2407 
2408 	/* new name better not be in use */
2409 	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2410 	dsl_dataset_rele(hds, FTAG);
2411 
2412 	if (err == 0)
2413 		err = EEXIST;
2414 	else if (err == ENOENT)
2415 		err = 0;
2416 
2417 	/* dataset name + 1 for the "@" + the new snapshot name must fit */
2418 	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2419 		err = ENAMETOOLONG;
2420 
2421 	return (err);
2422 }
2423 
2424 static void
2425 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2426 {
2427 	dsl_dataset_t *ds = arg1;
2428 	const char *newsnapname = arg2;
2429 	dsl_dir_t *dd = ds->ds_dir;
2430 	objset_t *mos = dd->dd_pool->dp_meta_objset;
2431 	dsl_dataset_t *hds;
2432 	int err;
2433 
2434 	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2435 
2436 	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2437 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2438 
2439 	VERIFY(0 == dsl_dataset_get_snapname(ds));
2440 	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2441 	ASSERT3U(err, ==, 0);
2442 	mutex_enter(&ds->ds_lock);
2443 	(void) strcpy(ds->ds_snapname, newsnapname);
2444 	mutex_exit(&ds->ds_lock);
2445 	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2446 	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2447 	ASSERT3U(err, ==, 0);
2448 
2449 	spa_history_log_internal_ds(ds, "rename", tx,
2450 	    "-> @%s", newsnapname);
2451 	dsl_dataset_rele(hds, FTAG);
2452 }
2453 
2454 struct renamesnaparg {
2455 	dsl_sync_task_group_t *dstg;
2456 	char failed[MAXPATHLEN];
2457 	char *oldsnap;
2458 	char *newsnap;
2459 };
2460 
2461 static int
2462 dsl_snapshot_rename_one(const char *name, void *arg)
2463 {
2464 	struct renamesnaparg *ra = arg;
2465 	dsl_dataset_t *ds = NULL;
2466 	char *snapname;
2467 	int err;
2468 
2469 	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2470 	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2471 
2472 	/*
2473 	 * For recursive snapshot renames the parent won't be changing
2474 	 * so we just pass name for both the to and from arguments.
2475 	 */
2476 	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2477 	if (err != 0) {
2478 		strfree(snapname);
2479 		return (err == ENOENT ? 0 : err);
2480 	}
2481 
2482 #ifdef _KERNEL
2483 	/*
2484 	 * Any snapshot undergoing rename must first be unmounted.
2485 	 */
2486 	(void) zfs_unmount_snap(snapname, NULL);
2487 #endif
2488 	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2489 	strfree(snapname);
2490 	if (err != 0)
2491 		return (err == ENOENT ? 0 : err);
2492 
2493 	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2494 	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2495 
2496 	return (0);
2497 }
2498 
2499 static int
2500 dsl_recursive_rename(char *oldname, const char *newname)
2501 {
2502 	int err;
2503 	struct renamesnaparg *ra;
2504 	dsl_sync_task_t *dst;
2505 	spa_t *spa;
2506 	char *cp, *fsname = spa_strdup(oldname);
2507 	int len = strlen(oldname) + 1;
2508 
2509 	/* truncate the snapshot name to get the fsname */
2510 	cp = strchr(fsname, '@');
2511 	*cp = '\0';
2512 
2513 	err = spa_open(fsname, &spa, FTAG);
2514 	if (err) {
2515 		kmem_free(fsname, len);
2516 		return (err);
2517 	}
2518 	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2519 	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2520 
2521 	ra->oldsnap = strchr(oldname, '@') + 1;
2522 	ra->newsnap = strchr(newname, '@') + 1;
2523 	*ra->failed = '\0';
2524 
2525 	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2526 	    DS_FIND_CHILDREN);
2527 	kmem_free(fsname, len);
2528 
2529 	if (err == 0) {
2530 		err = dsl_sync_task_group_wait(ra->dstg);
2531 	}
2532 
2533 	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2534 	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2535 		dsl_dataset_t *ds = dst->dst_arg1;
2536 		if (dst->dst_err) {
2537 			dsl_dir_name(ds->ds_dir, ra->failed);
2538 			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
2539 			(void) strlcat(ra->failed, ra->newsnap,
2540 			    sizeof (ra->failed));
2541 		}
2542 		dsl_dataset_rele(ds, ra->dstg);
2543 	}
2544 
2545 	if (err)
2546 		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2547 
2548 	dsl_sync_task_group_destroy(ra->dstg);
2549 	kmem_free(ra, sizeof (struct renamesnaparg));
2550 	spa_close(spa, FTAG);
2551 	return (err);
2552 }
2553 
2554 static int
2555 dsl_valid_rename(const char *oldname, void *arg)
2556 {
2557 	int delta = *(int *)arg;
2558 
2559 	if (strlen(oldname) + delta >= MAXNAMELEN)
2560 		return (ENAMETOOLONG);
2561 
2562 	return (0);
2563 }
2564 
2565 #pragma weak dmu_objset_rename = dsl_dataset_rename
2566 int
2567 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2568 {
2569 	dsl_dir_t *dd;
2570 	dsl_dataset_t *ds;
2571 	const char *tail;
2572 	int err;
2573 
2574 	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2575 	if (err)
2576 		return (err);
2577 
2578 	if (tail == NULL) {
2579 		int delta = strlen(newname) - strlen(oldname);
2580 
2581 		/* if we're growing, validate child name lengths */
2582 		if (delta > 0)
2583 			err = dmu_objset_find(oldname, dsl_valid_rename,
2584 			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2585 
2586 		if (err == 0)
2587 			err = dsl_dir_rename(dd, newname);
2588 		dsl_dir_close(dd, FTAG);
2589 		return (err);
2590 	}
2591 
2592 	if (tail[0] != '@') {
2593 		/* the name ended in a nonexistent component */
2594 		dsl_dir_close(dd, FTAG);
2595 		return (ENOENT);
2596 	}
2597 
2598 	dsl_dir_close(dd, FTAG);
2599 
2600 	/* the new name must be a snapshot in the same filesystem */
2601 	tail = strchr(newname, '@');
2602 	if (tail == NULL)
2603 		return (EINVAL);
2604 	tail++;
2605 	if (strncmp(oldname, newname, tail - newname) != 0)
2606 		return (EXDEV);
2607 
2608 	if (recursive) {
2609 		err = dsl_recursive_rename(oldname, newname);
2610 	} else {
2611 		err = dsl_dataset_hold(oldname, FTAG, &ds);
2612 		if (err)
2613 			return (err);
2614 
2615 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2616 		    dsl_dataset_snapshot_rename_check,
2617 		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2618 
2619 		dsl_dataset_rele(ds, FTAG);
2620 	}
2621 
2622 	return (err);
2623 }
2624 
2625 struct promotenode {
2626 	list_node_t link;
2627 	dsl_dataset_t *ds;
2628 };
2629 
2630 struct promotearg {
2631 	list_t shared_snaps, origin_snaps, clone_snaps;
2632 	dsl_dataset_t *origin_origin;
2633 	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2634 	char *err_ds;
2635 };
2636 
2637 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2638 static boolean_t snaplist_unstable(list_t *l);
2639 
2640 static int
2641 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2642 {
2643 	dsl_dataset_t *hds = arg1;
2644 	struct promotearg *pa = arg2;
2645 	struct promotenode *snap = list_head(&pa->shared_snaps);
2646 	dsl_dataset_t *origin_ds = snap->ds;
2647 	int err;
2648 	uint64_t unused;
2649 
2650 	/* Check that it is a real clone */
2651 	if (!dsl_dir_is_clone(hds->ds_dir))
2652 		return (EINVAL);
2653 
2654 	/* Since this is so expensive, don't do the preliminary check */
2655 	if (!dmu_tx_is_syncing(tx))
2656 		return (0);
2657 
2658 	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2659 		return (EXDEV);
2660 
2661 	/* compute origin's new unique space */
2662 	snap = list_tail(&pa->clone_snaps);
2663 	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2664 	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2665 	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2666 	    &pa->unique, &unused, &unused);
2667 
2668 	/*
2669 	 * Walk the snapshots that we are moving
2670 	 *
2671 	 * Compute space to transfer.  Consider the incremental changes
2672 	 * to used for each snapshot:
2673 	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2674 	 * So each snapshot gave birth to:
2675 	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2676 	 * So a sequence would look like:
2677 	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2678 	 * Which simplifies to:
2679 	 * uN + kN + k(N-1) + ... + k1 + k0
2680 	 * Note however, if we stop before we reach the ORIGIN we get:
2681 	 * uN + kN + k(N-1) + ... + kM - u(M-1)
2682 	 */
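	/*
	 * Illustrative numbers: moving three snapshots with uN = 5G
	 * where each deadlist holds 1G of killed blocks transfers
	 * 5G + 3 * 1G = 8G.
	 */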
2683 	pa->used = origin_ds->ds_phys->ds_referenced_bytes;
2684 	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2685 	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2686 	for (snap = list_head(&pa->shared_snaps); snap;
2687 	    snap = list_next(&pa->shared_snaps, snap)) {
2688 		uint64_t val, dlused, dlcomp, dluncomp;
2689 		dsl_dataset_t *ds = snap->ds;
2690 
2691 		/* Check that the snapshot name does not conflict */
2692 		VERIFY(0 == dsl_dataset_get_snapname(ds));
2693 		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2694 		if (err == 0) {
2695 			err = EEXIST;
2696 			goto out;
2697 		}
2698 		if (err != ENOENT)
2699 			goto out;
2700 
2701 		/* The very first snapshot does not have a deadlist */
2702 		if (ds->ds_phys->ds_prev_snap_obj == 0)
2703 			continue;
2704 
2705 		dsl_deadlist_space(&ds->ds_deadlist,
2706 		    &dlused, &dlcomp, &dluncomp);
2707 		pa->used += dlused;
2708 		pa->comp += dlcomp;
2709 		pa->uncomp += dluncomp;
2710 	}
2711 
2712 	/*
2713 	 * If we are a clone of a clone then we never reached ORIGIN,
2714 	 * so we need to subtract out the clone origin's used space.
2715 	 */
2716 	if (pa->origin_origin) {
2717 		pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
2718 		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2719 		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2720 	}
2721 
2722 	/* Check that there is enough space here */
2723 	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2724 	    pa->used);
2725 	if (err)
2726 		return (err);
2727 
2728 	/*
2729 	 * Compute the amounts of space that will be used by snapshots
2730 	 * after the promotion (for both origin and clone).  For each,
2731 	 * it is the amount of space that will be on all of their
2732 	 * deadlists (that was not born before their new origin).
2733 	 */
2734 	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2735 		uint64_t space;
2736 
2737 		/*
2738 		 * Note, typically this will not be a clone of a clone,
2739 		 * so dd_origin_txg will be < TXG_INITIAL, so
2740 		 * these snaplist_space() -> dsl_deadlist_space_range()
2741 		 * calls will be fast because they do not have to
2742 		 * iterate over all bps.
2743 		 */
2744 		snap = list_head(&pa->origin_snaps);
2745 		err = snaplist_space(&pa->shared_snaps,
2746 		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2747 		if (err)
2748 			return (err);
2749 
2750 		err = snaplist_space(&pa->clone_snaps,
2751 		    snap->ds->ds_dir->dd_origin_txg, &space);
2752 		if (err)
2753 			return (err);
2754 		pa->cloneusedsnap += space;
2755 	}
2756 	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2757 		err = snaplist_space(&pa->origin_snaps,
2758 		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2759 		if (err)
2760 			return (err);
2761 	}
2762 
2763 	return (0);
2764 out:
2765 	pa->err_ds = snap->ds->ds_snapname;
2766 	return (err);
2767 }
2768 
2769 static void
2770 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2771 {
2772 	dsl_dataset_t *hds = arg1;
2773 	struct promotearg *pa = arg2;
2774 	struct promotenode *snap = list_head(&pa->shared_snaps);
2775 	dsl_dataset_t *origin_ds = snap->ds;
2776 	dsl_dataset_t *origin_head;
2777 	dsl_dir_t *dd = hds->ds_dir;
2778 	dsl_pool_t *dp = hds->ds_dir->dd_pool;
2779 	dsl_dir_t *odd = NULL;
2780 	uint64_t oldnext_obj;
2781 	int64_t delta;
2782 
2783 	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2784 
2785 	snap = list_head(&pa->origin_snaps);
2786 	origin_head = snap->ds;
2787 
2788 	/*
2789 	 * We need to explicitly open odd, since origin_ds's dd will be
2790 	 * changing.
2791 	 */
2792 	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2793 	    NULL, FTAG, &odd));
2794 
2795 	/* change origin's next snap */
2796 	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2797 	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2798 	snap = list_tail(&pa->clone_snaps);
2799 	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2800 	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2801 
2802 	/* change the origin's next clone */
2803 	if (origin_ds->ds_phys->ds_next_clones_obj) {
2804 		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2805 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2806 		    origin_ds->ds_phys->ds_next_clones_obj,
2807 		    oldnext_obj, tx));
2808 	}
2809 
2810 	/* change origin */
2811 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
2812 	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2813 	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2814 	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2815 	dmu_buf_will_dirty(odd->dd_dbuf, tx);
2816 	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2817 	origin_head->ds_dir->dd_origin_txg =
2818 	    origin_ds->ds_phys->ds_creation_txg;
2819 
2820 	/* change dd_clone entries */
2821 	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2822 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2823 		    odd->dd_phys->dd_clones, hds->ds_object, tx));
2824 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2825 		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
2826 		    hds->ds_object, tx));
2827 
2828 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2829 		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
2830 		    origin_head->ds_object, tx));
2831 		if (dd->dd_phys->dd_clones == 0) {
2832 			dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2833 			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2834 		}
2835 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2836 		    dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2837 
2838 	}
2839 
2840 	/* move snapshots to this dir */
2841 	for (snap = list_head(&pa->shared_snaps); snap;
2842 	    snap = list_next(&pa->shared_snaps, snap)) {
2843 		dsl_dataset_t *ds = snap->ds;
2844 
2845 		/* unregister props as dsl_dir is changing */
2846 		if (ds->ds_objset) {
2847 			dmu_objset_evict(ds->ds_objset);
2848 			ds->ds_objset = NULL;
2849 		}
2850 		/* move snap name entry */
2851 		VERIFY(0 == dsl_dataset_get_snapname(ds));
2852 		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2853 		    ds->ds_snapname, tx));
2854 		VERIFY(0 == zap_add(dp->dp_meta_objset,
2855 		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2856 		    8, 1, &ds->ds_object, tx));
2857 
2858 		/* change containing dsl_dir */
2859 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
2860 		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2861 		ds->ds_phys->ds_dir_obj = dd->dd_object;
2862 		ASSERT3P(ds->ds_dir, ==, odd);
2863 		dsl_dir_close(ds->ds_dir, ds);
2864 		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2865 		    NULL, ds, &ds->ds_dir));
2866 
2867 		/* move any clone references */
2868 		if (ds->ds_phys->ds_next_clones_obj &&
2869 		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2870 			zap_cursor_t zc;
2871 			zap_attribute_t za;
2872 
2873 			for (zap_cursor_init(&zc, dp->dp_meta_objset,
2874 			    ds->ds_phys->ds_next_clones_obj);
2875 			    zap_cursor_retrieve(&zc, &za) == 0;
2876 			    zap_cursor_advance(&zc)) {
2877 				dsl_dataset_t *cnds;
2878 				uint64_t o;
2879 
2880 				if (za.za_first_integer == oldnext_obj) {
2881 					/*
2882 					 * We've already moved the
2883 					 * origin's reference.
2884 					 */
2885 					continue;
2886 				}
2887 
2888 				VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2889 				    za.za_first_integer, FTAG, &cnds));
2890 				o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2891 
2892 				VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2893 				    odd->dd_phys->dd_clones, o, tx), ==, 0);
2894 				VERIFY3U(zap_add_int(dp->dp_meta_objset,
2895 				    dd->dd_phys->dd_clones, o, tx), ==, 0);
2896 				dsl_dataset_rele(cnds, FTAG);
2897 			}
2898 			zap_cursor_fini(&zc);
2899 		}
2900 
2901 		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2902 	}
2903 
2904 	/*
2905 	 * Change space accounting.
2906 	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2907 	 * both be valid, or both be 0 (resulting in delta == 0).  This
2908 	 * is true for each of {clone,origin} independently.
2909 	 */
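	/*
	 * E.g. (illustrative): if the clone will now account 4G of
	 * snapshot space and currently accounts none, delta = 4G is
	 * charged to DD_USED_SNAP and the rest of the transferred
	 * space (pa->used - delta) to DD_USED_HEAD.
	 */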
2910 
2911 	delta = pa->cloneusedsnap -
2912 	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2913 	ASSERT3S(delta, >=, 0);
2914 	ASSERT3U(pa->used, >=, delta);
2915 	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2916 	dsl_dir_diduse_space(dd, DD_USED_HEAD,
2917 	    pa->used - delta, pa->comp, pa->uncomp, tx);
2918 
2919 	delta = pa->originusedsnap -
2920 	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2921 	ASSERT3S(delta, <=, 0);
2922 	ASSERT3U(pa->used, >=, -delta);
2923 	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2924 	dsl_dir_diduse_space(odd, DD_USED_HEAD,
2925 	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2926 
2927 	origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2928 
2929 	/* log history record */
2930 	spa_history_log_internal_ds(hds, "promote", tx, "");
2931 
2932 	dsl_dir_close(odd, FTAG);
2933 }
2934 
2935 static char *snaplist_tag = "snaplist";
2936 /*
2937  * Make a list of dsl_dataset_t's for the snapshots between first_obj
2938  * (exclusive) and last_obj (inclusive).  The list will be in reverse
2939  * order (last_obj will be the list_head()).  If first_obj == 0, do all
2940  * snapshots back to this dataset's origin.
2941  */
2942 static int
2943 snaplist_make(dsl_pool_t *dp, boolean_t own,
2944     uint64_t first_obj, uint64_t last_obj, list_t *l)
2945 {
2946 	uint64_t obj = last_obj;
2947 
2948 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2949 
2950 	list_create(l, sizeof (struct promotenode),
2951 	    offsetof(struct promotenode, link));
2952 
2953 	while (obj != first_obj) {
2954 		dsl_dataset_t *ds;
2955 		struct promotenode *snap;
2956 		int err;
2957 
2958 		if (own) {
2959 			err = dsl_dataset_own_obj(dp, obj,
2960 			    0, snaplist_tag, &ds);
2961 			if (err == 0)
2962 				dsl_dataset_make_exclusive(ds, snaplist_tag);
2963 		} else {
2964 			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2965 		}
2966 		if (err == ENOENT) {
2967 			/* lost race with snapshot destroy */
2968 			struct promotenode *last = list_tail(l);
2969 			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2970 			obj = last->ds->ds_phys->ds_prev_snap_obj;
2971 			continue;
2972 		} else if (err) {
2973 			return (err);
2974 		}
2975 
2976 		if (first_obj == 0)
2977 			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2978 
2979 		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2980 		snap->ds = ds;
2981 		list_insert_tail(l, snap);
2982 		obj = ds->ds_phys->ds_prev_snap_obj;
2983 	}
2984 
2985 	return (0);
2986 }
2987 
2988 static int
2989 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2990 {
2991 	struct promotenode *snap;
2992 
2993 	*spacep = 0;
2994 	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2995 		uint64_t used, comp, uncomp;
2996 		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2997 		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
2998 		*spacep += used;
2999 	}
3000 	return (0);
3001 }
3002 
3003 static void
3004 snaplist_destroy(list_t *l, boolean_t own)
3005 {
3006 	struct promotenode *snap;
3007 
3008 	if (!l || !list_link_active(&l->list_head))
3009 		return;
3010 
3011 	while ((snap = list_tail(l)) != NULL) {
3012 		list_remove(l, snap);
3013 		if (own)
3014 			dsl_dataset_disown(snap->ds, snaplist_tag);
3015 		else
3016 			dsl_dataset_rele(snap->ds, snaplist_tag);
3017 		kmem_free(snap, sizeof (struct promotenode));
3018 	}
3019 	list_destroy(l);
3020 }
3021 
3022 /*
3023  * Promote a clone.  Nomenclature note:
3024  * "clone" or "cds": the original clone which is being promoted
3025  * "origin" or "ods": the snapshot which is originally clone's origin
3026  * "origin head" or "ohds": the dataset which is the head
3027  * (filesystem/volume) for the origin
3028  * "origin origin": the origin of the origin's filesystem (typically
3029  * NULL, indicating that the clone is not a clone of a clone).
3030  */
3031 int
3032 dsl_dataset_promote(const char *name, char *conflsnap)
3033 {
3034 	dsl_dataset_t *ds;
3035 	dsl_dir_t *dd;
3036 	dsl_pool_t *dp;
3037 	dmu_object_info_t doi;
3038 	struct promotearg pa = { 0 };
3039 	struct promotenode *snap;
3040 	int err;
3041 
3042 	err = dsl_dataset_hold(name, FTAG, &ds);
3043 	if (err)
3044 		return (err);
3045 	dd = ds->ds_dir;
3046 	dp = dd->dd_pool;
3047 
3048 	err = dmu_object_info(dp->dp_meta_objset,
3049 	    ds->ds_phys->ds_snapnames_zapobj, &doi);
3050 	if (err) {
3051 		dsl_dataset_rele(ds, FTAG);
3052 		return (err);
3053 	}
3054 
3055 	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3056 		dsl_dataset_rele(ds, FTAG);
3057 		return (EINVAL);
3058 	}
3059 
3060 	/*
3061 	 * We are going to inherit all the snapshots taken before our
3062 	 * origin (i.e., our new origin will be our parent's origin).
3063 	 * Take ownership of them so that we can rename them into our
3064 	 * namespace.
3065 	 */
3066 	rw_enter(&dp->dp_config_rwlock, RW_READER);
3067 
3068 	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3069 	    &pa.shared_snaps);
3070 	if (err != 0)
3071 		goto out;
3072 
3073 	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3074 	if (err != 0)
3075 		goto out;
3076 
3077 	snap = list_head(&pa.shared_snaps);
3078 	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3079 	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3080 	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3081 	if (err != 0)
3082 		goto out;
3083 
3084 	if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3085 		err = dsl_dataset_hold_obj(dp,
3086 		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
3087 		    FTAG, &pa.origin_origin);
3088 		if (err != 0)
3089 			goto out;
3090 	}
3091 
3092 out:
3093 	rw_exit(&dp->dp_config_rwlock);
3094 
3095 	/*
3096 	 * Add in 128x the snapnames zapobj size, since we will be moving
3097 	 * a bunch of snapnames to the promoted ds, and dirtying their
3098 	 * bonus buffers.
3099 	 */
3100 	if (err == 0) {
3101 		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3102 		    dsl_dataset_promote_sync, ds, &pa,
3103 		    2 + 2 * doi.doi_physical_blocks_512);
3104 		if (err && pa.err_ds && conflsnap)
3105 			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3106 	}
3107 
3108 	snaplist_destroy(&pa.shared_snaps, B_TRUE);
3109 	snaplist_destroy(&pa.clone_snaps, B_FALSE);
3110 	snaplist_destroy(&pa.origin_snaps, B_FALSE);
3111 	if (pa.origin_origin)
3112 		dsl_dataset_rele(pa.origin_origin, FTAG);
3113 	dsl_dataset_rele(ds, FTAG);
3114 	return (err);
3115 }
3116 
3117 struct cloneswaparg {
3118 	dsl_dataset_t *cds; /* clone dataset */
3119 	dsl_dataset_t *ohds; /* origin's head dataset */
3120 	boolean_t force;
3121 	int64_t unused_refres_delta; /* change in unconsumed refreservation */
3122 };
3123 
3124 /* ARGSUSED */
3125 static int
3126 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3127 {
3128 	struct cloneswaparg *csa = arg1;
3129 
3130 	/* they should both be heads */
3131 	if (dsl_dataset_is_snapshot(csa->cds) ||
3132 	    dsl_dataset_is_snapshot(csa->ohds))
3133 		return (EINVAL);
3134 
3135 	/* the branch point should be just before them */
3136 	if (csa->cds->ds_prev != csa->ohds->ds_prev)
3137 		return (EINVAL);
3138 
3139 	/* cds should be the clone (unless they are unrelated) */
3140 	if (csa->cds->ds_prev != NULL &&
3141 	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3142 	    csa->ohds->ds_object !=
3143 	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3144 		return (EINVAL);
3145 
3146 	/* the clone should be a child of the origin */
3147 	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3148 		return (EINVAL);
3149 
3150 	/* ohds shouldn't be modified unless 'force' */
3151 	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3152 		return (ETXTBSY);
3153 
3154 	/* adjust amount of any unconsumed refreservation */
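	/*
	 * E.g. (illustrative): with a 10G refreservation, 8G unique
	 * in ohds and 2G unique in cds, the swap leaves 6G more of
	 * the reservation unconsumed, so delta = 8G - 2G = +6G must
	 * be charged (and must fit in the available space below).
	 */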
3155 	csa->unused_refres_delta =
3156 	    (int64_t)MIN(csa->ohds->ds_reserved,
3157 	    csa->ohds->ds_phys->ds_unique_bytes) -
3158 	    (int64_t)MIN(csa->ohds->ds_reserved,
3159 	    csa->cds->ds_phys->ds_unique_bytes);
3160 
3161 	if (csa->unused_refres_delta > 0 &&
3162 	    csa->unused_refres_delta >
3163 	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3164 		return (ENOSPC);
3165 
3166 	if (csa->ohds->ds_quota != 0 &&
3167 	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3168 		return (EDQUOT);
3169 
3170 	return (0);
3171 }
3172 
3173 /* ARGSUSED */
3174 static void
3175 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3176 {
3177 	struct cloneswaparg *csa = arg1;
3178 	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3179 
3180 	ASSERT(csa->cds->ds_reserved == 0);
3181 	ASSERT(csa->ohds->ds_quota == 0 ||
3182 	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3183 
3184 	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3185 	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3186 
3187 	if (csa->cds->ds_objset != NULL) {
3188 		dmu_objset_evict(csa->cds->ds_objset);
3189 		csa->cds->ds_objset = NULL;
3190 	}
3191 
3192 	if (csa->ohds->ds_objset != NULL) {
3193 		dmu_objset_evict(csa->ohds->ds_objset);
3194 		csa->ohds->ds_objset = NULL;
3195 	}
3196 
3197 	/*
3198 	 * Reset origin's unique bytes, if it exists.
3199 	 */
3200 	if (csa->cds->ds_prev) {
3201 		dsl_dataset_t *origin = csa->cds->ds_prev;
3202 		uint64_t comp, uncomp;
3203 
3204 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
3205 		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3206 		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3207 		    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3208 	}
3209 
3210 	/* swap blkptrs */
3211 	{
3212 		blkptr_t tmp;
3213 		tmp = csa->ohds->ds_phys->ds_bp;
3214 		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3215 		csa->cds->ds_phys->ds_bp = tmp;
3216 	}
3217 
3218 	/* set dd_*_bytes */
3219 	{
3220 		int64_t dused, dcomp, duncomp;
3221 		uint64_t cdl_used, cdl_comp, cdl_uncomp;
3222 		uint64_t odl_used, odl_comp, odl_uncomp;
3223 
3224 		ASSERT3U(csa->cds->ds_dir->dd_phys->
3225 		    dd_used_breakdown[DD_USED_SNAP], ==, 0);
3226 
3227 		dsl_deadlist_space(&csa->cds->ds_deadlist,
3228 		    &cdl_used, &cdl_comp, &cdl_uncomp);
3229 		dsl_deadlist_space(&csa->ohds->ds_deadlist,
3230 		    &odl_used, &odl_comp, &odl_uncomp);
3231 
3232 		dused = csa->cds->ds_phys->ds_referenced_bytes + cdl_used -
3233 		    (csa->ohds->ds_phys->ds_referenced_bytes + odl_used);
3234 		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3235 		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3236 		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3237 		    cdl_uncomp -
3238 		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3239 
3240 		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3241 		    dused, dcomp, duncomp, tx);
3242 		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3243 		    -dused, -dcomp, -duncomp, tx);
3244 
3245 		/*
3246 		 * The difference in the space used by snapshots is the
3247 		 * difference in snapshot space due to the head's
3248 		 * deadlist (since that's the only thing that's
3249 		 * changing that affects the snapused).
3250 		 */
3251 		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3252 		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3253 		    &cdl_used, &cdl_comp, &cdl_uncomp);
3254 		dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3255 		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3256 		    &odl_used, &odl_comp, &odl_uncomp);
3257 		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3258 		    DD_USED_HEAD, DD_USED_SNAP, tx);
3259 	}
3260 
3261 	/* swap ds_*_bytes */
3262 	SWITCH64(csa->ohds->ds_phys->ds_referenced_bytes,
3263 	    csa->cds->ds_phys->ds_referenced_bytes);
3264 	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3265 	    csa->cds->ds_phys->ds_compressed_bytes);
3266 	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3267 	    csa->cds->ds_phys->ds_uncompressed_bytes);
3268 	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3269 	    csa->cds->ds_phys->ds_unique_bytes);
3270 
3271 	/* apply any parent delta for change in unconsumed refreservation */
3272 	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3273 	    csa->unused_refres_delta, 0, 0, tx);
3274 
3275 	/*
3276 	 * Swap deadlists.
3277 	 */
3278 	dsl_deadlist_close(&csa->cds->ds_deadlist);
3279 	dsl_deadlist_close(&csa->ohds->ds_deadlist);
3280 	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3281 	    csa->cds->ds_phys->ds_deadlist_obj);
3282 	dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3283 	    csa->cds->ds_phys->ds_deadlist_obj);
3284 	dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3285 	    csa->ohds->ds_phys->ds_deadlist_obj);
3286 
3287 	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3288 
3289 	spa_history_log_internal_ds(csa->cds, "clone swap", tx,
3290 	    "parent=%s", csa->ohds->ds_dir->dd_myname);
3291 }
3292 
3293 /*
3294  * Swap 'clone' with its origin head dataset.  Used at the end of "zfs
3295  * recv" into an existing fs to swizzle the file system to the new
3296  * version, and by "zfs rollback".  Can also be used to swap two
3297  * independent head datasets if neither has any snapshots.
3298  */
3299 int
3300 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3301     boolean_t force)
3302 {
3303 	struct cloneswaparg csa;
3304 	int error;
3305 
3306 	ASSERT(clone->ds_owner);
3307 	ASSERT(origin_head->ds_owner);
3308 retry:
3309 	/*
3310 	 * Need exclusive access for the swap. If we're swapping these
3311 	 * datasets back after an error, we already hold the locks.
3312 	 */
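	/*
	 * Lock ordering: take one lock, then only try-enter the
	 * other; on failure drop everything and retry in the other
	 * order.  This avoids an A-B/B-A deadlock with a concurrent
	 * swap of the same pair.
	 */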
3313 	if (!RW_WRITE_HELD(&clone->ds_rwlock))
3314 		rw_enter(&clone->ds_rwlock, RW_WRITER);
3315 	if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3316 	    !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3317 		rw_exit(&clone->ds_rwlock);
3318 		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3319 		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3320 			rw_exit(&origin_head->ds_rwlock);
3321 			goto retry;
3322 		}
3323 	}
3324 	csa.cds = clone;
3325 	csa.ohds = origin_head;
3326 	csa.force = force;
3327 	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3328 	    dsl_dataset_clone_swap_check,
3329 	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3330 	return (error);
3331 }
3332 
3333 /*
3334  * Given a pool name and a dataset object number in that pool,
3335  * return the name of that dataset.
3336  */
3337 int
3338 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3339 {
3340 	spa_t *spa;
3341 	dsl_pool_t *dp;
3342 	dsl_dataset_t *ds;
3343 	int error;
3344 
3345 	if ((error = spa_open(pname, &spa, FTAG)) != 0)
3346 		return (error);
3347 	dp = spa_get_dsl(spa);
3348 	rw_enter(&dp->dp_config_rwlock, RW_READER);
3349 	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3350 		dsl_dataset_name(ds, buf);
3351 		dsl_dataset_rele(ds, FTAG);
3352 	}
3353 	rw_exit(&dp->dp_config_rwlock);
3354 	spa_close(spa, FTAG);
3355 
3356 	return (error);
3357 }
3358 
3359 int
3360 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3361     uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3362 {
3363 	int error = 0;
3364 
3365 	ASSERT3S(asize, >, 0);
3366 
3367 	/*
3368 	 * *ref_rsrv is the portion of asize that will come from any
3369 	 * unconsumed refreservation space.
3370 	 */
3371 	*ref_rsrv = 0;
3372 
3373 	mutex_enter(&ds->ds_lock);
3374 	/*
3375 	 * Make a space adjustment for reserved bytes.
3376 	 */
3377 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3378 		ASSERT3U(*used, >=,
3379 		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3380 		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
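		/*
		 * Whatever part of this allocation parent_delta()
		 * does not pass through to the dsl_dir is absorbed
		 * by the unconsumed refreservation; report that
		 * portion back via *ref_rsrv.
		 */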
3381 		*ref_rsrv =
3382 		    asize - MIN(asize, parent_delta(ds, asize + inflight));
3383 	}
3384 
3385 	if (!check_quota || ds->ds_quota == 0) {
3386 		mutex_exit(&ds->ds_lock);
3387 		return (0);
3388 	}
3389 	/*
3390 	 * If they are requesting more space, and our current estimate
3391 	 * is over quota, they get to try again unless the actual
3392 	 * on-disk is over quota and there are no pending changes (which
3393 	 * may free up space for us).
3394 	 */
3395 	if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
3396 		if (inflight > 0 ||
3397 		    ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
3398 			error = ERESTART;
3399 		else
3400 			error = EDQUOT;
3401 	}
3402 	mutex_exit(&ds->ds_lock);
3403 
3404 	return (error);
3405 }
3406 
3407 /* ARGSUSED */
3408 static int
3409 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3410 {
3411 	dsl_dataset_t *ds = arg1;
3412 	dsl_prop_setarg_t *psa = arg2;
3413 	int err;
3414 
3415 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3416 		return (ENOTSUP);
3417 
3418 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3419 		return (err);
3420 
3421 	if (psa->psa_effective_value == 0)
3422 		return (0);
3423 
3424 	if (psa->psa_effective_value < ds->ds_phys->ds_referenced_bytes ||
3425 	    psa->psa_effective_value < ds->ds_reserved)
3426 		return (ENOSPC);
3427 
3428 	return (0);
3429 }
3430 
3431 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3432 
3433 void
3434 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3435 {
3436 	dsl_dataset_t *ds = arg1;
3437 	dsl_prop_setarg_t *psa = arg2;
3438 	uint64_t effective_value = psa->psa_effective_value;
3439 
3440 	dsl_prop_set_sync(ds, psa, tx);
3441 	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3442 
3443 	if (ds->ds_quota != effective_value) {
3444 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3445 		ds->ds_quota = effective_value;
3446 	}
3447 }
3448 
3449 int
3450 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3451 {
3452 	dsl_dataset_t *ds;
3453 	dsl_prop_setarg_t psa;
3454 	int err;
3455 
3456 	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3457 
3458 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3459 	if (err)
3460 		return (err);
3461 
3462 	/*
3463 	 * If someone removes a file, then tries to set the quota, we
3464 	 * want to make sure the file freeing takes effect.
3465 	 */
3466 	txg_wait_open(ds->ds_dir->dd_pool, 0);
3467 
3468 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3469 	    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3470 	    ds, &psa, 0);
3471 
3472 	dsl_dataset_rele(ds, FTAG);
3473 	return (err);
3474 }
3475 
3476 static int
3477 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3478 {
3479 	dsl_dataset_t *ds = arg1;
3480 	dsl_prop_setarg_t *psa = arg2;
3481 	uint64_t effective_value;
3482 	uint64_t unique;
3483 	int err;
3484 
3485 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3486 	    SPA_VERSION_REFRESERVATION)
3487 		return (ENOTSUP);
3488 
3489 	if (dsl_dataset_is_snapshot(ds))
3490 		return (EINVAL);
3491 
3492 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3493 		return (err);
3494 
3495 	effective_value = psa->psa_effective_value;
3496 
3497 	/*
3498 	 * If we are doing the preliminary check in open context, the
3499 	 * space estimates may be inaccurate.
3500 	 */
3501 	if (!dmu_tx_is_syncing(tx))
3502 		return (0);
3503 
3504 	mutex_enter(&ds->ds_lock);
3505 	if (!DS_UNIQUE_IS_ACCURATE(ds))
3506 		dsl_dataset_recalc_head_uniq(ds);
3507 	unique = ds->ds_phys->ds_unique_bytes;
3508 	mutex_exit(&ds->ds_lock);
3509 
3510 	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3511 		uint64_t delta = MAX(unique, effective_value) -
3512 		    MAX(unique, ds->ds_reserved);
3513 
3514 		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3515 			return (ENOSPC);
3516 		if (ds->ds_quota > 0 &&
3517 		    effective_value > ds->ds_quota)
3518 			return (ENOSPC);
3519 	}
3520 
3521 	return (0);
3522 }
3523 
3524 static void
3525 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3526 {
3527 	dsl_dataset_t *ds = arg1;
3528 	dsl_prop_setarg_t *psa = arg2;
3529 	uint64_t effective_value = psa->psa_effective_value;
3530 	uint64_t unique;
3531 	int64_t delta;
3532 
3533 	dsl_prop_set_sync(ds, psa, tx);
3534 	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3535 
3536 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
3537 
3538 	mutex_enter(&ds->ds_dir->dd_lock);
3539 	mutex_enter(&ds->ds_lock);
3540 	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3541 	unique = ds->ds_phys->ds_unique_bytes;
3542 	delta = MAX(0, (int64_t)(effective_value - unique)) -
3543 	    MAX(0, (int64_t)(ds->ds_reserved - unique));
3544 	ds->ds_reserved = effective_value;
3545 	mutex_exit(&ds->ds_lock);
3546 
3547 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3548 	mutex_exit(&ds->ds_dir->dd_lock);
3549 }
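
/*
 * Worked example of the delta computed above (illustrative numbers): with
 * unique = 2G, an old reservation of 5G, and a new effective value of 3G,
 * delta = MAX(0, 3G - 2G) - MAX(0, 5G - 2G) = 1G - 3G = -2G, so 2G of
 * previously reserved space is returned to the dsl_dir accounting by
 * dsl_dir_diduse_space().
 */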
3550 
3551 int
3552 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3553     uint64_t reservation)
3554 {
3555 	dsl_dataset_t *ds;
3556 	dsl_prop_setarg_t psa;
3557 	int err;
3558 
3559 	dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3560 	    &reservation);
3561 
3562 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3563 	if (err)
3564 		return (err);
3565 
3566 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3567 	    dsl_dataset_set_reservation_check,
3568 	    dsl_dataset_set_reservation_sync, ds, &psa, 0);
3569 
3570 	dsl_dataset_rele(ds, FTAG);
3571 	return (err);
3572 }
3573 
3574 typedef struct zfs_hold_cleanup_arg {
3575 	dsl_pool_t *dp;
3576 	uint64_t dsobj;
3577 	char htag[MAXNAMELEN];
3578 } zfs_hold_cleanup_arg_t;
3579 
3580 static void
3581 dsl_dataset_user_release_onexit(void *arg)
3582 {
3583 	zfs_hold_cleanup_arg_t *ca = arg;
3584 
3585 	(void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3586 	    B_TRUE);
3587 	kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3588 }
3589 
3590 void
3591 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3592     minor_t minor)
3593 {
3594 	zfs_hold_cleanup_arg_t *ca;
3595 
3596 	ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3597 	ca->dp = ds->ds_dir->dd_pool;
3598 	ca->dsobj = ds->ds_object;
3599 	(void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3600 	VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3601 	    dsl_dataset_user_release_onexit, ca, NULL));
3602 }
3603 
/*
 * If you add new checks here, you may need to add
 * corresponding checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
3609 static int
3610 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3611 {
3612 	dsl_dataset_t *ds = arg1;
3613 	struct dsl_ds_holdarg *ha = arg2;
3614 	const char *htag = ha->htag;
3615 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3616 	int error = 0;
3617 
3618 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3619 		return (ENOTSUP);
3620 
3621 	if (!dsl_dataset_is_snapshot(ds))
3622 		return (EINVAL);
3623 
3624 	/* tags must be unique */
3625 	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj) {
		uint64_t tmp;

		error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
		    8, 1, &tmp);
3629 		if (error == 0)
3630 			error = EEXIST;
3631 		else if (error == ENOENT)
3632 			error = 0;
3633 	}
3634 	mutex_exit(&ds->ds_lock);
3635 
3636 	if (error == 0 && ha->temphold &&
3637 	    strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3638 		error = E2BIG;
3639 
3640 	return (error);
3641 }
3642 
3643 void
3644 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3645 {
3646 	dsl_dataset_t *ds = arg1;
3647 	struct dsl_ds_holdarg *ha = arg2;
3648 	const char *htag = ha->htag;
3649 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
3650 	objset_t *mos = dp->dp_meta_objset;
3651 	uint64_t now = gethrestime_sec();
3652 	uint64_t zapobj;
3653 
3654 	mutex_enter(&ds->ds_lock);
3655 	if (ds->ds_phys->ds_userrefs_obj == 0) {
3656 		/*
3657 		 * This is the first user hold for this dataset.  Create
3658 		 * the userrefs zap object.
3659 		 */
3660 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3661 		zapobj = ds->ds_phys->ds_userrefs_obj =
3662 		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3663 	} else {
3664 		zapobj = ds->ds_phys->ds_userrefs_obj;
3665 	}
3666 	ds->ds_userrefs++;
3667 	mutex_exit(&ds->ds_lock);
3668 
3669 	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3670 
3671 	if (ha->temphold) {
3672 		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3673 		    htag, &now, tx));
3674 	}
3675 
3676 	spa_history_log_internal_ds(ds, "hold", tx,
3677 	    "tag = %s temp = %d holds now = %llu",
3678 	    htag, (int)ha->temphold, ds->ds_userrefs);
3679 }
3680 
3681 static int
3682 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3683 {
3684 	struct dsl_ds_holdarg *ha = arg;
3685 	dsl_dataset_t *ds;
3686 	int error;
3687 	char *name;
3688 
	/* alloc a buffer to hold dsname@snapname plus the terminating NUL */
3690 	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3691 	error = dsl_dataset_hold(name, ha->dstg, &ds);
3692 	strfree(name);
3693 	if (error == 0) {
3694 		ha->gotone = B_TRUE;
3695 		dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3696 		    dsl_dataset_user_hold_sync, ds, ha, 0);
3697 	} else if (error == ENOENT && ha->recursive) {
3698 		error = 0;
3699 	} else {
3700 		(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3701 	}
3702 	return (error);
3703 }
3704 
3705 int
3706 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3707     boolean_t temphold)
3708 {
3709 	struct dsl_ds_holdarg *ha;
3710 	int error;
3711 
3712 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3713 	ha->htag = htag;
3714 	ha->temphold = temphold;
3715 	error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3716 	    dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3717 	    ds, ha, 0);
3718 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3719 
3720 	return (error);
3721 }
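
/*
 * Illustrative sketch (not part of the original source): how a send path
 * might pin the snapshot it is reading from.  The tag and the
 * ZFS_EXAMPLE_CODE guard are hypothetical.  Stale temporary holds (e.g.
 * left behind by a crash) are cleaned up at spa_load time; see
 * dsl_dataset_user_release_tmp().
 */
#ifdef ZFS_EXAMPLE_CODE
static int
example_hold_for_send(dsl_dataset_t *ds)
{
	char htag[] = ".send-example";

	return (dsl_dataset_user_hold_for_send(ds, htag, B_TRUE));
}
#endif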
3722 
3723 int
3724 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3725     boolean_t recursive, boolean_t temphold, int cleanup_fd)
3726 {
3727 	struct dsl_ds_holdarg *ha;
3728 	dsl_sync_task_t *dst;
3729 	spa_t *spa;
3730 	int error;
3731 	minor_t minor = 0;
3732 
3733 	if (cleanup_fd != -1) {
3734 		/* Currently we only support cleanup-on-exit of tempholds. */
3735 		if (!temphold)
3736 			return (EINVAL);
3737 		error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3738 		if (error)
3739 			return (error);
3740 	}
3741 
3742 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3743 
3744 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3745 
3746 	error = spa_open(dsname, &spa, FTAG);
3747 	if (error) {
3748 		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3749 		if (cleanup_fd != -1)
3750 			zfs_onexit_fd_rele(cleanup_fd);
3751 		return (error);
3752 	}
3753 
3754 	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3755 	ha->htag = htag;
3756 	ha->snapname = snapname;
3757 	ha->recursive = recursive;
3758 	ha->temphold = temphold;
3759 
3760 	if (recursive) {
3761 		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3762 		    ha, DS_FIND_CHILDREN);
3763 	} else {
3764 		error = dsl_dataset_user_hold_one(dsname, ha);
3765 	}
3766 	if (error == 0)
3767 		error = dsl_sync_task_group_wait(ha->dstg);
3768 
3769 	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3770 	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3771 		dsl_dataset_t *ds = dst->dst_arg1;
3772 
3773 		if (dst->dst_err) {
3774 			dsl_dataset_name(ds, ha->failed);
3775 			*strchr(ha->failed, '@') = '\0';
3776 		} else if (error == 0 && minor != 0 && temphold) {
3777 			/*
3778 			 * If this hold is to be released upon process exit,
3779 			 * register that action now.
3780 			 */
3781 			dsl_register_onexit_hold_cleanup(ds, htag, minor);
3782 		}
3783 		dsl_dataset_rele(ds, ha->dstg);
3784 	}
3785 
3786 	if (error == 0 && recursive && !ha->gotone)
3787 		error = ENOENT;
3788 
3789 	if (error)
3790 		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3791 
3792 	dsl_sync_task_group_destroy(ha->dstg);
3793 
3794 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3795 	spa_close(spa, FTAG);
3796 	if (cleanup_fd != -1)
3797 		zfs_onexit_fd_rele(cleanup_fd);
3798 	return (error);
3799 }
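
/*
 * Illustrative sketch (not part of the original source): taking a recursive
 * temporary hold, roughly what the "zfs hold -r" ioctl path does.  The
 * names, the tag, and the ZFS_EXAMPLE_CODE guard are hypothetical;
 * cleanup_fd is assumed to be a valid ZFS onexit file descriptor.
 */
#ifdef ZFS_EXAMPLE_CODE
static int
example_take_temphold(int cleanup_fd)
{
	char dsname[MAXNAMELEN] = "tank/fs";
	char snapname[MAXNAMELEN] = "snap";
	char htag[MAXNAMELEN] = ".example-tag";

	/*
	 * Hold tank/fs@snap and every descendant @snap; the holds are
	 * released automatically when cleanup_fd is closed.  On failure,
	 * dsname is overwritten with the name of the dataset that failed.
	 */
	return (dsl_dataset_user_hold(dsname, snapname, htag,
	    B_TRUE, B_TRUE, cleanup_fd));
}
#endif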
3800 
3801 struct dsl_ds_releasearg {
3802 	dsl_dataset_t *ds;
3803 	const char *htag;
3804 	boolean_t own;		/* do we own or just hold ds? */
3805 };
3806 
3807 static int
3808 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3809     boolean_t *might_destroy)
3810 {
3811 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3812 	uint64_t zapobj;
3813 	uint64_t tmp;
3814 	int error;
3815 
3816 	*might_destroy = B_FALSE;
3817 
3818 	mutex_enter(&ds->ds_lock);
3819 	zapobj = ds->ds_phys->ds_userrefs_obj;
3820 	if (zapobj == 0) {
3821 		/* The tag can't possibly exist */
3822 		mutex_exit(&ds->ds_lock);
3823 		return (ESRCH);
3824 	}
3825 
3826 	/* Make sure the tag exists */
3827 	error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3828 	if (error) {
3829 		mutex_exit(&ds->ds_lock);
3830 		if (error == ENOENT)
3831 			error = ESRCH;
3832 		return (error);
3833 	}
3834 
3835 	if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3836 	    DS_IS_DEFER_DESTROY(ds))
3837 		*might_destroy = B_TRUE;
3838 
3839 	mutex_exit(&ds->ds_lock);
3840 	return (0);
3841 }
3842 
3843 static int
3844 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3845 {
3846 	struct dsl_ds_releasearg *ra = arg1;
3847 	dsl_dataset_t *ds = ra->ds;
3848 	boolean_t might_destroy;
3849 	int error;
3850 
3851 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3852 		return (ENOTSUP);
3853 
3854 	error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3855 	if (error)
3856 		return (error);
3857 
3858 	if (might_destroy) {
3859 		struct dsl_ds_destroyarg dsda = {0};
3860 
3861 		if (dmu_tx_is_syncing(tx)) {
3862 			/*
3863 			 * If we're not prepared to remove the snapshot,
3864 			 * we can't allow the release to happen right now.
3865 			 */
3866 			if (!ra->own)
3867 				return (EBUSY);
3868 		}
3869 		dsda.ds = ds;
3870 		dsda.releasing = B_TRUE;
3871 		return (dsl_dataset_destroy_check(&dsda, tag, tx));
3872 	}
3873 
3874 	return (0);
3875 }
3876 
3877 static void
3878 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3879 {
3880 	struct dsl_ds_releasearg *ra = arg1;
3881 	dsl_dataset_t *ds = ra->ds;
3882 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
3883 	objset_t *mos = dp->dp_meta_objset;
3884 	uint64_t zapobj;
3885 	uint64_t refs;
3886 	int error;
3887 
3888 	mutex_enter(&ds->ds_lock);
3889 	ds->ds_userrefs--;
3890 	refs = ds->ds_userrefs;
3891 	mutex_exit(&ds->ds_lock);
3892 	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3893 	VERIFY(error == 0 || error == ENOENT);
3894 	zapobj = ds->ds_phys->ds_userrefs_obj;
3895 	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3896 
3897 	spa_history_log_internal_ds(ds, "release", tx,
3898 	    "tag = %s refs now = %lld", ra->htag, (longlong_t)refs);
3899 
3900 	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3901 	    DS_IS_DEFER_DESTROY(ds)) {
3902 		struct dsl_ds_destroyarg dsda = {0};
3903 
3904 		ASSERT(ra->own);
3905 		dsda.ds = ds;
3906 		dsda.releasing = B_TRUE;
3907 		/* We already did the destroy_check */
3908 		dsl_dataset_destroy_sync(&dsda, tag, tx);
3909 	}
3910 }
3911 
3912 static int
3913 dsl_dataset_user_release_one(const char *dsname, void *arg)
3914 {
3915 	struct dsl_ds_holdarg *ha = arg;
3916 	struct dsl_ds_releasearg *ra;
3917 	dsl_dataset_t *ds;
3918 	int error;
3919 	void *dtag = ha->dstg;
3920 	char *name;
3921 	boolean_t own = B_FALSE;
3922 	boolean_t might_destroy;
3923 
	/* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3925 	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3926 	error = dsl_dataset_hold(name, dtag, &ds);
3927 	strfree(name);
3928 	if (error == ENOENT && ha->recursive)
3929 		return (0);
3930 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3931 	if (error)
3932 		return (error);
3933 
3934 	ha->gotone = B_TRUE;
3935 
3936 	ASSERT(dsl_dataset_is_snapshot(ds));
3937 
3938 	error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3939 	if (error) {
3940 		dsl_dataset_rele(ds, dtag);
3941 		return (error);
3942 	}
3943 
3944 	if (might_destroy) {
3945 #ifdef _KERNEL
3946 		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3947 		error = zfs_unmount_snap(name, NULL);
3948 		strfree(name);
3949 		if (error) {
3950 			dsl_dataset_rele(ds, dtag);
3951 			return (error);
3952 		}
3953 #endif
3954 		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3955 			dsl_dataset_rele(ds, dtag);
3956 			return (EBUSY);
3957 		} else {
3958 			own = B_TRUE;
3959 			dsl_dataset_make_exclusive(ds, dtag);
3960 		}
3961 	}
3962 
3963 	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3964 	ra->ds = ds;
3965 	ra->htag = ha->htag;
3966 	ra->own = own;
3967 	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3968 	    dsl_dataset_user_release_sync, ra, dtag, 0);
3969 
3970 	return (0);
3971 }
3972 
3973 int
3974 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3975     boolean_t recursive)
3976 {
3977 	struct dsl_ds_holdarg *ha;
3978 	dsl_sync_task_t *dst;
3979 	spa_t *spa;
3980 	int error;
3981 
3982 top:
3983 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3984 
3985 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3986 
3987 	error = spa_open(dsname, &spa, FTAG);
3988 	if (error) {
3989 		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3990 		return (error);
3991 	}
3992 
3993 	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3994 	ha->htag = htag;
3995 	ha->snapname = snapname;
3996 	ha->recursive = recursive;
3997 	if (recursive) {
3998 		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3999 		    ha, DS_FIND_CHILDREN);
4000 	} else {
4001 		error = dsl_dataset_user_release_one(dsname, ha);
4002 	}
4003 	if (error == 0)
4004 		error = dsl_sync_task_group_wait(ha->dstg);
4005 
4006 	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
4007 	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
4008 		struct dsl_ds_releasearg *ra = dst->dst_arg1;
4009 		dsl_dataset_t *ds = ra->ds;
4010 
4011 		if (dst->dst_err)
4012 			dsl_dataset_name(ds, ha->failed);
4013 
4014 		if (ra->own)
4015 			dsl_dataset_disown(ds, ha->dstg);
4016 		else
4017 			dsl_dataset_rele(ds, ha->dstg);
4018 
4019 		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
4020 	}
4021 
4022 	if (error == 0 && recursive && !ha->gotone)
4023 		error = ENOENT;
4024 
4025 	if (error && error != EBUSY)
4026 		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
4027 
4028 	dsl_sync_task_group_destroy(ha->dstg);
4029 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
4030 	spa_close(spa, FTAG);
4031 
4032 	/*
4033 	 * We can get EBUSY if we were racing with deferred destroy and
4034 	 * dsl_dataset_user_release_check() hadn't done the necessary
4035 	 * open context setup.  We can also get EBUSY if we're racing
4036 	 * with destroy and that thread is the ds_owner.  Either way
4037 	 * the busy condition should be transient, and we should retry
4038 	 * the release operation.
4039 	 */
4040 	if (error == EBUSY)
4041 		goto top;
4042 
4043 	return (error);
4044 }
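
/*
 * Illustrative sketch (not part of the original source): releasing the
 * hold taken in the earlier hold example.  The names, the tag, and the
 * ZFS_EXAMPLE_CODE guard are hypothetical.
 */
#ifdef ZFS_EXAMPLE_CODE
static int
example_release_temphold(void)
{
	char dsname[MAXNAMELEN] = "tank/fs";
	char snapname[MAXNAMELEN] = "snap";
	char htag[MAXNAMELEN] = ".example-tag";

	/* On failure, dsname is overwritten with the name that failed. */
	return (dsl_dataset_user_release(dsname, snapname, htag, B_TRUE));
}
#endif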
4045 
4046 /*
4047  * Called at spa_load time (with retry == B_FALSE) to release a stale
4048  * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4049  */
4050 int
4051 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4052     boolean_t retry)
4053 {
4054 	dsl_dataset_t *ds;
4055 	char *snap;
4056 	char *name;
4057 	int namelen;
4058 	int error;
4059 
4060 	do {
4061 		rw_enter(&dp->dp_config_rwlock, RW_READER);
4062 		error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4063 		rw_exit(&dp->dp_config_rwlock);
4064 		if (error)
4065 			return (error);
4066 		namelen = dsl_dataset_namelen(ds)+1;
4067 		name = kmem_alloc(namelen, KM_SLEEP);
4068 		dsl_dataset_name(ds, name);
4069 		dsl_dataset_rele(ds, FTAG);
4070 
4071 		snap = strchr(name, '@');
4072 		*snap = '\0';
4073 		++snap;
4074 		error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4075 		kmem_free(name, namelen);
4076 
4077 		/*
4078 		 * The object can't have been destroyed because we have a hold,
4079 		 * but it might have been renamed, resulting in ENOENT.  Retry
4080 		 * if we've been requested to do so.
4081 		 *
4082 		 * It would be nice if we could use the dsobj all the way
4083 		 * through and avoid ENOENT entirely.  But we might need to
4084 		 * unmount the snapshot, and there's currently no way to lookup
		 * unmount the snapshot, and there's currently no way to look up
4086 		 */
4087 	} while ((error == ENOENT) && retry);
4088 
4089 	return (error);
4090 }
4091 
4092 int
4093 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4094 {
4095 	dsl_dataset_t *ds;
4096 	int err;
4097 
4098 	err = dsl_dataset_hold(dsname, FTAG, &ds);
4099 	if (err)
4100 		return (err);
4101 
4102 	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4103 	if (ds->ds_phys->ds_userrefs_obj != 0) {
4104 		zap_attribute_t *za;
4105 		zap_cursor_t zc;
4106 
4107 		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4108 		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4109 		    ds->ds_phys->ds_userrefs_obj);
4110 		    zap_cursor_retrieve(&zc, za) == 0;
4111 		    zap_cursor_advance(&zc)) {
4112 			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4113 			    za->za_first_integer));
4114 		}
4115 		zap_cursor_fini(&zc);
4116 		kmem_free(za, sizeof (zap_attribute_t));
4117 	}
4118 	dsl_dataset_rele(ds, FTAG);
4119 	return (0);
4120 }
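
/*
 * Illustrative sketch (not part of the original source): walking the
 * tag -> creation-time nvlist returned by dsl_dataset_get_holds().  The
 * snapshot name and the ZFS_EXAMPLE_CODE guard are hypothetical.
 */
#ifdef ZFS_EXAMPLE_CODE
static void
example_print_holds(void)
{
	nvlist_t *nv;
	nvpair_t *pair;

	if (dsl_dataset_get_holds("tank/fs@snap", &nv) != 0)
		return;
	for (pair = nvlist_next_nvpair(nv, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(nv, pair)) {
		uint64_t when;

		/* Each pair maps a hold tag to the time it was taken. */
		VERIFY(0 == nvpair_value_uint64(pair, &when));
		dprintf("hold %s since %llu\n", nvpair_name(pair),
		    (u_longlong_t)when);
	}
	nvlist_free(nv);
}
#endif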
4121 
4122 /*
4123  * Note, this function is used as the callback for dmu_objset_find().  We
4124  * always return 0 so that we will continue to find and process
4125  * inconsistent datasets, even if we encounter an error trying to
4126  * process one of them.
4127  */
4128 /* ARGSUSED */
4129 int
4130 dsl_destroy_inconsistent(const char *dsname, void *arg)
4131 {
4132 	dsl_dataset_t *ds;
4133 
4134 	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4135 		if (DS_IS_INCONSISTENT(ds))
4136 			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4137 		else
4138 			dsl_dataset_disown(ds, FTAG);
4139 	}
4140 	return (0);
4141 }
4142 
4143 /*
4144  * Return (in *usedp) the amount of space written in new that is not
4145  * present in oldsnap.  New may be a snapshot or the head.  Old must be
 * a snapshot before new, in new's filesystem (or its origin).  If this is
 * not the case, fail and return EINVAL.
 *
 * The written space is calculated in two steps.  First, we ignore any freed
 * space and calculate the written space as new's used space minus old's
 * used space.  Next, we add in the amount of space that was freed between
 * the two snapshots, since that space reduced new's used space relative to
 * old's without reducing the amount written.  Specifically, this is the
 * space that was born before old->ds_creation_txg and freed before new
 * (ie. it is on new's deadlist or a previous deadlist).
4155  *
4156  * space freed                         [---------------------]
4157  * snapshots                       ---O-------O--------O-------O------
4158  *                                         oldsnap            new
4159  */
4160 int
4161 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4162     uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4163 {
4164 	int err = 0;
4165 	uint64_t snapobj;
4166 	dsl_pool_t *dp = new->ds_dir->dd_pool;
4167 
4168 	*usedp = 0;
4169 	*usedp += new->ds_phys->ds_referenced_bytes;
4170 	*usedp -= oldsnap->ds_phys->ds_referenced_bytes;
4171 
4172 	*compp = 0;
4173 	*compp += new->ds_phys->ds_compressed_bytes;
4174 	*compp -= oldsnap->ds_phys->ds_compressed_bytes;
4175 
4176 	*uncompp = 0;
4177 	*uncompp += new->ds_phys->ds_uncompressed_bytes;
4178 	*uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
4179 
4180 	rw_enter(&dp->dp_config_rwlock, RW_READER);
4181 	snapobj = new->ds_object;
4182 	while (snapobj != oldsnap->ds_object) {
4183 		dsl_dataset_t *snap;
4184 		uint64_t used, comp, uncomp;
4185 
4186 		if (snapobj == new->ds_object) {
4187 			snap = new;
4188 		} else {
4189 			err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4190 			if (err != 0)
4191 				break;
4192 		}
4193 
4194 		if (snap->ds_phys->ds_prev_snap_txg ==
4195 		    oldsnap->ds_phys->ds_creation_txg) {
4196 			/*
			 * The blocks in the deadlist cannot be born after
4198 			 * ds_prev_snap_txg, so get the whole deadlist space,
4199 			 * which is more efficient (especially for old-format
4200 			 * deadlists).  Unfortunately the deadlist code
4201 			 * doesn't have enough information to make this
4202 			 * optimization itself.
4203 			 */
4204 			dsl_deadlist_space(&snap->ds_deadlist,
4205 			    &used, &comp, &uncomp);
4206 		} else {
4207 			dsl_deadlist_space_range(&snap->ds_deadlist,
4208 			    0, oldsnap->ds_phys->ds_creation_txg,
4209 			    &used, &comp, &uncomp);
4210 		}
4211 		*usedp += used;
4212 		*compp += comp;
4213 		*uncompp += uncomp;
4214 
4215 		/*
4216 		 * If we get to the beginning of the chain of snapshots
4217 		 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4218 		 * was not a snapshot of/before new.
4219 		 */
4220 		snapobj = snap->ds_phys->ds_prev_snap_obj;
4221 		if (snap != new)
4222 			dsl_dataset_rele(snap, FTAG);
4223 		if (snapobj == 0) {
4224 			err = EINVAL;
4225 			break;
4226 		}
	}
4229 	rw_exit(&dp->dp_config_rwlock);
4230 	return (err);
4231 }
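
/*
 * Worked example of the calculation above (illustrative numbers): if
 * oldsnap references 10G, new references 12G, and 3G of the data that
 * existed at oldsnap was freed before new (so it sits on an intervening
 * deadlist with birth <= oldsnap's ds_creation_txg), then
 * written = 12G - 10G + 3G = 5G: new shares only 7G with oldsnap, so 5G
 * of its 12G was written after oldsnap.
 */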
4232 
4233 /*
4234  * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4235  * lastsnap, and all snapshots in between are deleted.
4236  *
4237  * blocks that would be freed            [---------------------------]
4238  * snapshots                       ---O-------O--------O-------O--------O
4239  *                                        firstsnap        lastsnap
4240  *
 * This is the set of blocks that were born after the snap before firstsnap
 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
 * last snap (ie. on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4244  * We calculate this by iterating over the relevant deadlists (from the snap
4245  * after lastsnap, backward to the snap after firstsnap), summing up the
4246  * space on the deadlist that was born after the snap before firstsnap.
4247  */
4248 int
4249 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4250     dsl_dataset_t *lastsnap,
4251     uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4252 {
4253 	int err = 0;
4254 	uint64_t snapobj;
4255 	dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4256 
4257 	ASSERT(dsl_dataset_is_snapshot(firstsnap));
4258 	ASSERT(dsl_dataset_is_snapshot(lastsnap));
4259 
4260 	/*
4261 	 * Check that the snapshots are in the same dsl_dir, and firstsnap
4262 	 * is before lastsnap.
4263 	 */
4264 	if (firstsnap->ds_dir != lastsnap->ds_dir ||
4265 	    firstsnap->ds_phys->ds_creation_txg >
4266 	    lastsnap->ds_phys->ds_creation_txg)
4267 		return (EINVAL);
4268 
4269 	*usedp = *compp = *uncompp = 0;
4270 
4271 	rw_enter(&dp->dp_config_rwlock, RW_READER);
4272 	snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4273 	while (snapobj != firstsnap->ds_object) {
4274 		dsl_dataset_t *ds;
4275 		uint64_t used, comp, uncomp;
4276 
4277 		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4278 		if (err != 0)
4279 			break;
4280 
4281 		dsl_deadlist_space_range(&ds->ds_deadlist,
4282 		    firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4283 		    &used, &comp, &uncomp);
4284 		*usedp += used;
4285 		*compp += comp;
4286 		*uncompp += uncomp;
4287 
4288 		snapobj = ds->ds_phys->ds_prev_snap_obj;
4289 		ASSERT3U(snapobj, !=, 0);
4290 		dsl_dataset_rele(ds, FTAG);
4291 	}
4292 	rw_exit(&dp->dp_config_rwlock);
4293 	return (err);
4294 }
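
/*
 * Worked example of the calculation above (illustrative snapshots): with
 * snapshots A, B, C, D in creation order, deleting B..C (firstsnap = B,
 * lastsnap = C) would free exactly the blocks born after A (birth >
 * B's ds_prev_snap_txg) that sit on D's deadlist (died between C and D)
 * or on C's deadlist (died between B and C).  Blocks born in A's lifetime,
 * or still referenced by D, survive and are not counted; the loop above
 * sums precisely that birth range over D's and C's deadlists.
 */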
4295