xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_dir.c (revision fb09f5aad449c97fe309678f3f604982b563a96f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  */
25 
26 #include <sys/dmu.h>
27 #include <sys/dmu_objset.h>
28 #include <sys/dmu_tx.h>
29 #include <sys/dsl_dataset.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/dsl_prop.h>
32 #include <sys/dsl_synctask.h>
33 #include <sys/dsl_deleg.h>
34 #include <sys/spa.h>
35 #include <sys/metaslab.h>
36 #include <sys/zap.h>
37 #include <sys/zio.h>
38 #include <sys/arc.h>
39 #include <sys/sunddi.h>
40 #include "zfs_namecheck.h"
41 
42 static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
43 static void dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd,
44     uint64_t value, dmu_tx_t *tx);
45 
46 /* ARGSUSED */
47 static void
48 dsl_dir_evict(dmu_buf_t *db, void *arg)
49 {
50 	dsl_dir_t *dd = arg;
51 	dsl_pool_t *dp = dd->dd_pool;
52 	int t;
53 
54 	for (t = 0; t < TXG_SIZE; t++) {
55 		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
56 		ASSERT(dd->dd_tempreserved[t] == 0);
57 		ASSERT(dd->dd_space_towrite[t] == 0);
58 	}
59 
60 	if (dd->dd_parent)
61 		dsl_dir_close(dd->dd_parent, dd);
62 
63 	spa_close(dd->dd_pool->dp_spa, dd);
64 
65 	/*
66 	 * The props callback list should have been cleaned up by
67 	 * objset_evict().
68 	 */
69 	list_destroy(&dd->dd_prop_cbs);
70 	mutex_destroy(&dd->dd_lock);
71 	kmem_free(dd, sizeof (dsl_dir_t));
72 }
73 
74 int
75 dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
76     const char *tail, void *tag, dsl_dir_t **ddp)
77 {
78 	dmu_buf_t *dbuf;
79 	dsl_dir_t *dd;
80 	int err;
81 
82 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
83 	    dsl_pool_sync_context(dp));
84 
85 	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
86 	if (err)
87 		return (err);
88 	dd = dmu_buf_get_user(dbuf);
89 #ifdef ZFS_DEBUG
90 	{
91 		dmu_object_info_t doi;
92 		dmu_object_info_from_db(dbuf, &doi);
93 		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
94 		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
95 	}
96 #endif
97 	if (dd == NULL) {
98 		dsl_dir_t *winner;
99 
100 		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
101 		dd->dd_object = ddobj;
102 		dd->dd_dbuf = dbuf;
103 		dd->dd_pool = dp;
104 		dd->dd_phys = dbuf->db_data;
105 		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
106 
107 		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
108 		    offsetof(dsl_prop_cb_record_t, cbr_node));
109 
110 		dsl_dir_snap_cmtime_update(dd);
111 
112 		if (dd->dd_phys->dd_parent_obj) {
113 			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
114 			    NULL, dd, &dd->dd_parent);
115 			if (err)
116 				goto errout;
117 			if (tail) {
118 #ifdef ZFS_DEBUG
119 				uint64_t foundobj;
120 
121 				err = zap_lookup(dp->dp_meta_objset,
122 				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
123 				    tail, sizeof (foundobj), 1, &foundobj);
124 				ASSERT(err || foundobj == ddobj);
125 #endif
126 				(void) strcpy(dd->dd_myname, tail);
127 			} else {
128 				err = zap_value_search(dp->dp_meta_objset,
129 				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
130 				    ddobj, 0, dd->dd_myname);
131 			}
132 			if (err)
133 				goto errout;
134 		} else {
135 			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
136 		}
137 
138 		if (dsl_dir_is_clone(dd)) {
139 			dmu_buf_t *origin_bonus;
140 			dsl_dataset_phys_t *origin_phys;
141 
142 			/*
143 			 * We can't open the origin dataset, because
144 			 * that would require opening this dsl_dir.
145 			 * Just look at its phys directly instead.
146 			 */
147 			err = dmu_bonus_hold(dp->dp_meta_objset,
148 			    dd->dd_phys->dd_origin_obj, FTAG, &origin_bonus);
149 			if (err)
150 				goto errout;
151 			origin_phys = origin_bonus->db_data;
152 			dd->dd_origin_txg =
153 			    origin_phys->ds_creation_txg;
154 			dmu_buf_rele(origin_bonus, FTAG);
155 		}
156 
157 		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
158 		    dsl_dir_evict);
159 		if (winner) {
160 			if (dd->dd_parent)
161 				dsl_dir_close(dd->dd_parent, dd);
162 			mutex_destroy(&dd->dd_lock);
163 			kmem_free(dd, sizeof (dsl_dir_t));
164 			dd = winner;
165 		} else {
166 			spa_open_ref(dp->dp_spa, dd);
167 		}
168 	}
169 
170 	/*
171 	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
172 	 * holds on the spa.  We need the open-to-close holds because
173 	 * otherwise the spa_refcnt wouldn't change when we open a
174 	 * dir which the spa also has open, so we could incorrectly
175 	 * think it was OK to unload/export/destroy the pool.  We need
176 	 * the instantiate-to-evict hold because the dsl_dir_t has a
177 	 * pointer to the dd_pool, which has a pointer to the spa_t.
178 	 */
179 	spa_open_ref(dp->dp_spa, tag);
180 	ASSERT3P(dd->dd_pool, ==, dp);
181 	ASSERT3U(dd->dd_object, ==, ddobj);
182 	ASSERT3P(dd->dd_dbuf, ==, dbuf);
183 	*ddp = dd;
184 	return (0);
185 
186 errout:
187 	if (dd->dd_parent)
188 		dsl_dir_close(dd->dd_parent, dd);
189 	mutex_destroy(&dd->dd_lock);
190 	kmem_free(dd, sizeof (dsl_dir_t));
191 	dmu_buf_rele(dbuf, tag);
192 	return (err);
193 }
194 
195 void
196 dsl_dir_close(dsl_dir_t *dd, void *tag)
197 {
198 	dprintf_dd(dd, "%s\n", "");
199 	spa_close(dd->dd_pool->dp_spa, tag);
200 	dmu_buf_rele(dd->dd_dbuf, tag);
201 }
202 
203 /* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
204 void
205 dsl_dir_name(dsl_dir_t *dd, char *buf)
206 {
207 	if (dd->dd_parent) {
208 		dsl_dir_name(dd->dd_parent, buf);
209 		(void) strcat(buf, "/");
210 	} else {
211 		buf[0] = '\0';
212 	}
213 	if (!MUTEX_HELD(&dd->dd_lock)) {
214 		/*
215 		 * recursive mutex so that we can use
216 		 * dprintf_dd() with dd_lock held
217 		 */
218 		mutex_enter(&dd->dd_lock);
219 		(void) strcat(buf, dd->dd_myname);
220 		mutex_exit(&dd->dd_lock);
221 	} else {
222 		(void) strcat(buf, dd->dd_myname);
223 	}
224 }
225 
226 /* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
227 int
228 dsl_dir_namelen(dsl_dir_t *dd)
229 {
230 	int result = 0;
231 
232 	if (dd->dd_parent) {
233 		/* parent's name + 1 for the "/" */
234 		result = dsl_dir_namelen(dd->dd_parent) + 1;
235 	}
236 
237 	if (!MUTEX_HELD(&dd->dd_lock)) {
238 		/* see dsl_dir_name */
239 		mutex_enter(&dd->dd_lock);
240 		result += strlen(dd->dd_myname);
241 		mutex_exit(&dd->dd_lock);
242 	} else {
243 		result += strlen(dd->dd_myname);
244 	}
245 
246 	return (result);
247 }
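
/*
 * Illustrative pairing of the two routines above (a sketch, not a caller
 * in this file): size a buffer with dsl_dir_namelen() and fill it with
 * dsl_dir_name().
 *
 *	int len = dsl_dir_namelen(dd) + 1;
 *	char *name = kmem_alloc(len, KM_SLEEP);
 *	dsl_dir_name(dd, name);
 *	...
 *	kmem_free(name, len);
 */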
248 
249 static int
250 getcomponent(const char *path, char *component, const char **nextp)
251 {
252 	char *p;
253 	if ((path == NULL) || (path[0] == '\0'))
254 		return (ENOENT);
255 	/* This would be a good place to reserve some namespace... */
256 	p = strpbrk(path, "/@");
257 	if (p && (p[1] == '/' || p[1] == '@')) {
258 		/* two separators in a row */
259 		return (EINVAL);
260 	}
261 	if (p == NULL || p == path) {
262 		/*
263 		 * if the first thing is an @ or /, it had better be an
264 		 * @ and it had better not have any more ats or slashes,
265 		 * and it had better have something after the @.
266 		 */
267 		if (p != NULL &&
268 		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
269 			return (EINVAL);
270 		if (strlen(path) >= MAXNAMELEN)
271 			return (ENAMETOOLONG);
272 		(void) strcpy(component, path);
273 		p = NULL;
274 	} else if (p[0] == '/') {
275 		if (p-path >= MAXNAMELEN)
276 			return (ENAMETOOLONG);
277 		(void) strncpy(component, path, p - path);
278 		component[p-path] = '\0';
279 		p++;
280 	} else if (p[0] == '@') {
281 		/*
282 		 * if the next separator is an @, there better not be
283 		 * any more slashes.
284 		 */
285 		if (strchr(path, '/'))
286 			return (EINVAL);
287 		if (p-path >= MAXNAMELEN)
288 			return (ENAMETOOLONG);
289 		(void) strncpy(component, path, p - path);
290 		component[p-path] = '\0';
291 	} else {
292 		ASSERT(!"invalid p");
293 	}
294 	*nextp = p;
295 	return (0);
296 }
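
/*
 * Worked example of getcomponent() (illustration only), parsing
 * "tank/home@snap" one call at a time:
 *
 *	getcomponent("tank/home@snap", buf, &next)
 *		buf = "tank"	next = "home@snap"
 *	getcomponent("home@snap", buf, &next)
 *		buf = "home"	next = "@snap"
 *
 * The caller below stops when the next component starts with '@', so the
 * "@snap" part is left for *tailp.
 */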
297 
298 /*
299  * Same as dsl_dir_open, but ignore the first component of name and use
300  * the given spa instead.
301  */
302 int
303 dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
304     dsl_dir_t **ddp, const char **tailp)
305 {
306 	char buf[MAXNAMELEN];
307 	const char *next, *nextnext = NULL;
308 	int err;
309 	dsl_dir_t *dd;
310 	dsl_pool_t *dp;
311 	uint64_t ddobj;
312 	int openedspa = FALSE;
313 
314 	dprintf("%s\n", name);
315 
316 	err = getcomponent(name, buf, &next);
317 	if (err)
318 		return (err);
319 	if (spa == NULL) {
320 		err = spa_open(buf, &spa, FTAG);
321 		if (err) {
322 			dprintf("spa_open(%s) failed\n", buf);
323 			return (err);
324 		}
325 		openedspa = TRUE;
326 
327 		/* XXX this assertion belongs in spa_open */
328 		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
329 	}
330 
331 	dp = spa_get_dsl(spa);
332 
333 	rw_enter(&dp->dp_config_rwlock, RW_READER);
334 	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
335 	if (err) {
336 		rw_exit(&dp->dp_config_rwlock);
337 		if (openedspa)
338 			spa_close(spa, FTAG);
339 		return (err);
340 	}
341 
342 	while (next != NULL) {
343 		dsl_dir_t *child_ds;
344 		err = getcomponent(next, buf, &nextnext);
345 		if (err)
346 			break;
347 		ASSERT(next[0] != '\0');
348 		if (next[0] == '@')
349 			break;
350 		dprintf("looking up %s in obj%lld\n",
351 		    buf, dd->dd_phys->dd_child_dir_zapobj);
352 
353 		err = zap_lookup(dp->dp_meta_objset,
354 		    dd->dd_phys->dd_child_dir_zapobj,
355 		    buf, sizeof (ddobj), 1, &ddobj);
356 		if (err) {
357 			if (err == ENOENT)
358 				err = 0;
359 			break;
360 		}
361 
362 		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
363 		if (err)
364 			break;
365 		dsl_dir_close(dd, tag);
366 		dd = child_ds;
367 		next = nextnext;
368 	}
369 	rw_exit(&dp->dp_config_rwlock);
370 
371 	if (err) {
372 		dsl_dir_close(dd, tag);
373 		if (openedspa)
374 			spa_close(spa, FTAG);
375 		return (err);
376 	}
377 
378 	/*
379 	 * It's an error if there's more than one component left, or
380 	 * tailp==NULL and there's any component left.
381 	 */
382 	if (next != NULL &&
383 	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
384 		/* bad path name */
385 		dsl_dir_close(dd, tag);
386 		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
387 		err = ENOENT;
388 	}
389 	if (tailp)
390 		*tailp = next;
391 	if (openedspa)
392 		spa_close(spa, FTAG);
393 	*ddp = dd;
394 	return (err);
395 }
396 
397 /*
398  * Return the dsl_dir_t in *ddp, and possibly the last component which
399  * couldn't be found in *tailp.  Return an error if the path is bogus, or
400  * if tailp==NULL and we couldn't parse the whole name.  (*tailp)[0] == '@'
401  * means that the last component is a snapshot.
402  */
403 int
404 dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
405 {
406 	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
407 }
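
/*
 * Minimal caller sketch (hypothetical, for illustration) showing how the
 * tail is interpreted:
 *
 *	dsl_dir_t *dd;
 *	const char *tail;
 *	int err = dsl_dir_open("tank/home@yesterday", FTAG, &dd, &tail);
 *	if (err == 0) {
 *		if (tail != NULL && tail[0] == '@')
 *			... the trailing component names a snapshot ...
 *		dsl_dir_close(dd, FTAG);
 *	}
 */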
408 
409 uint64_t
410 dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
411     dmu_tx_t *tx)
412 {
413 	objset_t *mos = dp->dp_meta_objset;
414 	uint64_t ddobj;
415 	dsl_dir_phys_t *ddphys;
416 	dmu_buf_t *dbuf;
417 
418 	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
419 	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
420 	if (pds) {
421 		VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
422 		    name, sizeof (uint64_t), 1, &ddobj, tx));
423 	} else {
424 		/* it's the root dir */
425 		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
426 		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
427 	}
428 	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
429 	dmu_buf_will_dirty(dbuf, tx);
430 	ddphys = dbuf->db_data;
431 
432 	ddphys->dd_creation_time = gethrestime_sec();
433 	if (pds)
434 		ddphys->dd_parent_obj = pds->dd_object;
435 	ddphys->dd_props_zapobj = zap_create(mos,
436 	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
437 	ddphys->dd_child_dir_zapobj = zap_create(mos,
438 	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
439 	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
440 		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
441 	dmu_buf_rele(dbuf, FTAG);
442 
443 	return (ddobj);
444 }
445 
446 /* ARGSUSED */
447 int
448 dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
449 {
450 	dsl_dir_t *dd = arg1;
451 	dsl_pool_t *dp = dd->dd_pool;
452 	objset_t *mos = dp->dp_meta_objset;
453 	int err;
454 	uint64_t count;
455 
456 	/*
457 	 * There should be exactly two holds, both from
458 	 * dsl_dataset_destroy: one on the dd directory, and one on its
459 	 * head ds.  If there are more holds, then a concurrent thread is
460 	 * performing a lookup inside this dir while we're trying to destroy
461 	 * it.  To minimize this possibility, we perform this check only
462 	 * in syncing context and fail the operation if we encounter
463 	 * additional holds.  The dp_config_rwlock ensures that nobody else
464 	 * opens it after we check.
465 	 */
466 	if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 2)
467 		return (EBUSY);
468 
469 	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
470 	if (err)
471 		return (err);
472 	if (count != 0)
473 		return (EEXIST);
474 
475 	return (0);
476 }
477 
478 void
479 dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
480 {
481 	dsl_dir_t *dd = arg1;
482 	objset_t *mos = dd->dd_pool->dp_meta_objset;
483 	uint64_t obj;
484 	dd_used_t t;
485 
486 	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
487 	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
488 
489 	/*
490 	 * Remove our reservation. The impl() routine avoids setting the
491 	 * actual property, which would require the (already destroyed) ds.
492 	 */
493 	dsl_dir_set_reservation_sync_impl(dd, 0, tx);
494 
495 	ASSERT0(dd->dd_phys->dd_used_bytes);
496 	ASSERT0(dd->dd_phys->dd_reserved);
497 	for (t = 0; t < DD_USED_NUM; t++)
498 		ASSERT0(dd->dd_phys->dd_used_breakdown[t]);
499 
500 	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
501 	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
502 	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
503 	VERIFY(0 == zap_remove(mos,
504 	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));
505 
506 	obj = dd->dd_object;
507 	dsl_dir_close(dd, tag);
508 	VERIFY(0 == dmu_object_free(mos, obj, tx));
509 }
510 
511 boolean_t
512 dsl_dir_is_clone(dsl_dir_t *dd)
513 {
514 	return (dd->dd_phys->dd_origin_obj &&
515 	    (dd->dd_pool->dp_origin_snap == NULL ||
516 	    dd->dd_phys->dd_origin_obj !=
517 	    dd->dd_pool->dp_origin_snap->ds_object));
518 }
519 
520 void
521 dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
522 {
523 	mutex_enter(&dd->dd_lock);
524 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
525 	    dd->dd_phys->dd_used_bytes);
526 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
527 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
528 	    dd->dd_phys->dd_reserved);
529 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
530 	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
531 	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
532 	    dd->dd_phys->dd_compressed_bytes));
533 	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
534 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
535 		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
536 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
537 		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
538 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
539 		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
540 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
541 		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
542 		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
543 	}
544 	mutex_exit(&dd->dd_lock);
545 
546 	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
547 	if (dsl_dir_is_clone(dd)) {
548 		dsl_dataset_t *ds;
549 		char buf[MAXNAMELEN];
550 
551 		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
552 		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
553 		dsl_dataset_name(ds, buf);
554 		dsl_dataset_rele(ds, FTAG);
555 		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
556 	}
557 	rw_exit(&dd->dd_pool->dp_config_rwlock);
558 }
559 
560 void
561 dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
562 {
563 	dsl_pool_t *dp = dd->dd_pool;
564 
565 	ASSERT(dd->dd_phys);
566 
567 	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
568 		/* up the hold count until we can be written out */
569 		dmu_buf_add_ref(dd->dd_dbuf, dd);
570 	}
571 }
572 
573 static int64_t
574 parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
575 {
576 	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
577 	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
578 	return (new_accounted - old_accounted);
579 }
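
/*
 * Worked example of parent_delta() (illustration only): with
 * dd_reserved = 100, used = 60 and delta = +50, the parent was already
 * charged MAX(60, 100) = 100 and will now be charged
 * MAX(110, 100) = 110, so only +10 is passed up; the first 40 units of
 * the new usage were already covered by the reservation.
 */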
580 
581 void
582 dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
583 {
584 	ASSERT(dmu_tx_is_syncing(tx));
585 
586 	mutex_enter(&dd->dd_lock);
587 	ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]);
588 	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
589 	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
590 	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
591 	mutex_exit(&dd->dd_lock);
592 
593 	/* release the hold from dsl_dir_dirty */
594 	dmu_buf_rele(dd->dd_dbuf, dd);
595 }
596 
597 static uint64_t
598 dsl_dir_space_towrite(dsl_dir_t *dd)
599 {
600 	uint64_t space = 0;
601 	int i;
602 
603 	ASSERT(MUTEX_HELD(&dd->dd_lock));
604 
605 	for (i = 0; i < TXG_SIZE; i++) {
606 		space += dd->dd_space_towrite[i&TXG_MASK];
607 		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
608 	}
609 	return (space);
610 }
611 
612 /*
613  * How much space would dd have available if ancestor had delta applied
614  * to it?  If ondiskonly is set, we're only interested in what's
615  * on-disk, not estimated pending changes.
616  */
617 uint64_t
618 dsl_dir_space_available(dsl_dir_t *dd,
619     dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
620 {
621 	uint64_t parentspace, myspace, quota, used;
622 
623 	/*
624 	 * If there are no restrictions otherwise, assume we have
625 	 * unlimited space available.
626 	 */
627 	quota = UINT64_MAX;
628 	parentspace = UINT64_MAX;
629 
630 	if (dd->dd_parent != NULL) {
631 		parentspace = dsl_dir_space_available(dd->dd_parent,
632 		    ancestor, delta, ondiskonly);
633 	}
634 
635 	mutex_enter(&dd->dd_lock);
636 	if (dd->dd_phys->dd_quota != 0)
637 		quota = dd->dd_phys->dd_quota;
638 	used = dd->dd_phys->dd_used_bytes;
639 	if (!ondiskonly)
640 		used += dsl_dir_space_towrite(dd);
641 
642 	if (dd->dd_parent == NULL) {
643 		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
644 		quota = MIN(quota, poolsize);
645 	}
646 
647 	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
648 		/*
649 		 * We have some space reserved, in addition to what our
650 		 * parent gave us.
651 		 */
652 		parentspace += dd->dd_phys->dd_reserved - used;
653 	}
654 
655 	if (dd == ancestor) {
656 		ASSERT(delta <= 0);
657 		ASSERT(used >= -delta);
658 		used += delta;
659 		if (parentspace != UINT64_MAX)
660 			parentspace -= delta;
661 	}
662 
663 	if (used > quota) {
664 		/* over quota */
665 		myspace = 0;
666 	} else {
667 		/*
668 		 * the lesser of the space provided by our parent and
669 		 * the space left in our quota
670 		 */
671 		myspace = MIN(parentspace, quota - used);
672 	}
673 
674 	mutex_exit(&dd->dd_lock);
675 
676 	return (myspace);
677 }
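
/*
 * Worked example (illustration only), with no reservation set: if the
 * quota is 100, 70 is used on disk, 10 more is pending, and the parent
 * offers 50, then used = 80 and the result is MIN(50, 100 - 80) = 20.
 */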
678 
679 struct tempreserve {
680 	list_node_t tr_node;
681 	dsl_pool_t *tr_dp;
682 	dsl_dir_t *tr_ds;
683 	uint64_t tr_size;
684 };
685 
686 static int
687 dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
688     boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
689     dmu_tx_t *tx, boolean_t first)
690 {
691 	uint64_t txg = tx->tx_txg;
692 	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
693 	uint64_t deferred = 0;
694 	struct tempreserve *tr;
695 	int retval = EDQUOT;
696 	int txgidx = txg & TXG_MASK;
697 	int i;
698 	uint64_t ref_rsrv = 0;
699 
700 	ASSERT3U(txg, !=, 0);
701 	ASSERT3S(asize, >, 0);
702 
703 	mutex_enter(&dd->dd_lock);
704 
705 	/*
706 	 * Check against the dsl_dir's quota.  We don't add in the delta
707 	 * when checking for over-quota because they get one free hit.
708 	 */
709 	est_inflight = dsl_dir_space_towrite(dd);
710 	for (i = 0; i < TXG_SIZE; i++)
711 		est_inflight += dd->dd_tempreserved[i];
712 	used_on_disk = dd->dd_phys->dd_used_bytes;
713 
714 	/*
715 	 * On the first iteration, fetch the dataset's used-on-disk and
716 	 * refreservation values. Also, if checkrefquota is set, test if
717 	 * allocating this space would exceed the dataset's refquota.
718 	 */
719 	if (first && tx->tx_objset) {
720 		int error;
721 		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;
722 
723 		error = dsl_dataset_check_quota(ds, checkrefquota,
724 		    asize, est_inflight, &used_on_disk, &ref_rsrv);
725 		if (error) {
726 			mutex_exit(&dd->dd_lock);
727 			return (error);
728 		}
729 	}
730 
731 	/*
732 	 * If this transaction will result in a net free of space,
733 	 * we want to let it through.
734 	 */
735 	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
736 		quota = UINT64_MAX;
737 	else
738 		quota = dd->dd_phys->dd_quota;
739 
740 	/*
741 	 * Adjust the quota against the actual pool size at the root
742 	 * minus any outstanding deferred frees.
743 	 * To ensure that it's possible to remove files from a full
744 	 * pool without inducing transient overcommits, we throttle
745 	 * netfree transactions against a quota that is slightly larger,
746 	 * but still within the pool's allocation slop.  In cases where
747 	 * we're very close to full, this will allow a steady trickle of
748 	 * removes to get through.
749 	 */
750 	if (dd->dd_parent == NULL) {
751 		spa_t *spa = dd->dd_pool->dp_spa;
752 		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
753 		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
754 		if (poolsize - deferred < quota) {
755 			quota = poolsize - deferred;
756 			retval = ENOSPC;
757 		}
758 	}
759 
760 	/*
761 	 * If they are requesting more space, and our current estimate
762 	 * is over quota, they get to try again unless the actual
763 	 * on-disk is over quota and there are no pending changes (which
764 	 * may free up space for us).
765 	 */
766 	if (used_on_disk + est_inflight >= quota) {
767 		if (est_inflight > 0 || used_on_disk < quota ||
768 		    (retval == ENOSPC && used_on_disk < quota + deferred))
769 			retval = ERESTART;
770 		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
771 		    "quota=%lluK tr=%lluK err=%d\n",
772 		    used_on_disk>>10, est_inflight>>10,
773 		    quota>>10, asize>>10, retval);
774 		mutex_exit(&dd->dd_lock);
775 		return (retval);
776 	}
777 
778 	/* We need to up our estimated delta before dropping dd_lock */
779 	dd->dd_tempreserved[txgidx] += asize;
780 
781 	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
782 	    asize - ref_rsrv);
783 	mutex_exit(&dd->dd_lock);
784 
785 	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
786 	tr->tr_ds = dd;
787 	tr->tr_size = asize;
788 	list_insert_tail(tr_list, tr);
789 
790 	/* see if it's OK with our parent */
791 	if (dd->dd_parent && parent_rsrv) {
792 		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);
793 
794 		return (dsl_dir_tempreserve_impl(dd->dd_parent,
795 		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
796 	} else {
797 		return (0);
798 	}
799 }
800 
801 /*
802  * Reserve space in this dsl_dir, to be used in this tx's txg.
803  * After the space has been dirtied (and dsl_dir_willuse_space()
804  * has been called), the reservation should be canceled, using
805  * dsl_dir_tempreserve_clear().
806  */
807 int
808 dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
809     uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
810 {
811 	int err;
812 	list_t *tr_list;
813 
814 	if (asize == 0) {
815 		*tr_cookiep = NULL;
816 		return (0);
817 	}
818 
819 	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
820 	list_create(tr_list, sizeof (struct tempreserve),
821 	    offsetof(struct tempreserve, tr_node));
822 	ASSERT3S(asize, >, 0);
823 	ASSERT3S(fsize, >=, 0);
824 
825 	err = arc_tempreserve_space(lsize, tx->tx_txg);
826 	if (err == 0) {
827 		struct tempreserve *tr;
828 
829 		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
830 		tr->tr_size = lsize;
831 		list_insert_tail(tr_list, tr);
832 
833 		err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
834 	} else {
835 		if (err == EAGAIN) {
836 			txg_delay(dd->dd_pool, tx->tx_txg, 1);
837 			err = ERESTART;
838 		}
839 		dsl_pool_memory_pressure(dd->dd_pool);
840 	}
841 
842 	if (err == 0) {
843 		struct tempreserve *tr;
844 
845 		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
846 		tr->tr_dp = dd->dd_pool;
847 		tr->tr_size = asize;
848 		list_insert_tail(tr_list, tr);
849 
850 		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
851 		    FALSE, asize > usize, tr_list, tx, TRUE);
852 	}
853 
854 	if (err)
855 		dsl_dir_tempreserve_clear(tr_list, tx);
856 	else
857 		*tr_cookiep = tr_list;
858 
859 	return (err);
860 }
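
/*
 * Sketch of the reserve/clear pairing described above (illustrative;
 * in practice the DMU transaction code is the caller):
 *
 *	void *tr_cookie;
 *	err = dsl_dir_tempreserve_space(dd, lsize, asize, fsize, usize,
 *	    &tr_cookie, tx);
 *	if (err == 0) {
 *		... dirty the data, dsl_dir_willuse_space(), etc ...
 *		dsl_dir_tempreserve_clear(tr_cookie, tx);
 *	}
 */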
861 
862 /*
863  * Clear a temporary reservation that we previously made with
864  * dsl_dir_tempreserve_space().
865  */
866 void
867 dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
868 {
869 	int txgidx = tx->tx_txg & TXG_MASK;
870 	list_t *tr_list = tr_cookie;
871 	struct tempreserve *tr;
872 
873 	ASSERT3U(tx->tx_txg, !=, 0);
874 
875 	if (tr_cookie == NULL)
876 		return;
877 
878 	while (tr = list_head(tr_list)) {
879 		if (tr->tr_dp) {
880 			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
881 		} else if (tr->tr_ds) {
882 			mutex_enter(&tr->tr_ds->dd_lock);
883 			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
884 			    tr->tr_size);
885 			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
886 			mutex_exit(&tr->tr_ds->dd_lock);
887 		} else {
888 			arc_tempreserve_clear(tr->tr_size);
889 		}
890 		list_remove(tr_list, tr);
891 		kmem_free(tr, sizeof (struct tempreserve));
892 	}
893 
894 	kmem_free(tr_list, sizeof (list_t));
895 }
896 
897 static void
898 dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
899 {
900 	int64_t parent_space;
901 	uint64_t est_used;
902 
903 	mutex_enter(&dd->dd_lock);
904 	if (space > 0)
905 		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
906 
907 	est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
908 	parent_space = parent_delta(dd, est_used, space);
909 	mutex_exit(&dd->dd_lock);
910 
911 	/* Make sure that we clean up dd_space_to* */
912 	dsl_dir_dirty(dd, tx);
913 
914 	/* XXX this is potentially expensive and unnecessary... */
915 	if (parent_space && dd->dd_parent)
916 		dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
917 }
918 
919 /*
920  * Call in open context when we think we're going to write/free space,
921  * eg. when dirtying data.  Be conservative (ie. OK to write less than
922  * this or free more than this, but don't write more or free less).
923  */
924 void
925 dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
926 {
927 	dsl_pool_willuse_space(dd->dd_pool, space, tx);
928 	dsl_dir_willuse_space_impl(dd, space, tx);
929 }
930 
931 /* call from syncing context when we actually write/free space for this dd */
932 void
933 dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
934     int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
935 {
936 	int64_t accounted_delta;
937 	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
938 
939 	ASSERT(dmu_tx_is_syncing(tx));
940 	ASSERT(type < DD_USED_NUM);
941 
942 	if (needlock)
943 		mutex_enter(&dd->dd_lock);
944 	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
945 	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
946 	ASSERT(compressed >= 0 ||
947 	    dd->dd_phys->dd_compressed_bytes >= -compressed);
948 	ASSERT(uncompressed >= 0 ||
949 	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
950 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
951 	dd->dd_phys->dd_used_bytes += used;
952 	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
953 	dd->dd_phys->dd_compressed_bytes += compressed;
954 
955 	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
956 		ASSERT(used > 0 ||
957 		    dd->dd_phys->dd_used_breakdown[type] >= -used);
958 		dd->dd_phys->dd_used_breakdown[type] += used;
959 #ifdef DEBUG
960 		dd_used_t t;
961 		uint64_t u = 0;
962 		for (t = 0; t < DD_USED_NUM; t++)
963 			u += dd->dd_phys->dd_used_breakdown[t];
964 		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
965 #endif
966 	}
967 	if (needlock)
968 		mutex_exit(&dd->dd_lock);
969 
970 	if (dd->dd_parent != NULL) {
971 		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
972 		    accounted_delta, compressed, uncompressed, tx);
973 		dsl_dir_transfer_space(dd->dd_parent,
974 		    used - accounted_delta,
975 		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
976 	}
977 }
978 
979 void
980 dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
981     dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
982 {
983 	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
984 
985 	ASSERT(dmu_tx_is_syncing(tx));
986 	ASSERT(oldtype < DD_USED_NUM);
987 	ASSERT(newtype < DD_USED_NUM);
988 
989 	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
990 		return;
991 
992 	if (needlock)
993 		mutex_enter(&dd->dd_lock);
994 	ASSERT(delta > 0 ?
995 	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
996 	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
997 	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
998 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
999 	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
1000 	dd->dd_phys->dd_used_breakdown[newtype] += delta;
1001 	if (needlock)
1002 		mutex_exit(&dd->dd_lock);
1003 }
1004 
1005 static int
1006 dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
1007 {
1008 	dsl_dataset_t *ds = arg1;
1009 	dsl_dir_t *dd = ds->ds_dir;
1010 	dsl_prop_setarg_t *psa = arg2;
1011 	int err;
1012 	uint64_t towrite;
1013 
1014 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
1015 		return (err);
1016 
1017 	if (psa->psa_effective_value == 0)
1018 		return (0);
1019 
1020 	mutex_enter(&dd->dd_lock);
1021 	/*
1022 	 * If we are doing the preliminary check in open context, and
1023 	 * there are pending changes, then don't fail it, since the
1024 	 * pending changes could under-estimate the amount of space to be
1025 	 * freed up.
1026 	 */
1027 	towrite = dsl_dir_space_towrite(dd);
1028 	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
1029 	    (psa->psa_effective_value < dd->dd_phys->dd_reserved ||
1030 	    psa->psa_effective_value < dd->dd_phys->dd_used_bytes + towrite)) {
1031 		err = ENOSPC;
1032 	}
1033 	mutex_exit(&dd->dd_lock);
1034 	return (err);
1035 }
1036 
1037 extern dsl_syncfunc_t dsl_prop_set_sync;
1038 
1039 static void
1040 dsl_dir_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1041 {
1042 	dsl_dataset_t *ds = arg1;
1043 	dsl_dir_t *dd = ds->ds_dir;
1044 	dsl_prop_setarg_t *psa = arg2;
1045 	uint64_t effective_value = psa->psa_effective_value;
1046 
1047 	dsl_prop_set_sync(ds, psa, tx);
1048 	DSL_PROP_CHECK_PREDICTION(dd, psa);
1049 
1050 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
1051 
1052 	mutex_enter(&dd->dd_lock);
1053 	dd->dd_phys->dd_quota = effective_value;
1054 	mutex_exit(&dd->dd_lock);
1055 }
1056 
1057 int
1058 dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
1059 {
1060 	dsl_dir_t *dd;
1061 	dsl_dataset_t *ds;
1062 	dsl_prop_setarg_t psa;
1063 	int err;
1064 
1065 	dsl_prop_setarg_init_uint64(&psa, "quota", source, &quota);
1066 
1067 	err = dsl_dataset_hold(ddname, FTAG, &ds);
1068 	if (err)
1069 		return (err);
1070 
1071 	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
1072 	if (err) {
1073 		dsl_dataset_rele(ds, FTAG);
1074 		return (err);
1075 	}
1076 
1077 	ASSERT(ds->ds_dir == dd);
1078 
1079 	/*
1080 	 * If someone removes a file, then tries to set the quota, we want to
1081 	 * make sure the file freeing takes effect.
1082 	 */
1083 	txg_wait_open(dd->dd_pool, 0);
1084 
1085 	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
1086 	    dsl_dir_set_quota_sync, ds, &psa, 0);
1087 
1088 	dsl_dir_close(dd, FTAG);
1089 	dsl_dataset_rele(ds, FTAG);
1090 	return (err);
1091 }
1092 
1093 int
1094 dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
1095 {
1096 	dsl_dataset_t *ds = arg1;
1097 	dsl_dir_t *dd = ds->ds_dir;
1098 	dsl_prop_setarg_t *psa = arg2;
1099 	uint64_t effective_value;
1100 	uint64_t used, avail;
1101 	int err;
1102 
1103 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
1104 		return (err);
1105 
1106 	effective_value = psa->psa_effective_value;
1107 
1108 	/*
1109 	 * If we are doing the preliminary check in open context, the
1110 	 * space estimates may be inaccurate.
1111 	 */
1112 	if (!dmu_tx_is_syncing(tx))
1113 		return (0);
1114 
1115 	mutex_enter(&dd->dd_lock);
1116 	used = dd->dd_phys->dd_used_bytes;
1117 	mutex_exit(&dd->dd_lock);
1118 
1119 	if (dd->dd_parent) {
1120 		avail = dsl_dir_space_available(dd->dd_parent,
1121 		    NULL, 0, FALSE);
1122 	} else {
1123 		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
1124 	}
1125 
1126 	if (MAX(used, effective_value) > MAX(used, dd->dd_phys->dd_reserved)) {
1127 		uint64_t delta = MAX(used, effective_value) -
1128 		    MAX(used, dd->dd_phys->dd_reserved);
1129 
1130 		if (delta > avail)
1131 			return (ENOSPC);
1132 		if (dd->dd_phys->dd_quota > 0 &&
1133 		    effective_value > dd->dd_phys->dd_quota)
1134 			return (ENOSPC);
1135 	}
1136 
1137 	return (0);
1138 }
1139 
1140 static void
1141 dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
1142 {
1143 	uint64_t used;
1144 	int64_t delta;
1145 
1146 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
1147 
1148 	mutex_enter(&dd->dd_lock);
1149 	used = dd->dd_phys->dd_used_bytes;
1150 	delta = MAX(used, value) - MAX(used, dd->dd_phys->dd_reserved);
1151 	dd->dd_phys->dd_reserved = value;
1152 
1153 	if (dd->dd_parent != NULL) {
1154 		/* Roll up this additional usage into our ancestors */
1155 		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
1156 		    delta, 0, 0, tx);
1157 	}
1158 	mutex_exit(&dd->dd_lock);
1159 }
1160 
1161 
1162 static void
1163 dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1164 {
1165 	dsl_dataset_t *ds = arg1;
1166 	dsl_dir_t *dd = ds->ds_dir;
1167 	dsl_prop_setarg_t *psa = arg2;
1168 	uint64_t value = psa->psa_effective_value;
1169 
1170 	dsl_prop_set_sync(ds, psa, tx);
1171 	DSL_PROP_CHECK_PREDICTION(dd, psa);
1172 
1173 	dsl_dir_set_reservation_sync_impl(dd, value, tx);
1174 }
1175 
1176 int
1177 dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
1178     uint64_t reservation)
1179 {
1180 	dsl_dir_t *dd;
1181 	dsl_dataset_t *ds;
1182 	dsl_prop_setarg_t psa;
1183 	int err;
1184 
1185 	dsl_prop_setarg_init_uint64(&psa, "reservation", source, &reservation);
1186 
1187 	err = dsl_dataset_hold(ddname, FTAG, &ds);
1188 	if (err)
1189 		return (err);
1190 
1191 	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
1192 	if (err) {
1193 		dsl_dataset_rele(ds, FTAG);
1194 		return (err);
1195 	}
1196 
1197 	ASSERT(ds->ds_dir == dd);
1198 
1199 	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
1200 	    dsl_dir_set_reservation_sync, ds, &psa, 0);
1201 
1202 	dsl_dir_close(dd, FTAG);
1203 	dsl_dataset_rele(ds, FTAG);
1204 	return (err);
1205 }
1206 
1207 static dsl_dir_t *
1208 closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
1209 {
1210 	for (; ds1; ds1 = ds1->dd_parent) {
1211 		dsl_dir_t *dd;
1212 		for (dd = ds2; dd; dd = dd->dd_parent) {
1213 			if (ds1 == dd)
1214 				return (dd);
1215 		}
1216 	}
1217 	return (NULL);
1218 }
1219 
1220 /*
1221  * If delta is applied to dd, how much of that delta would be applied to
1222  * ancestor?  Syncing context only.
1223  */
1224 static int64_t
1225 would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
1226 {
1227 	if (dd == ancestor)
1228 		return (delta);
1229 
1230 	mutex_enter(&dd->dd_lock);
1231 	delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
1232 	mutex_exit(&dd->dd_lock);
1233 	return (would_change(dd->dd_parent, delta, ancestor));
1234 }
1235 
1236 struct renamearg {
1237 	dsl_dir_t *newparent;
1238 	const char *mynewname;
1239 };
1240 
1241 static int
1242 dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
1243 {
1244 	dsl_dir_t *dd = arg1;
1245 	struct renamearg *ra = arg2;
1246 	dsl_pool_t *dp = dd->dd_pool;
1247 	objset_t *mos = dp->dp_meta_objset;
1248 	int err;
1249 	uint64_t val;
1250 
1251 	/*
1252 	 * There should only be one reference, from dmu_objset_rename().
1253 	 * Fleeting holds are also possible (eg, from "zfs list" getting
1254 	 * stats), but any that are present in open context will likely
1255 	 * be gone by syncing context, so only fail from syncing
1256 	 * context.
1257 	 */
1258 	if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 1)
1259 		return (EBUSY);
1260 
1261 	/* check for existing name */
1262 	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
1263 	    ra->mynewname, 8, 1, &val);
1264 	if (err == 0)
1265 		return (EEXIST);
1266 	if (err != ENOENT)
1267 		return (err);
1268 
1269 	if (ra->newparent != dd->dd_parent) {
1270 		/* is there enough space? */
1271 		uint64_t myspace =
1272 		    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);
1273 
1274 		/* no rename into our descendant */
1275 		if (closest_common_ancestor(dd, ra->newparent) == dd)
1276 			return (EINVAL);
1277 
1278 		if (err = dsl_dir_transfer_possible(dd->dd_parent,
1279 		    ra->newparent, myspace))
1280 			return (err);
1281 	}
1282 
1283 	return (0);
1284 }
1285 
1286 static void
1287 dsl_dir_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1288 {
1289 	dsl_dir_t *dd = arg1;
1290 	struct renamearg *ra = arg2;
1291 	dsl_pool_t *dp = dd->dd_pool;
1292 	objset_t *mos = dp->dp_meta_objset;
1293 	int err;
1294 	char namebuf[MAXNAMELEN];
1295 
1296 	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);
1297 
1298 	/* Log this before we change the name. */
1299 	dsl_dir_name(ra->newparent, namebuf);
1300 	spa_history_log_internal_dd(dd, "rename", tx,
1301 	    "-> %s/%s", namebuf, ra->mynewname);
1302 
1303 	if (ra->newparent != dd->dd_parent) {
1304 		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
1305 		    -dd->dd_phys->dd_used_bytes,
1306 		    -dd->dd_phys->dd_compressed_bytes,
1307 		    -dd->dd_phys->dd_uncompressed_bytes, tx);
1308 		dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD,
1309 		    dd->dd_phys->dd_used_bytes,
1310 		    dd->dd_phys->dd_compressed_bytes,
1311 		    dd->dd_phys->dd_uncompressed_bytes, tx);
1312 
1313 		if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
1314 			uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
1315 			    dd->dd_phys->dd_used_bytes;
1316 
1317 			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
1318 			    -unused_rsrv, 0, 0, tx);
1319 			dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV,
1320 			    unused_rsrv, 0, 0, tx);
1321 		}
1322 	}
1323 
1324 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
1325 
1326 	/* remove from old parent zapobj */
1327 	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
1328 	    dd->dd_myname, tx);
1329 	ASSERT0(err);
1330 
1331 	(void) strcpy(dd->dd_myname, ra->mynewname);
1332 	dsl_dir_close(dd->dd_parent, dd);
1333 	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
1334 	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
1335 	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));
1336 
1337 	/* add to new parent zapobj */
1338 	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
1339 	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
1340 	ASSERT0(err);
1341 
1342 }
1343 
1344 int
1345 dsl_dir_rename(dsl_dir_t *dd, const char *newname)
1346 {
1347 	struct renamearg ra;
1348 	int err;
1349 
1350 	/* new parent should exist */
1351 	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
1352 	if (err)
1353 		return (err);
1354 
1355 	/* can't rename to different pool */
1356 	if (dd->dd_pool != ra.newparent->dd_pool) {
1357 		err = ENXIO;
1358 		goto out;
1359 	}
1360 
1361 	/* new name should not already exist */
1362 	if (ra.mynewname == NULL) {
1363 		err = EEXIST;
1364 		goto out;
1365 	}
1366 
1367 	err = dsl_sync_task_do(dd->dd_pool,
1368 	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);
1369 
1370 out:
1371 	dsl_dir_close(ra.newparent, FTAG);
1372 	return (err);
1373 }
1374 
1375 int
1376 dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
1377 {
1378 	dsl_dir_t *ancestor;
1379 	int64_t adelta;
1380 	uint64_t avail;
1381 
1382 	ancestor = closest_common_ancestor(sdd, tdd);
1383 	adelta = would_change(sdd, -space, ancestor);
1384 	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
1385 	if (avail < space)
1386 		return (ENOSPC);
1387 
1388 	return (0);
1389 }
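
/*
 * Worked example (illustration only): moving a dir that accounts for
 * 10G (the larger of its used and reserved space) to a new parent whose
 * subtree has only 4G available fails with ENOSPC.  The 10G being
 * vacated is credited at the closest common ancestor, so the space it
 * already consumes under that ancestor is not counted against the move
 * a second time.
 */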
1390 
1391 timestruc_t
1392 dsl_dir_snap_cmtime(dsl_dir_t *dd)
1393 {
1394 	timestruc_t t;
1395 
1396 	mutex_enter(&dd->dd_lock);
1397 	t = dd->dd_snap_cmtime;
1398 	mutex_exit(&dd->dd_lock);
1399 
1400 	return (t);
1401 }
1402 
1403 void
1404 dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
1405 {
1406 	timestruc_t t;
1407 
1408 	gethrestime(&t);
1409 	mutex_enter(&dd->dd_lock);
1410 	dd->dd_snap_cmtime = t;
1411 	mutex_exit(&dd->dd_lock);
1412 }
1413