xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_dir.c (revision e7437265dc2a4920c197ed4337665539d358b22c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/dmu.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/dsl_dataset.h>
31 #include <sys/dsl_dir.h>
32 #include <sys/dsl_prop.h>
33 #include <sys/dsl_synctask.h>
34 #include <sys/dsl_deleg.h>
35 #include <sys/spa.h>
36 #include <sys/zap.h>
37 #include <sys/zio.h>
38 #include <sys/arc.h>
39 #include <sys/sunddi.h>
40 #include "zfs_namecheck.h"
41 
42 static uint64_t dsl_dir_estimated_space(dsl_dir_t *dd);
43 static void dsl_dir_set_reservation_sync(void *arg1, void *arg2,
44     cred_t *cr, dmu_tx_t *tx);
45 
46 
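/*
 * Eviction callback for the dsl_dir's bonus dbuf.  Undoes the holds
 * taken at instantiation (the parent dir and the spa) and frees the
 * in-core dsl_dir_t.
 */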
47 /* ARGSUSED */
48 static void
49 dsl_dir_evict(dmu_buf_t *db, void *arg)
50 {
51 	dsl_dir_t *dd = arg;
52 	dsl_pool_t *dp = dd->dd_pool;
53 	int t;
54 
55 	for (t = 0; t < TXG_SIZE; t++) {
56 		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
57 		ASSERT(dd->dd_tempreserved[t] == 0);
58 		ASSERT(dd->dd_space_towrite[t] == 0);
59 	}
60 
61 	ASSERT3U(dd->dd_used_bytes, ==, dd->dd_phys->dd_used_bytes);
62 
63 	if (dd->dd_parent)
64 		dsl_dir_close(dd->dd_parent, dd);
65 
66 	spa_close(dd->dd_pool->dp_spa, dd);
67 
68 	/*
69 	 * The props callback list should be empty, since the callbacks
70 	 * hold the dir open.
71 	 */
72 	list_destroy(&dd->dd_prop_cbs);
73 	mutex_destroy(&dd->dd_lock);
74 	kmem_free(dd, sizeof (dsl_dir_t));
75 }
76 
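/*
 * Open the directory with object number 'ddobj', instantiating the
 * in-core dsl_dir_t (and its chain of parents) on first use.  The
 * caller must hold dp_config_rwlock or be in syncing context.  On
 * success, a hold is taken on behalf of 'tag'; release it with
 * dsl_dir_close().
 */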
77 int
78 dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
79     const char *tail, void *tag, dsl_dir_t **ddp)
80 {
81 	dmu_buf_t *dbuf;
82 	dsl_dir_t *dd;
83 	int err;
84 
85 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
86 	    dsl_pool_sync_context(dp));
87 
88 	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
89 	if (err)
90 		return (err);
91 	dd = dmu_buf_get_user(dbuf);
92 #ifdef ZFS_DEBUG
93 	{
94 		dmu_object_info_t doi;
95 		dmu_object_info_from_db(dbuf, &doi);
96 		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
97 	}
98 #endif
99 	/* XXX assert bonus buffer size is correct */
100 	if (dd == NULL) {
101 		dsl_dir_t *winner;
102 		int err;
103 
104 		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
105 		dd->dd_object = ddobj;
106 		dd->dd_dbuf = dbuf;
107 		dd->dd_pool = dp;
108 		dd->dd_phys = dbuf->db_data;
109 		dd->dd_used_bytes = dd->dd_phys->dd_used_bytes;
110 		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
111 
112 		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
113 		    offsetof(dsl_prop_cb_record_t, cbr_node));
114 
115 		if (dd->dd_phys->dd_parent_obj) {
116 			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
117 			    NULL, dd, &dd->dd_parent);
118 			if (err) {
119 				mutex_destroy(&dd->dd_lock);
120 				kmem_free(dd, sizeof (dsl_dir_t));
121 				dmu_buf_rele(dbuf, tag);
122 				return (err);
123 			}
124 			if (tail) {
125 #ifdef ZFS_DEBUG
126 				uint64_t foundobj;
127 
128 				err = zap_lookup(dp->dp_meta_objset,
129 				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
130 				    tail, sizeof (foundobj), 1, &foundobj);
131 				ASSERT(err || foundobj == ddobj);
132 #endif
133 				(void) strcpy(dd->dd_myname, tail);
134 			} else {
135 				err = zap_value_search(dp->dp_meta_objset,
136 				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
137 				    ddobj, 0, dd->dd_myname);
138 			}
139 			if (err) {
140 				dsl_dir_close(dd->dd_parent, dd);
141 				mutex_destroy(&dd->dd_lock);
142 				kmem_free(dd, sizeof (dsl_dir_t));
143 				dmu_buf_rele(dbuf, tag);
144 				return (err);
145 			}
146 		} else {
147 			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
148 		}
149 
150 		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
151 		    dsl_dir_evict);
152 		if (winner) {
153 			if (dd->dd_parent)
154 				dsl_dir_close(dd->dd_parent, dd);
155 			mutex_destroy(&dd->dd_lock);
156 			kmem_free(dd, sizeof (dsl_dir_t));
157 			dd = winner;
158 		} else {
159 			spa_open_ref(dp->dp_spa, dd);
160 		}
161 	}
162 
163 	/*
164 	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
165 	 * holds on the spa.  We need the open-to-close holds because
166 	 * otherwise the spa_refcnt wouldn't change when we open a
167 	 * dir which the spa also has open, so we could incorrectly
168 	 * think it was OK to unload/export/destroy the pool.  We need
169 	 * the instantiate-to-evict hold because the dsl_dir_t has a
170 	 * pointer to the dd_pool, which has a pointer to the spa_t.
171 	 */
172 	spa_open_ref(dp->dp_spa, tag);
173 	ASSERT3P(dd->dd_pool, ==, dp);
174 	ASSERT3U(dd->dd_object, ==, ddobj);
175 	ASSERT3P(dd->dd_dbuf, ==, dbuf);
176 	*ddp = dd;
177 	return (0);
178 }
179 
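/* Release the hold on a dsl_dir_t obtained from dsl_dir_open*(). */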
180 void
181 dsl_dir_close(dsl_dir_t *dd, void *tag)
182 {
183 	dprintf_dd(dd, "%s\n", "");
184 	spa_close(dd->dd_pool->dp_spa, tag);
185 	dmu_buf_rele(dd->dd_dbuf, tag);
186 }
187 
188 /* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
189 void
190 dsl_dir_name(dsl_dir_t *dd, char *buf)
191 {
192 	if (dd->dd_parent) {
193 		dsl_dir_name(dd->dd_parent, buf);
194 		(void) strcat(buf, "/");
195 	} else {
196 		buf[0] = '\0';
197 	}
198 	if (!MUTEX_HELD(&dd->dd_lock)) {
199 		/*
200 		 * emulate a recursive mutex, so that dprintf_dd() can
201 		 * be used while dd_lock is held
202 		 */
203 		mutex_enter(&dd->dd_lock);
204 		(void) strcat(buf, dd->dd_myname);
205 		mutex_exit(&dd->dd_lock);
206 	} else {
207 		(void) strcat(buf, dd->dd_myname);
208 	}
209 }
210 
211 /* Calculate the name length, avoiding all the strcat calls of dsl_dir_name */
212 int
213 dsl_dir_namelen(dsl_dir_t *dd)
214 {
215 	int result = 0;
216 
217 	if (dd->dd_parent) {
218 		/* parent's name + 1 for the "/" */
219 		result = dsl_dir_namelen(dd->dd_parent) + 1;
220 	}
221 
222 	if (!MUTEX_HELD(&dd->dd_lock)) {
223 		/* see dsl_dir_name */
224 		mutex_enter(&dd->dd_lock);
225 		result += strlen(dd->dd_myname);
226 		mutex_exit(&dd->dd_lock);
227 	} else {
228 		result += strlen(dd->dd_myname);
229 	}
230 
231 	return (result);
232 }
233 
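/*
 * A dir is considered private if its own name, or any ancestor's name,
 * is hidden according to dataset_name_hidden().
 */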
234 int
235 dsl_dir_is_private(dsl_dir_t *dd)
236 {
237 	int rv = FALSE;
238 
239 	if (dd->dd_parent && dsl_dir_is_private(dd->dd_parent))
240 		rv = TRUE;
241 	if (dataset_name_hidden(dd->dd_myname))
242 		rv = TRUE;
243 	return (rv);
244 }
245 
246 
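/*
 * Copy the next component of 'path' (everything up to the next '/' or
 * '@') into 'component', and point *nextp at the remainder of the path
 * (or set it to NULL if this was the last component).  Returns EINVAL
 * for malformed names and ENAMETOOLONG for over-long components.
 */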
247 static int
248 getcomponent(const char *path, char *component, const char **nextp)
249 {
250 	char *p;
251 	if (path == NULL)
252 		return (ENOENT);
253 	/* This would be a good place to reserve some namespace... */
254 	p = strpbrk(path, "/@");
255 	if (p && (p[1] == '/' || p[1] == '@')) {
256 		/* two separators in a row */
257 		return (EINVAL);
258 	}
259 	if (p == NULL || p == path) {
260 		/*
261 		 * if the first character is an @ or /, it had better be an
262 		 * @, it had better not be followed by any more @s or slashes,
263 		 * and it had better have something after the @.
264 		 */
265 		if (p != NULL &&
266 		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
267 			return (EINVAL);
268 		if (strlen(path) >= MAXNAMELEN)
269 			return (ENAMETOOLONG);
270 		(void) strcpy(component, path);
271 		p = NULL;
272 	} else if (p[0] == '/') {
273 		if (p-path >= MAXNAMELEN)
274 			return (ENAMETOOLONG);
275 		(void) strncpy(component, path, p - path);
276 		component[p-path] = '\0';
277 		p++;
278 	} else if (p[0] == '@') {
279 		/*
280 		 * if the next separator is an @, there must not be
281 		 * any more slashes.
282 		 */
283 		if (strchr(path, '/'))
284 			return (EINVAL);
285 		if (p-path >= MAXNAMELEN)
286 			return (ENAMETOOLONG);
287 		(void) strncpy(component, path, p - path);
288 		component[p-path] = '\0';
289 	} else {
290 		ASSERT(!"invalid p");
291 	}
292 	*nextp = p;
293 	return (0);
294 }
295 
296 /*
297  * Same as dsl_dir_open, but ignore the first component of name and use
298  * the specified spa instead.
299  */
300 int
301 dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
302     dsl_dir_t **ddp, const char **tailp)
303 {
304 	char buf[MAXNAMELEN];
305 	const char *next, *nextnext = NULL;
306 	int err;
307 	dsl_dir_t *dd;
308 	dsl_pool_t *dp;
309 	uint64_t ddobj;
310 	int openedspa = FALSE;
311 
312 	dprintf("%s\n", name);
313 
314 	err = getcomponent(name, buf, &next);
315 	if (err)
316 		return (err);
317 	if (spa == NULL) {
318 		err = spa_open(buf, &spa, FTAG);
319 		if (err) {
320 			dprintf("spa_open(%s) failed\n", buf);
321 			return (err);
322 		}
323 		openedspa = TRUE;
324 
325 		/* XXX this assertion belongs in spa_open */
326 		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
327 	}
328 
329 	dp = spa_get_dsl(spa);
330 
331 	rw_enter(&dp->dp_config_rwlock, RW_READER);
332 	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
333 	if (err) {
334 		rw_exit(&dp->dp_config_rwlock);
335 		if (openedspa)
336 			spa_close(spa, FTAG);
337 		return (err);
338 	}
339 
340 	while (next != NULL) {
341 		dsl_dir_t *child_ds;
342 		err = getcomponent(next, buf, &nextnext);
343 		if (err)
344 			break;
345 		ASSERT(next[0] != '\0');
346 		if (next[0] == '@')
347 			break;
348 		dprintf("looking up %s in obj%lld\n",
349 		    buf, dd->dd_phys->dd_child_dir_zapobj);
350 
351 		err = zap_lookup(dp->dp_meta_objset,
352 		    dd->dd_phys->dd_child_dir_zapobj,
353 		    buf, sizeof (ddobj), 1, &ddobj);
354 		if (err) {
355 			if (err == ENOENT)
356 				err = 0;
357 			break;
358 		}
359 
360 		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
361 		if (err)
362 			break;
363 		dsl_dir_close(dd, tag);
364 		dd = child_ds;
365 		next = nextnext;
366 	}
367 	rw_exit(&dp->dp_config_rwlock);
368 
369 	if (err) {
370 		dsl_dir_close(dd, tag);
371 		if (openedspa)
372 			spa_close(spa, FTAG);
373 		return (err);
374 	}
375 
376 	/*
377 	 * It's an error if there's more than one component left, or
378 	 * tailp==NULL and there's any component left.
379 	 */
380 	if (next != NULL &&
381 	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
382 		/* bad path name */
383 		dsl_dir_close(dd, tag);
384 		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
385 		err = ENOENT;
386 	}
387 	if (tailp)
388 		*tailp = next;
389 	if (openedspa)
390 		spa_close(spa, FTAG);
391 	*ddp = dd;
392 	return (err);
393 }
394 
395 /*
396  * Return the dsl_dir_t, and possibly the last component which couldn't
397  * be found in *tail.  Return NULL if the path is bogus, or if
398  * tail==NULL and we couldn't parse the whole name.  (*tail)[0] == '@'
399  * means that the last component is a snapshot.
400  */
401 int
402 dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
403 {
404 	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
405 }
406 
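/*
 * Create a new child directory named 'name' under 'pds', and return
 * the new directory's object number.  Syncing context only.
 */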
407 uint64_t
408 dsl_dir_create_sync(dsl_dir_t *pds, const char *name, dmu_tx_t *tx)
409 {
410 	objset_t *mos = pds->dd_pool->dp_meta_objset;
411 	uint64_t ddobj;
412 	dsl_dir_phys_t *dsphys;
413 	dmu_buf_t *dbuf;
414 
415 	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
416 	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
417 	VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
418 	    name, sizeof (uint64_t), 1, &ddobj, tx));
419 	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
420 	dmu_buf_will_dirty(dbuf, tx);
421 	dsphys = dbuf->db_data;
422 
423 	dsphys->dd_creation_time = gethrestime_sec();
424 	dsphys->dd_parent_obj = pds->dd_object;
425 	dsphys->dd_props_zapobj = zap_create(mos,
426 	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
427 	dsphys->dd_child_dir_zapobj = zap_create(mos,
428 	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
429 	dmu_buf_rele(dbuf, FTAG);
430 
431 	return (ddobj);
432 }
433 
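/*
 * Check that this dir can be destroyed: the only holds must be the two
 * from dsl_dataset_destroy, and it must have no child directories.
 */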
434 /* ARGSUSED */
435 int
436 dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
437 {
438 	dsl_dir_t *dd = arg1;
439 	dsl_pool_t *dp = dd->dd_pool;
440 	objset_t *mos = dp->dp_meta_objset;
441 	int err;
442 	uint64_t count;
443 
444 	/*
445 	 * There should be exactly two holds, both from
446 	 * dsl_dataset_destroy: one on the dd directory, and one on its
447  * head ds.  Otherwise, someone is trying to look up something
448 	 * inside this dir while we want to destroy it.  The
449 	 * config_rwlock ensures that nobody else opens it after we
450 	 * check.
451 	 */
452 	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
453 		return (EBUSY);
454 
455 	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
456 	if (err)
457 		return (err);
458 	if (count != 0)
459 		return (EEXIST);
460 
461 	return (0);
462 }
463 
464 void
465 dsl_dir_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
466 {
467 	dsl_dir_t *dd = arg1;
468 	objset_t *mos = dd->dd_pool->dp_meta_objset;
469 	uint64_t val, obj;
470 
471 	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
472 	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
473 
474 	/* Remove our reservation. */
475 	val = 0;
476 	dsl_dir_set_reservation_sync(dd, &val, cr, tx);
477 	ASSERT3U(dd->dd_used_bytes, ==, 0);
478 	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);
479 
480 	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
481 	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
482 	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
483 	VERIFY(0 == zap_remove(mos,
484 	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));
485 
486 	obj = dd->dd_object;
487 	dsl_dir_close(dd, tag);
488 	VERIFY(0 == dmu_object_free(mos, obj, tx));
489 }
490 
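/*
 * Create the pool's root directory and record its object number in the
 * MOS pool directory under DMU_POOL_ROOT_DATASET.
 */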
491 void
492 dsl_dir_create_root(objset_t *mos, uint64_t *ddobjp, dmu_tx_t *tx)
493 {
494 	dsl_dir_phys_t *dsp;
495 	dmu_buf_t *dbuf;
496 	int error;
497 
498 	*ddobjp = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
499 	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
500 
501 	error = zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ROOT_DATASET,
502 	    sizeof (uint64_t), 1, ddobjp, tx);
503 	ASSERT3U(error, ==, 0);
504 
505 	VERIFY(0 == dmu_bonus_hold(mos, *ddobjp, FTAG, &dbuf));
506 	dmu_buf_will_dirty(dbuf, tx);
507 	dsp = dbuf->db_data;
508 
509 	dsp->dd_creation_time = gethrestime_sec();
510 	dsp->dd_props_zapobj = zap_create(mos,
511 	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
512 	dsp->dd_child_dir_zapobj = zap_create(mos,
513 	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
514 
515 	dmu_buf_rele(dbuf, FTAG);
516 }
517 
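/*
 * Add this dir's space and property statistics (available, used,
 * quota, reservation, compressratio, and the clone origin, if any)
 * to 'nv'.
 */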
518 void
519 dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
520 {
521 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
522 	    dsl_dir_space_available(dd, NULL, 0, TRUE));
523 
524 	mutex_enter(&dd->dd_lock);
525 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, dd->dd_used_bytes);
526 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
527 	    dd->dd_phys->dd_quota);
528 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
529 	    dd->dd_phys->dd_reserved);
530 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
531 	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
532 	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
533 	    dd->dd_phys->dd_compressed_bytes));
534 	mutex_exit(&dd->dd_lock);
535 
536 	if (dd->dd_phys->dd_clone_parent_obj) {
537 		dsl_dataset_t *ds;
538 		char buf[MAXNAMELEN];
539 
540 		rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
541 		VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool,
542 		    dd->dd_phys->dd_clone_parent_obj,
543 		    NULL, DS_MODE_NONE, FTAG, &ds));
544 		dsl_dataset_name(ds, buf);
545 		dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
546 		rw_exit(&dd->dd_pool->dp_config_rwlock);
547 
548 		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
549 	}
550 }
551 
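/*
 * Add this dir to the pool's list of dirty dirs for tx's txg.  When it
 * is first added, take an extra hold on the dbuf; the hold is released
 * in dsl_dir_sync().
 */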
552 void
553 dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
554 {
555 	dsl_pool_t *dp = dd->dd_pool;
556 
557 	ASSERT(dd->dd_phys);
558 
559 	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
560 		/* up the hold count until we can be written out */
561 		dmu_buf_add_ref(dd->dd_dbuf, dd);
562 	}
563 }
564 
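/*
 * Given a change of 'delta' bytes to this dir's space usage (currently
 * 'used'), return how much of that change is visible to the parent.
 * Usage below dd_reserved is already charged to the parent, so only
 * the portion above the reservation propagates up.
 */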
565 static int64_t
566 parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
567 {
568 	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
569 	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
570 	return (new_accounted - old_accounted);
571 }
572 
573 void
574 dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
575 {
576 	ASSERT(dmu_tx_is_syncing(tx));
577 
578 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
579 
580 	mutex_enter(&dd->dd_lock);
581 	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
582 	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
583 	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
584 	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
585 	dd->dd_phys->dd_used_bytes = dd->dd_used_bytes;
586 	mutex_exit(&dd->dd_lock);
587 
588 	/* release the hold from dsl_dir_dirty */
589 	dmu_buf_rele(dd->dd_dbuf, dd);
590 }
591 
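/*
 * Estimate this dir's space usage: the on-disk used bytes plus the
 * space expected to be written in each open txg.
 */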
592 static uint64_t
593 dsl_dir_estimated_space(dsl_dir_t *dd)
594 {
595 	int64_t space;
596 	int i;
597 
598 	ASSERT(MUTEX_HELD(&dd->dd_lock));
599 
600 	space = dd->dd_phys->dd_used_bytes;
601 	ASSERT(space >= 0);
602 	for (i = 0; i < TXG_SIZE; i++) {
603 		space += dd->dd_space_towrite[i&TXG_MASK];
604 		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
605 	}
606 	return (space);
607 }
608 
609 /*
610  * How much space would dd have available if ancestor had delta applied
611  * to it?  If ondiskonly is set, we're only interested in what's
612  * on-disk, not estimated pending changes.
613  */
614 uint64_t
615 dsl_dir_space_available(dsl_dir_t *dd,
616     dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
617 {
618 	uint64_t parentspace, myspace, quota, used;
619 
620 	/*
621 	 * If there are no restrictions otherwise, assume we have
622 	 * unlimited space available.
623 	 */
624 	quota = UINT64_MAX;
625 	parentspace = UINT64_MAX;
626 
627 	if (dd->dd_parent != NULL) {
628 		parentspace = dsl_dir_space_available(dd->dd_parent,
629 		    ancestor, delta, ondiskonly);
630 	}
631 
632 	mutex_enter(&dd->dd_lock);
633 	if (dd->dd_phys->dd_quota != 0)
634 		quota = dd->dd_phys->dd_quota;
635 	if (ondiskonly) {
636 		used = dd->dd_used_bytes;
637 	} else {
638 		used = dsl_dir_estimated_space(dd);
639 	}
640 	if (dd == ancestor)
641 		used += delta;
642 
643 	if (dd->dd_parent == NULL) {
644 		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
645 		quota = MIN(quota, poolsize);
646 	}
647 
648 	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
649 		/*
650 		 * We have some space reserved, in addition to what our
651 		 * parent gave us.
652 		 */
653 		parentspace += dd->dd_phys->dd_reserved - used;
654 	}
655 
656 	if (used > quota) {
657 		/* over quota */
658 		myspace = 0;
659 
660 		/*
661 		 * While it's OK to be a little over quota, if
662 		 * we think we are using more space than there
663 		 * is in the pool (which is already 1.6% more than
664 		 * dsl_pool_adjustedsize()), something is very
665 		 * wrong.
666 		 */
667 		ASSERT3U(used, <=, spa_get_space(dd->dd_pool->dp_spa));
668 	} else {
669 		/*
670 		 * the lesser of the space provided by our parent and
671 		 * the space left in our quota
672 		 */
673 		myspace = MIN(parentspace, quota - used);
674 	}
675 
676 	mutex_exit(&dd->dd_lock);
677 
678 	return (myspace);
679 }
680 
681 struct tempreserve {
682 	list_node_t tr_node;
683 	dsl_dir_t *tr_ds;
684 	uint64_t tr_size;
685 };
686 
687 /*
688  * Reserve space in this dsl_dir, to be used in this tx's txg.
689  * After the space has been dirtied (and thus
690  * dsl_dir_willuse_space() has been called), the reservation should
691  * be canceled, using dsl_dir_tempreserve_clear().
692  */
693 static int
694 dsl_dir_tempreserve_impl(dsl_dir_t *dd,
695     uint64_t asize, boolean_t netfree, list_t *tr_list, dmu_tx_t *tx)
696 {
697 	uint64_t txg = tx->tx_txg;
698 	uint64_t est_used, quota, parent_rsrv;
699 	int edquot = EDQUOT;
700 	int txgidx = txg & TXG_MASK;
701 	int i;
702 	struct tempreserve *tr;
703 
704 	ASSERT3U(txg, !=, 0);
705 	ASSERT3S(asize, >=, 0);
706 
707 	mutex_enter(&dd->dd_lock);
708 	/*
709 	 * Check against the dsl_dir's quota.  We don't add in the delta
710 	 * when checking for over-quota because they get one free hit.
711 	 */
712 	est_used = dsl_dir_estimated_space(dd);
713 	for (i = 0; i < TXG_SIZE; i++)
714 		est_used += dd->dd_tempreserved[i];
715 
716 	quota = UINT64_MAX;
717 
718 	if (dd->dd_phys->dd_quota)
719 		quota = dd->dd_phys->dd_quota;
720 
721 	/*
722 	 * If this transaction will result in a net free of space, we want
723 	 * to let it through, but we have to be careful: the space that it
724 	 * frees won't become available until *after* this txg syncs.
725 	 * Therefore, to ensure that it's possible to remove files from
726 	 * a full pool without inducing transient overcommits, we throttle
727 	 * netfree transactions against a quota that is slightly larger,
728 	 * but still within the pool's allocation slop.  In cases where
729 	 * we're very close to full, this will allow a steady trickle of
730 	 * removes to get through.
731 	 */
732 	if (dd->dd_parent == NULL) {
733 		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
734 		if (poolsize < quota) {
735 			quota = poolsize;
736 			edquot = ENOSPC;
737 		}
738 	} else if (netfree) {
739 		quota = UINT64_MAX;
740 	}
741 
742 	/*
743 	 * If they are requesting more space, and our current estimate
744 	 * is over quota, they get to try again unless the actual
745 	 * on-disk usage is over quota and there are no pending changes (which
746 	 * may free up space for us).
747 	 */
748 	if (asize > 0 && est_used > quota) {
749 		if (dd->dd_space_towrite[txg & TXG_MASK] != 0 ||
750 		    dd->dd_space_towrite[(txg-1) & TXG_MASK] != 0 ||
751 		    dd->dd_space_towrite[(txg-2) & TXG_MASK] != 0 ||
752 		    dd->dd_used_bytes < quota)
753 			edquot = ERESTART;
754 		dprintf_dd(dd, "failing: used=%lluK est_used = %lluK "
755 		    "quota=%lluK tr=%lluK err=%d\n",
756 		    dd->dd_used_bytes>>10, est_used>>10,
757 		    quota>>10, asize>>10, edquot);
758 		mutex_exit(&dd->dd_lock);
759 		return (edquot);
760 	}
761 
762 	/* We need to up our estimated delta before dropping dd_lock */
763 	dd->dd_tempreserved[txgidx] += asize;
764 
765 	parent_rsrv = parent_delta(dd, est_used, asize);
766 	mutex_exit(&dd->dd_lock);
767 
768 	tr = kmem_alloc(sizeof (struct tempreserve), KM_SLEEP);
769 	tr->tr_ds = dd;
770 	tr->tr_size = asize;
771 	list_insert_tail(tr_list, tr);
772 
773 	/* see if it's OK with our parent */
774 	if (dd->dd_parent && parent_rsrv) {
775 		return (dsl_dir_tempreserve_impl(dd->dd_parent,
776 		    parent_rsrv, netfree, tr_list, tx));
777 	} else {
778 		return (0);
779 	}
780 }
781 
782 /*
783  * Reserve space in this dsl_dir, to be used in this tx's txg.
784  * After the space has been dirtied (and thus
785  * dsl_dir_willuse_space() has been called), the reservation should
786  * be canceled, using dsl_dir_tempreserve_clear().
787  */
788 int
789 dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize,
790     uint64_t asize, uint64_t fsize, void **tr_cookiep, dmu_tx_t *tx)
791 {
792 	int err = 0;
793 	list_t *tr_list;
794 
795 	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
796 	list_create(tr_list, sizeof (struct tempreserve),
797 	    offsetof(struct tempreserve, tr_node));
798 	ASSERT3S(asize, >=, 0);
799 	ASSERT3S(fsize, >=, 0);
800 
801 	err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
802 	    tr_list, tx);
803 
804 	if (err == 0) {
805 		struct tempreserve *tr;
806 
807 		err = arc_tempreserve_space(lsize);
808 		if (err == 0) {
809 			tr = kmem_alloc(sizeof (struct tempreserve), KM_SLEEP);
810 			tr->tr_ds = NULL;
811 			tr->tr_size = lsize;
812 			list_insert_tail(tr_list, tr);
813 		}
814 	}
815 
816 	if (err)
817 		dsl_dir_tempreserve_clear(tr_list, tx);
818 	else
819 		*tr_cookiep = tr_list;
820 	return (err);
821 }
822 
823 /*
824  * Clear a temporary reservation that we previously made with
825  * dsl_dir_tempreserve_space().
826  */
827 void
828 dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
829 {
830 	int txgidx = tx->tx_txg & TXG_MASK;
831 	list_t *tr_list = tr_cookie;
832 	struct tempreserve *tr;
833 
834 	ASSERT3U(tx->tx_txg, !=, 0);
835 
836 	while (tr = list_head(tr_list)) {
837 		if (tr->tr_ds == NULL) {
838 			arc_tempreserve_clear(tr->tr_size);
839 		} else {
840 			mutex_enter(&tr->tr_ds->dd_lock);
841 			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
842 			    tr->tr_size);
843 			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
844 			mutex_exit(&tr->tr_ds->dd_lock);
845 		}
846 		list_remove(tr_list, tr);
847 		kmem_free(tr, sizeof (struct tempreserve));
848 	}
849 
850 	kmem_free(tr_list, sizeof (list_t));
851 }
852 
853 /*
854  * Call in open context when we think we're going to write/free space,
855  * e.g. when dirtying data.  Be conservative (i.e. OK to write less than
856  * this or free more than this, but don't write more or free less).
857  */
858 void
859 dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
860 {
861 	int64_t parent_space;
862 	uint64_t est_used;
863 
864 	mutex_enter(&dd->dd_lock);
865 	if (space > 0)
866 		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
867 
868 	est_used = dsl_dir_estimated_space(dd);
869 	parent_space = parent_delta(dd, est_used, space);
870 	mutex_exit(&dd->dd_lock);
871 
872 	/* Make sure that we clean up dd_space_to* */
873 	dsl_dir_dirty(dd, tx);
874 
875 	/* XXX this is potentially expensive and unnecessary... */
876 	if (parent_space && dd->dd_parent)
877 		dsl_dir_willuse_space(dd->dd_parent, parent_space, tx);
878 }
879 
880 /* call from syncing context when we actually write/free space for this dd */
881 void
882 dsl_dir_diduse_space(dsl_dir_t *dd,
883     int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
884 {
885 	int64_t accounted_delta;
886 
887 	ASSERT(dmu_tx_is_syncing(tx));
888 
889 	dsl_dir_dirty(dd, tx);
890 
891 	mutex_enter(&dd->dd_lock);
892 	accounted_delta = parent_delta(dd, dd->dd_used_bytes, used);
893 	ASSERT(used >= 0 || dd->dd_used_bytes >= -used);
894 	ASSERT(compressed >= 0 ||
895 	    dd->dd_phys->dd_compressed_bytes >= -compressed);
896 	ASSERT(uncompressed >= 0 ||
897 	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
898 	dd->dd_used_bytes += used;
899 	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
900 	dd->dd_phys->dd_compressed_bytes += compressed;
901 	mutex_exit(&dd->dd_lock);
902 
903 	if (dd->dd_parent != NULL) {
904 		dsl_dir_diduse_space(dd->dd_parent,
905 		    accounted_delta, compressed, uncompressed, tx);
906 	}
907 }
908 
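/*
 * Sync task check function: fail with ENOSPC if the proposed quota
 * would be less than the current reservation or the estimated space
 * already in use.
 */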
909 static int
910 dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
911 {
912 	dsl_dir_t *dd = arg1;
913 	uint64_t *quotap = arg2;
914 	uint64_t new_quota = *quotap;
915 	int err = 0;
916 	uint64_t towrite;
917 
918 	if (new_quota == 0)
919 		return (0);
920 
921 	mutex_enter(&dd->dd_lock);
922 	/*
923 	 * If we are doing the preliminary check in open context, and
924 	 * there are pending changes, then don't fail it, since the
925 	 * pending changes could under-estimate the amount of space to be
926 	 * freed up.
927 	 */
928 	towrite = dd->dd_space_towrite[0] + dd->dd_space_towrite[1] +
929 	    dd->dd_space_towrite[2] + dd->dd_space_towrite[3];
930 	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
931 	    (new_quota < dd->dd_phys->dd_reserved ||
932 	    new_quota < dsl_dir_estimated_space(dd))) {
933 		err = ENOSPC;
934 	}
935 	mutex_exit(&dd->dd_lock);
936 	return (err);
937 }
938 
939 /* ARGSUSED */
940 static void
941 dsl_dir_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
942 {
943 	dsl_dir_t *dd = arg1;
944 	uint64_t *quotap = arg2;
945 	uint64_t new_quota = *quotap;
946 
947 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
948 
949 	mutex_enter(&dd->dd_lock);
950 	dd->dd_phys->dd_quota = new_quota;
951 	mutex_exit(&dd->dd_lock);
952 
953 	spa_history_internal_log(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
954 	    tx, cr, "%lld dataset = %llu ",
955 	    (longlong_t)new_quota, dd->dd_phys->dd_head_dataset_obj);
956 }
957 
958 int
959 dsl_dir_set_quota(const char *ddname, uint64_t quota)
960 {
961 	dsl_dir_t *dd;
962 	int err;
963 
964 	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
965 	if (err)
966 		return (err);
967 	/*
968 	 * If someone removes a file, then tries to set the quota, we
969 	 * want to make sure the file freeing takes effect.
970 	 */
971 	txg_wait_open(dd->dd_pool, 0);
972 
973 	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
974 	    dsl_dir_set_quota_sync, dd, &quota, 0);
975 	dsl_dir_close(dd, FTAG);
976 	return (err);
977 }
978 
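/*
 * Sync task check function: verify that raising the reservation does
 * not exceed the space available from the parent, or this dir's quota.
 */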
979 static int
980 dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
981 {
982 	dsl_dir_t *dd = arg1;
983 	uint64_t *reservationp = arg2;
984 	uint64_t new_reservation = *reservationp;
985 	uint64_t used, avail;
986 	int64_t delta;
987 
988 	if (new_reservation > INT64_MAX)
989 		return (EOVERFLOW);
990 
991 	/*
992 	 * If we are doing the preliminary check in open context, the
993 	 * space estimates may be inaccurate.
994 	 */
995 	if (!dmu_tx_is_syncing(tx))
996 		return (0);
997 
998 	mutex_enter(&dd->dd_lock);
999 	used = dd->dd_used_bytes;
1000 	delta = MAX(used, new_reservation) -
1001 	    MAX(used, dd->dd_phys->dd_reserved);
1002 	mutex_exit(&dd->dd_lock);
1003 
1004 	if (dd->dd_parent) {
1005 		avail = dsl_dir_space_available(dd->dd_parent,
1006 		    NULL, 0, FALSE);
1007 	} else {
1008 		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
1009 	}
1010 
1011 	if (delta > 0 && delta > avail)
1012 		return (ENOSPC);
1013 	if (delta > 0 && dd->dd_phys->dd_quota > 0 &&
1014 	    new_reservation > dd->dd_phys->dd_quota)
1015 		return (ENOSPC);
1016 	return (0);
1017 }
1018 
1019 /* ARGSUSED */
1020 static void
1021 dsl_dir_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1022 {
1023 	dsl_dir_t *dd = arg1;
1024 	uint64_t *reservationp = arg2;
1025 	uint64_t new_reservation = *reservationp;
1026 	uint64_t used;
1027 	int64_t delta;
1028 
1029 	mutex_enter(&dd->dd_lock);
1030 	used = dd->dd_used_bytes;
1031 	delta = MAX(used, new_reservation) -
1032 	    MAX(used, dd->dd_phys->dd_reserved);
1033 	mutex_exit(&dd->dd_lock);
1034 
1035 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
1036 	dd->dd_phys->dd_reserved = new_reservation;
1037 
1038 	if (dd->dd_parent != NULL) {
1039 		/* Roll up this additional usage into our ancestors */
1040 		dsl_dir_diduse_space(dd->dd_parent, delta, 0, 0, tx);
1041 	}
1042 
1043 	spa_history_internal_log(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
1044 	    tx, cr, "%lld dataset = %llu",
1045 	    (longlong_t)new_reservation, dd->dd_phys->dd_head_dataset_obj);
1046 }
1047 
1048 int
1049 dsl_dir_set_reservation(const char *ddname, uint64_t reservation)
1050 {
1051 	dsl_dir_t *dd;
1052 	int err;
1053 
1054 	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
1055 	if (err)
1056 		return (err);
1057 	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
1058 	    dsl_dir_set_reservation_sync, dd, &reservation, 0);
1059 	dsl_dir_close(dd, FTAG);
1060 	return (err);
1061 }
1062 
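/*
 * Return the deepest dir that is an ancestor of (or equal to) both ds1
 * and ds2, or NULL if they share no common ancestor.
 */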
1063 static dsl_dir_t *
1064 closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
1065 {
1066 	for (; ds1; ds1 = ds1->dd_parent) {
1067 		dsl_dir_t *dd;
1068 		for (dd = ds2; dd; dd = dd->dd_parent) {
1069 			if (ds1 == dd)
1070 				return (dd);
1071 		}
1072 	}
1073 	return (NULL);
1074 }
1075 
1076 /*
1077  * If delta is applied to dd, how much of that delta would be applied to
1078  * ancestor?  Syncing context only.
1079  */
1080 static int64_t
1081 would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
1082 {
1083 	if (dd == ancestor)
1084 		return (delta);
1085 
1086 	mutex_enter(&dd->dd_lock);
1087 	delta = parent_delta(dd, dd->dd_used_bytes, delta);
1088 	mutex_exit(&dd->dd_lock);
1089 	return (would_change(dd->dd_parent, delta, ancestor));
1090 }
1091 
1092 struct renamearg {
1093 	dsl_dir_t *newparent;
1094 	const char *mynewname;
1095 };
1096 
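/*
 * Sync task check function for rename: there must be no extra holds,
 * the new name must not already exist, the destination must not be our
 * own descendant, and the new parent must have room for our space.
 */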
1097 /*ARGSUSED*/
1098 static int
1099 dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
1100 {
1101 	dsl_dir_t *dd = arg1;
1102 	struct renamearg *ra = arg2;
1103 	dsl_pool_t *dp = dd->dd_pool;
1104 	objset_t *mos = dp->dp_meta_objset;
1105 	int err;
1106 	uint64_t val;
1107 
1108 	/* There should be 2 references: the open and the dirty */
1109 	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
1110 		return (EBUSY);
1111 
1112 	/* check for existing name */
1113 	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
1114 	    ra->mynewname, 8, 1, &val);
1115 	if (err == 0)
1116 		return (EEXIST);
1117 	if (err != ENOENT)
1118 		return (err);
1119 
1120 	if (ra->newparent != dd->dd_parent) {
1121 		/* is there enough space? */
1122 		uint64_t myspace =
1123 		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);
1124 
1125 		/* no rename into our descendant */
1126 		if (closest_common_ancestor(dd, ra->newparent) == dd)
1127 			return (EINVAL);
1128 
1129 		if (err = dsl_dir_transfer_possible(dd->dd_parent,
1130 		    ra->newparent, myspace))
1131 			return (err);
1132 	}
1133 
1134 	return (0);
1135 }
1136 
1137 static void
1138 dsl_dir_rename_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1139 {
1140 	dsl_dir_t *dd = arg1;
1141 	struct renamearg *ra = arg2;
1142 	dsl_pool_t *dp = dd->dd_pool;
1143 	objset_t *mos = dp->dp_meta_objset;
1144 	int err;
1145 
1146 	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);
1147 
1148 	if (ra->newparent != dd->dd_parent) {
1149 		uint64_t myspace =
1150 		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);
1151 
1152 		dsl_dir_diduse_space(dd->dd_parent, -myspace,
1153 		    -dd->dd_phys->dd_compressed_bytes,
1154 		    -dd->dd_phys->dd_uncompressed_bytes, tx);
1155 		dsl_dir_diduse_space(ra->newparent, myspace,
1156 		    dd->dd_phys->dd_compressed_bytes,
1157 		    dd->dd_phys->dd_uncompressed_bytes, tx);
1158 	}
1159 
1160 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
1161 
1162 	/* remove from old parent zapobj */
1163 	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
1164 	    dd->dd_myname, tx);
1165 	ASSERT3U(err, ==, 0);
1166 
1167 	(void) strcpy(dd->dd_myname, ra->mynewname);
1168 	dsl_dir_close(dd->dd_parent, dd);
1169 	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
1170 	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
1171 	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));
1172 
1173 	/* add to new parent zapobj */
1174 	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
1175 	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
1176 	ASSERT3U(err, ==, 0);
1177 
1178 	spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa,
1179 	    tx, cr, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
1180 }
1181 
1182 int
1183 dsl_dir_rename(dsl_dir_t *dd, const char *newname)
1184 {
1185 	struct renamearg ra;
1186 	int err;
1187 
1188 	/* new parent should exist */
1189 	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
1190 	if (err)
1191 		return (err);
1192 
1193 	/* can't rename to different pool */
1194 	if (dd->dd_pool != ra.newparent->dd_pool) {
1195 		err = ENXIO;
1196 		goto out;
1197 	}
1198 
1199 	/* new name should not already exist */
1200 	if (ra.mynewname == NULL) {
1201 		err = EEXIST;
1202 		goto out;
1203 	}
1204 
1205 	err = dsl_sync_task_do(dd->dd_pool,
1206 	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);
1207 
1208 out:
1209 	dsl_dir_close(ra.newparent, FTAG);
1210 	return (err);
1211 }
1212 
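/*
 * Check whether 'space' bytes can be moved from sdd to tdd without
 * exceeding the space available at their closest common ancestor;
 * returns ENOSPC if not.
 */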
1213 int
1214 dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
1215 {
1216 	dsl_dir_t *ancestor;
1217 	int64_t adelta;
1218 	uint64_t avail;
1219 
1220 	ancestor = closest_common_ancestor(sdd, tdd);
1221 	adelta = would_change(sdd, -space, ancestor);
1222 	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
1223 	if (avail < space)
1224 		return (ENOSPC);
1225 
1226 	return (0);
1227 }
1228