xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_dir.c (revision a9799022bd90b13722204e80112efaa5bf573099)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include "zfs_namecheck.h"

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx);

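/*
 * Eviction callback for a dsl_dir_t's bonus buffer.  Called when the
 * last hold on the underlying dmu buffer is released; verifies that the
 * dir is no longer dirty or temporarily reserved against, drops the
 * holds taken at instantiation, and frees the in-core structure.
 */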
/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	ASSERT3U(dd->dd_used_bytes, ==, dd->dd_phys->dd_used_bytes);

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should be empty, since the callbacks
	 * hold the dir open.
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

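/*
 * Open the dsl_dir_t with the given object number, adding a hold for
 * "tag".  If no in-core dsl_dir_t exists yet, one is instantiated and
 * attached to the bonus buffer; a concurrent instantiation may win the
 * dmu_buf_set_user_ie() race, in which case the loser is discarded.
 * "tail", if non-NULL, supplies the dir's name so a ZAP search can be
 * avoided.  On success the result is returned in *ddp.
 */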
int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
	}
#endif
	/* XXX assert bonus buffer size is correct */
	if (dd == NULL) {
		dsl_dir_t *winner;
		int err;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		dd->dd_used_bytes = dd->dd_phys->dd_used_bytes;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err) {
				mutex_destroy(&dd->dd_lock);
				kmem_free(dd, sizeof (dsl_dir_t));
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err) {
				dsl_dir_close(dd->dd_parent, dd);
				mutex_destroy(&dd->dd_lock);
				kmem_free(dd, sizeof (dsl_dir_t));
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);
}

void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}

/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

int
dsl_dir_is_private(dsl_dir_t *dd)
{
	int rv = FALSE;

	if (dd->dd_parent && dsl_dir_is_private(dd->dd_parent))
		rv = TRUE;
	if (dataset_name_hidden(dd->dd_myname))
		rv = TRUE;
	return (rv);
}


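/*
 * Copy the first path component of "path" (up to, but not including,
 * the first '/' or '@' separator) into "component", and set *nextp to
 * the remainder of the path (NULL if this was the last component; for
 * a snapshot name it points at the '@').  Returns EINVAL for malformed
 * names, e.g. doubled separators or a slash after an '@'.
 */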
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;
	if (path == NULL)
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there had better not
		 * be any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}

/*
 * Same as dsl_dir_open, but ignore the first component of name and use
 * the spa instead.
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}

/*
 * Return the dsl_dir_t in *ddp, and possibly the last component which
 * couldn't be found in *tailp.  Return an error if the path is bogus,
 * or if tailp==NULL and we couldn't parse the whole name.
 * (*tailp)[0] == '@' means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}

uint64_t
dsl_dir_create_sync(dsl_dir_t *pds, const char *name, dmu_tx_t *tx)
{
	objset_t *mos = pds->dd_pool->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *dsphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (uint64_t), 1, &ddobj, tx));
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;

	dsphys->dd_creation_time = gethrestime_sec();
	dsphys->dd_parent_obj = pds->dd_object;
	dsphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds.  Otherwise, someone is trying to look up something
	 * inside this dir while we want to destroy it.  The
	 * config_rwlock ensures that nobody else opens it after we
	 * check.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

void
dsl_dir_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val, obj;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	val = 0;
	dsl_dir_set_reservation_sync(dd, &val, cr, tx);
	ASSERT3U(dd->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

void
dsl_dir_create_root(objset_t *mos, uint64_t *ddobjp, dmu_tx_t *tx)
{
	dsl_dir_phys_t *dsp;
	dmu_buf_t *dbuf;
	int error;

	*ddobjp = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);

	error = zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ROOT_DATASET,
	    sizeof (uint64_t), 1, ddobjp, tx);
	ASSERT3U(error, ==, 0);

	VERIFY(0 == dmu_bonus_hold(mos, *ddobjp, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsp = dbuf->db_data;

	dsp->dd_creation_time = gethrestime_sec();
	dsp->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsp->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);

	dmu_buf_rele(dbuf, FTAG);
}

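/*
 * Fill in the nvlist with this dir's space statistics: usage, quota,
 * reservation, compression ratio (as a percentage), and, for clones,
 * the name of the origin snapshot.
 */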
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, dd->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	mutex_exit(&dd->dd_lock);

	if (dd->dd_phys->dd_origin_obj) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
		VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj,
		    NULL, DS_MODE_NONE, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
		rw_exit(&dd->dd_pool->dp_config_rwlock);

		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

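/*
 * Compute how much of a space delta actually flows through to our
 * parent, given that usage below dd_reserved is already charged to the
 * parent.  E.g. with used=10M, reserved=25M, and delta=+20M, only 5M is
 * newly accounted against the parent (MAX(30M, 25M) - MAX(10M, 25M)).
 */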
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}

void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	dd->dd_phys->dd_used_bytes = dd->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);
	if (dd == ancestor)
		used += delta;

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;

		/*
		 * While it's OK to be a little over quota, if
		 * we think we are using more space than there
		 * is in the pool (which is already 1.6% more than
		 * dsl_pool_adjustedsize()), something is very
		 * wrong.
		 */
		ASSERT3U(used, <=, spa_get_space(dd->dd_pool->dp_spa));
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

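/*
 * A temporary reservation, tracked in a list so that the whole chain of
 * per-directory reservations (and the ARC reservation) can be undone by
 * dsl_dir_tempreserve_clear().
 */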
struct tempreserve {
	list_node_t tr_node;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	struct tempreserve *tr;
	int error = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_used_bytes;

	/*
	 * Check for dataset reference quota on first iteration.
	 */
	if (list_head(tr_list) == NULL && tx->tx_objset) {
		dsl_dataset_t *ds = tx->tx_objset->os->os_dsl_dataset;
		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		if (poolsize < quota) {
			quota = poolsize;
			error = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight > quota) {
		if (est_inflight > 0 || used_on_disk < quota)
			error = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, error);
		mutex_exit(&dd->dd_lock);
		return (error);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight, asize);
	mutex_exit(&dd->dd_lock);

	tr = kmem_alloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx));
	} else {
		return (0);
	}
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err = 0;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize, FALSE,
	    asize > usize, tr_list, tx);

	if (err == 0) {
		struct tempreserve *tr;

		err = arc_tempreserve_space(lsize);
		if (err == 0) {
			tr = kmem_alloc(sizeof (struct tempreserve), KM_SLEEP);
			tr->tr_ds = NULL;
			tr->tr_size = lsize;
			list_insert_tail(tr_list, tr);
		}
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;
	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while (tr = list_head(tr_list)) {
		if (tr->tr_ds == NULL) {
			arc_tempreserve_clear(tr->tr_size);
		} else {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}

/*
 * Call in open context when we think we're going to write/free space,
 * e.g. when dirtying data.  Be conservative (i.e. it's OK to write less
 * than this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space(dd->dd_parent, parent_space, tx);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	ASSERT(dmu_tx_is_syncing(tx));

	dsl_dir_dirty(dd, tx);

	mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent,
		    accounted_delta, compressed, uncompressed, tx);
	}
}

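/*
 * Sync task check function for setting a quota.  A quota of zero means
 * "none"; otherwise the new quota must cover the reservation and the
 * space already used (see the in-function comment about open-context
 * checks with pending changes).
 */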
static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;
	int err = 0;
	uint64_t towrite;

	if (new_quota == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (new_quota < dd->dd_phys->dd_reserved ||
	    new_quota < dd->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}

/* ARGSUSED */
static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = new_quota;
	mutex_exit(&dd->dd_lock);

	spa_history_internal_log(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu ",
	    (longlong_t)new_quota, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_quota(const char *ddname, uint64_t quota)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);
	/*
	 * If someone removes a file, then tries to set the quota, we
	 * want to make sure the file freeing takes effect.
	 */
	txg_wait_open(dd->dd_pool, 0);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, dd, &quota, 0);
	dsl_dir_close(dd, FTAG);
	return (err);
}

int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used, avail;
	int64_t delta;

	if (new_reservation > INT64_MAX)
		return (EOVERFLOW);

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (delta > 0 && delta > avail)
		return (ENOSPC);
	if (delta > 0 && dd->dd_phys->dd_quota > 0 &&
	    new_reservation > dd->dd_phys->dd_quota)
		return (ENOSPC);
	return (0);
}

/* ARGSUSED */
static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = new_reservation;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, delta, 0, 0, tx);
	}

	spa_history_internal_log(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu",
	    (longlong_t)new_reservation, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_reservation(const char *ddname, uint64_t reservation)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);
	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, dd, &reservation, 0);
	dsl_dir_close(dd, FTAG);
	return (err);
}

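/*
 * Walk up from ds1, looking for the first directory that is also an
 * ancestor of (or equal to) ds2.  Every pair of dirs in a pool shares
 * at least the root dir, so this returns NULL only for unrelated dirs.
 */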
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}

struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
};

/*ARGSUSED*/
static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/* There should be 2 references: the open and the dirty */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace))
			return (err);
	}

	return (0);
}

static void
dsl_dir_rename_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	if (ra->newparent != dd->dd_parent) {
		uint64_t myspace =
		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);

		dsl_dir_diduse_space(dd->dd_parent, -myspace,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, myspace,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT3U(err, ==, 0);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
}

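/*
 * Rename (and possibly reparent) dd to the directory named by newname,
 * via a sync task.  The new parent must already exist, must be in the
 * same pool, and must not already have a child with the new name.
 */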
int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}

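/*
 * Check whether "space" bytes charged to sdd could instead be charged
 * to tdd, i.e. whether the destination side of a rename has room for
 * what the source side would give up.
 */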
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}
1237