xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_objset.c (revision 5aaeed5c617553c4cec6328c1f4c19079a5a495a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /* Portions Copyright 2010 Robert Milkowski */
26 
27 #include <sys/cred.h>
28 #include <sys/zfs_context.h>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_prop.h>
33 #include <sys/dsl_pool.h>
34 #include <sys/dsl_synctask.h>
35 #include <sys/dsl_deleg.h>
36 #include <sys/dnode.h>
37 #include <sys/dbuf.h>
38 #include <sys/zvol.h>
39 #include <sys/dmu_tx.h>
40 #include <sys/zap.h>
41 #include <sys/zil.h>
42 #include <sys/dmu_impl.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/sa.h>
45 
46 /*
47  * Needed to close a window in dnode_move() that allows the objset to be freed
48  * before it can be safely accessed.
49  */
50 krwlock_t os_lock;
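/*
 * dnode_move() (see dnode.c) takes os_lock while it dereferences a
 * dnode's objset; dmu_objset_evict() enters and exits it as a barrier
 * before freeing the objset (see the comment there).
 */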
51 
52 void
53 dmu_objset_init(void)
54 {
55 	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
56 }
57 
58 void
59 dmu_objset_fini(void)
60 {
61 	rw_destroy(&os_lock);
62 }
63 
64 spa_t *
65 dmu_objset_spa(objset_t *os)
66 {
67 	return (os->os_spa);
68 }
69 
70 zilog_t *
71 dmu_objset_zil(objset_t *os)
72 {
73 	return (os->os_zil);
74 }
75 
76 dsl_pool_t *
77 dmu_objset_pool(objset_t *os)
78 {
79 	dsl_dataset_t *ds;
80 
81 	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
82 		return (ds->ds_dir->dd_pool);
83 	else
84 		return (spa_get_dsl(os->os_spa));
85 }
86 
87 dsl_dataset_t *
88 dmu_objset_ds(objset_t *os)
89 {
90 	return (os->os_dsl_dataset);
91 }
92 
93 dmu_objset_type_t
94 dmu_objset_type(objset_t *os)
95 {
96 	return (os->os_phys->os_type);
97 }
98 
99 void
100 dmu_objset_name(objset_t *os, char *buf)
101 {
102 	dsl_dataset_name(os->os_dsl_dataset, buf);
103 }
104 
105 uint64_t
106 dmu_objset_id(objset_t *os)
107 {
108 	dsl_dataset_t *ds = os->os_dsl_dataset;
109 
110 	return (ds ? ds->ds_object : 0);
111 }
112 
113 uint64_t
114 dmu_objset_syncprop(objset_t *os)
115 {
116 	return (os->os_sync);
117 }
118 
119 uint64_t
120 dmu_objset_logbias(objset_t *os)
121 {
122 	return (os->os_logbias);
123 }
124 
125 static void
126 checksum_changed_cb(void *arg, uint64_t newval)
127 {
128 	objset_t *os = arg;
129 
130 	/*
131 	 * Inheritance should have been done by now.
132 	 */
133 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
134 
135 	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
136 }
137 
138 static void
139 compression_changed_cb(void *arg, uint64_t newval)
140 {
141 	objset_t *os = arg;
142 
143 	/*
144 	 * Inheritance and range checking should have been done by now.
145 	 */
146 	ASSERT(newval != ZIO_COMPRESS_INHERIT);
147 
148 	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
149 }
150 
151 static void
152 copies_changed_cb(void *arg, uint64_t newval)
153 {
154 	objset_t *os = arg;
155 
156 	/*
157 	 * Inheritance and range checking should have been done by now.
158 	 */
159 	ASSERT(newval > 0);
160 	ASSERT(newval <= spa_max_replication(os->os_spa));
161 
162 	os->os_copies = newval;
163 }
164 
165 static void
166 dedup_changed_cb(void *arg, uint64_t newval)
167 {
168 	objset_t *os = arg;
169 	spa_t *spa = os->os_spa;
170 	enum zio_checksum checksum;
171 
172 	/*
173 	 * Inheritance should have been done by now.
174 	 */
175 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
176 
177 	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
178 
179 	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
180 	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
181 }
182 
183 static void
184 primary_cache_changed_cb(void *arg, uint64_t newval)
185 {
186 	objset_t *os = arg;
187 
188 	/*
189 	 * Inheritance and range checking should have been done by now.
190 	 */
191 	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
192 	    newval == ZFS_CACHE_METADATA);
193 
194 	os->os_primary_cache = newval;
195 }
196 
197 static void
198 secondary_cache_changed_cb(void *arg, uint64_t newval)
199 {
200 	objset_t *os = arg;
201 
202 	/*
203 	 * Inheritance and range checking should have been done by now.
204 	 */
205 	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
206 	    newval == ZFS_CACHE_METADATA);
207 
208 	os->os_secondary_cache = newval;
209 }
210 
211 static void
212 sync_changed_cb(void *arg, uint64_t newval)
213 {
214 	objset_t *os = arg;
215 
216 	/*
217 	 * Inheritance and range checking should have been done by now.
218 	 */
219 	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
220 	    newval == ZFS_SYNC_DISABLED);
221 
222 	os->os_sync = newval;
223 	if (os->os_zil)
224 		zil_set_sync(os->os_zil, newval);
225 }
226 
227 static void
228 logbias_changed_cb(void *arg, uint64_t newval)
229 {
230 	objset_t *os = arg;
231 
232 	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
233 	    newval == ZFS_LOGBIAS_THROUGHPUT);
234 	os->os_logbias = newval;
235 	if (os->os_zil)
236 		zil_set_logbias(os->os_zil, newval);
237 }
238 
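/*
 * Byteswap an on-disk objset block.  'size' distinguishes the old
 * (pre-SPA_VERSION_USERSPACE) layout, which lacks the user/group-used
 * dnodes, from the current objset_phys_t layout.
 */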
239 void
240 dmu_objset_byteswap(void *buf, size_t size)
241 {
242 	objset_phys_t *osp = buf;
243 
244 	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
245 	dnode_byteswap(&osp->os_meta_dnode);
246 	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
247 	osp->os_type = BSWAP_64(osp->os_type);
248 	osp->os_flags = BSWAP_64(osp->os_flags);
249 	if (size == sizeof (objset_phys_t)) {
250 		dnode_byteswap(&osp->os_userused_dnode);
251 		dnode_byteswap(&osp->os_groupused_dnode);
252 	}
253 }
254 
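/*
 * Construct the in-core objset for dataset 'ds' (or for the MOS when
 * ds is NULL): read or allocate the objset_phys_t buffer, register the
 * per-dataset property callbacks, and open the special meta-dnode and,
 * when present, the user/group-used dnodes.
 */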
255 int
256 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
257     objset_t **osp)
258 {
259 	objset_t *os;
260 	int i, err;
261 
262 	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
263 
264 	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
265 	os->os_dsl_dataset = ds;
266 	os->os_spa = spa;
267 	os->os_rootbp = bp;
268 	if (!BP_IS_HOLE(os->os_rootbp)) {
269 		uint32_t aflags = ARC_WAIT;
270 		zbookmark_t zb;
271 		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
272 		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
273 
274 		if (DMU_OS_IS_L2CACHEABLE(os))
275 			aflags |= ARC_L2CACHE;
276 
277 		dprintf_bp(os->os_rootbp, "reading %s", "");
278 		/*
279 		 * XXX once the bprewrite scrub is able to change the bp,
280 		 * and this is called from dmu_objset_open_ds_os, the bp
281 		 * could change underneath us, and we'll need a lock.
282 		 */
283 		err = dsl_read_nolock(NULL, spa, os->os_rootbp,
284 		    arc_getbuf_func, &os->os_phys_buf,
285 		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
286 		if (err) {
287 			kmem_free(os, sizeof (objset_t));
288 			/* convert checksum errors into IO errors */
289 			if (err == ECKSUM)
290 				err = EIO;
291 			return (err);
292 		}
293 
294 		/* Increase the blocksize if we are permitted. */
295 		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
296 		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
297 			arc_buf_t *buf = arc_buf_alloc(spa,
298 			    sizeof (objset_phys_t), &os->os_phys_buf,
299 			    ARC_BUFC_METADATA);
300 			bzero(buf->b_data, sizeof (objset_phys_t));
301 			bcopy(os->os_phys_buf->b_data, buf->b_data,
302 			    arc_buf_size(os->os_phys_buf));
303 			(void) arc_buf_remove_ref(os->os_phys_buf,
304 			    &os->os_phys_buf);
305 			os->os_phys_buf = buf;
306 		}
307 
308 		os->os_phys = os->os_phys_buf->b_data;
309 		os->os_flags = os->os_phys->os_flags;
310 	} else {
311 		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
312 		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
313 		os->os_phys_buf = arc_buf_alloc(spa, size,
314 		    &os->os_phys_buf, ARC_BUFC_METADATA);
315 		os->os_phys = os->os_phys_buf->b_data;
316 		bzero(os->os_phys, size);
317 	}
318 
319 	/*
320 	 * Note: the changed_cb will be called once before the register
321 	 * func returns, thus changing the checksum/compression from the
322 	 * default (fletcher2/off).  Snapshots don't need to know about
323 	 * checksum/compression/copies.
324 	 */
325 	if (ds) {
326 		err = dsl_prop_register(ds, "primarycache",
327 		    primary_cache_changed_cb, os);
328 		if (err == 0)
329 			err = dsl_prop_register(ds, "secondarycache",
330 			    secondary_cache_changed_cb, os);
331 		if (!dsl_dataset_is_snapshot(ds)) {
332 			if (err == 0)
333 				err = dsl_prop_register(ds, "checksum",
334 				    checksum_changed_cb, os);
335 			if (err == 0)
336 				err = dsl_prop_register(ds, "compression",
337 				    compression_changed_cb, os);
338 			if (err == 0)
339 				err = dsl_prop_register(ds, "copies",
340 				    copies_changed_cb, os);
341 			if (err == 0)
342 				err = dsl_prop_register(ds, "dedup",
343 				    dedup_changed_cb, os);
344 			if (err == 0)
345 				err = dsl_prop_register(ds, "logbias",
346 				    logbias_changed_cb, os);
347 			if (err == 0)
348 				err = dsl_prop_register(ds, "sync",
349 				    sync_changed_cb, os);
350 		}
351 		if (err) {
352 			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
353 			    &os->os_phys_buf) == 1);
354 			kmem_free(os, sizeof (objset_t));
355 			return (err);
356 		}
357 	} else {
358 		/* It's the meta-objset. */
359 		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
360 		os->os_compress = ZIO_COMPRESS_LZJB;
361 		os->os_copies = spa_max_replication(spa);
362 		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
363 		os->os_dedup_verify = 0;
364 		os->os_logbias = 0;
365 		os->os_sync = 0;
366 		os->os_primary_cache = ZFS_CACHE_ALL;
367 		os->os_secondary_cache = ZFS_CACHE_ALL;
368 	}
369 
370 	os->os_zil_header = os->os_phys->os_zil_header;
371 	os->os_zil = zil_alloc(os, &os->os_zil_header);
372 
373 	for (i = 0; i < TXG_SIZE; i++) {
374 		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
375 		    offsetof(dnode_t, dn_dirty_link[i]));
376 		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
377 		    offsetof(dnode_t, dn_dirty_link[i]));
378 	}
379 	list_create(&os->os_dnodes, sizeof (dnode_t),
380 	    offsetof(dnode_t, dn_link));
381 	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
382 	    offsetof(dmu_buf_impl_t, db_link));
383 
384 	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
385 	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
386 	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
387 
388 	DMU_META_DNODE(os) = dnode_special_open(os,
389 	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT,
390 	    &os->os_meta_dnode);
391 	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
392 		DMU_USERUSED_DNODE(os) = dnode_special_open(os,
393 		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT,
394 		    &os->os_userused_dnode);
395 		DMU_GROUPUSED_DNODE(os) = dnode_special_open(os,
396 		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT,
397 		    &os->os_groupused_dnode);
398 	}
399 
400 	/*
401 	 * We should be the only thread trying to do this because we
402 	 * hold ds_opening_lock.
403 	 */
404 	if (ds) {
405 		mutex_enter(&ds->ds_lock);
406 		ASSERT(ds->ds_objset == NULL);
407 		ds->ds_objset = os;
408 		mutex_exit(&ds->ds_lock);
409 	}
410 
411 	*osp = os;
412 	return (0);
413 }
414 
415 int
416 dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
417 {
418 	int err = 0;
419 
420 	mutex_enter(&ds->ds_opening_lock);
421 	*osp = ds->ds_objset;
422 	if (*osp == NULL) {
423 		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
424 		    ds, &ds->ds_phys->ds_bp, osp);
425 	}
426 	mutex_exit(&ds->ds_opening_lock);
427 	return (err);
428 }
429 
430 /* called from zpl */
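/*
 * A typical consumer pattern (a minimal sketch; real callers live in
 * the ZPL and the zfs ioctl paths):
 *
 *	objset_t *os;
 *	int err = dmu_objset_hold("tank/fs", FTAG, &os);
 *	if (err == 0) {
 *		... use os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 *
 * dmu_objset_own()/dmu_objset_disown() follow the same pattern for
 * long-lived consumers that need ownership of the dataset.
 */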
431 int
432 dmu_objset_hold(const char *name, void *tag, objset_t **osp)
433 {
434 	dsl_dataset_t *ds;
435 	int err;
436 
437 	err = dsl_dataset_hold(name, tag, &ds);
438 	if (err)
439 		return (err);
440 
441 	err = dmu_objset_from_ds(ds, osp);
442 	if (err)
443 		dsl_dataset_rele(ds, tag);
444 
445 	return (err);
446 }
447 
448 /* called from zpl */
449 int
450 dmu_objset_own(const char *name, dmu_objset_type_t type,
451     boolean_t readonly, void *tag, objset_t **osp)
452 {
453 	dsl_dataset_t *ds;
454 	int err;
455 
456 	err = dsl_dataset_own(name, B_FALSE, tag, &ds);
457 	if (err)
458 		return (err);
459 
460 	err = dmu_objset_from_ds(ds, osp);
461 	if (err) {
462 		dsl_dataset_disown(ds, tag);
463 	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
464 		dmu_objset_disown(*osp, tag);
465 		return (EINVAL);
466 	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
467 		dmu_objset_disown(*osp, tag);
468 		return (EROFS);
469 	}
470 	return (err);
471 }
472 
473 void
474 dmu_objset_rele(objset_t *os, void *tag)
475 {
476 	dsl_dataset_rele(os->os_dsl_dataset, tag);
477 }
478 
479 void
480 dmu_objset_disown(objset_t *os, void *tag)
481 {
482 	dsl_dataset_disown(os->os_dsl_dataset, tag);
483 }
484 
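/*
 * Evict the dbufs of every dnode in this objset.  Returns nonzero if
 * any dnode other than the meta-dnode is still on the objset's dnode
 * list afterwards, i.e. the eviction was incomplete.
 */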
485 int
486 dmu_objset_evict_dbufs(objset_t *os)
487 {
488 	dnode_t *dn;
489 
490 	mutex_enter(&os->os_lock);
491 
492 	/* process the mdn last, since the other dnodes have holds on it */
493 	list_remove(&os->os_dnodes, DMU_META_DNODE(os));
494 	list_insert_tail(&os->os_dnodes, DMU_META_DNODE(os));
495 
496 	/*
497 	 * Find the first dnode with holds.  We have to do this dance
498 	 * because dnode_add_ref() only works if you already have a
499 	 * hold.  If a dnode has no holds, it has no dbufs, so it is
500 	 * safe to skip.
501 	 */
502 	for (dn = list_head(&os->os_dnodes);
503 	    dn && !dnode_add_ref(dn, FTAG);
504 	    dn = list_next(&os->os_dnodes, dn))
505 		continue;
506 
507 	while (dn) {
508 		dnode_t *next_dn = dn;
509 
510 		do {
511 			next_dn = list_next(&os->os_dnodes, next_dn);
512 		} while (next_dn && !dnode_add_ref(next_dn, FTAG));
513 
514 		mutex_exit(&os->os_lock);
515 		dnode_evict_dbufs(dn);
516 		dnode_rele(dn, FTAG);
517 		mutex_enter(&os->os_lock);
518 		dn = next_dn;
519 	}
520 	dn = list_head(&os->os_dnodes);
521 	mutex_exit(&os->os_lock);
522 	return (dn != DMU_META_DNODE(os));
523 }
524 
525 void
526 dmu_objset_evict(objset_t *os)
527 {
528 	dsl_dataset_t *ds = os->os_dsl_dataset;
529 
530 	for (int t = 0; t < TXG_SIZE; t++)
531 		ASSERT(!dmu_objset_is_dirty(os, t));
532 
533 	if (ds) {
534 		if (!dsl_dataset_is_snapshot(ds)) {
535 			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
536 			    checksum_changed_cb, os));
537 			VERIFY(0 == dsl_prop_unregister(ds, "compression",
538 			    compression_changed_cb, os));
539 			VERIFY(0 == dsl_prop_unregister(ds, "copies",
540 			    copies_changed_cb, os));
541 			VERIFY(0 == dsl_prop_unregister(ds, "dedup",
542 			    dedup_changed_cb, os));
543 			VERIFY(0 == dsl_prop_unregister(ds, "logbias",
544 			    logbias_changed_cb, os));
545 			VERIFY(0 == dsl_prop_unregister(ds, "sync",
546 			    sync_changed_cb, os));
547 		}
548 		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
549 		    primary_cache_changed_cb, os));
550 		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
551 		    secondary_cache_changed_cb, os));
552 	}
553 
554 	if (os->os_sa)
555 		sa_tear_down(os);
556 
557 	/*
558 	 * We should need only a single pass over the dnode list, since
559 	 * nothing can be added to the list at this point.
560 	 */
561 	(void) dmu_objset_evict_dbufs(os);
562 
563 	dnode_special_close(&os->os_meta_dnode);
564 	if (DMU_USERUSED_DNODE(os)) {
565 		dnode_special_close(&os->os_userused_dnode);
566 		dnode_special_close(&os->os_groupused_dnode);
567 	}
568 	zil_free(os->os_zil);
569 
570 	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
571 
572 	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);
573 
574 	/*
575 	 * This is a barrier to prevent the objset from going away in
576 	 * dnode_move() until we can safely ensure that the objset is still in
577 	 * use. We consider the objset valid before the barrier and invalid
578 	 * after the barrier.
579 	 */
580 	rw_enter(&os_lock, RW_READER);
581 	rw_exit(&os_lock);
582 
583 	mutex_destroy(&os->os_lock);
584 	mutex_destroy(&os->os_obj_lock);
585 	mutex_destroy(&os->os_user_ptr_lock);
586 	kmem_free(os, sizeof (objset_t));
587 }
588 
589 timestruc_t
590 dmu_objset_snap_cmtime(objset_t *os)
591 {
592 	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
593 }
594 
595 /* called from dsl for meta-objset */
596 objset_t *
597 dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
598     dmu_objset_type_t type, dmu_tx_t *tx)
599 {
600 	objset_t *os;
601 	dnode_t *mdn;
602 
603 	ASSERT(dmu_tx_is_syncing(tx));
604 	if (ds)
605 		mutex_enter(&ds->ds_opening_lock);
606 	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &os));
607 	if (ds)
608 		mutex_exit(&ds->ds_opening_lock);
609 	mdn = DMU_META_DNODE(os);
610 
611 	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
612 	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);
613 
614 	/*
615 	 * We don't want to have to increase the meta-dnode's nlevels
616 	 * later, because then we could do it in quiescing context while
617 	 * we are also accessing it in open context.
618 	 *
619 	 * This precaution is not necessary for the MOS (ds == NULL),
620 	 * because the MOS is only updated in syncing context.
621 	 * This is most fortunate: the MOS is the only objset that
622 	 * needs to be synced multiple times as spa_sync() iterates
623 	 * to convergence, so minimizing its dn_nlevels matters.
624 	 */
625 	if (ds != NULL) {
626 		int levels = 1;
627 
628 		/*
629 		 * Determine the number of levels necessary for the meta-dnode
630 		 * to contain DN_MAX_OBJECT dnodes.
631 		 */
632 		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
633 		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
634 		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
635 			levels++;
636 
637 		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
638 		    mdn->dn_nlevels = levels;
639 	}
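	/*
	 * A back-of-the-envelope check, assuming the usual constants
	 * (16K dnode and indirect blocks, 128-byte block pointers,
	 * 512-byte dnode_phys_t, 2^48 objects): each additional level
	 * multiplies capacity by 2^(14 - 7) = 128, so the loop above
	 * settles on about 7 levels.
	 */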
640 
641 	ASSERT(type != DMU_OST_NONE);
642 	ASSERT(type != DMU_OST_ANY);
643 	ASSERT(type < DMU_OST_NUMTYPES);
644 	os->os_phys->os_type = type;
645 	if (dmu_objset_userused_enabled(os)) {
646 		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
647 		os->os_flags = os->os_phys->os_flags;
648 	}
649 
650 	dsl_dataset_dirty(ds, tx);
651 
652 	return (os);
653 }
654 
655 struct oscarg {
656 	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
657 	void *userarg;
658 	dsl_dataset_t *clone_origin;
659 	const char *lastname;
660 	dmu_objset_type_t type;
661 	uint64_t flags;
662 	cred_t *cr;
663 };
664 
665 /*ARGSUSED*/
666 static int
667 dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
668 {
669 	dsl_dir_t *dd = arg1;
670 	struct oscarg *oa = arg2;
671 	objset_t *mos = dd->dd_pool->dp_meta_objset;
672 	int err;
673 	uint64_t ddobj;
674 
675 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
676 	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
677 	if (err != ENOENT)
678 		return (err ? err : EEXIST);
679 
680 	if (oa->clone_origin != NULL) {
681 		/* You can't clone across pools. */
682 		if (oa->clone_origin->ds_dir->dd_pool != dd->dd_pool)
683 			return (EXDEV);
684 
685 		/* You can only clone snapshots, not the head datasets. */
686 		if (!dsl_dataset_is_snapshot(oa->clone_origin))
687 			return (EINVAL);
688 	}
689 
690 	return (0);
691 }
692 
693 static void
694 dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
695 {
696 	dsl_dir_t *dd = arg1;
697 	struct oscarg *oa = arg2;
698 	uint64_t dsobj;
699 
700 	ASSERT(dmu_tx_is_syncing(tx));
701 
702 	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
703 	    oa->clone_origin, oa->flags, oa->cr, tx);
704 
705 	if (oa->clone_origin == NULL) {
706 		dsl_dataset_t *ds;
707 		blkptr_t *bp;
708 		objset_t *os;
709 
710 		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj,
711 		    FTAG, &ds));
712 		bp = dsl_dataset_get_blkptr(ds);
713 		ASSERT(BP_IS_HOLE(bp));
714 
715 		os = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
716 		    ds, bp, oa->type, tx);
717 
718 		if (oa->userfunc)
719 			oa->userfunc(os, oa->userarg, oa->cr, tx);
720 		dsl_dataset_rele(ds, FTAG);
721 	}
722 
723 	spa_history_log_internal(LOG_DS_CREATE, dd->dd_pool->dp_spa,
724 	    tx, "dataset = %llu", dsobj);
725 }
726 
727 int
728 dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
729     void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
730 {
731 	dsl_dir_t *pdd;
732 	const char *tail;
733 	int err = 0;
734 	struct oscarg oa = { 0 };
735 
736 	ASSERT(strchr(name, '@') == NULL);
737 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
738 	if (err)
739 		return (err);
740 	if (tail == NULL) {
741 		dsl_dir_close(pdd, FTAG);
742 		return (EEXIST);
743 	}
744 
745 	oa.userfunc = func;
746 	oa.userarg = arg;
747 	oa.lastname = tail;
748 	oa.type = type;
749 	oa.flags = flags;
750 	oa.cr = CRED();
751 
752 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
753 	    dmu_objset_create_sync, pdd, &oa, 5);
754 	dsl_dir_close(pdd, FTAG);
755 	return (err);
756 }
757 
758 int
759 dmu_objset_clone(const char *name, dsl_dataset_t *clone_origin, uint64_t flags)
760 {
761 	dsl_dir_t *pdd;
762 	const char *tail;
763 	int err = 0;
764 	struct oscarg oa = { 0 };
765 
766 	ASSERT(strchr(name, '@') == NULL);
767 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
768 	if (err)
769 		return (err);
770 	if (tail == NULL) {
771 		dsl_dir_close(pdd, FTAG);
772 		return (EEXIST);
773 	}
774 
775 	oa.lastname = tail;
776 	oa.clone_origin = clone_origin;
777 	oa.flags = flags;
778 	oa.cr = CRED();
779 
780 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
781 	    dmu_objset_create_sync, pdd, &oa, 5);
782 	dsl_dir_close(pdd, FTAG);
783 	return (err);
784 }
785 
786 int
787 dmu_objset_destroy(const char *name, boolean_t defer)
788 {
789 	dsl_dataset_t *ds;
790 	int error;
791 
792 	/*
793 	 * dsl_dataset_destroy() can free any claimed-but-unplayed
794 	 * intent log, but if there is an active log, it has blocks that
795 	 * are allocated, but may not yet be reflected in the on-disk
796 	 * structure.  Only the ZIL knows how to free them, so we have
797 	 * to call into it here.
798 	 */
799 	error = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
800 	if (error == 0) {
801 		objset_t *os;
802 		if (dmu_objset_from_ds(ds, &os) == 0)
803 			zil_destroy(dmu_objset_zil(os), B_FALSE);
804 		error = dsl_dataset_destroy(ds, FTAG, defer);
805 		/* dsl_dataset_destroy() closes the ds. */
806 	}
807 
808 	return (error);
809 }
810 
811 struct snaparg {
812 	dsl_sync_task_group_t *dstg;
813 	char *snapname;
814 	char failed[MAXPATHLEN];
815 	boolean_t recursive;
816 	nvlist_t *props;
817 };
818 
819 static int
820 snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
821 {
822 	objset_t *os = arg1;
823 	struct snaparg *sn = arg2;
824 
825 	/* The props have already been checked by zfs_check_userprops(). */
826 
827 	return (dsl_dataset_snapshot_check(os->os_dsl_dataset,
828 	    sn->snapname, tx));
829 }
830 
831 static void
832 snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
833 {
834 	objset_t *os = arg1;
835 	dsl_dataset_t *ds = os->os_dsl_dataset;
836 	struct snaparg *sn = arg2;
837 
838 	dsl_dataset_snapshot_sync(ds, sn->snapname, tx);
839 
840 	if (sn->props) {
841 		dsl_props_arg_t pa;
842 		pa.pa_props = sn->props;
843 		pa.pa_source = ZPROP_SRC_LOCAL;
844 		dsl_props_set_sync(ds->ds_prev, &pa, tx);
845 	}
846 }
847 
848 static int
849 dmu_objset_snapshot_one(const char *name, void *arg)
850 {
851 	struct snaparg *sn = arg;
852 	objset_t *os;
853 	int err;
854 	char *cp;
855 
856 	/*
857 	 * If the objset starts with a '%', then ignore it unless it was
858 	 * explicitly named (ie, not recursive).  These hidden datasets
859 	 * are always inconsistent, and by not opening them here, we can
860 	 * avoid a race with dsl_dir_destroy_check().
861 	 */
862 	cp = strrchr(name, '/');
863 	if (cp && cp[1] == '%' && sn->recursive)
864 		return (0);
865 
866 	(void) strcpy(sn->failed, name);
867 
868 	/*
869 	 * Check permissions if we are doing a recursive snapshot.  The
870 	 * permission checks for the starting dataset have already been
871 	 * performed in zfs_secpolicy_snapshot()
872 	 */
873 	if (sn->recursive && (err = zfs_secpolicy_snapshot_perms(name, CRED())))
874 		return (err);
875 
876 	err = dmu_objset_hold(name, sn, &os);
877 	if (err != 0)
878 		return (err);
879 
880 	/*
881 	 * If the objset is in an inconsistent state (eg, in the process
882 	 * of being destroyed), don't snapshot it.  As with %hidden
883 	 * datasets, we return EBUSY if this name was explicitly
884 	 * requested (ie, not recursive), and otherwise ignore it.
885 	 */
886 	if (os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
887 		dmu_objset_rele(os, sn);
888 		return (sn->recursive ? 0 : EBUSY);
889 	}
890 
891 	/*
892 	 * NB: we need to wait for all in-flight changes to get to disk,
893 	 * so that we snapshot those changes.  zil_suspend does this as
894 	 * a side effect.
895 	 */
896 	err = zil_suspend(dmu_objset_zil(os));
897 	if (err == 0) {
898 		dsl_sync_task_create(sn->dstg, snapshot_check,
899 		    snapshot_sync, os, sn, 3);
900 	} else {
901 		dmu_objset_rele(os, sn);
902 	}
903 
904 	return (err);
905 }
906 
907 int
908 dmu_objset_snapshot(char *fsname, char *snapname,
909     nvlist_t *props, boolean_t recursive)
910 {
911 	dsl_sync_task_t *dst;
912 	struct snaparg sn;
913 	spa_t *spa;
914 	int err;
915 
916 	(void) strcpy(sn.failed, fsname);
917 
918 	err = spa_open(fsname, &spa, FTAG);
919 	if (err)
920 		return (err);
921 
922 	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
923 	sn.snapname = snapname;
924 	sn.props = props;
925 	sn.recursive = recursive;
926 
927 	if (recursive) {
928 		err = dmu_objset_find(fsname,
929 		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
930 	} else {
931 		err = dmu_objset_snapshot_one(fsname, &sn);
932 	}
933 
934 	if (err == 0)
935 		err = dsl_sync_task_group_wait(sn.dstg);
936 
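	/*
	 * Whether or not the sync tasks succeeded, undo what
	 * dmu_objset_snapshot_one() set up for each dataset: resume
	 * the suspended ZIL and drop the objset hold.  Record the
	 * name of a dataset whose task failed so it can be copied
	 * back to the caller below.
	 */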
937 	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
938 	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
939 		objset_t *os = dst->dst_arg1;
940 		dsl_dataset_t *ds = os->os_dsl_dataset;
941 		if (dst->dst_err)
942 			dsl_dataset_name(ds, sn.failed);
943 		zil_resume(dmu_objset_zil(os));
944 		dmu_objset_rele(os, &sn);
945 	}
946 
947 	if (err)
948 		(void) strcpy(fsname, sn.failed);
949 	dsl_sync_task_group_destroy(sn.dstg);
950 	spa_close(spa, FTAG);
951 	return (err);
952 }
953 
954 static void
955 dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
956 {
957 	dnode_t *dn;
958 
959 	while (dn = list_head(list)) {
960 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
961 		ASSERT(dn->dn_dbuf->db_data_pending);
962 		/*
963 		 * Initialize dn_zio outside dnode_sync() because the
964 		 * meta-dnode needs to set it outside dnode_sync().
965 		 */
966 		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
967 		ASSERT(dn->dn_zio);
968 
969 		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
970 		list_remove(list, dn);
971 
972 		if (newlist) {
973 			(void) dnode_add_ref(dn, newlist);
974 			list_insert_tail(newlist, dn);
975 		}
976 
977 		dnode_sync(dn, tx);
978 	}
979 }
980 
981 /* ARGSUSED */
982 static void
983 dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
984 {
985 	blkptr_t *bp = zio->io_bp;
986 	objset_t *os = arg;
987 	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
988 
989 	ASSERT(bp == os->os_rootbp);
990 	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
991 	ASSERT(BP_GET_LEVEL(bp) == 0);
992 
993 	/*
994 	 * Update rootbp fill count: it should be the number of objects
995 	 * allocated in the object set (not counting the "special"
996 	 * objects that are stored in the objset_phys_t -- the meta
997 	 * dnode and user/group accounting objects).
998 	 */
999 	bp->blk_fill = 0;
1000 	for (int i = 0; i < dnp->dn_nblkptr; i++)
1001 		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
1002 }
1003 
1004 /* ARGSUSED */
1005 static void
1006 dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
1007 {
1008 	blkptr_t *bp = zio->io_bp;
1009 	blkptr_t *bp_orig = &zio->io_bp_orig;
1010 	objset_t *os = arg;
1011 
1012 	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
1013 		ASSERT(BP_EQUAL(bp, bp_orig));
1014 	} else {
1015 		dsl_dataset_t *ds = os->os_dsl_dataset;
1016 		dmu_tx_t *tx = os->os_synctx;
1017 
1018 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
1019 		dsl_dataset_block_born(ds, bp, tx);
1020 	}
1021 }
1022 
1023 /* called from dsl */
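/*
 * Write out this objset's dirty state for the given txg: issue the
 * arc_write() of the objset_phys_t block, sync the special dnodes and
 * every dirty or freed dnode beneath it, kick off the I/Os queued on
 * the meta-dnode's level-0 dirty records, and sync the intent log.
 */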
1024 void
1025 dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
1026 {
1027 	int txgoff;
1028 	zbookmark_t zb;
1029 	zio_prop_t zp;
1030 	zio_t *zio;
1031 	list_t *list;
1032 	list_t *newlist = NULL;
1033 	dbuf_dirty_record_t *dr;
1034 
1035 	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
1036 
1037 	ASSERT(dmu_tx_is_syncing(tx));
1038 	/* XXX the write_done callback should really give us the tx... */
1039 	os->os_synctx = tx;
1040 
1041 	if (os->os_dsl_dataset == NULL) {
1042 		/*
1043 		 * This is the MOS.  If we have upgraded,
1044 		 * spa_max_replication() could change, so reset
1045 		 * os_copies here.
1046 		 */
1047 		os->os_copies = spa_max_replication(os->os_spa);
1048 	}
1049 
1050 	/*
1051 	 * Create the root block IO
1052 	 */
1053 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
1054 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
1055 	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1056 	VERIFY3U(0, ==, arc_release_bp(os->os_phys_buf, &os->os_phys_buf,
1057 	    os->os_rootbp, os->os_spa, &zb));
1058 
1059 	dmu_write_policy(os, NULL, 0, 0, &zp);
1060 
1061 	zio = arc_write(pio, os->os_spa, tx->tx_txg,
1062 	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), &zp,
1063 	    dmu_objset_write_ready, dmu_objset_write_done, os,
1064 	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
1065 
1066 	/*
1067 	 * Sync special dnodes - the parent IO for the sync is the root block
1068 	 */
1069 	DMU_META_DNODE(os)->dn_zio = zio;
1070 	dnode_sync(DMU_META_DNODE(os), tx);
1071 
1072 	os->os_phys->os_flags = os->os_flags;
1073 
1074 	if (DMU_USERUSED_DNODE(os) &&
1075 	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
1076 		DMU_USERUSED_DNODE(os)->dn_zio = zio;
1077 		dnode_sync(DMU_USERUSED_DNODE(os), tx);
1078 		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
1079 		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
1080 	}
1081 
1082 	txgoff = tx->tx_txg & TXG_MASK;
1083 
1084 	if (dmu_objset_userused_enabled(os)) {
1085 		newlist = &os->os_synced_dnodes;
1086 		/*
1087 		 * We must create the list here because it uses the
1088 		 * dn_dirty_link[] of this txg.
1089 		 */
1090 		list_create(newlist, sizeof (dnode_t),
1091 		    offsetof(dnode_t, dn_dirty_link[txgoff]));
1092 	}
1093 
1094 	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
1095 	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
1096 
1097 	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
1098 	while (dr = list_head(list)) {
1099 		ASSERT(dr->dr_dbuf->db_level == 0);
1100 		list_remove(list, dr);
1101 		if (dr->dr_zio)
1102 			zio_nowait(dr->dr_zio);
1103 	}
1104 	/*
1105 	 * Free intent log blocks up to this tx.
1106 	 */
1107 	zil_sync(os->os_zil, tx);
1108 	os->os_phys->os_zil_header = os->os_zil_header;
1109 	zio_nowait(zio);
1110 }
1111 
1112 boolean_t
1113 dmu_objset_is_dirty(objset_t *os, uint64_t txg)
1114 {
1115 	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
1116 	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
1117 }
1118 
1119 boolean_t
1120 dmu_objset_is_dirty_anywhere(objset_t *os)
1121 {
1122 	for (int t = 0; t < TXG_SIZE; t++)
1123 		if (dmu_objset_is_dirty(os, t))
1124 			return (B_TRUE);
1125 	return (B_FALSE);
1126 }
1127 
1128 static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
1129 
1130 void
1131 dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
1132 {
1133 	used_cbs[ost] = cb;
1134 }
1135 
1136 boolean_t
1137 dmu_objset_userused_enabled(objset_t *os)
1138 {
1139 	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
1140 	    used_cbs[os->os_phys->os_type] != NULL &&
1141 	    DMU_USERUSED_DNODE(os) != NULL);
1142 }
1143 
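/*
 * Apply one dnode's space delta (the dnode itself plus its used
 * bytes) to both the per-user and per-group used ZAPs; 'subtract'
 * flips the sign so the same helper can retire the old identity
 * and charge the new one.
 */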
1144 static void
1145 do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
1146     uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
1147 {
1148 	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
1149 		int64_t delta = DNODE_SIZE + used;
1150 		if (subtract)
1151 			delta = -delta;
1152 		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
1153 		    user, delta, tx));
1154 		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
1155 		    group, delta, tx));
1156 	}
1157 }
1158 
1159 void
1160 dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
1161 {
1162 	dnode_t *dn;
1163 	list_t *list = &os->os_synced_dnodes;
1164 
1165 	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
1166 
1167 	while (dn = list_head(list)) {
1168 		int flags;
1169 		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1170 		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1171 		    dn->dn_phys->dn_flags &
1172 		    DNODE_FLAG_USERUSED_ACCOUNTED);
1173 
1174 		/* Allocate the user/groupused objects if necessary. */
1175 		if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
1176 			VERIFY(0 == zap_create_claim(os,
1177 			    DMU_USERUSED_OBJECT,
1178 			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1179 			VERIFY(0 == zap_create_claim(os,
1180 			    DMU_GROUPUSED_OBJECT,
1181 			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1182 		}
1183 
1184 		/*
1185 		 * We intentionally modify the zap object even if the
1186 		 * net delta is zero.  Otherwise the block of the zap
1187 		 * object could remain shared between datasets even
1188 		 * though it must differ between them after a bp
1189 		 * rewrite.
1190 		 */
1191 
1192 		flags = dn->dn_id_flags;
1193 		ASSERT(flags);
1194 		if (flags & DN_ID_OLD_EXIST)  {
1195 			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
1196 			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
1197 		}
1198 		if (flags & DN_ID_NEW_EXIST) {
1199 			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
1200 			    dn->dn_phys->dn_flags,  dn->dn_newuid,
1201 			    dn->dn_newgid, B_FALSE, tx);
1202 		}
1203 
1204 		mutex_enter(&dn->dn_mtx);
1205 		dn->dn_oldused = 0;
1206 		dn->dn_oldflags = 0;
1207 		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1208 			dn->dn_olduid = dn->dn_newuid;
1209 			dn->dn_oldgid = dn->dn_newgid;
1210 			dn->dn_id_flags |= DN_ID_OLD_EXIST;
1211 			if (dn->dn_bonuslen == 0)
1212 				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1213 			else
1214 				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1215 		}
1216 		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
1217 		mutex_exit(&dn->dn_mtx);
1218 
1219 		list_remove(list, dn);
1220 		dnode_rele(dn, list);
1221 	}
1222 }
1223 
1224 /*
1225  * Returns a pointer to the data from which to extract the uid/gid.
1226  *
1227  * If no dirty record can be found for the transaction group that is
1228  * syncing, NULL is returned.  In the NULL case it is assumed that
1229  * the uid/gid aren't changing.
1230  */
1231 static void *
1232 dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
1233 {
1234 	dbuf_dirty_record_t *dr, **drp;
1235 	void *data;
1236 
1237 	if (db->db_dirtycnt == 0)
1238 		return (db->db.db_data);  /* Nothing is changing */
1239 
1240 	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1241 		if (dr->dr_txg == tx->tx_txg)
1242 			break;
1243 
1244 	if (dr == NULL) {
1245 		data = NULL;
1246 	} else {
1247 		dnode_t *dn;
1248 
1249 		DB_DNODE_ENTER(dr->dr_dbuf);
1250 		dn = DB_DNODE(dr->dr_dbuf);
1251 
1252 		if (dn->dn_bonuslen == 0 &&
1253 		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
1254 			data = dr->dt.dl.dr_data->b_data;
1255 		else
1256 			data = dr->dt.dl.dr_data;
1257 
1258 		DB_DNODE_EXIT(dr->dr_dbuf);
1259 	}
1260 
1261 	return (data);
1262 }
1263 
1264 void
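/*
 * Capture the uid/gid of this dnode either before (old identity) or
 * after (new identity) the change being synced.  The identity may
 * live in the bonus buffer or, for SA dnodes with no bonus, in the
 * spill block; the per-objset-type callback extracts it.
 */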
1265 dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
1266 {
1267 	objset_t *os = dn->dn_objset;
1268 	void *data = NULL;
1269 	dmu_buf_impl_t *db = NULL;
1270 	uint64_t *user, *group;
1271 	int flags = dn->dn_id_flags;
1272 	int error;
1273 	boolean_t have_spill = B_FALSE;
1274 
1275 	if (!dmu_objset_userused_enabled(dn->dn_objset))
1276 		return;
1277 
1278 	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
1279 	    DN_ID_CHKED_SPILL)))
1280 		return;
1281 
1282 	if (before && dn->dn_bonuslen != 0)
1283 		data = DN_BONUS(dn->dn_phys);
1284 	else if (!before && dn->dn_bonuslen != 0) {
1285 		if (dn->dn_bonus) {
1286 			db = dn->dn_bonus;
1287 			mutex_enter(&db->db_mtx);
1288 			data = dmu_objset_userquota_find_data(db, tx);
1289 		} else {
1290 			data = DN_BONUS(dn->dn_phys);
1291 		}
1292 	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
1293 		int rf = 0;
1294 
1295 		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
1296 			rf |= DB_RF_HAVESTRUCT;
1297 		error = dmu_spill_hold_by_dnode(dn,
1298 		    rf | DB_RF_MUST_SUCCEED,
1299 		    FTAG, (dmu_buf_t **)&db);
1300 		ASSERT(error == 0);
1301 		mutex_enter(&db->db_mtx);
1302 		data = (before) ? db->db.db_data :
1303 		    dmu_objset_userquota_find_data(db, tx);
1304 		have_spill = B_TRUE;
1305 	} else {
1306 		mutex_enter(&dn->dn_mtx);
1307 		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1308 		mutex_exit(&dn->dn_mtx);
1309 		return;
1310 	}
1311 
1312 	if (before) {
1313 		ASSERT(data);
1314 		user = &dn->dn_olduid;
1315 		group = &dn->dn_oldgid;
1316 	} else if (data) {
1317 		user = &dn->dn_newuid;
1318 		group = &dn->dn_newgid;
1319 	}
1320 
1321 	/*
1322 	 * Must always call the callback in case the object
1323 	 * type has changed and the new type isn't one we track.
1324 	 */
1325 	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
1326 	    user, group);
1327 
1328 	/*
1329 	 * Preserve existing uid/gid when the callback can't determine
1330 	 * what the new uid/gid are and the callback returned EEXIST.
1331 	 * The EEXIST error tells us to just use the existing uid/gid.
1332 	 * If we don't know what the old values are then just assign
1333 	 * them to 0, since this is a new file being created.
1334 	 */
1335 	if (!before && data == NULL && error == EEXIST) {
1336 		if (flags & DN_ID_OLD_EXIST) {
1337 			dn->dn_newuid = dn->dn_olduid;
1338 			dn->dn_newgid = dn->dn_oldgid;
1339 		} else {
1340 			dn->dn_newuid = 0;
1341 			dn->dn_newgid = 0;
1342 		}
1343 		error = 0;
1344 	}
1345 
1346 	if (db)
1347 		mutex_exit(&db->db_mtx);
1348 
1349 	mutex_enter(&dn->dn_mtx);
1350 	if (error == 0 && before)
1351 		dn->dn_id_flags |= DN_ID_OLD_EXIST;
1352 	if (error == 0 && !before)
1353 		dn->dn_id_flags |= DN_ID_NEW_EXIST;
1354 
1355 	if (have_spill) {
1356 		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1357 	} else {
1358 		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1359 	}
1360 	mutex_exit(&dn->dn_mtx);
1361 	if (have_spill)
1362 		dmu_buf_rele((dmu_buf_t *)db, FTAG);
1363 }
1364 
1365 boolean_t
1366 dmu_objset_userspace_present(objset_t *os)
1367 {
1368 	return (os->os_phys->os_flags &
1369 	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
1370 }
1371 
1372 int
1373 dmu_objset_userspace_upgrade(objset_t *os)
1374 {
1375 	uint64_t obj;
1376 	int err = 0;
1377 
1378 	if (dmu_objset_userspace_present(os))
1379 		return (0);
1380 	if (!dmu_objset_userused_enabled(os))
1381 		return (ENOTSUP);
1382 	if (dmu_objset_is_snapshot(os))
1383 		return (EINVAL);
1384 
1385 	/*
1386 	 * We simply need to mark every object dirty, so that it will be
1387 	 * synced out and accounted for.  If this is called
1388 	 * concurrently, or if we already did some work before crashing,
1389 	 * that's fine, since we track each object's accounted state
1390 	 * independently.
1391 	 */
1392 
1393 	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
1394 		dmu_tx_t *tx;
1395 		dmu_buf_t *db;
1396 		int objerr;
1397 
1398 		if (issig(JUSTLOOKING) && issig(FORREAL))
1399 			return (EINTR);
1400 
1401 		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
1402 		if (objerr)
1403 			continue;
1404 		tx = dmu_tx_create(os);
1405 		dmu_tx_hold_bonus(tx, obj);
1406 		objerr = dmu_tx_assign(tx, TXG_WAIT);
1407 		if (objerr) {
1408 			dmu_tx_abort(tx);
1409 			continue;
1410 		}
1411 		dmu_buf_will_dirty(db, tx);
1412 		dmu_buf_rele(db, FTAG);
1413 		dmu_tx_commit(tx);
1414 	}
1415 
1416 	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
1417 	txg_wait_synced(dmu_objset_pool(os), 0);
1418 	return (0);
1419 }
1420 
1421 void
1422 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
1423     uint64_t *usedobjsp, uint64_t *availobjsp)
1424 {
1425 	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
1426 	    usedobjsp, availobjsp);
1427 }
1428 
1429 uint64_t
1430 dmu_objset_fsid_guid(objset_t *os)
1431 {
1432 	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
1433 }
1434 
1435 void
1436 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
1437 {
1438 	stat->dds_type = os->os_phys->os_type;
1439 	if (os->os_dsl_dataset)
1440 		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
1441 }
1442 
1443 void
1444 dmu_objset_stats(objset_t *os, nvlist_t *nv)
1445 {
1446 	ASSERT(os->os_dsl_dataset ||
1447 	    os->os_phys->os_type == DMU_OST_META);
1448 
1449 	if (os->os_dsl_dataset != NULL)
1450 		dsl_dataset_stats(os->os_dsl_dataset, nv);
1451 
1452 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
1453 	    os->os_phys->os_type);
1454 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
1455 	    dmu_objset_userspace_present(os));
1456 }
1457 
1458 int
1459 dmu_objset_is_snapshot(objset_t *os)
1460 {
1461 	if (os->os_dsl_dataset != NULL)
1462 		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
1463 	else
1464 		return (B_FALSE);
1465 }
1466 
1467 int
1468 dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
1469     boolean_t *conflict)
1470 {
1471 	dsl_dataset_t *ds = os->os_dsl_dataset;
1472 	uint64_t ignored;
1473 
1474 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
1475 		return (ENOENT);
1476 
1477 	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
1478 	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
1479 	    real, maxlen, conflict));
1480 }
1481 
1482 int
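/*
 * Iterate this objset's snapshot names.  A minimal sketch of the
 * intended use (start with *offp == 0 and feed the updated cursor
 * position back in until ENOENT):
 *
 *	uint64_t id, off = 0;
 *	char buf[MAXNAMELEN];
 *	while (dmu_snapshot_list_next(os, sizeof (buf), buf,
 *	    &id, &off, NULL) == 0)
 *		... visit snapshot 'buf' ...
 */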
1483 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
1484     uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
1485 {
1486 	dsl_dataset_t *ds = os->os_dsl_dataset;
1487 	zap_cursor_t cursor;
1488 	zap_attribute_t attr;
1489 
1490 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
1491 		return (ENOENT);
1492 
1493 	zap_cursor_init_serialized(&cursor,
1494 	    ds->ds_dir->dd_pool->dp_meta_objset,
1495 	    ds->ds_phys->ds_snapnames_zapobj, *offp);
1496 
1497 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1498 		zap_cursor_fini(&cursor);
1499 		return (ENOENT);
1500 	}
1501 
1502 	if (strlen(attr.za_name) + 1 > namelen) {
1503 		zap_cursor_fini(&cursor);
1504 		return (ENAMETOOLONG);
1505 	}
1506 
1507 	(void) strcpy(name, attr.za_name);
1508 	if (idp)
1509 		*idp = attr.za_first_integer;
1510 	if (case_conflict)
1511 		*case_conflict = attr.za_normalization_conflict;
1512 	zap_cursor_advance(&cursor);
1513 	*offp = zap_cursor_serialize(&cursor);
1514 	zap_cursor_fini(&cursor);
1515 
1516 	return (0);
1517 }
1518 
1519 int
1520 dmu_dir_list_next(objset_t *os, int namelen, char *name,
1521     uint64_t *idp, uint64_t *offp)
1522 {
1523 	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1524 	zap_cursor_t cursor;
1525 	zap_attribute_t attr;
1526 
1527 	/* there is no next dir on a snapshot! */
1528 	if (os->os_dsl_dataset->ds_object !=
1529 	    dd->dd_phys->dd_head_dataset_obj)
1530 		return (ENOENT);
1531 
1532 	zap_cursor_init_serialized(&cursor,
1533 	    dd->dd_pool->dp_meta_objset,
1534 	    dd->dd_phys->dd_child_dir_zapobj, *offp);
1535 
1536 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1537 		zap_cursor_fini(&cursor);
1538 		return (ENOENT);
1539 	}
1540 
1541 	if (strlen(attr.za_name) + 1 > namelen) {
1542 		zap_cursor_fini(&cursor);
1543 		return (ENAMETOOLONG);
1544 	}
1545 
1546 	(void) strcpy(name, attr.za_name);
1547 	if (idp)
1548 		*idp = attr.za_first_integer;
1549 	zap_cursor_advance(&cursor);
1550 	*offp = zap_cursor_serialize(&cursor);
1551 	zap_cursor_fini(&cursor);
1552 
1553 	return (0);
1554 }
1555 
1556 struct findarg {
1557 	int (*func)(const char *, void *);
1558 	void *arg;
1559 };
1560 
1561 /* ARGSUSED */
1562 static int
1563 findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
1564 {
1565 	struct findarg *fa = arg;
1566 	return (fa->func(dsname, fa->arg));
1567 }
1568 
1569 /*
1570  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
1571  * Perhaps change all callers to use dmu_objset_find_spa()?
1572  */
1573 int
1574 dmu_objset_find(char *name, int func(const char *, void *), void *arg,
1575     int flags)
1576 {
1577 	struct findarg fa;
1578 	fa.func = func;
1579 	fa.arg = arg;
1580 	return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
1581 }
1582 
1583 /*
1584  * Find all objsets under name, call func on each
1585  */
1586 int
1587 dmu_objset_find_spa(spa_t *spa, const char *name,
1588     int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
1589 {
1590 	dsl_dir_t *dd;
1591 	dsl_pool_t *dp;
1592 	dsl_dataset_t *ds;
1593 	zap_cursor_t zc;
1594 	zap_attribute_t *attr;
1595 	char *child;
1596 	uint64_t thisobj;
1597 	int err;
1598 
1599 	if (name == NULL)
1600 		name = spa_name(spa);
1601 	err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
1602 	if (err)
1603 		return (err);
1604 
1605 	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
1606 	if (dd->dd_myname[0] == '$') {
1607 		dsl_dir_close(dd, FTAG);
1608 		return (0);
1609 	}
1610 
1611 	thisobj = dd->dd_phys->dd_head_dataset_obj;
1612 	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1613 	dp = dd->dd_pool;
1614 
1615 	/*
1616 	 * Iterate over all children.
1617 	 */
1618 	if (flags & DS_FIND_CHILDREN) {
1619 		for (zap_cursor_init(&zc, dp->dp_meta_objset,
1620 		    dd->dd_phys->dd_child_dir_zapobj);
1621 		    zap_cursor_retrieve(&zc, attr) == 0;
1622 		    (void) zap_cursor_advance(&zc)) {
1623 			ASSERT(attr->za_integer_length == sizeof (uint64_t));
1624 			ASSERT(attr->za_num_integers == 1);
1625 
1626 			child = kmem_asprintf("%s/%s", name, attr->za_name);
1627 			err = dmu_objset_find_spa(spa, child, func, arg, flags);
1628 			strfree(child);
1629 			if (err)
1630 				break;
1631 		}
1632 		zap_cursor_fini(&zc);
1633 
1634 		if (err) {
1635 			dsl_dir_close(dd, FTAG);
1636 			kmem_free(attr, sizeof (zap_attribute_t));
1637 			return (err);
1638 		}
1639 	}
1640 
1641 	/*
1642 	 * Iterate over all snapshots.
1643 	 */
1644 	if (flags & DS_FIND_SNAPSHOTS) {
1645 		if (!dsl_pool_sync_context(dp))
1646 			rw_enter(&dp->dp_config_rwlock, RW_READER);
1647 		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
1648 		if (!dsl_pool_sync_context(dp))
1649 			rw_exit(&dp->dp_config_rwlock);
1650 
1651 		if (err == 0) {
1652 			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
1653 			dsl_dataset_rele(ds, FTAG);
1654 
1655 			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
1656 			    zap_cursor_retrieve(&zc, attr) == 0;
1657 			    (void) zap_cursor_advance(&zc)) {
1658 				ASSERT(attr->za_integer_length ==
1659 				    sizeof (uint64_t));
1660 				ASSERT(attr->za_num_integers == 1);
1661 
1662 				child = kmem_asprintf("%s@%s",
1663 				    name, attr->za_name);
1664 				err = func(spa, attr->za_first_integer,
1665 				    child, arg);
1666 				strfree(child);
1667 				if (err)
1668 					break;
1669 			}
1670 			zap_cursor_fini(&zc);
1671 		}
1672 	}
1673 
1674 	dsl_dir_close(dd, FTAG);
1675 	kmem_free(attr, sizeof (zap_attribute_t));
1676 
1677 	if (err)
1678 		return (err);
1679 
1680 	/*
1681 	 * Apply to self if appropriate.
1682 	 */
1683 	err = func(spa, thisobj, name, arg);
1684 	return (err);
1685 }
1686 
1687 /* ARGSUSED */
1688 int
1689 dmu_objset_prefetch(const char *name, void *arg)
1690 {
1691 	dsl_dataset_t *ds;
1692 
1693 	if (dsl_dataset_hold(name, FTAG, &ds))
1694 		return (0);
1695 
1696 	if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
1697 		mutex_enter(&ds->ds_opening_lock);
1698 		if (ds->ds_objset == NULL) {
1699 			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1700 			zbookmark_t zb;
1701 
1702 			SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
1703 			    ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1704 
1705 			(void) dsl_read_nolock(NULL, dsl_dataset_get_spa(ds),
1706 			    &ds->ds_phys->ds_bp, NULL, NULL,
1707 			    ZIO_PRIORITY_ASYNC_READ,
1708 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1709 			    &aflags, &zb);
1710 		}
1711 		mutex_exit(&ds->ds_opening_lock);
1712 	}
1713 
1714 	dsl_dataset_rele(ds, FTAG);
1715 	return (0);
1716 }
1717 
1718 void
1719 dmu_objset_set_user(objset_t *os, void *user_ptr)
1720 {
1721 	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1722 	os->os_user_ptr = user_ptr;
1723 }
1724 
1725 void *
1726 dmu_objset_get_user(objset_t *os)
1727 {
1728 	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1729 	return (os->os_user_ptr);
1730 }
1731