xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_objset.c (revision c717a56157ae0e6fca6a1e3689ae1edc385716a3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_prop.h>
33 #include <sys/dsl_pool.h>
34 #include <sys/dsl_synctask.h>
35 #include <sys/dnode.h>
36 #include <sys/dbuf.h>
37 #include <sys/zvol.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/zio_checksum.h>
40 #include <sys/zap.h>
41 #include <sys/zil.h>
42 #include <sys/dmu_impl.h>
43 
44 
45 spa_t *
46 dmu_objset_spa(objset_t *os)
47 {
48 	return (os->os->os_spa);
49 }
50 
51 zilog_t *
52 dmu_objset_zil(objset_t *os)
53 {
54 	return (os->os->os_zil);
55 }
56 
57 dsl_pool_t *
58 dmu_objset_pool(objset_t *os)
59 {
60 	dsl_dataset_t *ds;
61 
62 	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
63 		return (ds->ds_dir->dd_pool);
64 	else
65 		return (spa_get_dsl(os->os->os_spa));
66 }
67 
68 dsl_dataset_t *
69 dmu_objset_ds(objset_t *os)
70 {
71 	return (os->os->os_dsl_dataset);
72 }
73 
74 dmu_objset_type_t
75 dmu_objset_type(objset_t *os)
76 {
77 	return (os->os->os_phys->os_type);
78 }
79 
80 void
81 dmu_objset_name(objset_t *os, char *buf)
82 {
83 	dsl_dataset_name(os->os->os_dsl_dataset, buf);
84 }
85 
86 uint64_t
87 dmu_objset_id(objset_t *os)
88 {
89 	dsl_dataset_t *ds = os->os->os_dsl_dataset;
90 
91 	return (ds ? ds->ds_object : 0);
92 }
93 
94 static void
95 checksum_changed_cb(void *arg, uint64_t newval)
96 {
97 	objset_impl_t *osi = arg;
98 
99 	/*
100 	 * Inheritance should have been done by now.
101 	 */
102 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
103 
104 	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
105 }
106 
107 static void
108 compression_changed_cb(void *arg, uint64_t newval)
109 {
110 	objset_impl_t *osi = arg;
111 
112 	/*
113 	 * Inheritance and range checking should have been done by now.
114 	 */
115 	ASSERT(newval != ZIO_COMPRESS_INHERIT);
116 
117 	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
118 }
119 
/*
 * Byteswap an objset_phys_t in place: the embedded meta-dnode, the
 * ZIL header (an array of uint64_t), and the objset type field.
 * 'size' must be exactly sizeof (objset_phys_t).
 */
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
}
130 
/*
 * Build the in-memory state (objset_impl_t) for the objset rooted at
 * 'bp'.  Reads the objset_phys_t through the ARC unless bp is a hole
 * (a brand-new objset, which gets a zeroed phys buffer).  For live
 * (non-snapshot) datasets, registers checksum/compression property
 * callbacks; sets up the ZIL, per-txg dirty/free dnode lists, and the
 * meta-dnode.  If another thread raced us and already attached an
 * objset to the dataset, ours is evicted and the winner is returned.
 * Returns 0 with *osip set, or an errno on failure.
 */
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_impl_t **osip)
{
	objset_impl_t *winner, *osi;
	int i, err, checksum;

	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
	osi->os.os = osi;
	osi->os_dsl_dataset = ds;
	osi->os_spa = spa;
	osi->os_rootbp = bp;
	if (!BP_IS_HOLE(osi->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		zb.zb_objset = ds ? ds->ds_object : 0;
		zb.zb_object = 0;
		zb.zb_level = -1;
		zb.zb_blkid = 0;

		dprintf_bp(osi->os_rootbp, "reading %s", "");
		/* Synchronously read the objset phys block into the ARC. */
		err = arc_read(NULL, spa, osi->os_rootbp,
		    dmu_ot[DMU_OT_OBJSET].ot_byteswap,
		    arc_getbuf_func, &osi->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
		osi->os_phys = osi->os_phys_buf->b_data;
		arc_release(osi->os_phys_buf, &osi->os_phys_buf);
	} else {
		/* Hole bp: new objset, start from a zeroed phys buffer. */
		osi->os_phys_buf = arc_buf_alloc(spa, sizeof (objset_phys_t),
		    &osi->os_phys_buf, ARC_BUFC_METADATA);
		osi->os_phys = osi->os_phys_buf->b_data;
		bzero(osi->os_phys, sizeof (objset_phys_t));
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know, and
	 * registering would complicate clone promotion.
	 */
	if (ds && ds->ds_phys->ds_num_children == 0) {
		err = dsl_prop_register(ds, "checksum",
		    checksum_changed_cb, osi);
		if (err == 0)
			err = dsl_prop_register(ds, "compression",
			    compression_changed_cb, osi);
		if (err) {
			VERIFY(arc_buf_remove_ref(osi->os_phys_buf,
			    &osi->os_phys_buf) == 1);
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset. */
		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		osi->os_compress = ZIO_COMPRESS_LZJB;
	}

	osi->os_zil = zil_alloc(&osi->os, &osi->os_phys->os_zil_header);

	/*
	 * Metadata always gets compressed and checksummed.
	 * If the data checksum is multi-bit correctable, and it's not
	 * a ZBT-style checksum, then it's suitable for metadata as well.
	 * Otherwise, the metadata checksum defaults to fletcher4.
	 */
	checksum = osi->os_checksum;

	if (zio_checksum_table[checksum].ci_correctable &&
	    !zio_checksum_table[checksum].ci_zbt)
		osi->os_md_checksum = checksum;
	else
		osi->os_md_checksum = ZIO_CHECKSUM_FLETCHER_4;
	osi->os_md_compress = ZIO_COMPRESS_LZJB;

	/* Per-txg lists of dnodes with dirtied or freed content. */
	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&osi->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&osi->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);

	osi->os_meta_dnode = dnode_special_open(osi,
	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);

	if (ds != NULL) {
		/*
		 * Attach this objset to the dataset.  If another thread
		 * beat us to it, discard ours and use the winner's.
		 */
		winner = dsl_dataset_set_user_ptr(ds, osi, dmu_objset_evict);
		if (winner) {
			dmu_objset_evict(ds, osi);
			osi = winner;
		}
	}

	*osip = osi;
	return (0);
}
238 
/* called from zpl */
/*
 * Open the objset for dataset 'name' in the given DS_MODE, verifying
 * its type unless DMU_OST_ANY was requested.  Reuses the cached
 * objset_impl_t hung off the dataset when present.  On success the
 * caller owns *osp and must release it with dmu_objset_close().
 */
int
dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
    objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;
	objset_t *os;
	objset_impl_t *osi;

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	err = dsl_dataset_open(name, mode, os, &ds);
	if (err) {
		kmem_free(os, sizeof (objset_t));
		return (err);
	}

	/* Reuse the objset already attached to the dataset, if any. */
	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &ds->ds_phys->ds_bp, &osi);
		if (err) {
			dsl_dataset_close(ds, mode, os);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	}

	os->os = osi;
	os->os_mode = mode;

	/* Enforce the requested objset type. */
	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) {
		dmu_objset_close(os);
		return (EINVAL);
	}
	*osp = os;
	return (0);
}
277 
278 void
279 dmu_objset_close(objset_t *os)
280 {
281 	dsl_dataset_close(os->os->os_dsl_dataset, os->os_mode, os);
282 	kmem_free(os, sizeof (objset_t));
283 }
284 
/*
 * Evict the dbufs of every dnode in this objset.  The meta-dnode is
 * moved to the tail so it is processed last, after the dnodes that
 * hold references on it.  With 'try' nonzero the eviction is
 * best-effort (see dnode_evict_dbufs()); returns 1 as soon as one
 * dnode's dbufs could not all be evicted, 0 otherwise.
 */
int
dmu_objset_evict_dbufs(objset_t *os, int try)
{
	objset_impl_t *osi = os->os;
	dnode_t *dn;

	mutex_enter(&osi->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&osi->os_dnodes, osi->os_meta_dnode);
	list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode);

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&osi->os_dnodes);
	    dn && refcount_is_zero(&dn->dn_holds);
	    dn = list_next(&osi->os_dnodes, dn))
		continue;
	if (dn)
		dnode_add_ref(dn, FTAG);

	/*
	 * Hand-over-hand walk: take a hold on the next held dnode
	 * before dropping os_lock, so the list cannot shift under us
	 * while this dnode's dbufs are evicted with the lock dropped.
	 */
	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&osi->os_dnodes, next_dn);
		} while (next_dn && refcount_is_zero(&next_dn->dn_holds));
		if (next_dn)
			dnode_add_ref(next_dn, FTAG);

		mutex_exit(&osi->os_lock);
		if (dnode_evict_dbufs(dn, try)) {
			/* Eviction incomplete; drop our holds and bail. */
			dnode_rele(dn, FTAG);
			if (next_dn)
				dnode_rele(next_dn, FTAG);
			return (1);
		}
		dnode_rele(dn, FTAG);
		mutex_enter(&osi->os_lock);
		dn = next_dn;
	}
	mutex_exit(&osi->os_lock);
	return (0);
}
333 
/*
 * Dataset user-ptr eviction callback: tear down an objset_impl_t when
 * its dataset is released (or when it lost the attach race in
 * dmu_objset_open_impl()).  All dirty state must already have synced;
 * the per-txg dnode lists are asserted empty.
 */
void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	objset_t os;
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	/* Unregister the property callbacks registered at open time. */
	if (ds && ds->ds_phys->ds_num_children == 0) {
		VERIFY(0 == dsl_prop_unregister(ds, "checksum",
		    checksum_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "compression",
		    compression_changed_cb, osi));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	os.os = osi;
	(void) dmu_objset_evict_dbufs(&os, 0);

	/* Only the meta-dnode should remain, and it must be dbuf-free. */
	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);

	dnode_special_close(osi->os_meta_dnode);
	zil_free(osi->os_zil);

	VERIFY(arc_buf_remove_ref(osi->os_phys_buf, &osi->os_phys_buf) == 1);
	mutex_destroy(&osi->os_lock);
	mutex_destroy(&osi->os_obj_lock);
	kmem_free(osi, sizeof (objset_impl_t));
}
372 
/* called from dsl for meta-objset */
/*
 * Create a new objset in syncing context: build its in-memory state,
 * allocate the meta-dnode, and (for non-MOS datasets) pre-size the
 * meta-dnode's indirection levels so dn_nlevels never needs to grow
 * later in open context.  Returns the new objset_impl_t.
 */
objset_impl_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_impl_t *osi;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	/* bp is a hole here, so this cannot fail with an I/O error. */
	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &osi));
	mdn = osi->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	osi->os_phys->os_type = type;

	/*
	 * NOTE(review): ds is NULL for the MOS; this relies on
	 * dsl_dataset_dirty() tolerating a NULL dataset — confirm.
	 */
	dsl_dataset_dirty(ds, tx);

	return (osi);
}
424 
/*
 * Arguments handed from dmu_objset_create() to the create
 * check/sync sync-task callbacks.
 */
struct oscarg {
	/* Caller's initialization hook, run on the new empty objset. */
	void (*userfunc)(objset_t *os, void *arg, dmu_tx_t *tx);
	void *userarg;			/* opaque argument for userfunc */
	dsl_dataset_t *clone_parent;	/* snapshot to clone, or NULL */
	const char *lastname;		/* final component of the new name */
	dmu_objset_type_t type;		/* objset type to create */
};
432 
433 /* ARGSUSED */
434 static int
435 dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
436 {
437 	dsl_dir_t *dd = arg1;
438 	struct oscarg *oa = arg2;
439 	objset_t *mos = dd->dd_pool->dp_meta_objset;
440 	int err;
441 	uint64_t ddobj;
442 
443 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
444 	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
445 	if (err != ENOENT)
446 		return (err ? err : EEXIST);
447 
448 	if (oa->clone_parent != NULL) {
449 		/*
450 		 * You can't clone across pools.
451 		 */
452 		if (oa->clone_parent->ds_dir->dd_pool != dd->dd_pool)
453 			return (EXDEV);
454 
455 		/*
456 		 * You can only clone snapshots, not the head datasets.
457 		 */
458 		if (oa->clone_parent->ds_phys->ds_num_children == 0)
459 			return (EINVAL);
460 	}
461 	return (0);
462 }
463 
/*
 * Sync-task half of dataset creation: create the dsl_dataset and, if
 * it is not a clone (its bp is still a hole), initialize an empty
 * objset of the requested type and run the caller's init callback.
 */
static void
dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	dsl_dataset_t *ds;
	blkptr_t *bp;
	uint64_t dsobj;

	ASSERT(dmu_tx_is_syncing(tx));

	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_parent, tx);

	VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool, dsobj, NULL,
	    DS_MODE_STANDARD | DS_MODE_READONLY, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	if (BP_IS_HOLE(bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, bp, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, tx);
	}
	dsl_dataset_close(ds, DS_MODE_STANDARD | DS_MODE_READONLY, FTAG);
}
493 
494 int
495 dmu_objset_create(const char *name, dmu_objset_type_t type,
496     objset_t *clone_parent,
497     void (*func)(objset_t *os, void *arg, dmu_tx_t *tx), void *arg)
498 {
499 	dsl_dir_t *pdd;
500 	const char *tail;
501 	int err = 0;
502 	struct oscarg oa = { 0 };
503 
504 	ASSERT(strchr(name, '@') == NULL);
505 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
506 	if (err)
507 		return (err);
508 	if (tail == NULL) {
509 		dsl_dir_close(pdd, FTAG);
510 		return (EEXIST);
511 	}
512 
513 	dprintf("name=%s\n", name);
514 
515 	oa.userfunc = func;
516 	oa.userarg = arg;
517 	oa.lastname = tail;
518 	oa.type = type;
519 	if (clone_parent != NULL) {
520 		/*
521 		 * You can't clone to a different type.
522 		 */
523 		if (clone_parent->os->os_phys->os_type != type) {
524 			dsl_dir_close(pdd, FTAG);
525 			return (EINVAL);
526 		}
527 		oa.clone_parent = clone_parent->os->os_dsl_dataset;
528 	}
529 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
530 	    dmu_objset_create_sync, pdd, &oa, 5);
531 	dsl_dir_close(pdd, FTAG);
532 	return (err);
533 }
534 
535 int
536 dmu_objset_destroy(const char *name)
537 {
538 	objset_t *os;
539 	int error;
540 
541 	/*
542 	 * If it looks like we'll be able to destroy it, and there's
543 	 * an unplayed replay log sitting around, destroy the log.
544 	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
545 	 * but the replay log objset is modified in open context.
546 	 */
547 	error = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
548 	if (error == 0) {
549 		zil_destroy(dmu_objset_zil(os), B_FALSE);
550 		dmu_objset_close(os);
551 	}
552 
553 	return (dsl_dataset_destroy(name));
554 }
555 
556 int
557 dmu_objset_rollback(const char *name)
558 {
559 	int err;
560 	objset_t *os;
561 
562 	err = dmu_objset_open(name, DMU_OST_ANY,
563 	    DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT, &os);
564 	if (err == 0) {
565 		err = zil_suspend(dmu_objset_zil(os));
566 		if (err == 0)
567 			zil_resume(dmu_objset_zil(os));
568 		if (err == 0) {
569 			/* XXX uncache everything? */
570 			err = dsl_dataset_rollback(os->os->os_dsl_dataset);
571 		}
572 		dmu_objset_close(os);
573 	}
574 	return (err);
575 }
576 
/* State shared by dmu_objset_snapshot() and its per-dataset callback. */
struct snaparg {
	dsl_sync_task_group_t *dstg;	/* group of per-dataset snap tasks */
	char *snapname;			/* snapshot name (no fs component) */
	char failed[MAXPATHLEN];	/* dataset name that failed, if any */
};
582 
583 static int
584 dmu_objset_snapshot_one(char *name, void *arg)
585 {
586 	struct snaparg *sn = arg;
587 	objset_t *os;
588 	int err;
589 
590 	(void) strcpy(sn->failed, name);
591 
592 	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_STANDARD, &os);
593 	if (err != 0)
594 		return (err);
595 
596 	/*
597 	 * NB: we need to wait for all in-flight changes to get to disk,
598 	 * so that we snapshot those changes.  zil_suspend does this as
599 	 * a side effect.
600 	 */
601 	err = zil_suspend(dmu_objset_zil(os));
602 	if (err == 0) {
603 		dsl_sync_task_create(sn->dstg, dsl_dataset_snapshot_check,
604 		    dsl_dataset_snapshot_sync, os, sn->snapname, 3);
605 	}
606 	return (err);
607 }
608 
/*
 * Snapshot fsname@snapname, optionally recursing over descendant
 * filesystems.  Each dataset is ZIL-suspended and added to a single
 * sync task group so that all snapshots commit together; afterwards
 * every dataset in the group is resumed and closed.  On error,
 * 'fsname' is overwritten with the name of the dataset that failed.
 */
int
dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct snaparg sn = { 0 };
	char *cp;
	spa_t *spa;
	int err;

	(void) strcpy(sn.failed, fsname);

	/* The pool name is the first '/'-delimited component of fsname. */
	cp = strchr(fsname, '/');
	if (cp) {
		*cp = '\0';
		err = spa_open(fsname, &spa, FTAG);
		*cp = '/';
	} else {
		err = spa_open(fsname, &spa, FTAG);
	}
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;

	if (recursive) {
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err)
		goto out;

	err = dsl_sync_task_group_wait(sn.dstg);

	/* Resume logging on, and close, every dataset that was suspended. */
	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		if (dst->dst_err)
			dmu_objset_name(os, sn.failed);
		zil_resume(dmu_objset_zil(os));
		dmu_objset_close(os);
	}
out:
	if (err)
		(void) strcpy(fsname, sn.failed);
	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}
661 
662 static void
663 dmu_objset_sync_dnodes(list_t *list, dmu_tx_t *tx)
664 {
665 	dnode_t *dn;
666 
667 	while (dn = list_head(list)) {
668 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
669 		ASSERT(dn->dn_dbuf->db_data_pending);
670 		/*
671 		 * Initialize dn_zio outside dnode_sync()
672 		 * to accomodate meta-dnode
673 		 */
674 		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
675 		ASSERT(dn->dn_zio);
676 
677 		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
678 		list_remove(list, dn);
679 		dnode_sync(dn, tx);
680 	}
681 }
682 
683 /* ARGSUSED */
684 static void
685 ready(zio_t *zio, arc_buf_t *abuf, void *arg)
686 {
687 	objset_impl_t *os = arg;
688 	blkptr_t *bp = os->os_rootbp;
689 	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
690 	int i;
691 
692 	/*
693 	 * Update rootbp fill count.
694 	 */
695 	bp->blk_fill = 1;	/* count the meta-dnode */
696 	for (i = 0; i < dnp->dn_nblkptr; i++)
697 		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
698 }
699 
/* ARGSUSED */
/*
 * arc_write done callback for the objset root block: stamp the bp's
 * type/level, update dataset block accounting when the block was
 * reallocated, and drop the holds taken for the sync.
 */
static void
killer(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;

	ASSERT3U(zio->io_error, ==, 0);

	BP_SET_TYPE(zio->io_bp, DMU_OT_OBJSET);
	BP_SET_LEVEL(zio->io_bp, 0);

	/*
	 * If the block moved, kill the old copy (when it was born in
	 * this txg) and record the birth of the new one.
	 */
	if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		if (zio->io_bp_orig.blk_birth == os->os_synctx->tx_txg)
			dsl_dataset_block_kill(os->os_dsl_dataset,
			    &zio->io_bp_orig, NULL, os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, zio->io_bp,
		    os->os_synctx);
	}
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	if (os->os_dsl_dataset)
		dmu_buf_rele(os->os_dsl_dataset->ds_dbuf, os->os_dsl_dataset);
}
724 
/* called from dsl */
/*
 * Sync this objset's dirty state for the given txg: issue the root
 * block write, sync the meta-dnode, then the freed and dirty dnodes,
 * kick off the meta-dnode's level-0 dbuf writes, free obsolete intent
 * log blocks, and finally launch the root block I/O.
 */
void
dmu_objset_sync(objset_impl_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	zio_t *zio;
	list_t *list;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	/*
	 * Create the root block IO
	 */
	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = 0;
	/* Kill a root block born in an earlier txg before rewriting it. */
	if (BP_IS_OLDER(os->os_rootbp, tx->tx_txg))
		dsl_dataset_block_kill(os->os_dsl_dataset,
		    os->os_rootbp, pio, tx);
	zio = arc_write(pio, os->os_spa, os->os_md_checksum,
	    os->os_md_compress,
	    dmu_get_replication_level(os->os_spa, &zb, DMU_OT_OBJSET),
	    tx->tx_txg, os->os_rootbp, os->os_phys_buf, ready, killer, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync meta-dnode - the parent IO for the sync is the root block
	 */
	os->os_meta_dnode->dn_zio = zio;
	dnode_sync(os->os_meta_dnode, tx);

	txgoff = tx->tx_txg & TXG_MASK;

	/* Freed dnodes first, then dirtied ones. */
	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], tx);

	/* Launch the meta-dnode's level-0 dirty-record writes. */
	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT(dr->dr_dbuf->db_level == 0);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	zio_nowait(zio);
}
781 
782 void
783 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
784     uint64_t *usedobjsp, uint64_t *availobjsp)
785 {
786 	dsl_dataset_space(os->os->os_dsl_dataset, refdbytesp, availbytesp,
787 	    usedobjsp, availobjsp);
788 }
789 
790 uint64_t
791 dmu_objset_fsid_guid(objset_t *os)
792 {
793 	return (dsl_dataset_fsid_guid(os->os->os_dsl_dataset));
794 }
795 
796 void
797 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
798 {
799 	stat->dds_type = os->os->os_phys->os_type;
800 	if (os->os->os_dsl_dataset)
801 		dsl_dataset_fast_stat(os->os->os_dsl_dataset, stat);
802 }
803 
804 void
805 dmu_objset_stats(objset_t *os, nvlist_t *nv)
806 {
807 	ASSERT(os->os->os_dsl_dataset ||
808 	    os->os->os_phys->os_type == DMU_OST_META);
809 
810 	if (os->os->os_dsl_dataset != NULL)
811 		dsl_dataset_stats(os->os->os_dsl_dataset, nv);
812 
813 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
814 	    os->os->os_phys->os_type);
815 }
816 
817 int
818 dmu_objset_is_snapshot(objset_t *os)
819 {
820 	if (os->os->os_dsl_dataset != NULL)
821 		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
822 	else
823 		return (B_FALSE);
824 }
825 
826 int
827 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
828     uint64_t *idp, uint64_t *offp)
829 {
830 	dsl_dataset_t *ds = os->os->os_dsl_dataset;
831 	zap_cursor_t cursor;
832 	zap_attribute_t attr;
833 
834 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
835 		return (ENOENT);
836 
837 	zap_cursor_init_serialized(&cursor,
838 	    ds->ds_dir->dd_pool->dp_meta_objset,
839 	    ds->ds_phys->ds_snapnames_zapobj, *offp);
840 
841 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
842 		zap_cursor_fini(&cursor);
843 		return (ENOENT);
844 	}
845 
846 	if (strlen(attr.za_name) + 1 > namelen) {
847 		zap_cursor_fini(&cursor);
848 		return (ENAMETOOLONG);
849 	}
850 
851 	(void) strcpy(name, attr.za_name);
852 	if (idp)
853 		*idp = attr.za_first_integer;
854 	zap_cursor_advance(&cursor);
855 	*offp = zap_cursor_serialize(&cursor);
856 	zap_cursor_fini(&cursor);
857 
858 	return (0);
859 }
860 
861 int
862 dmu_dir_list_next(objset_t *os, int namelen, char *name,
863     uint64_t *idp, uint64_t *offp)
864 {
865 	dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
866 	zap_cursor_t cursor;
867 	zap_attribute_t attr;
868 
869 	/* there is no next dir on a snapshot! */
870 	if (os->os->os_dsl_dataset->ds_object !=
871 	    dd->dd_phys->dd_head_dataset_obj)
872 		return (ENOENT);
873 
874 	zap_cursor_init_serialized(&cursor,
875 	    dd->dd_pool->dp_meta_objset,
876 	    dd->dd_phys->dd_child_dir_zapobj, *offp);
877 
878 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
879 		zap_cursor_fini(&cursor);
880 		return (ENOENT);
881 	}
882 
883 	if (strlen(attr.za_name) + 1 > namelen) {
884 		zap_cursor_fini(&cursor);
885 		return (ENAMETOOLONG);
886 	}
887 
888 	(void) strcpy(name, attr.za_name);
889 	if (idp)
890 		*idp = attr.za_first_integer;
891 	zap_cursor_advance(&cursor);
892 	*offp = zap_cursor_serialize(&cursor);
893 	zap_cursor_fini(&cursor);
894 
895 	return (0);
896 }
897 
898 /*
899  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
900  */
901 int
902 dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
903 {
904 	dsl_dir_t *dd;
905 	objset_t *os;
906 	uint64_t snapobj;
907 	zap_cursor_t zc;
908 	zap_attribute_t attr;
909 	char *child;
910 	int do_self, err;
911 
912 	err = dsl_dir_open(name, FTAG, &dd, NULL);
913 	if (err)
914 		return (err);
915 
916 	/* NB: the $MOS dir doesn't have a head dataset */
917 	do_self = (dd->dd_phys->dd_head_dataset_obj != 0);
918 
919 	/*
920 	 * Iterate over all children.
921 	 */
922 	if (flags & DS_FIND_CHILDREN) {
923 		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset,
924 		    dd->dd_phys->dd_child_dir_zapobj);
925 		    zap_cursor_retrieve(&zc, &attr) == 0;
926 		    (void) zap_cursor_advance(&zc)) {
927 			ASSERT(attr.za_integer_length == sizeof (uint64_t));
928 			ASSERT(attr.za_num_integers == 1);
929 
930 			/*
931 			 * No separating '/' because parent's name ends in /.
932 			 */
933 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
934 			/* XXX could probably just use name here */
935 			dsl_dir_name(dd, child);
936 			(void) strcat(child, "/");
937 			(void) strcat(child, attr.za_name);
938 			err = dmu_objset_find(child, func, arg, flags);
939 			kmem_free(child, MAXPATHLEN);
940 			if (err)
941 				break;
942 		}
943 		zap_cursor_fini(&zc);
944 
945 		if (err) {
946 			dsl_dir_close(dd, FTAG);
947 			return (err);
948 		}
949 	}
950 
951 	/*
952 	 * Iterate over all snapshots.
953 	 */
954 	if ((flags & DS_FIND_SNAPSHOTS) &&
955 	    dmu_objset_open(name, DMU_OST_ANY,
956 	    DS_MODE_STANDARD | DS_MODE_READONLY, &os) == 0) {
957 
958 		snapobj = os->os->os_dsl_dataset->ds_phys->ds_snapnames_zapobj;
959 		dmu_objset_close(os);
960 
961 		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset, snapobj);
962 		    zap_cursor_retrieve(&zc, &attr) == 0;
963 		    (void) zap_cursor_advance(&zc)) {
964 			ASSERT(attr.za_integer_length == sizeof (uint64_t));
965 			ASSERT(attr.za_num_integers == 1);
966 
967 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
968 			/* XXX could probably just use name here */
969 			dsl_dir_name(dd, child);
970 			(void) strcat(child, "@");
971 			(void) strcat(child, attr.za_name);
972 			err = func(child, arg);
973 			kmem_free(child, MAXPATHLEN);
974 			if (err)
975 				break;
976 		}
977 		zap_cursor_fini(&zc);
978 	}
979 
980 	dsl_dir_close(dd, FTAG);
981 
982 	if (err)
983 		return (err);
984 
985 	/*
986 	 * Apply to self if appropriate.
987 	 */
988 	if (do_self)
989 		err = func(name, arg);
990 	return (err);
991 }
992