xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_pool.c (revision a7a845e4bf22fd1b2a284729ccd95c7370a0438c)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
223f9d6ad7SLin Ling  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23be6fd75aSMatthew Ahrens  * Copyright (c) 2013 by Delphix. All rights reserved.
24*a7a845e4SSteven Hartland  * Copyright (c) 2013 Steven Hartland. All rights reserved.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens #include <sys/dsl_pool.h>
28fa9e4066Sahrens #include <sys/dsl_dataset.h>
293f9d6ad7SLin Ling #include <sys/dsl_prop.h>
30fa9e4066Sahrens #include <sys/dsl_dir.h>
311d452cf5Sahrens #include <sys/dsl_synctask.h>
323f9d6ad7SLin Ling #include <sys/dsl_scan.h>
333f9d6ad7SLin Ling #include <sys/dnode.h>
34fa9e4066Sahrens #include <sys/dmu_tx.h>
35fa9e4066Sahrens #include <sys/dmu_objset.h>
36fa9e4066Sahrens #include <sys/arc.h>
37fa9e4066Sahrens #include <sys/zap.h>
38c717a561Smaybee #include <sys/zio.h>
39fa9e4066Sahrens #include <sys/zfs_context.h>
40fa9e4066Sahrens #include <sys/fs/zfs.h>
41088f3894Sahrens #include <sys/zfs_znode.h>
42088f3894Sahrens #include <sys/spa_impl.h>
43cde58dbcSMatthew Ahrens #include <sys/dsl_deadlist.h>
44ad135b5dSChristopher Siden #include <sys/bptree.h>
45ad135b5dSChristopher Siden #include <sys/zfeature.h>
46ce636f8bSMatthew Ahrens #include <sys/zil_impl.h>
473b2aab18SMatthew Ahrens #include <sys/dsl_userhold.h>
48fa9e4066Sahrens 
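/*
 * Write throttle tunables.  Each txg's write limit (dp_write_limit)
 * starts at zfs_write_limit_min and is re-derived in dsl_pool_sync()
 * from the measured throughput and zfs_txg_synctime_ms, clamped to
 * zfs_write_limit_inflated (itself derived from physical memory and
 * zfs_write_limit_shift).  zfs_write_limit_override and
 * zfs_no_write_throttle bypass the throttle in
 * dsl_pool_tempreserve_space().
 */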
491ab7f2deSmaybee int zfs_no_write_throttle = 0;
5005715f94SMark Maybee int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
5144ecc532SGeorge Wilson int zfs_txg_synctime_ms = 1000;		/* target millisecs to sync a txg */
5205715f94SMark Maybee 
5305715f94SMark Maybee uint64_t zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
5405715f94SMark Maybee uint64_t zfs_write_limit_max = 0;		/* max data payload per txg */
5505715f94SMark Maybee uint64_t zfs_write_limit_inflated = 0;
561ab7f2deSmaybee uint64_t zfs_write_limit_override = 0;
571ab7f2deSmaybee 
5805715f94SMark Maybee kmutex_t zfs_write_limit_lock;
5905715f94SMark Maybee 
6005715f94SMark Maybee static pgcnt_t old_physmem = 0;
61088f3894Sahrens 
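/*
 * Once a txg is filled beyond 7/8ths of its write limit, new writers
 * are delayed by zfs_throttle_delay (at zfs_throttle_resolution
 * granularity) so the sync thread can catch up; see
 * dsl_pool_tempreserve_space().
 */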
620689f76cSAdam Leventhal hrtime_t zfs_throttle_delay = MSEC2NSEC(10);
630689f76cSAdam Leventhal hrtime_t zfs_throttle_resolution = MSEC2NSEC(10);
640689f76cSAdam Leventhal 
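/*
 * Look up a "special" directory (e.g. $MOS, $FREE, $ORIGIN) by name in
 * the root dsl_dir's child ZAP and return a hold on it in *ddp.
 */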
653f9d6ad7SLin Ling int
66088f3894Sahrens dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
67fa9e4066Sahrens {
68fa9e4066Sahrens 	uint64_t obj;
69fa9e4066Sahrens 	int err;
70fa9e4066Sahrens 
71fa9e4066Sahrens 	err = zap_lookup(dp->dp_meta_objset,
72fa9e4066Sahrens 	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
73088f3894Sahrens 	    name, sizeof (obj), 1, &obj);
74ea8dc4b6Seschrock 	if (err)
75ea8dc4b6Seschrock 		return (err);
76fa9e4066Sahrens 
773b2aab18SMatthew Ahrens 	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
78fa9e4066Sahrens }
79fa9e4066Sahrens 
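/*
 * Allocate the in-core dsl_pool_t and set up its infrastructure: the
 * config lock, txg machinery, per-txg dirty lists, initial write limit,
 * and the vnode-release taskq.  No on-disk state is read here beyond
 * the root block pointer already cached in the spa.
 */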
80fa9e4066Sahrens static dsl_pool_t *
81fa9e4066Sahrens dsl_pool_open_impl(spa_t *spa, uint64_t txg)
82fa9e4066Sahrens {
83fa9e4066Sahrens 	dsl_pool_t *dp;
84fa9e4066Sahrens 	blkptr_t *bp = spa_get_rootblkptr(spa);
85fa9e4066Sahrens 
86fa9e4066Sahrens 	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
87fa9e4066Sahrens 	dp->dp_spa = spa;
88fa9e4066Sahrens 	dp->dp_meta_rootbp = *bp;
893b2aab18SMatthew Ahrens 	rrw_init(&dp->dp_config_rwlock, B_TRUE);
901ab7f2deSmaybee 	dp->dp_write_limit = zfs_write_limit_min;
91fa9e4066Sahrens 	txg_init(dp, txg);
92fa9e4066Sahrens 
93fa9e4066Sahrens 	txg_list_create(&dp->dp_dirty_datasets,
94fa9e4066Sahrens 	    offsetof(dsl_dataset_t, ds_dirty_link));
95ce636f8bSMatthew Ahrens 	txg_list_create(&dp->dp_dirty_zilogs,
96ce636f8bSMatthew Ahrens 	    offsetof(zilog_t, zl_dirty_link));
97fa9e4066Sahrens 	txg_list_create(&dp->dp_dirty_dirs,
98fa9e4066Sahrens 	    offsetof(dsl_dir_t, dd_dirty_link));
991d452cf5Sahrens 	txg_list_create(&dp->dp_sync_tasks,
1003b2aab18SMatthew Ahrens 	    offsetof(dsl_sync_task_t, dst_node));
101fa9e4066Sahrens 
1021ab7f2deSmaybee 	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
1031ab7f2deSmaybee 
1049d3574bfSNeil Perrin 	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
1059d3574bfSNeil Perrin 	    1, 4, 0);
1069d3574bfSNeil Perrin 
107fa9e4066Sahrens 	return (dp);
108fa9e4066Sahrens }
109fa9e4066Sahrens 
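/*
 * First stage of pool open: create the dsl_pool_t and open the MOS
 * from the root block pointer.  On failure the partially constructed
 * pool is torn down here.
 */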
110ea8dc4b6Seschrock int
111ad135b5dSChristopher Siden dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
112fa9e4066Sahrens {
113fa9e4066Sahrens 	int err;
114fa9e4066Sahrens 	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
115ad135b5dSChristopher Siden 
116ad135b5dSChristopher Siden 	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
117ad135b5dSChristopher Siden 	    &dp->dp_meta_objset);
118ad135b5dSChristopher Siden 	if (err != 0)
119ad135b5dSChristopher Siden 		dsl_pool_close(dp);
120ad135b5dSChristopher Siden 	else
121ad135b5dSChristopher Siden 		*dpp = dp;
122ad135b5dSChristopher Siden 
123ad135b5dSChristopher Siden 	return (err);
124ad135b5dSChristopher Siden }
125ad135b5dSChristopher Siden 
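/*
 * Second stage of pool open: with the MOS readable, hold the root
 * dsl_dir and the $MOS directory, the $ORIGIN snapshot and the $FREE
 * directory / free bpobj where the pool version supports them, look up
 * feature-dependent objects (async-destroy bptree, empty bpobj) and
 * the temporary-hold ZAP, and initialize the scan state.  Runs with
 * the config lock held as writer.
 */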
126ad135b5dSChristopher Siden int
127ad135b5dSChristopher Siden dsl_pool_open(dsl_pool_t *dp)
128ad135b5dSChristopher Siden {
129ad135b5dSChristopher Siden 	int err;
130088f3894Sahrens 	dsl_dir_t *dd;
131088f3894Sahrens 	dsl_dataset_t *ds;
132cde58dbcSMatthew Ahrens 	uint64_t obj;
133fa9e4066Sahrens 
1343b2aab18SMatthew Ahrens 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
135fa9e4066Sahrens 	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
136fa9e4066Sahrens 	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
137fa9e4066Sahrens 	    &dp->dp_root_dir_obj);
138ea8dc4b6Seschrock 	if (err)
139ea8dc4b6Seschrock 		goto out;
140ea8dc4b6Seschrock 
1413b2aab18SMatthew Ahrens 	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
142ea8dc4b6Seschrock 	    NULL, dp, &dp->dp_root_dir);
143ea8dc4b6Seschrock 	if (err)
144ea8dc4b6Seschrock 		goto out;
145fa9e4066Sahrens 
146088f3894Sahrens 	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
147ea8dc4b6Seschrock 	if (err)
148ea8dc4b6Seschrock 		goto out;
149ea8dc4b6Seschrock 
150ad135b5dSChristopher Siden 	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
151088f3894Sahrens 		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
152088f3894Sahrens 		if (err)
153088f3894Sahrens 			goto out;
154088f3894Sahrens 		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
155088f3894Sahrens 		    FTAG, &ds);
1568f63aa46SLin Ling 		if (err == 0) {
1578f63aa46SLin Ling 			err = dsl_dataset_hold_obj(dp,
1588f63aa46SLin Ling 			    ds->ds_phys->ds_prev_snap_obj, dp,
1598f63aa46SLin Ling 			    &dp->dp_origin_snap);
1608f63aa46SLin Ling 			dsl_dataset_rele(ds, FTAG);
1618f63aa46SLin Ling 		}
1623b2aab18SMatthew Ahrens 		dsl_dir_rele(dd, dp);
163088f3894Sahrens 		if (err)
164088f3894Sahrens 			goto out;
165088f3894Sahrens 	}
166088f3894Sahrens 
167ad135b5dSChristopher Siden 	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
168cde58dbcSMatthew Ahrens 		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
169cde58dbcSMatthew Ahrens 		    &dp->dp_free_dir);
170cde58dbcSMatthew Ahrens 		if (err)
171cde58dbcSMatthew Ahrens 			goto out;
172cde58dbcSMatthew Ahrens 
173cde58dbcSMatthew Ahrens 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
174cde58dbcSMatthew Ahrens 		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
175cde58dbcSMatthew Ahrens 		if (err)
176cde58dbcSMatthew Ahrens 			goto out;
1773b2aab18SMatthew Ahrens 		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
178cde58dbcSMatthew Ahrens 		    dp->dp_meta_objset, obj));
179cde58dbcSMatthew Ahrens 	}
180cde58dbcSMatthew Ahrens 
181ad135b5dSChristopher Siden 	if (spa_feature_is_active(dp->dp_spa,
182ad135b5dSChristopher Siden 	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
183ad135b5dSChristopher Siden 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
184ad135b5dSChristopher Siden 		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
185ad135b5dSChristopher Siden 		    &dp->dp_bptree_obj);
186ad135b5dSChristopher Siden 		if (err != 0)
187ad135b5dSChristopher Siden 			goto out;
188ad135b5dSChristopher Siden 	}
189ad135b5dSChristopher Siden 
190f1745736SMatthew Ahrens 	if (spa_feature_is_active(dp->dp_spa,
191f1745736SMatthew Ahrens 	    &spa_feature_table[SPA_FEATURE_EMPTY_BPOBJ])) {
192f1745736SMatthew Ahrens 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
193f1745736SMatthew Ahrens 		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
194f1745736SMatthew Ahrens 		    &dp->dp_empty_bpobj);
195f1745736SMatthew Ahrens 		if (err != 0)
196f1745736SMatthew Ahrens 			goto out;
197f1745736SMatthew Ahrens 	}
198f1745736SMatthew Ahrens 
199ca45db41SChris Kirby 	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
200ca45db41SChris Kirby 	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
201ca45db41SChris Kirby 	    &dp->dp_tmp_userrefs_obj);
202ca45db41SChris Kirby 	if (err == ENOENT)
203ca45db41SChris Kirby 		err = 0;
204ca45db41SChris Kirby 	if (err)
205ca45db41SChris Kirby 		goto out;
206ca45db41SChris Kirby 
207ad135b5dSChristopher Siden 	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);
208088f3894Sahrens 
209ea8dc4b6Seschrock out:
2103b2aab18SMatthew Ahrens 	rrw_exit(&dp->dp_config_rwlock, FTAG);
211ea8dc4b6Seschrock 	return (err);
212fa9e4066Sahrens }
213fa9e4066Sahrens 
214fa9e4066Sahrens void
215fa9e4066Sahrens dsl_pool_close(dsl_pool_t *dp)
216fa9e4066Sahrens {
217088f3894Sahrens 	/* drop our references from dsl_pool_open() */
218088f3894Sahrens 
219088f3894Sahrens 	/*
220088f3894Sahrens 	 * Since we held the origin_snap from "syncing" context (which
221088f3894Sahrens 	 * includes pool-opening context), it actually only got a "ref"
222088f3894Sahrens 	 * and not a hold, so just drop that here.
223088f3894Sahrens 	 */
224088f3894Sahrens 	if (dp->dp_origin_snap)
2253b2aab18SMatthew Ahrens 		dsl_dataset_rele(dp->dp_origin_snap, dp);
226ea8dc4b6Seschrock 	if (dp->dp_mos_dir)
2273b2aab18SMatthew Ahrens 		dsl_dir_rele(dp->dp_mos_dir, dp);
228cde58dbcSMatthew Ahrens 	if (dp->dp_free_dir)
2293b2aab18SMatthew Ahrens 		dsl_dir_rele(dp->dp_free_dir, dp);
230ea8dc4b6Seschrock 	if (dp->dp_root_dir)
2313b2aab18SMatthew Ahrens 		dsl_dir_rele(dp->dp_root_dir, dp);
232fa9e4066Sahrens 
233cde58dbcSMatthew Ahrens 	bpobj_close(&dp->dp_free_bpobj);
234cde58dbcSMatthew Ahrens 
235fa9e4066Sahrens 	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
236ea8dc4b6Seschrock 	if (dp->dp_meta_objset)
237503ad85cSMatthew Ahrens 		dmu_objset_evict(dp->dp_meta_objset);
238fa9e4066Sahrens 
239fa9e4066Sahrens 	txg_list_destroy(&dp->dp_dirty_datasets);
240ce636f8bSMatthew Ahrens 	txg_list_destroy(&dp->dp_dirty_zilogs);
24154a91118SChris Kirby 	txg_list_destroy(&dp->dp_sync_tasks);
242fa9e4066Sahrens 	txg_list_destroy(&dp->dp_dirty_dirs);
243fa9e4066Sahrens 
244874395d5Smaybee 	arc_flush(dp->dp_spa);
245fa9e4066Sahrens 	txg_fini(dp);
2463f9d6ad7SLin Ling 	dsl_scan_fini(dp);
2473b2aab18SMatthew Ahrens 	rrw_destroy(&dp->dp_config_rwlock);
2481ab7f2deSmaybee 	mutex_destroy(&dp->dp_lock);
2499d3574bfSNeil Perrin 	taskq_destroy(dp->dp_vnrele_taskq);
25088b7b0f2SMatthew Ahrens 	if (dp->dp_blkstats)
25188b7b0f2SMatthew Ahrens 		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
252fa9e4066Sahrens 	kmem_free(dp, sizeof (dsl_pool_t));
253fa9e4066Sahrens }
254fa9e4066Sahrens 
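/*
 * Create a new pool: construct the MOS and the pool directory ZAP, the
 * root and special dsl_dirs, the $ORIGIN (on sufficiently new pool
 * versions), and the root dataset with its ZFS objset, all under a tx
 * assigned to the creation txg.
 */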
255fa9e4066Sahrens dsl_pool_t *
2560a48a24eStimh dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
257fa9e4066Sahrens {
258fa9e4066Sahrens 	int err;
259fa9e4066Sahrens 	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
260fa9e4066Sahrens 	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
261503ad85cSMatthew Ahrens 	objset_t *os;
262088f3894Sahrens 	dsl_dataset_t *ds;
263cde58dbcSMatthew Ahrens 	uint64_t obj;
264088f3894Sahrens 
2653b2aab18SMatthew Ahrens 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
2663b2aab18SMatthew Ahrens 
267088f3894Sahrens 	/* create and open the MOS (meta-objset) */
268503ad85cSMatthew Ahrens 	dp->dp_meta_objset = dmu_objset_create_impl(spa,
269503ad85cSMatthew Ahrens 	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
270fa9e4066Sahrens 
271fa9e4066Sahrens 	/* create the pool directory */
272fa9e4066Sahrens 	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
273fa9e4066Sahrens 	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
274fb09f5aaSMadhav Suresh 	ASSERT0(err);
275fa9e4066Sahrens 
2763f9d6ad7SLin Ling 	/* Initialize scan structures */
2773b2aab18SMatthew Ahrens 	VERIFY0(dsl_scan_init(dp, txg));
2783f9d6ad7SLin Ling 
279fa9e4066Sahrens 	/* create and open the root dir */
280088f3894Sahrens 	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
2813b2aab18SMatthew Ahrens 	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
282ea8dc4b6Seschrock 	    NULL, dp, &dp->dp_root_dir));
283fa9e4066Sahrens 
284fa9e4066Sahrens 	/* create and open the meta-objset dir */
285088f3894Sahrens 	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
2863b2aab18SMatthew Ahrens 	VERIFY0(dsl_pool_open_special_dir(dp,
287088f3894Sahrens 	    MOS_DIR_NAME, &dp->dp_mos_dir));
288088f3894Sahrens 
289cde58dbcSMatthew Ahrens 	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
290cde58dbcSMatthew Ahrens 		/* create and open the free dir */
291cde58dbcSMatthew Ahrens 		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
292cde58dbcSMatthew Ahrens 		    FREE_DIR_NAME, tx);
2933b2aab18SMatthew Ahrens 		VERIFY0(dsl_pool_open_special_dir(dp,
294cde58dbcSMatthew Ahrens 		    FREE_DIR_NAME, &dp->dp_free_dir));
295cde58dbcSMatthew Ahrens 
296cde58dbcSMatthew Ahrens 		/* create and open the free_bplist */
297cde58dbcSMatthew Ahrens 		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
298cde58dbcSMatthew Ahrens 		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
299cde58dbcSMatthew Ahrens 		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
3003b2aab18SMatthew Ahrens 		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
301cde58dbcSMatthew Ahrens 		    dp->dp_meta_objset, obj));
302cde58dbcSMatthew Ahrens 	}
303cde58dbcSMatthew Ahrens 
304088f3894Sahrens 	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
305088f3894Sahrens 		dsl_pool_create_origin(dp, tx);
306088f3894Sahrens 
307088f3894Sahrens 	/* create the root dataset */
308cde58dbcSMatthew Ahrens 	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);
309088f3894Sahrens 
310088f3894Sahrens 	/* create the root objset */
3113b2aab18SMatthew Ahrens 	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
312503ad85cSMatthew Ahrens 	os = dmu_objset_create_impl(dp->dp_spa, ds,
313088f3894Sahrens 	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
314088f3894Sahrens #ifdef _KERNEL
315503ad85cSMatthew Ahrens 	zfs_create_fs(os, kcred, zplprops, tx);
316088f3894Sahrens #endif
317088f3894Sahrens 	dsl_dataset_rele(ds, FTAG);
318fa9e4066Sahrens 
319fa9e4066Sahrens 	dmu_tx_commit(tx);
320fa9e4066Sahrens 
3213b2aab18SMatthew Ahrens 	rrw_exit(&dp->dp_config_rwlock, FTAG);
3223b2aab18SMatthew Ahrens 
323fa9e4066Sahrens 	return (dp);
324fa9e4066Sahrens }
325fa9e4066Sahrens 
326ce636f8bSMatthew Ahrens /*
327ce636f8bSMatthew Ahrens  * Account for the meta-objset space in its placeholder dsl_dir.
328ce636f8bSMatthew Ahrens  */
329ce636f8bSMatthew Ahrens void
330ce636f8bSMatthew Ahrens dsl_pool_mos_diduse_space(dsl_pool_t *dp,
331ce636f8bSMatthew Ahrens     int64_t used, int64_t comp, int64_t uncomp)
332ce636f8bSMatthew Ahrens {
333ce636f8bSMatthew Ahrens 	ASSERT3U(comp, ==, uncomp); /* it's all metadata */
334ce636f8bSMatthew Ahrens 	mutex_enter(&dp->dp_lock);
335ce636f8bSMatthew Ahrens 	dp->dp_mos_used_delta += used;
336ce636f8bSMatthew Ahrens 	dp->dp_mos_compressed_delta += comp;
337ce636f8bSMatthew Ahrens 	dp->dp_mos_uncompressed_delta += uncomp;
338ce636f8bSMatthew Ahrens 	mutex_exit(&dp->dp_lock);
339ce636f8bSMatthew Ahrens }
340ce636f8bSMatthew Ahrens 
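/*
 * bplist_iterate() callback: insert each block pointer into the
 * dsl_deadlist passed as the argument.  Used below to migrate each
 * synced dataset's pending deadlist into its on-disk deadlist.
 */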
341cde58dbcSMatthew Ahrens static int
342cde58dbcSMatthew Ahrens deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
343cde58dbcSMatthew Ahrens {
344cde58dbcSMatthew Ahrens 	dsl_deadlist_t *dl = arg;
345cde58dbcSMatthew Ahrens 	dsl_deadlist_insert(dl, bp, tx);
346cde58dbcSMatthew Ahrens 	return (0);
347cde58dbcSMatthew Ahrens }
348cde58dbcSMatthew Ahrens 
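/*
 * Sync one txg of the pool: write out dirty datasets (twice, so that
 * user/group space accounting is pushed out before sync tasks, which
 * may take snapshots, are run), dirty dsl_dirs and the MOS, then run
 * the queued sync tasks and recompute the write throttle for future
 * txgs from the measured throughput.
 */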
349fa9e4066Sahrens void
350fa9e4066Sahrens dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
351fa9e4066Sahrens {
352c717a561Smaybee 	zio_t *zio;
353fa9e4066Sahrens 	dmu_tx_t *tx;
354c717a561Smaybee 	dsl_dir_t *dd;
355c717a561Smaybee 	dsl_dataset_t *ds;
356503ad85cSMatthew Ahrens 	objset_t *mos = dp->dp_meta_objset;
35705715f94SMark Maybee 	hrtime_t start, write_time;
35805715f94SMark Maybee 	uint64_t data_written;
359c717a561Smaybee 	int err;
360ce636f8bSMatthew Ahrens 	list_t synced_datasets;
361ce636f8bSMatthew Ahrens 
362ce636f8bSMatthew Ahrens 	list_create(&synced_datasets, sizeof (dsl_dataset_t),
363ce636f8bSMatthew Ahrens 	    offsetof(dsl_dataset_t, ds_synced_link));
364fa9e4066Sahrens 
3653f9d6ad7SLin Ling 	/*
3663f9d6ad7SLin Ling 	 * We need to copy dp_space_towrite[] before doing
3673b2aab18SMatthew Ahrens 	 * dsl_sync_task_sync(), because
3683f9d6ad7SLin Ling 	 * dsl_dataset_snapshot_reserve_space() will increase
3693f9d6ad7SLin Ling 	 * dp_space_towrite but not actually write anything.
3703f9d6ad7SLin Ling 	 */
3713f9d6ad7SLin Ling 	data_written = dp->dp_space_towrite[txg & TXG_MASK];
3723f9d6ad7SLin Ling 
373fa9e4066Sahrens 	tx = dmu_tx_create_assigned(dp, txg);
374fa9e4066Sahrens 
37505715f94SMark Maybee 	dp->dp_read_overhead = 0;
3760fd90d51SMark Maybee 	start = gethrtime();
37714843421SMatthew Ahrens 
378c717a561Smaybee 	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
379c717a561Smaybee 	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
38014843421SMatthew Ahrens 		/*
38114843421SMatthew Ahrens 		 * We must not sync any non-MOS datasets twice, because
38214843421SMatthew Ahrens 		 * we may have taken a snapshot of them.  However, we
38314843421SMatthew Ahrens 		 * may sync newly-created datasets on pass 2.
38414843421SMatthew Ahrens 		 */
38514843421SMatthew Ahrens 		ASSERT(!list_link_active(&ds->ds_synced_link));
386ce636f8bSMatthew Ahrens 		list_insert_tail(&synced_datasets, ds);
387c717a561Smaybee 		dsl_dataset_sync(ds, zio, tx);
388c717a561Smaybee 	}
38905715f94SMark Maybee 	DTRACE_PROBE(pool_sync__1setup);
390c717a561Smaybee 	err = zio_wait(zio);
39114843421SMatthew Ahrens 
39205715f94SMark Maybee 	write_time = gethrtime() - start;
393c717a561Smaybee 	ASSERT(err == 0);
39405715f94SMark Maybee 	DTRACE_PROBE(pool_sync__2rootzio);
395c717a561Smaybee 
396ce636f8bSMatthew Ahrens 	/*
397ce636f8bSMatthew Ahrens 	 * After the data blocks have been written (ensured by the zio_wait()
398ce636f8bSMatthew Ahrens 	 * above), update the user/group space accounting.
399ce636f8bSMatthew Ahrens 	 */
400ce636f8bSMatthew Ahrens 	for (ds = list_head(&synced_datasets); ds;
401ce636f8bSMatthew Ahrens 	    ds = list_next(&synced_datasets, ds))
4020a586ceaSMark Shellenbaum 		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
40314843421SMatthew Ahrens 
40414843421SMatthew Ahrens 	/*
40514843421SMatthew Ahrens 	 * Sync the datasets again to push out the changes due to
4063f9d6ad7SLin Ling 	 * userspace updates.  This must be done before we process the
407ce636f8bSMatthew Ahrens 	 * sync tasks, so that any snapshots will have the correct
408ce636f8bSMatthew Ahrens 	 * user accounting information (and we won't get confused
409ce636f8bSMatthew Ahrens 	 * about which blocks are part of the snapshot).
41014843421SMatthew Ahrens 	 */
41114843421SMatthew Ahrens 	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
41214843421SMatthew Ahrens 	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
41314843421SMatthew Ahrens 		ASSERT(list_link_active(&ds->ds_synced_link));
41414843421SMatthew Ahrens 		dmu_buf_rele(ds->ds_dbuf, ds);
41514843421SMatthew Ahrens 		dsl_dataset_sync(ds, zio, tx);
41614843421SMatthew Ahrens 	}
41714843421SMatthew Ahrens 	err = zio_wait(zio);
41814843421SMatthew Ahrens 
419b24ab676SJeff Bonwick 	/*
420ce636f8bSMatthew Ahrens 	 * Now that the datasets have been completely synced, we can
421ce636f8bSMatthew Ahrens 	 * clean up our in-memory structures accumulated while syncing:
422ce636f8bSMatthew Ahrens 	 *
423ce636f8bSMatthew Ahrens 	 *  - move dead blocks from the pending deadlist to the on-disk deadlist
424ce636f8bSMatthew Ahrens 	 *  - release hold from dsl_dataset_dirty()
425b24ab676SJeff Bonwick 	 */
426ce636f8bSMatthew Ahrens 	while (ds = list_remove_head(&synced_datasets)) {
427ce636f8bSMatthew Ahrens 		objset_t *os = ds->ds_objset;
428cde58dbcSMatthew Ahrens 		bplist_iterate(&ds->ds_pending_deadlist,
429cde58dbcSMatthew Ahrens 		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
430ce636f8bSMatthew Ahrens 		ASSERT(!dmu_objset_is_dirty(os, txg));
431ce636f8bSMatthew Ahrens 		dmu_buf_rele(ds->ds_dbuf, ds);
432cde58dbcSMatthew Ahrens 	}
433b24ab676SJeff Bonwick 
43405715f94SMark Maybee 	start = gethrtime();
435c717a561Smaybee 	while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg))
436c717a561Smaybee 		dsl_dir_sync(dd, tx);
43705715f94SMark Maybee 	write_time += gethrtime() - start;
438fa9e4066Sahrens 
439ce636f8bSMatthew Ahrens 	/*
440ce636f8bSMatthew Ahrens 	 * The MOS's space is accounted for in the pool/$MOS
441ce636f8bSMatthew Ahrens 	 * (dp_mos_dir).  We can't modify the mos while we're syncing
442ce636f8bSMatthew Ahrens 	 * it, so we remember the deltas and apply them here.
443ce636f8bSMatthew Ahrens 	 */
444ce636f8bSMatthew Ahrens 	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
445ce636f8bSMatthew Ahrens 	    dp->dp_mos_uncompressed_delta != 0) {
446ce636f8bSMatthew Ahrens 		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
447ce636f8bSMatthew Ahrens 		    dp->dp_mos_used_delta,
448ce636f8bSMatthew Ahrens 		    dp->dp_mos_compressed_delta,
449ce636f8bSMatthew Ahrens 		    dp->dp_mos_uncompressed_delta, tx);
450ce636f8bSMatthew Ahrens 		dp->dp_mos_used_delta = 0;
451ce636f8bSMatthew Ahrens 		dp->dp_mos_compressed_delta = 0;
452ce636f8bSMatthew Ahrens 		dp->dp_mos_uncompressed_delta = 0;
453ce636f8bSMatthew Ahrens 	}
454ce636f8bSMatthew Ahrens 
45505715f94SMark Maybee 	start = gethrtime();
456503ad85cSMatthew Ahrens 	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
457503ad85cSMatthew Ahrens 	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
458c717a561Smaybee 		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
459503ad85cSMatthew Ahrens 		dmu_objset_sync(mos, zio, tx);
460c717a561Smaybee 		err = zio_wait(zio);
461c717a561Smaybee 		ASSERT(err == 0);
462fa9e4066Sahrens 		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
463fa9e4066Sahrens 		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
464fa9e4066Sahrens 	}
46505715f94SMark Maybee 	write_time += gethrtime() - start;
46605715f94SMark Maybee 	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
46705715f94SMark Maybee 	    hrtime_t, dp->dp_read_overhead);
46805715f94SMark Maybee 	write_time -= dp->dp_read_overhead;
469fa9e4066Sahrens 
470ce636f8bSMatthew Ahrens 	/*
471ce636f8bSMatthew Ahrens 	 * If we modify a dataset in the same txg that we want to destroy it,
472ce636f8bSMatthew Ahrens 	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
473ce636f8bSMatthew Ahrens 	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
474ce636f8bSMatthew Ahrens 	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
475ce636f8bSMatthew Ahrens 	 * and clearing the hold on it) before we process the sync_tasks.
476ce636f8bSMatthew Ahrens 	 * The MOS data dirtied by the sync_tasks will be synced on the next
477ce636f8bSMatthew Ahrens 	 * pass.
478ce636f8bSMatthew Ahrens 	 */
479ce636f8bSMatthew Ahrens 	DTRACE_PROBE(pool_sync__3task);
480ce636f8bSMatthew Ahrens 	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
4813b2aab18SMatthew Ahrens 		dsl_sync_task_t *dst;
482ce636f8bSMatthew Ahrens 		/*
483ce636f8bSMatthew Ahrens 		 * No more sync tasks should have been added while we
484ce636f8bSMatthew Ahrens 		 * were syncing.
485ce636f8bSMatthew Ahrens 		 */
486ce636f8bSMatthew Ahrens 		ASSERT(spa_sync_pass(dp->dp_spa) == 1);
4873b2aab18SMatthew Ahrens 		while (dst = txg_list_remove(&dp->dp_sync_tasks, txg))
4883b2aab18SMatthew Ahrens 			dsl_sync_task_sync(dst, tx);
489ce636f8bSMatthew Ahrens 	}
490ce636f8bSMatthew Ahrens 
491fa9e4066Sahrens 	dmu_tx_commit(tx);
49205715f94SMark Maybee 
49305715f94SMark Maybee 	dp->dp_space_towrite[txg & TXG_MASK] = 0;
49405715f94SMark Maybee 	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);
49505715f94SMark Maybee 
49605715f94SMark Maybee 	/*
49705715f94SMark Maybee 	 * If the write limit max has not been explicitly set, set it
49805715f94SMark Maybee 	 * to a fraction of available physical memory (default 1/8th).
49905715f94SMark Maybee 	 * Note that we must inflate the limit because the spa
50005715f94SMark Maybee 	 * inflates write sizes to account for data replication.
50105715f94SMark Maybee 	 * Check this each sync phase to catch changing memory size.
50205715f94SMark Maybee 	 */
50305715f94SMark Maybee 	if (physmem != old_physmem && zfs_write_limit_shift) {
50405715f94SMark Maybee 		mutex_enter(&zfs_write_limit_lock);
50505715f94SMark Maybee 		old_physmem = physmem;
50605715f94SMark Maybee 		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
50705715f94SMark Maybee 		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
50805715f94SMark Maybee 		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
50905715f94SMark Maybee 		mutex_exit(&zfs_write_limit_lock);
51005715f94SMark Maybee 	}
51105715f94SMark Maybee 
51205715f94SMark Maybee 	/*
51305715f94SMark Maybee 	 * Attempt to keep the sync time consistent by adjusting the
51405715f94SMark Maybee 	 * amount of write traffic allowed into each transaction group.
51505715f94SMark Maybee 	 * Weight the throughput calculation towards the current value:
51605715f94SMark Maybee 	 * 	thru = 3/4 old_thru + 1/4 new_thru
517fb5dd802SLin Ling 	 *
5180689f76cSAdam Leventhal 	 * Note: write_time is in nanosecs while dp_throughput is expressed in
5190689f76cSAdam Leventhal 	 * bytes per millisecond.
52005715f94SMark Maybee 	 */
52105715f94SMark Maybee 	ASSERT(zfs_write_limit_min > 0);
5220689f76cSAdam Leventhal 	if (data_written > zfs_write_limit_min / 8 &&
5230689f76cSAdam Leventhal 	    write_time > MSEC2NSEC(1)) {
5240689f76cSAdam Leventhal 		uint64_t throughput = data_written / NSEC2MSEC(write_time);
525fb5dd802SLin Ling 
52605715f94SMark Maybee 		if (dp->dp_throughput)
52705715f94SMark Maybee 			dp->dp_throughput = throughput / 4 +
52805715f94SMark Maybee 			    3 * dp->dp_throughput / 4;
52905715f94SMark Maybee 		else
53005715f94SMark Maybee 			dp->dp_throughput = throughput;
53105715f94SMark Maybee 		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
53205715f94SMark Maybee 		    MAX(zfs_write_limit_min,
533fb5dd802SLin Ling 		    dp->dp_throughput * zfs_txg_synctime_ms));
53405715f94SMark Maybee 	}
535fa9e4066Sahrens }
536fa9e4066Sahrens 
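/*
 * Per-txg cleanup after a sync: run zil_clean() for every dataset
 * whose ZIL was dirtied in this txg and drop the holds taken when
 * those zilogs were added to dp_dirty_zilogs.
 */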
537fa9e4066Sahrens void
538b24ab676SJeff Bonwick dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
539fa9e4066Sahrens {
540ce636f8bSMatthew Ahrens 	zilog_t *zilog;
541fa9e4066Sahrens 	dsl_dataset_t *ds;
542fa9e4066Sahrens 
543ce636f8bSMatthew Ahrens 	while (zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg)) {
544ce636f8bSMatthew Ahrens 		ds = dmu_objset_ds(zilog->zl_os);
545ce636f8bSMatthew Ahrens 		zil_clean(zilog, txg);
546ce636f8bSMatthew Ahrens 		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
547ce636f8bSMatthew Ahrens 		dmu_buf_rele(ds->ds_dbuf, zilog);
548fa9e4066Sahrens 	}
549b24ab676SJeff Bonwick 	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
550fa9e4066Sahrens }
551fa9e4066Sahrens 
552c717a561Smaybee /*
553c717a561Smaybee  * TRUE if the current thread is the tx_sync_thread or if we
554c717a561Smaybee  * are being called from SPA context during pool initialization.
555c717a561Smaybee  */
556fa9e4066Sahrens int
557fa9e4066Sahrens dsl_pool_sync_context(dsl_pool_t *dp)
558fa9e4066Sahrens {
559fa9e4066Sahrens 	return (curthread == dp->dp_tx.tx_sync_thread ||
560ad135b5dSChristopher Siden 	    spa_is_initializing(dp->dp_spa));
561fa9e4066Sahrens }
562fa9e4066Sahrens 
563fa9e4066Sahrens uint64_t
564fa9e4066Sahrens dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
565fa9e4066Sahrens {
566fa9e4066Sahrens 	uint64_t space, resv;
567fa9e4066Sahrens 
568fa9e4066Sahrens 	/*
56944cd46caSbillm 	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
570fa9e4066Sahrens 	 * efficiency.
571fa9e4066Sahrens 	 * XXX The intent log is not accounted for, so it must fit
572fa9e4066Sahrens 	 * within this slop.
573fa9e4066Sahrens 	 *
574fa9e4066Sahrens 	 * If we're trying to assess whether it's OK to do a free,
575fa9e4066Sahrens 	 * cut the reservation in half to allow forward progress
576fa9e4066Sahrens 	 * (e.g. make it possible to rm(1) files from a full pool).
577fa9e4066Sahrens 	 */
578485bbbf5SGeorge Wilson 	space = spa_get_dspace(dp->dp_spa);
57944cd46caSbillm 	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
580fa9e4066Sahrens 	if (netfree)
581fa9e4066Sahrens 		resv >>= 1;
582fa9e4066Sahrens 
583fa9e4066Sahrens 	return (space - resv);
584fa9e4066Sahrens }
5851ab7f2deSmaybee 
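/*
 * Reserve space for a transaction against the per-txg write limit.
 * Returns ERESTART (the caller is expected to wait and retry in a
 * later txg) once the limit has been exceeded, and delays the caller
 * when the txg is more than 7/8ths full.
 */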
5861ab7f2deSmaybee int
5871ab7f2deSmaybee dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
5881ab7f2deSmaybee {
5891ab7f2deSmaybee 	uint64_t reserved = 0;
5901ab7f2deSmaybee 	uint64_t write_limit = (zfs_write_limit_override ?
5911ab7f2deSmaybee 	    zfs_write_limit_override : dp->dp_write_limit);
5921ab7f2deSmaybee 
5931ab7f2deSmaybee 	if (zfs_no_write_throttle) {
594c5904d13Seschrock 		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
595c5904d13Seschrock 		    space);
5961ab7f2deSmaybee 		return (0);
5971ab7f2deSmaybee 	}
5981ab7f2deSmaybee 
5991ab7f2deSmaybee 	/*
6001ab7f2deSmaybee 	 * Check to see if we have exceeded the maximum allowed IO for
6011ab7f2deSmaybee 	 * this transaction group.  We can do this without locks since
6021ab7f2deSmaybee 	 * a little slop here is ok.  Note that we do the reserved check
6031ab7f2deSmaybee 	 * with only half the requested reserve: this is because the
6041ab7f2deSmaybee 	 * reserve requests are worst-case, and we really don't want to
6051ab7f2deSmaybee 	 * throttle based off of worst-case estimates.
6061ab7f2deSmaybee 	 */
6071ab7f2deSmaybee 	if (write_limit > 0) {
6081ab7f2deSmaybee 		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
6091ab7f2deSmaybee 		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;
6101ab7f2deSmaybee 
6111ab7f2deSmaybee 		if (reserved && reserved > write_limit)
612be6fd75aSMatthew Ahrens 			return (SET_ERROR(ERESTART));
6131ab7f2deSmaybee 	}
6141ab7f2deSmaybee 
6151ab7f2deSmaybee 	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);
6161ab7f2deSmaybee 
6171ab7f2deSmaybee 	/*
6181ab7f2deSmaybee 	 * If this transaction group is over 7/8ths capacity, delay
6191ab7f2deSmaybee 	 * the caller by zfs_throttle_delay.  This will slow down the
6201ab7f2deSmaybee 	 * "fill" rate until the sync process can catch up with us.
6211ab7f2deSmaybee 	 */
6220689f76cSAdam Leventhal 	if (reserved && reserved > (write_limit - (write_limit >> 3))) {
6230689f76cSAdam Leventhal 		txg_delay(dp, tx->tx_txg, zfs_throttle_delay,
6240689f76cSAdam Leventhal 		    zfs_throttle_resolution);
6250689f76cSAdam Leventhal 	}
6261ab7f2deSmaybee 
6271ab7f2deSmaybee 	return (0);
6281ab7f2deSmaybee }
6291ab7f2deSmaybee 
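/*
 * Release a reservation taken with dsl_pool_tempreserve_space().
 */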
6301ab7f2deSmaybee void
6311ab7f2deSmaybee dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
6321ab7f2deSmaybee {
6331ab7f2deSmaybee 	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
6341ab7f2deSmaybee 	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
6351ab7f2deSmaybee }
6361ab7f2deSmaybee 
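/*
 * Shrink the pool's write limit in response to memory pressure: clamp
 * it to a quarter of the space currently reserved or dirty across all
 * txgs, never raising it and never going below zfs_write_limit_min.
 */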
6371ab7f2deSmaybee void
6381ab7f2deSmaybee dsl_pool_memory_pressure(dsl_pool_t *dp)
6391ab7f2deSmaybee {
6401ab7f2deSmaybee 	uint64_t space_inuse = 0;
6411ab7f2deSmaybee 	int i;
6421ab7f2deSmaybee 
6431ab7f2deSmaybee 	if (dp->dp_write_limit == zfs_write_limit_min)
6441ab7f2deSmaybee 		return;
6451ab7f2deSmaybee 
6461ab7f2deSmaybee 	for (i = 0; i < TXG_SIZE; i++) {
6471ab7f2deSmaybee 		space_inuse += dp->dp_space_towrite[i];
6481ab7f2deSmaybee 		space_inuse += dp->dp_tempreserved[i];
6491ab7f2deSmaybee 	}
6501ab7f2deSmaybee 	dp->dp_write_limit = MAX(zfs_write_limit_min,
6511ab7f2deSmaybee 	    MIN(dp->dp_write_limit, space_inuse / 4));
6521ab7f2deSmaybee }
6531ab7f2deSmaybee 
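/*
 * Record that a transaction will dirty 'space' bytes in its txg; this
 * total feeds the reserved-space check in dsl_pool_tempreserve_space().
 */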
6541ab7f2deSmaybee void
6551ab7f2deSmaybee dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
6561ab7f2deSmaybee {
6571ab7f2deSmaybee 	if (space > 0) {
6581ab7f2deSmaybee 		mutex_enter(&dp->dp_lock);
6591ab7f2deSmaybee 		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
6601ab7f2deSmaybee 		mutex_exit(&dp->dp_lock);
6611ab7f2deSmaybee 	}
6621ab7f2deSmaybee }
663088f3894Sahrens 
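/*
 * dmu_objset_find_dp() callback for dsl_pool_upgrade_clones(): walk a
 * head dataset's snapshot chain back to its origin (or, if it has
 * none, attach the oldest snapshot to the pool-wide $ORIGIN) and
 * record the dataset in the origin's next-clones ZAP, giving older
 * pools the clone bookkeeping that newer on-disk versions expect.
 */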
664088f3894Sahrens /* ARGSUSED */
665088f3894Sahrens static int
6663b2aab18SMatthew Ahrens upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
667088f3894Sahrens {
668088f3894Sahrens 	dmu_tx_t *tx = arg;
669088f3894Sahrens 	dsl_dataset_t *ds, *prev = NULL;
670088f3894Sahrens 	int err;
671088f3894Sahrens 
6723b2aab18SMatthew Ahrens 	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
673088f3894Sahrens 	if (err)
674088f3894Sahrens 		return (err);
675088f3894Sahrens 
676088f3894Sahrens 	while (ds->ds_phys->ds_prev_snap_obj != 0) {
677088f3894Sahrens 		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
678088f3894Sahrens 		    FTAG, &prev);
679088f3894Sahrens 		if (err) {
680088f3894Sahrens 			dsl_dataset_rele(ds, FTAG);
681088f3894Sahrens 			return (err);
682088f3894Sahrens 		}
683088f3894Sahrens 
684088f3894Sahrens 		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
685088f3894Sahrens 			break;
686088f3894Sahrens 		dsl_dataset_rele(ds, FTAG);
687088f3894Sahrens 		ds = prev;
688088f3894Sahrens 		prev = NULL;
689088f3894Sahrens 	}
690088f3894Sahrens 
691088f3894Sahrens 	if (prev == NULL) {
692088f3894Sahrens 		prev = dp->dp_origin_snap;
693088f3894Sahrens 
694088f3894Sahrens 		/*
695088f3894Sahrens 		 * The $ORIGIN can't have any data, or the accounting
696088f3894Sahrens 		 * will be wrong.
697088f3894Sahrens 		 */
6983b2aab18SMatthew Ahrens 		ASSERT0(prev->ds_phys->ds_bp.blk_birth);
699088f3894Sahrens 
700088f3894Sahrens 		/* The origin doesn't get attached to itself */
701088f3894Sahrens 		if (ds->ds_object == prev->ds_object) {
702088f3894Sahrens 			dsl_dataset_rele(ds, FTAG);
703088f3894Sahrens 			return (0);
704088f3894Sahrens 		}
705088f3894Sahrens 
706088f3894Sahrens 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
707088f3894Sahrens 		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
708088f3894Sahrens 		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;
709088f3894Sahrens 
710088f3894Sahrens 		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
711088f3894Sahrens 		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;
712088f3894Sahrens 
713088f3894Sahrens 		dmu_buf_will_dirty(prev->ds_dbuf, tx);
714088f3894Sahrens 		prev->ds_phys->ds_num_children++;
715088f3894Sahrens 
716088f3894Sahrens 		if (ds->ds_phys->ds_next_snap_obj == 0) {
717088f3894Sahrens 			ASSERT(ds->ds_prev == NULL);
7183b2aab18SMatthew Ahrens 			VERIFY0(dsl_dataset_hold_obj(dp,
719088f3894Sahrens 			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
720088f3894Sahrens 		}
721088f3894Sahrens 	}
722088f3894Sahrens 
7233b2aab18SMatthew Ahrens 	ASSERT3U(ds->ds_dir->dd_phys->dd_origin_obj, ==, prev->ds_object);
7243b2aab18SMatthew Ahrens 	ASSERT3U(ds->ds_phys->ds_prev_snap_obj, ==, prev->ds_object);
725088f3894Sahrens 
726088f3894Sahrens 	if (prev->ds_phys->ds_next_clones_obj == 0) {
727c33e334fSMatthew Ahrens 		dmu_buf_will_dirty(prev->ds_dbuf, tx);
728088f3894Sahrens 		prev->ds_phys->ds_next_clones_obj =
729088f3894Sahrens 		    zap_create(dp->dp_meta_objset,
730088f3894Sahrens 		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
731088f3894Sahrens 	}
7323b2aab18SMatthew Ahrens 	VERIFY0(zap_add_int(dp->dp_meta_objset,
733088f3894Sahrens 	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));
734088f3894Sahrens 
735088f3894Sahrens 	dsl_dataset_rele(ds, FTAG);
736088f3894Sahrens 	if (prev != dp->dp_origin_snap)
737088f3894Sahrens 		dsl_dataset_rele(prev, FTAG);
738088f3894Sahrens 	return (0);
739088f3894Sahrens }
740088f3894Sahrens 
741088f3894Sahrens void
742088f3894Sahrens dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
743088f3894Sahrens {
744088f3894Sahrens 	ASSERT(dmu_tx_is_syncing(tx));
745088f3894Sahrens 	ASSERT(dp->dp_origin_snap != NULL);
746088f3894Sahrens 
7473b2aab18SMatthew Ahrens 	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
748c33e334fSMatthew Ahrens 	    tx, DS_FIND_CHILDREN));
749088f3894Sahrens }
750088f3894Sahrens 
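/*
 * dmu_objset_find_dp() callback for dsl_pool_upgrade_dir_clones(): for
 * every dataset that has an origin, add it to the origin's dd_clones
 * ZAP, creating that ZAP on first use.  The caller also creates the
 * $FREE directory and the pool-wide free bpobj as part of the same
 * upgrade.
 */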
751cde58dbcSMatthew Ahrens /* ARGSUSED */
752cde58dbcSMatthew Ahrens static int
7533b2aab18SMatthew Ahrens upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
754cde58dbcSMatthew Ahrens {
755cde58dbcSMatthew Ahrens 	dmu_tx_t *tx = arg;
756cde58dbcSMatthew Ahrens 	objset_t *mos = dp->dp_meta_objset;
757cde58dbcSMatthew Ahrens 
7583b2aab18SMatthew Ahrens 	if (ds->ds_dir->dd_phys->dd_origin_obj != 0) {
759cde58dbcSMatthew Ahrens 		dsl_dataset_t *origin;
760cde58dbcSMatthew Ahrens 
7613b2aab18SMatthew Ahrens 		VERIFY0(dsl_dataset_hold_obj(dp,
762cde58dbcSMatthew Ahrens 		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));
763cde58dbcSMatthew Ahrens 
764cde58dbcSMatthew Ahrens 		if (origin->ds_dir->dd_phys->dd_clones == 0) {
765cde58dbcSMatthew Ahrens 			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
766cde58dbcSMatthew Ahrens 			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
767cde58dbcSMatthew Ahrens 			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
768cde58dbcSMatthew Ahrens 		}
769cde58dbcSMatthew Ahrens 
7703b2aab18SMatthew Ahrens 		VERIFY0(zap_add_int(dp->dp_meta_objset,
7713b2aab18SMatthew Ahrens 		    origin->ds_dir->dd_phys->dd_clones, ds->ds_object, tx));
772cde58dbcSMatthew Ahrens 
773cde58dbcSMatthew Ahrens 		dsl_dataset_rele(origin, FTAG);
774cde58dbcSMatthew Ahrens 	}
775cde58dbcSMatthew Ahrens 	return (0);
776cde58dbcSMatthew Ahrens }
777cde58dbcSMatthew Ahrens 
778cde58dbcSMatthew Ahrens void
779cde58dbcSMatthew Ahrens dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
780cde58dbcSMatthew Ahrens {
781cde58dbcSMatthew Ahrens 	ASSERT(dmu_tx_is_syncing(tx));
782cde58dbcSMatthew Ahrens 	uint64_t obj;
783cde58dbcSMatthew Ahrens 
784cde58dbcSMatthew Ahrens 	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
7853b2aab18SMatthew Ahrens 	VERIFY0(dsl_pool_open_special_dir(dp,
786cde58dbcSMatthew Ahrens 	    FREE_DIR_NAME, &dp->dp_free_dir));
787cde58dbcSMatthew Ahrens 
788cde58dbcSMatthew Ahrens 	/*
789cde58dbcSMatthew Ahrens 	 * We can't use bpobj_alloc(), because spa_version() still
790cde58dbcSMatthew Ahrens 	 * returns the old version, and we need a new-version bpobj with
791cde58dbcSMatthew Ahrens 	 * subobj support.  So call dmu_object_alloc() directly.
792cde58dbcSMatthew Ahrens 	 */
793cde58dbcSMatthew Ahrens 	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
794cde58dbcSMatthew Ahrens 	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
7953b2aab18SMatthew Ahrens 	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
796cde58dbcSMatthew Ahrens 	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
7973b2aab18SMatthew Ahrens 	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
798cde58dbcSMatthew Ahrens 
7993b2aab18SMatthew Ahrens 	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
800cde58dbcSMatthew Ahrens 	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
801cde58dbcSMatthew Ahrens }
802cde58dbcSMatthew Ahrens 
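/*
 * Create the $ORIGIN filesystem and its single snapshot, and keep a
 * hold on that snapshot in dp_origin_snap; it serves as the common
 * origin that otherwise origin-less datasets are attached to.
 */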
803088f3894Sahrens void
804088f3894Sahrens dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
805088f3894Sahrens {
806088f3894Sahrens 	uint64_t dsobj;
807088f3894Sahrens 	dsl_dataset_t *ds;
808088f3894Sahrens 
809088f3894Sahrens 	ASSERT(dmu_tx_is_syncing(tx));
810088f3894Sahrens 	ASSERT(dp->dp_origin_snap == NULL);
8113b2aab18SMatthew Ahrens 	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
812088f3894Sahrens 
813088f3894Sahrens 	/* create the origin dir, ds, & snap-ds */
814088f3894Sahrens 	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
815088f3894Sahrens 	    NULL, 0, kcred, tx);
8163b2aab18SMatthew Ahrens 	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
8173b2aab18SMatthew Ahrens 	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
8183b2aab18SMatthew Ahrens 	VERIFY0(dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
819088f3894Sahrens 	    dp, &dp->dp_origin_snap));
820088f3894Sahrens 	dsl_dataset_rele(ds, FTAG);
821088f3894Sahrens }
8229d3574bfSNeil Perrin 
8239d3574bfSNeil Perrin taskq_t *
8249d3574bfSNeil Perrin dsl_pool_vnrele_taskq(dsl_pool_t *dp)
8259d3574bfSNeil Perrin {
8269d3574bfSNeil Perrin 	return (dp->dp_vnrele_taskq);
8279d3574bfSNeil Perrin }
828ca45db41SChris Kirby 
829ca45db41SChris Kirby /*
830ca45db41SChris Kirby  * Walk through the pool-wide zap object of temporary snapshot user holds
831ca45db41SChris Kirby  * and release them.
832ca45db41SChris Kirby  */
833ca45db41SChris Kirby void
834ca45db41SChris Kirby dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
835ca45db41SChris Kirby {
836ca45db41SChris Kirby 	zap_attribute_t za;
837ca45db41SChris Kirby 	zap_cursor_t zc;
838ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
839ca45db41SChris Kirby 	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
840*a7a845e4SSteven Hartland 	nvlist_t *holds;
841ca45db41SChris Kirby 
842ca45db41SChris Kirby 	if (zapobj == 0)
843ca45db41SChris Kirby 		return;
844ca45db41SChris Kirby 	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
845ca45db41SChris Kirby 
846*a7a845e4SSteven Hartland 	holds = fnvlist_alloc();
847*a7a845e4SSteven Hartland 
848ca45db41SChris Kirby 	for (zap_cursor_init(&zc, mos, zapobj);
849ca45db41SChris Kirby 	    zap_cursor_retrieve(&zc, &za) == 0;
850ca45db41SChris Kirby 	    zap_cursor_advance(&zc)) {
851ca45db41SChris Kirby 		char *htag;
852*a7a845e4SSteven Hartland 		nvlist_t *tags;
853ca45db41SChris Kirby 
854ca45db41SChris Kirby 		htag = strchr(za.za_name, '-');
855ca45db41SChris Kirby 		*htag = '\0';
856ca45db41SChris Kirby 		++htag;
857*a7a845e4SSteven Hartland 		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
858*a7a845e4SSteven Hartland 			tags = fnvlist_alloc();
859*a7a845e4SSteven Hartland 			fnvlist_add_boolean(tags, htag);
860*a7a845e4SSteven Hartland 			fnvlist_add_nvlist(holds, za.za_name, tags);
861*a7a845e4SSteven Hartland 			fnvlist_free(tags);
862*a7a845e4SSteven Hartland 		} else {
863*a7a845e4SSteven Hartland 			fnvlist_add_boolean(tags, htag);
864*a7a845e4SSteven Hartland 		}
865ca45db41SChris Kirby 	}
866*a7a845e4SSteven Hartland 	dsl_dataset_user_release_tmp(dp, holds);
867*a7a845e4SSteven Hartland 	fnvlist_free(holds);
868ca45db41SChris Kirby 	zap_cursor_fini(&zc);
869ca45db41SChris Kirby }
870ca45db41SChris Kirby 
871ca45db41SChris Kirby /*
872ca45db41SChris Kirby  * Create the pool-wide zap object for storing temporary snapshot holds.
873ca45db41SChris Kirby  */
874ca45db41SChris Kirby void
875ca45db41SChris Kirby dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
876ca45db41SChris Kirby {
877ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
878ca45db41SChris Kirby 
879ca45db41SChris Kirby 	ASSERT(dp->dp_tmp_userrefs_obj == 0);
880ca45db41SChris Kirby 	ASSERT(dmu_tx_is_syncing(tx));
881ca45db41SChris Kirby 
882ad135b5dSChristopher Siden 	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
883ad135b5dSChristopher Siden 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
884ca45db41SChris Kirby }
885ca45db41SChris Kirby 
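/*
 * Common helper for dsl_pool_user_hold() and dsl_pool_user_release():
 * add or remove the "<dsobj>-<tag>" entry (whose value is the hold's
 * creation time) in the pool-wide temporary-hold ZAP, creating that
 * ZAP on the first hold if it does not exist yet.
 */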
886ca45db41SChris Kirby static int
887ca45db41SChris Kirby dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
8883b2aab18SMatthew Ahrens     const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
889ca45db41SChris Kirby {
890ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
891ca45db41SChris Kirby 	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
892ca45db41SChris Kirby 	char *name;
893ca45db41SChris Kirby 	int error;
894ca45db41SChris Kirby 
895ca45db41SChris Kirby 	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
896ca45db41SChris Kirby 	ASSERT(dmu_tx_is_syncing(tx));
897ca45db41SChris Kirby 
898ca45db41SChris Kirby 	/*
899ca45db41SChris Kirby 	 * If the pool was created prior to SPA_VERSION_USERREFS, the
900ca45db41SChris Kirby 	 * zap object for temporary holds might not exist yet.
901ca45db41SChris Kirby 	 */
902ca45db41SChris Kirby 	if (zapobj == 0) {
903ca45db41SChris Kirby 		if (holding) {
904ca45db41SChris Kirby 			dsl_pool_user_hold_create_obj(dp, tx);
905ca45db41SChris Kirby 			zapobj = dp->dp_tmp_userrefs_obj;
906ca45db41SChris Kirby 		} else {
907be6fd75aSMatthew Ahrens 			return (SET_ERROR(ENOENT));
908ca45db41SChris Kirby 		}
909ca45db41SChris Kirby 	}
910ca45db41SChris Kirby 
911ca45db41SChris Kirby 	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
912ca45db41SChris Kirby 	if (holding)
9133b2aab18SMatthew Ahrens 		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
914ca45db41SChris Kirby 	else
915ca45db41SChris Kirby 		error = zap_remove(mos, zapobj, name, tx);
916ca45db41SChris Kirby 	strfree(name);
917ca45db41SChris Kirby 
918ca45db41SChris Kirby 	return (error);
919ca45db41SChris Kirby }
920ca45db41SChris Kirby 
921ca45db41SChris Kirby /*
922ca45db41SChris Kirby  * Add a temporary hold for the given dataset object and tag.
923ca45db41SChris Kirby  */
924ca45db41SChris Kirby int
925ca45db41SChris Kirby dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
9263b2aab18SMatthew Ahrens     uint64_t now, dmu_tx_t *tx)
927ca45db41SChris Kirby {
92815508ac0SChris Kirby 	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
929ca45db41SChris Kirby }
930ca45db41SChris Kirby 
931ca45db41SChris Kirby /*
932ca45db41SChris Kirby  * Release a temporary hold for the given dataset object and tag.
933ca45db41SChris Kirby  */
934ca45db41SChris Kirby int
935ca45db41SChris Kirby dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
936ca45db41SChris Kirby     dmu_tx_t *tx)
937ca45db41SChris Kirby {
938ca45db41SChris Kirby 	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, NULL,
939ca45db41SChris Kirby 	    tx, B_FALSE));
940ca45db41SChris Kirby }
9413b2aab18SMatthew Ahrens 
9423b2aab18SMatthew Ahrens /*
9433b2aab18SMatthew Ahrens  * DSL Pool Configuration Lock
9443b2aab18SMatthew Ahrens  *
9453b2aab18SMatthew Ahrens  * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
9463b2aab18SMatthew Ahrens  * creation / destruction / rename / property setting).  It must be held for
9473b2aab18SMatthew Ahrens  * read to hold a dataset or dsl_dir.  I.e. you must call
9483b2aab18SMatthew Ahrens  * dsl_pool_config_enter() or dsl_pool_hold() before calling
9493b2aab18SMatthew Ahrens  * dsl_{dataset,dir}_hold{_obj}.  In most circumstances, the dp_config_rwlock
9503b2aab18SMatthew Ahrens  * must be held continuously until all datasets and dsl_dirs are released.
9513b2aab18SMatthew Ahrens  *
9523b2aab18SMatthew Ahrens  * The only exception to this rule is that if a "long hold" is placed on
9533b2aab18SMatthew Ahrens  * a dataset, then the dp_config_rwlock may be dropped while the dataset
9543b2aab18SMatthew Ahrens  * is still held.  The long hold will prevent the dataset from being
9553b2aab18SMatthew Ahrens  * destroyed -- the destroy will fail with EBUSY.  A long hold can be
9563b2aab18SMatthew Ahrens  * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
9573b2aab18SMatthew Ahrens  * (by calling dsl_{dataset,objset}_{try}own{_obj}).
9583b2aab18SMatthew Ahrens  *
9593b2aab18SMatthew Ahrens  * Legitimate long-holders (including owners) should be long-running, cancelable
9603b2aab18SMatthew Ahrens  * tasks that should cause "zfs destroy" to fail.  This includes DMU
9613b2aab18SMatthew Ahrens  * consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
9623b2aab18SMatthew Ahrens  * "zfs send", and "zfs diff".  There are several other long-holders whose
9633b2aab18SMatthew Ahrens  * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
9643b2aab18SMatthew Ahrens  *
9653b2aab18SMatthew Ahrens  * The usual formula for long-holding would be:
9663b2aab18SMatthew Ahrens  * dsl_pool_hold()
9673b2aab18SMatthew Ahrens  * dsl_dataset_hold()
9683b2aab18SMatthew Ahrens  * ... perform checks ...
9693b2aab18SMatthew Ahrens  * dsl_dataset_long_hold()
9703b2aab18SMatthew Ahrens  * dsl_pool_rele()
9713b2aab18SMatthew Ahrens  * ... perform long-running task ...
9723b2aab18SMatthew Ahrens  * dsl_dataset_long_rele()
9733b2aab18SMatthew Ahrens  * dsl_dataset_rele()
9743b2aab18SMatthew Ahrens  *
9753b2aab18SMatthew Ahrens  * Note that when the long hold is released, the dataset is still held but
9763b2aab18SMatthew Ahrens  * the pool is not held.  The dataset may change arbitrarily during this time
9773b2aab18SMatthew Ahrens  * (e.g. it could be destroyed).  Therefore you shouldn't do anything to the
9783b2aab18SMatthew Ahrens  * dataset except release it.
9793b2aab18SMatthew Ahrens  *
9803b2aab18SMatthew Ahrens  * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
9813b2aab18SMatthew Ahrens  * or modifying operations.
9823b2aab18SMatthew Ahrens  *
9833b2aab18SMatthew Ahrens  * Modifying operations should generally use dsl_sync_task().  The synctask
9843b2aab18SMatthew Ahrens  * infrastructure enforces proper locking strategy with respect to the
9853b2aab18SMatthew Ahrens  * dp_config_rwlock.  See the comment above dsl_sync_task() for details.
9863b2aab18SMatthew Ahrens  *
9873b2aab18SMatthew Ahrens  * Read-only operations will manually hold the pool, then the dataset, obtain
9883b2aab18SMatthew Ahrens  * information from the dataset, then release the pool and dataset.
9893b2aab18SMatthew Ahrens  * dmu_objset_{hold,rele}() are convenience routines that also do the pool
9903b2aab18SMatthew Ahrens  * hold/rele.
9913b2aab18SMatthew Ahrens  */
9923b2aab18SMatthew Ahrens 
9933b2aab18SMatthew Ahrens int
9943b2aab18SMatthew Ahrens dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
9953b2aab18SMatthew Ahrens {
9963b2aab18SMatthew Ahrens 	spa_t *spa;
9973b2aab18SMatthew Ahrens 	int error;
9983b2aab18SMatthew Ahrens 
9993b2aab18SMatthew Ahrens 	error = spa_open(name, &spa, tag);
10003b2aab18SMatthew Ahrens 	if (error == 0) {
10013b2aab18SMatthew Ahrens 		*dp = spa_get_dsl(spa);
10023b2aab18SMatthew Ahrens 		dsl_pool_config_enter(*dp, tag);
10033b2aab18SMatthew Ahrens 	}
10043b2aab18SMatthew Ahrens 	return (error);
10053b2aab18SMatthew Ahrens }
10063b2aab18SMatthew Ahrens 
10073b2aab18SMatthew Ahrens void
10083b2aab18SMatthew Ahrens dsl_pool_rele(dsl_pool_t *dp, void *tag)
10093b2aab18SMatthew Ahrens {
10103b2aab18SMatthew Ahrens 	dsl_pool_config_exit(dp, tag);
10113b2aab18SMatthew Ahrens 	spa_close(dp->dp_spa, tag);
10123b2aab18SMatthew Ahrens }
10133b2aab18SMatthew Ahrens 
10143b2aab18SMatthew Ahrens void
10153b2aab18SMatthew Ahrens dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
10163b2aab18SMatthew Ahrens {
10173b2aab18SMatthew Ahrens 	/*
10183b2aab18SMatthew Ahrens 	 * We use a "reentrant" reader-writer lock, but not reentrantly.
10193b2aab18SMatthew Ahrens 	 *
10203b2aab18SMatthew Ahrens 	 * The rrwlock can (with the track_all flag) track all reading threads,
10213b2aab18SMatthew Ahrens 	 * which is very useful for debugging which code path failed to release
10223b2aab18SMatthew Ahrens 	 * the lock, and for verifying that the *current* thread does hold
10233b2aab18SMatthew Ahrens 	 * the lock.
10243b2aab18SMatthew Ahrens 	 *
10253b2aab18SMatthew Ahrens 	 * (Unlike a rwlock, which knows that N threads hold it for
10263b2aab18SMatthew Ahrens 	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
10273b2aab18SMatthew Ahrens 	 * if any thread holds it for read, even if this thread doesn't).
10283b2aab18SMatthew Ahrens 	 */
10293b2aab18SMatthew Ahrens 	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
10303b2aab18SMatthew Ahrens 	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
10313b2aab18SMatthew Ahrens }
10323b2aab18SMatthew Ahrens 
10333b2aab18SMatthew Ahrens void
10343b2aab18SMatthew Ahrens dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
10353b2aab18SMatthew Ahrens {
10363b2aab18SMatthew Ahrens 	rrw_exit(&dp->dp_config_rwlock, tag);
10373b2aab18SMatthew Ahrens }
10383b2aab18SMatthew Ahrens 
10393b2aab18SMatthew Ahrens boolean_t
10403b2aab18SMatthew Ahrens dsl_pool_config_held(dsl_pool_t *dp)
10413b2aab18SMatthew Ahrens {
10423b2aab18SMatthew Ahrens 	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
10433b2aab18SMatthew Ahrens }
1044