xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_pool.c (revision 8671400134a11c848244896ca51a7db4d0f69da4)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
223f9d6ad7SLin Ling  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
2394c2d0ebSMatthew Ahrens  * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
24a7a845e4SSteven Hartland  * Copyright (c) 2013 Steven Hartland. All rights reserved.
25bc9014e6SJustin Gibbs  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26c3d26abcSMatthew Ahrens  * Copyright (c) 2014 Integros [integros.com]
27ff5177eeSAlek Pinchuk  * Copyright 2016 Nexenta Systems, Inc.  All rights reserved.
28fa9e4066Sahrens  */
29fa9e4066Sahrens 
30fa9e4066Sahrens #include <sys/dsl_pool.h>
31fa9e4066Sahrens #include <sys/dsl_dataset.h>
323f9d6ad7SLin Ling #include <sys/dsl_prop.h>
33fa9e4066Sahrens #include <sys/dsl_dir.h>
341d452cf5Sahrens #include <sys/dsl_synctask.h>
353f9d6ad7SLin Ling #include <sys/dsl_scan.h>
363f9d6ad7SLin Ling #include <sys/dnode.h>
37fa9e4066Sahrens #include <sys/dmu_tx.h>
38fa9e4066Sahrens #include <sys/dmu_objset.h>
39fa9e4066Sahrens #include <sys/arc.h>
40fa9e4066Sahrens #include <sys/zap.h>
41c717a561Smaybee #include <sys/zio.h>
42fa9e4066Sahrens #include <sys/zfs_context.h>
43fa9e4066Sahrens #include <sys/fs/zfs.h>
44088f3894Sahrens #include <sys/zfs_znode.h>
45088f3894Sahrens #include <sys/spa_impl.h>
46cde58dbcSMatthew Ahrens #include <sys/dsl_deadlist.h>
47*86714001SSerapheim Dimitropoulos #include <sys/vdev_impl.h>
48*86714001SSerapheim Dimitropoulos #include <sys/metaslab_impl.h>
49ad135b5dSChristopher Siden #include <sys/bptree.h>
50ad135b5dSChristopher Siden #include <sys/zfeature.h>
51ce636f8bSMatthew Ahrens #include <sys/zil_impl.h>
523b2aab18SMatthew Ahrens #include <sys/dsl_userhold.h>
53fa9e4066Sahrens 
5469962b56SMatthew Ahrens /*
5569962b56SMatthew Ahrens  * ZFS Write Throttle
5669962b56SMatthew Ahrens  * ------------------
5769962b56SMatthew Ahrens  *
5869962b56SMatthew Ahrens  * ZFS must limit the rate of incoming writes to the rate at which it is able
5969962b56SMatthew Ahrens  * to sync data modifications to the backend storage. Throttling by too much
6069962b56SMatthew Ahrens  * creates an artificial limit; throttling by too little can only be sustained
6169962b56SMatthew Ahrens  * for short periods and would lead to highly lumpy performance. On a per-pool
6269962b56SMatthew Ahrens  * basis, ZFS tracks the amount of modified (dirty) data. As operations change
6369962b56SMatthew Ahrens  * data, the amount of dirty data increases; as ZFS syncs out data, the amount
6469962b56SMatthew Ahrens  * of dirty data decreases. When the amount of dirty data exceeds a
6569962b56SMatthew Ahrens  * predetermined threshold, further modifications are blocked until the amount
6669962b56SMatthew Ahrens  * of dirty data decreases (as data is synced out).
6769962b56SMatthew Ahrens  *
6869962b56SMatthew Ahrens  * The limit on dirty data is tunable, and should be adjusted according to
6969962b56SMatthew Ahrens  * both the IO capacity and available memory of the system. The larger the
7069962b56SMatthew Ahrens  * window, the more ZFS is able to aggregate and amortize metadata (and data)
7169962b56SMatthew Ahrens  * changes. However, memory is a limited resource, and allowing for more dirty
7269962b56SMatthew Ahrens  * data comes at the cost of keeping other useful data in memory (for example
7369962b56SMatthew Ahrens  * ZFS data cached by the ARC).
7469962b56SMatthew Ahrens  *
7569962b56SMatthew Ahrens  * Implementation
7669962b56SMatthew Ahrens  *
7769962b56SMatthew Ahrens  * As buffers are modified, dsl_pool_dirty_space() increments both the per-
7869962b56SMatthew Ahrens  * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
7969962b56SMatthew Ahrens  * dirty space used; dsl_pool_undirty_space() decrements those values as data
8069962b56SMatthew Ahrens  * is synced out from dsl_pool_sync(). While only the poolwide value is
8169962b56SMatthew Ahrens  * relevant, the per-txg value is useful for debugging. The tunable
8269962b56SMatthew Ahrens  * zfs_dirty_data_max determines the dirty space limit. Once that value is
8369962b56SMatthew Ahrens  * exceeded, new writes are halted until space frees up.
8469962b56SMatthew Ahrens  *
8569962b56SMatthew Ahrens  * The zfs_dirty_data_sync tunable dictates the threshold at which we
8669962b56SMatthew Ahrens  * ensure that there is a txg syncing (see the comment in txg.c for a full
8769962b56SMatthew Ahrens  * description of transaction group stages).
8869962b56SMatthew Ahrens  *
8969962b56SMatthew Ahrens  * The IO scheduler uses both the dirty space limit and current amount of
9069962b56SMatthew Ahrens  * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
9169962b56SMatthew Ahrens  * issues. See the comment in vdev_queue.c for details of the IO scheduler.
9269962b56SMatthew Ahrens  *
9369962b56SMatthew Ahrens  * The delay is also calculated based on the amount of dirty data.  See the
9469962b56SMatthew Ahrens  * comment above dmu_tx_delay() for details.
9569962b56SMatthew Ahrens  */
9669962b56SMatthew Ahrens 
9769962b56SMatthew Ahrens /*
9869962b56SMatthew Ahrens  * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
9969962b56SMatthew Ahrens  * capped at zfs_dirty_data_max_max.  It can also be overridden in /etc/system.
10069962b56SMatthew Ahrens  */
10169962b56SMatthew Ahrens uint64_t zfs_dirty_data_max;
10269962b56SMatthew Ahrens uint64_t zfs_dirty_data_max_max = 4ULL * 1024 * 1024 * 1024;
10369962b56SMatthew Ahrens int zfs_dirty_data_max_percent = 10;
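
/*
 * Illustrative sketch, not part of the original file: how the default for
 * zfs_dirty_data_max can be derived from the two tunables above.  The real
 * initialization happens elsewhere at module load time; this assumes the
 * usual kernel globals and macros (physmem, ptob(), MIN()) are visible here.
 */
static inline uint64_t
zfs_dirty_data_max_default(void)
{
	/* zfs_dirty_data_max_percent% of physical memory ... */
	uint64_t limit = ptob((uint64_t)physmem) *
	    zfs_dirty_data_max_percent / 100;

	/* ... capped at zfs_dirty_data_max_max (4GB by default). */
	return (MIN(limit, zfs_dirty_data_max_max));
}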
10469962b56SMatthew Ahrens 
10569962b56SMatthew Ahrens /*
10669962b56SMatthew Ahrens  * If there is at least this much dirty data, push out a txg.
10769962b56SMatthew Ahrens  */
10869962b56SMatthew Ahrens uint64_t zfs_dirty_data_sync = 64 * 1024 * 1024;
10969962b56SMatthew Ahrens 
11069962b56SMatthew Ahrens /*
11169962b56SMatthew Ahrens  * Once there is this amount of dirty data, dmu_tx_delay() will kick in
11269962b56SMatthew Ahrens  * and delay each transaction.
11369962b56SMatthew Ahrens  * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
11469962b56SMatthew Ahrens  */
11569962b56SMatthew Ahrens int zfs_delay_min_dirty_percent = 60;
11669962b56SMatthew Ahrens 
11769962b56SMatthew Ahrens /*
11869962b56SMatthew Ahrens  * This controls how quickly the delay approaches infinity.
119d85a1e96SMatthew Ahrens  * Larger values cause it to delay more for a given amount of dirty data.
120d85a1e96SMatthew Ahrens  * Therefore, larger values will cause there to be less dirty data for a
12169962b56SMatthew Ahrens  * given throughput.
12269962b56SMatthew Ahrens  *
12369962b56SMatthew Ahrens  * For the smoothest delay, this value should be about 1 billion divided
12469962b56SMatthew Ahrens  * by the maximum number of operations per second.  This will smoothly
12569962b56SMatthew Ahrens  * handle between 10x and 1/10th this number.
12669962b56SMatthew Ahrens  *
12769962b56SMatthew Ahrens  * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
12869962b56SMatthew Ahrens  * multiply in dmu_tx_delay().
12969962b56SMatthew Ahrens  */
13069962b56SMatthew Ahrens uint64_t zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
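
/*
 * Illustrative sketch of the delay curve that dmu_tx_delay() applies,
 * paraphrased from the description above.  The authoritative version lives
 * in dmu_tx.c; this simplified restatement returns the per-operation delay
 * in nanoseconds for a given amount of dirty data.
 */
static inline uint64_t
zfs_tx_delay_sketch(uint64_t dirty)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;

	/* Callers throttle before dirty reaches the max (see dmu_tx_wait()). */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	/* No delay until dirty data crosses zfs_delay_min_dirty_percent. */
	if (dirty <= delay_min_bytes)
		return (0);

	/*
	 * The delay grows toward infinity as dirty data approaches
	 * zfs_dirty_data_max; zfs_delay_scale sets the steepness.  This is
	 * also why zfs_delay_scale * zfs_dirty_data_max must stay below
	 * 2^64: the multiply below must not overflow.
	 */
	return (zfs_delay_scale * (dirty - delay_min_bytes) /
	    (zfs_dirty_data_max - dirty));
}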
13105715f94SMark Maybee 
13294c2d0ebSMatthew Ahrens /*
13394c2d0ebSMatthew Ahrens  * This determines the number of threads used by the dp_sync_taskq.
13494c2d0ebSMatthew Ahrens  */
13594c2d0ebSMatthew Ahrens int zfs_sync_taskq_batch_pct = 75;
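
/*
 * Note (an assumption about the taskq implementation, not taken from this
 * file): with TASKQ_THREADS_CPU_PCT, taskq_create() interprets the value
 * above as a percentage of the online CPUs rather than as an absolute
 * thread count, roughly:
 *
 *	nthreads = MAX((ncpus * zfs_sync_taskq_batch_pct) / 100, 1);
 */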
1361ab7f2deSmaybee 
137216d7723SPrakash Surya /*
138216d7723SPrakash Surya  * These tunables determine how zil_itxg_clean() is
139216d7723SPrakash Surya  * called via zil_clean() in the context of spa_sync(). When an itxg
140216d7723SPrakash Surya  * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
141216d7723SPrakash Surya  * If the dispatch fails, the call to zil_itxg_clean() will occur
142216d7723SPrakash Surya  * synchronously in the context of spa_sync(), which can negatively
143216d7723SPrakash Surya  * impact the performance of spa_sync() (e.g. in the case of the itxg
144216d7723SPrakash Surya  * list having a large number of itxs that need to be cleaned).
145216d7723SPrakash Surya  *
146216d7723SPrakash Surya  * Thus, these tunables can be used to manipulate the behavior of the
147216d7723SPrakash Surya  * taskq used by zil_clean(); they determine the number of taskq entries
148216d7723SPrakash Surya  * that are pre-populated when the taskq is first created (via the
149216d7723SPrakash Surya  * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
150216d7723SPrakash Surya  * taskq entries that are cached after an on-demand allocation (via the
151216d7723SPrakash Surya  * "zfs_zil_clean_taskq_maxalloc").
152216d7723SPrakash Surya  * "zfs_zil_clean_taskq_maxalloc" tunable).
153216d7723SPrakash Surya  * The idea is that we want to try reasonably hard to ensure there will
154216d7723SPrakash Surya  * already be a taskq entry pre-allocated by the time that it is needed
155216d7723SPrakash Surya  * by zil_clean(). This way, we can avoid the possibility of an
156216d7723SPrakash Surya  * on-demand allocation of a new taskq entry from failing, which would
157216d7723SPrakash Surya  * result in zil_itxg_clean() being called synchronously from zil_clean()
158216d7723SPrakash Surya  * (which can adversely affect performance of spa_sync()).
159216d7723SPrakash Surya  *
160216d7723SPrakash Surya  * Additionally, the number of threads used by the taskq can be
161216d7723SPrakash Surya  * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
162216d7723SPrakash Surya  */
163216d7723SPrakash Surya int zfs_zil_clean_taskq_nthr_pct = 100;
164216d7723SPrakash Surya int zfs_zil_clean_taskq_minalloc = 1024;
165216d7723SPrakash Surya int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
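
/*
 * Illustrative sketch of the dispatch path described above, paraphrased
 * from zil_clean() (the real code lives in zil.c; "clean_me" stands in for
 * the itxg list being handed off):
 *
 *	if (taskq_dispatch(dp->dp_zil_clean_taskq,
 *	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL)
 *		zil_itxg_clean(clean_me);	<-- synchronous fallback
 *
 * The synchronous fallback runs in spa_sync() context, which is exactly
 * what the pre-populated taskq entries are meant to avoid.
 */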
166216d7723SPrakash Surya 
1673f9d6ad7SLin Ling int
168088f3894Sahrens dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
169fa9e4066Sahrens {
170fa9e4066Sahrens 	uint64_t obj;
171fa9e4066Sahrens 	int err;
172fa9e4066Sahrens 
173fa9e4066Sahrens 	err = zap_lookup(dp->dp_meta_objset,
174c1379625SJustin T. Gibbs 	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
175088f3894Sahrens 	    name, sizeof (obj), 1, &obj);
176ea8dc4b6Seschrock 	if (err)
177ea8dc4b6Seschrock 		return (err);
178fa9e4066Sahrens 
1793b2aab18SMatthew Ahrens 	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
180fa9e4066Sahrens }
181fa9e4066Sahrens 
182fa9e4066Sahrens static dsl_pool_t *
183fa9e4066Sahrens dsl_pool_open_impl(spa_t *spa, uint64_t txg)
184fa9e4066Sahrens {
185fa9e4066Sahrens 	dsl_pool_t *dp;
186fa9e4066Sahrens 	blkptr_t *bp = spa_get_rootblkptr(spa);
187fa9e4066Sahrens 
188fa9e4066Sahrens 	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
189fa9e4066Sahrens 	dp->dp_spa = spa;
190fa9e4066Sahrens 	dp->dp_meta_rootbp = *bp;
1913b2aab18SMatthew Ahrens 	rrw_init(&dp->dp_config_rwlock, B_TRUE);
192fa9e4066Sahrens 	txg_init(dp, txg);
193fa9e4066Sahrens 
194b7b2590dSMatthew Ahrens 	txg_list_create(&dp->dp_dirty_datasets, spa,
195fa9e4066Sahrens 	    offsetof(dsl_dataset_t, ds_dirty_link));
196b7b2590dSMatthew Ahrens 	txg_list_create(&dp->dp_dirty_zilogs, spa,
197ce636f8bSMatthew Ahrens 	    offsetof(zilog_t, zl_dirty_link));
198b7b2590dSMatthew Ahrens 	txg_list_create(&dp->dp_dirty_dirs, spa,
199fa9e4066Sahrens 	    offsetof(dsl_dir_t, dd_dirty_link));
200b7b2590dSMatthew Ahrens 	txg_list_create(&dp->dp_sync_tasks, spa,
2013b2aab18SMatthew Ahrens 	    offsetof(dsl_sync_task_t, dst_node));
202*86714001SSerapheim Dimitropoulos 	txg_list_create(&dp->dp_early_sync_tasks, spa,
203*86714001SSerapheim Dimitropoulos 	    offsetof(dsl_sync_task_t, dst_node));
204fa9e4066Sahrens 
20594c2d0ebSMatthew Ahrens 	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
20694c2d0ebSMatthew Ahrens 	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
20794c2d0ebSMatthew Ahrens 	    TASKQ_THREADS_CPU_PCT);
20894c2d0ebSMatthew Ahrens 
209216d7723SPrakash Surya 	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
210216d7723SPrakash Surya 	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
211216d7723SPrakash Surya 	    zfs_zil_clean_taskq_minalloc,
212216d7723SPrakash Surya 	    zfs_zil_clean_taskq_maxalloc,
213216d7723SPrakash Surya 	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
214216d7723SPrakash Surya 
2151ab7f2deSmaybee 	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
21669962b56SMatthew Ahrens 	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
2171ab7f2deSmaybee 
2189d3574bfSNeil Perrin 	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
2199d3574bfSNeil Perrin 	    1, 4, 0);
2209d3574bfSNeil Perrin 
221fa9e4066Sahrens 	return (dp);
222fa9e4066Sahrens }
223fa9e4066Sahrens 
224ea8dc4b6Seschrock int
225ad135b5dSChristopher Siden dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
226fa9e4066Sahrens {
227fa9e4066Sahrens 	int err;
228fa9e4066Sahrens 	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
229ad135b5dSChristopher Siden 
230ad135b5dSChristopher Siden 	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
231ad135b5dSChristopher Siden 	    &dp->dp_meta_objset);
232ad135b5dSChristopher Siden 	if (err != 0)
233ad135b5dSChristopher Siden 		dsl_pool_close(dp);
234ad135b5dSChristopher Siden 	else
235ad135b5dSChristopher Siden 		*dpp = dp;
236ad135b5dSChristopher Siden 
237ad135b5dSChristopher Siden 	return (err);
238ad135b5dSChristopher Siden }
239ad135b5dSChristopher Siden 
240ad135b5dSChristopher Siden int
241ad135b5dSChristopher Siden dsl_pool_open(dsl_pool_t *dp)
242ad135b5dSChristopher Siden {
243ad135b5dSChristopher Siden 	int err;
244088f3894Sahrens 	dsl_dir_t *dd;
245088f3894Sahrens 	dsl_dataset_t *ds;
246cde58dbcSMatthew Ahrens 	uint64_t obj;
247fa9e4066Sahrens 
2483b2aab18SMatthew Ahrens 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
249fa9e4066Sahrens 	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
250fa9e4066Sahrens 	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
251fa9e4066Sahrens 	    &dp->dp_root_dir_obj);
252ea8dc4b6Seschrock 	if (err)
253ea8dc4b6Seschrock 		goto out;
254ea8dc4b6Seschrock 
2553b2aab18SMatthew Ahrens 	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
256ea8dc4b6Seschrock 	    NULL, dp, &dp->dp_root_dir);
257ea8dc4b6Seschrock 	if (err)
258ea8dc4b6Seschrock 		goto out;
259fa9e4066Sahrens 
260088f3894Sahrens 	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
261ea8dc4b6Seschrock 	if (err)
262ea8dc4b6Seschrock 		goto out;
263ea8dc4b6Seschrock 
264ad135b5dSChristopher Siden 	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
265088f3894Sahrens 		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
266088f3894Sahrens 		if (err)
267088f3894Sahrens 			goto out;
268c1379625SJustin T. Gibbs 		err = dsl_dataset_hold_obj(dp,
269c1379625SJustin T. Gibbs 		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
2708f63aa46SLin Ling 		if (err == 0) {
2718f63aa46SLin Ling 			err = dsl_dataset_hold_obj(dp,
272c1379625SJustin T. Gibbs 			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
2738f63aa46SLin Ling 			    &dp->dp_origin_snap);
2748f63aa46SLin Ling 			dsl_dataset_rele(ds, FTAG);
2758f63aa46SLin Ling 		}
2763b2aab18SMatthew Ahrens 		dsl_dir_rele(dd, dp);
277088f3894Sahrens 		if (err)
278088f3894Sahrens 			goto out;
279088f3894Sahrens 	}
280088f3894Sahrens 
281ad135b5dSChristopher Siden 	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
282cde58dbcSMatthew Ahrens 		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
283cde58dbcSMatthew Ahrens 		    &dp->dp_free_dir);
284cde58dbcSMatthew Ahrens 		if (err)
285cde58dbcSMatthew Ahrens 			goto out;
286cde58dbcSMatthew Ahrens 
287cde58dbcSMatthew Ahrens 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
288cde58dbcSMatthew Ahrens 		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
289cde58dbcSMatthew Ahrens 		if (err)
290cde58dbcSMatthew Ahrens 			goto out;
2913b2aab18SMatthew Ahrens 		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
292cde58dbcSMatthew Ahrens 		    dp->dp_meta_objset, obj));
293cde58dbcSMatthew Ahrens 	}
294cde58dbcSMatthew Ahrens 
2955cabbc6bSPrashanth Sreenivasa 	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
2965cabbc6bSPrashanth Sreenivasa 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2975cabbc6bSPrashanth Sreenivasa 		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
2985cabbc6bSPrashanth Sreenivasa 		if (err == 0) {
2995cabbc6bSPrashanth Sreenivasa 			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
3005cabbc6bSPrashanth Sreenivasa 			    dp->dp_meta_objset, obj));
3015cabbc6bSPrashanth Sreenivasa 		} else if (err == ENOENT) {
3025cabbc6bSPrashanth Sreenivasa 			/*
3035cabbc6bSPrashanth Sreenivasa 			 * We might not have created the remap bpobj yet.
3045cabbc6bSPrashanth Sreenivasa 			 */
3055cabbc6bSPrashanth Sreenivasa 			err = 0;
3065cabbc6bSPrashanth Sreenivasa 		} else {
3075cabbc6bSPrashanth Sreenivasa 			goto out;
3085cabbc6bSPrashanth Sreenivasa 		}
3095cabbc6bSPrashanth Sreenivasa 	}
3105cabbc6bSPrashanth Sreenivasa 
3117fd05ac4SMatthew Ahrens 	/*
3125cabbc6bSPrashanth Sreenivasa 	 * Note: errors ignored, because these special dirs, used for
3135cabbc6bSPrashanth Sreenivasa 	 * space accounting, are only created on demand.
3147fd05ac4SMatthew Ahrens 	 */
3157fd05ac4SMatthew Ahrens 	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
3167fd05ac4SMatthew Ahrens 	    &dp->dp_leak_dir);
3177fd05ac4SMatthew Ahrens 
3182acef22dSMatthew Ahrens 	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
319ad135b5dSChristopher Siden 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
320ad135b5dSChristopher Siden 		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
321ad135b5dSChristopher Siden 		    &dp->dp_bptree_obj);
322ad135b5dSChristopher Siden 		if (err != 0)
323ad135b5dSChristopher Siden 			goto out;
324ad135b5dSChristopher Siden 	}
325ad135b5dSChristopher Siden 
3262acef22dSMatthew Ahrens 	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
327f1745736SMatthew Ahrens 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
328f1745736SMatthew Ahrens 		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
329f1745736SMatthew Ahrens 		    &dp->dp_empty_bpobj);
330f1745736SMatthew Ahrens 		if (err != 0)
331f1745736SMatthew Ahrens 			goto out;
332f1745736SMatthew Ahrens 	}
333f1745736SMatthew Ahrens 
334ca45db41SChris Kirby 	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
335ca45db41SChris Kirby 	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
336ca45db41SChris Kirby 	    &dp->dp_tmp_userrefs_obj);
337ca45db41SChris Kirby 	if (err == ENOENT)
338ca45db41SChris Kirby 		err = 0;
339ca45db41SChris Kirby 	if (err)
340ca45db41SChris Kirby 		goto out;
341ca45db41SChris Kirby 
342ad135b5dSChristopher Siden 	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);
343088f3894Sahrens 
344ea8dc4b6Seschrock out:
3453b2aab18SMatthew Ahrens 	rrw_exit(&dp->dp_config_rwlock, FTAG);
346ea8dc4b6Seschrock 	return (err);
347fa9e4066Sahrens }
348fa9e4066Sahrens 
349fa9e4066Sahrens void
350fa9e4066Sahrens dsl_pool_close(dsl_pool_t *dp)
351fa9e4066Sahrens {
352088f3894Sahrens 	/*
35369962b56SMatthew Ahrens 	 * Drop our references from dsl_pool_open().
35469962b56SMatthew Ahrens 	 *
355088f3894Sahrens 	 * Since we held the origin_snap from "syncing" context (which
356088f3894Sahrens 	 * includes pool-opening context), it actually only got a "ref"
357088f3894Sahrens 	 * and not a hold, so just drop that here.
358088f3894Sahrens 	 */
3595cabbc6bSPrashanth Sreenivasa 	if (dp->dp_origin_snap != NULL)
3603b2aab18SMatthew Ahrens 		dsl_dataset_rele(dp->dp_origin_snap, dp);
3615cabbc6bSPrashanth Sreenivasa 	if (dp->dp_mos_dir != NULL)
3623b2aab18SMatthew Ahrens 		dsl_dir_rele(dp->dp_mos_dir, dp);
3635cabbc6bSPrashanth Sreenivasa 	if (dp->dp_free_dir != NULL)
3643b2aab18SMatthew Ahrens 		dsl_dir_rele(dp->dp_free_dir, dp);
3655cabbc6bSPrashanth Sreenivasa 	if (dp->dp_leak_dir != NULL)
3667fd05ac4SMatthew Ahrens 		dsl_dir_rele(dp->dp_leak_dir, dp);
3675cabbc6bSPrashanth Sreenivasa 	if (dp->dp_root_dir != NULL)
3683b2aab18SMatthew Ahrens 		dsl_dir_rele(dp->dp_root_dir, dp);
369fa9e4066Sahrens 
370cde58dbcSMatthew Ahrens 	bpobj_close(&dp->dp_free_bpobj);
3715cabbc6bSPrashanth Sreenivasa 	bpobj_close(&dp->dp_obsolete_bpobj);
372cde58dbcSMatthew Ahrens 
373fa9e4066Sahrens 	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
3745cabbc6bSPrashanth Sreenivasa 	if (dp->dp_meta_objset != NULL)
375503ad85cSMatthew Ahrens 		dmu_objset_evict(dp->dp_meta_objset);
376fa9e4066Sahrens 
377fa9e4066Sahrens 	txg_list_destroy(&dp->dp_dirty_datasets);
378ce636f8bSMatthew Ahrens 	txg_list_destroy(&dp->dp_dirty_zilogs);
37954a91118SChris Kirby 	txg_list_destroy(&dp->dp_sync_tasks);
380*86714001SSerapheim Dimitropoulos 	txg_list_destroy(&dp->dp_early_sync_tasks);
381fa9e4066Sahrens 	txg_list_destroy(&dp->dp_dirty_dirs);
382fa9e4066Sahrens 
383216d7723SPrakash Surya 	taskq_destroy(dp->dp_zil_clean_taskq);
38494c2d0ebSMatthew Ahrens 	taskq_destroy(dp->dp_sync_taskq);
38594c2d0ebSMatthew Ahrens 
386244781f1SPrakash Surya 	/*
387244781f1SPrakash Surya 	 * We can't set retry to TRUE since we're explicitly specifying
388244781f1SPrakash Surya 	 * a spa to flush. This is good enough; any missed buffers for
389244781f1SPrakash Surya 	 * this spa won't cause trouble, and they'll eventually fall
390244781f1SPrakash Surya 	 * out of the ARC just like any other unused buffer.
391244781f1SPrakash Surya 	 */
392244781f1SPrakash Surya 	arc_flush(dp->dp_spa, FALSE);
393244781f1SPrakash Surya 
394fa9e4066Sahrens 	txg_fini(dp);
3953f9d6ad7SLin Ling 	dsl_scan_fini(dp);
396bc9014e6SJustin Gibbs 	dmu_buf_user_evict_wait();
397bc9014e6SJustin Gibbs 
3983b2aab18SMatthew Ahrens 	rrw_destroy(&dp->dp_config_rwlock);
3991ab7f2deSmaybee 	mutex_destroy(&dp->dp_lock);
4009d3574bfSNeil Perrin 	taskq_destroy(dp->dp_vnrele_taskq);
4015cabbc6bSPrashanth Sreenivasa 	if (dp->dp_blkstats != NULL)
40288b7b0f2SMatthew Ahrens 		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
403fa9e4066Sahrens 	kmem_free(dp, sizeof (dsl_pool_t));
404fa9e4066Sahrens }
405fa9e4066Sahrens 
4065cabbc6bSPrashanth Sreenivasa void
4075cabbc6bSPrashanth Sreenivasa dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
4085cabbc6bSPrashanth Sreenivasa {
4095cabbc6bSPrashanth Sreenivasa 	uint64_t obj;
4105cabbc6bSPrashanth Sreenivasa 	/*
4115cabbc6bSPrashanth Sreenivasa 	 * Currently, we only create the obsolete_bpobj where there are
4125cabbc6bSPrashanth Sreenivasa 	 * indirect vdevs with referenced mappings.
4135cabbc6bSPrashanth Sreenivasa 	 */
4145cabbc6bSPrashanth Sreenivasa 	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
4155cabbc6bSPrashanth Sreenivasa 	/* create and open the obsolete_bpobj */
4165cabbc6bSPrashanth Sreenivasa 	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
4175cabbc6bSPrashanth Sreenivasa 	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
4185cabbc6bSPrashanth Sreenivasa 	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
4195cabbc6bSPrashanth Sreenivasa 	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
4205cabbc6bSPrashanth Sreenivasa 	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
4215cabbc6bSPrashanth Sreenivasa }
4225cabbc6bSPrashanth Sreenivasa 
4235cabbc6bSPrashanth Sreenivasa void
4245cabbc6bSPrashanth Sreenivasa dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
4255cabbc6bSPrashanth Sreenivasa {
4265cabbc6bSPrashanth Sreenivasa 	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
4275cabbc6bSPrashanth Sreenivasa 	VERIFY0(zap_remove(dp->dp_meta_objset,
4285cabbc6bSPrashanth Sreenivasa 	    DMU_POOL_DIRECTORY_OBJECT,
4295cabbc6bSPrashanth Sreenivasa 	    DMU_POOL_OBSOLETE_BPOBJ, tx));
4305cabbc6bSPrashanth Sreenivasa 	bpobj_free(dp->dp_meta_objset,
4315cabbc6bSPrashanth Sreenivasa 	    dp->dp_obsolete_bpobj.bpo_object, tx);
4325cabbc6bSPrashanth Sreenivasa 	bpobj_close(&dp->dp_obsolete_bpobj);
4335cabbc6bSPrashanth Sreenivasa }
4345cabbc6bSPrashanth Sreenivasa 
435fa9e4066Sahrens dsl_pool_t *
4360a48a24eStimh dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
437fa9e4066Sahrens {
438fa9e4066Sahrens 	int err;
439fa9e4066Sahrens 	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
440fa9e4066Sahrens 	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
441088f3894Sahrens 	dsl_dataset_t *ds;
442cde58dbcSMatthew Ahrens 	uint64_t obj;
443088f3894Sahrens 
4443b2aab18SMatthew Ahrens 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
4453b2aab18SMatthew Ahrens 
446088f3894Sahrens 	/* create and open the MOS (meta-objset) */
447503ad85cSMatthew Ahrens 	dp->dp_meta_objset = dmu_objset_create_impl(spa,
448503ad85cSMatthew Ahrens 	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
449fa9e4066Sahrens 
450fa9e4066Sahrens 	/* create the pool directory */
451fa9e4066Sahrens 	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
452fa9e4066Sahrens 	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
453fb09f5aaSMadhav Suresh 	ASSERT0(err);
454fa9e4066Sahrens 
4553f9d6ad7SLin Ling 	/* Initialize scan structures */
4563b2aab18SMatthew Ahrens 	VERIFY0(dsl_scan_init(dp, txg));
4573f9d6ad7SLin Ling 
458fa9e4066Sahrens 	/* create and open the root dir */
459088f3894Sahrens 	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
4603b2aab18SMatthew Ahrens 	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
461ea8dc4b6Seschrock 	    NULL, dp, &dp->dp_root_dir));
462fa9e4066Sahrens 
463fa9e4066Sahrens 	/* create and open the meta-objset dir */
464088f3894Sahrens 	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
4653b2aab18SMatthew Ahrens 	VERIFY0(dsl_pool_open_special_dir(dp,
466088f3894Sahrens 	    MOS_DIR_NAME, &dp->dp_mos_dir));
467088f3894Sahrens 
468cde58dbcSMatthew Ahrens 	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
469cde58dbcSMatthew Ahrens 		/* create and open the free dir */
470cde58dbcSMatthew Ahrens 		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
471cde58dbcSMatthew Ahrens 		    FREE_DIR_NAME, tx);
4723b2aab18SMatthew Ahrens 		VERIFY0(dsl_pool_open_special_dir(dp,
473cde58dbcSMatthew Ahrens 		    FREE_DIR_NAME, &dp->dp_free_dir));
474cde58dbcSMatthew Ahrens 
475cde58dbcSMatthew Ahrens 		/* create and open the free_bpobj */
476b5152584SMatthew Ahrens 		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
477cde58dbcSMatthew Ahrens 		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
478cde58dbcSMatthew Ahrens 		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
4793b2aab18SMatthew Ahrens 		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
480cde58dbcSMatthew Ahrens 		    dp->dp_meta_objset, obj));
481cde58dbcSMatthew Ahrens 	}
482cde58dbcSMatthew Ahrens 
483088f3894Sahrens 	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
484088f3894Sahrens 		dsl_pool_create_origin(dp, tx);
485088f3894Sahrens 
486088f3894Sahrens 	/* create the root dataset */
487cde58dbcSMatthew Ahrens 	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);
488088f3894Sahrens 
489088f3894Sahrens 	/* create the root objset */
4903b2aab18SMatthew Ahrens 	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
491088f3894Sahrens #ifdef _KERNEL
492b852c2f5SToomas Soome 	{
493b852c2f5SToomas Soome 		objset_t *os;
494b852c2f5SToomas Soome 		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
495b852c2f5SToomas Soome 		os = dmu_objset_create_impl(dp->dp_spa, ds,
496b852c2f5SToomas Soome 		    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
497b852c2f5SToomas Soome 		rrw_exit(&ds->ds_bp_rwlock, FTAG);
498b852c2f5SToomas Soome 		zfs_create_fs(os, kcred, zplprops, tx);
499b852c2f5SToomas Soome 	}
500088f3894Sahrens #endif
501088f3894Sahrens 	dsl_dataset_rele(ds, FTAG);
502fa9e4066Sahrens 
503fa9e4066Sahrens 	dmu_tx_commit(tx);
504fa9e4066Sahrens 
5053b2aab18SMatthew Ahrens 	rrw_exit(&dp->dp_config_rwlock, FTAG);
5063b2aab18SMatthew Ahrens 
507fa9e4066Sahrens 	return (dp);
508fa9e4066Sahrens }
509fa9e4066Sahrens 
510ce636f8bSMatthew Ahrens /*
511ce636f8bSMatthew Ahrens  * Account for the meta-objset space in its placeholder dsl_dir.
512ce636f8bSMatthew Ahrens  */
513ce636f8bSMatthew Ahrens void
514ce636f8bSMatthew Ahrens dsl_pool_mos_diduse_space(dsl_pool_t *dp,
515ce636f8bSMatthew Ahrens     int64_t used, int64_t comp, int64_t uncomp)
516ce636f8bSMatthew Ahrens {
517ce636f8bSMatthew Ahrens 	ASSERT3U(comp, ==, uncomp); /* it's all metadata */
518ce636f8bSMatthew Ahrens 	mutex_enter(&dp->dp_lock);
519ce636f8bSMatthew Ahrens 	dp->dp_mos_used_delta += used;
520ce636f8bSMatthew Ahrens 	dp->dp_mos_compressed_delta += comp;
521ce636f8bSMatthew Ahrens 	dp->dp_mos_uncompressed_delta += uncomp;
522ce636f8bSMatthew Ahrens 	mutex_exit(&dp->dp_lock);
523ce636f8bSMatthew Ahrens }
524ce636f8bSMatthew Ahrens 
52569962b56SMatthew Ahrens static void
52669962b56SMatthew Ahrens dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
52769962b56SMatthew Ahrens {
52869962b56SMatthew Ahrens 	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
52969962b56SMatthew Ahrens 	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
53069962b56SMatthew Ahrens 	VERIFY0(zio_wait(zio));
53169962b56SMatthew Ahrens 	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
53269962b56SMatthew Ahrens 	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
53369962b56SMatthew Ahrens }
53469962b56SMatthew Ahrens 
53569962b56SMatthew Ahrens static void
53669962b56SMatthew Ahrens dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
53769962b56SMatthew Ahrens {
53869962b56SMatthew Ahrens 	ASSERT(MUTEX_HELD(&dp->dp_lock));
53969962b56SMatthew Ahrens 
54069962b56SMatthew Ahrens 	if (delta < 0)
54169962b56SMatthew Ahrens 		ASSERT3U(-delta, <=, dp->dp_dirty_total);
54269962b56SMatthew Ahrens 
54369962b56SMatthew Ahrens 	dp->dp_dirty_total += delta;
54469962b56SMatthew Ahrens 
54569962b56SMatthew Ahrens 	/*
54669962b56SMatthew Ahrens 	 * Note: we signal even when increasing dp_dirty_total.
54769962b56SMatthew Ahrens 	 * This ensures forward progress -- each thread wakes the next waiter.
54869962b56SMatthew Ahrens 	 */
549313ae1e1SAndriy Gapon 	if (dp->dp_dirty_total < zfs_dirty_data_max)
55069962b56SMatthew Ahrens 		cv_signal(&dp->dp_spaceavail_cv);
55169962b56SMatthew Ahrens }
55269962b56SMatthew Ahrens 
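/*
 * Illustrative sketch, not part of the original file: the waiter side of
 * dp_spaceavail_cv.  Throttled writers (see dmu_tx_wait() in dmu_tx.c)
 * block along these lines until dsl_pool_dirty_delta() above signals that
 * dp_dirty_total has dropped below zfs_dirty_data_max.  This is a
 * simplified paraphrase, not the exact dmu_tx.c code.
 */
static inline void
dsl_pool_wait_for_dirty_space(dsl_pool_t *dp)
{
	mutex_enter(&dp->dp_lock);
	while (dp->dp_dirty_total >= zfs_dirty_data_max)
		cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
	mutex_exit(&dp->dp_lock);
}
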
553*86714001SSerapheim Dimitropoulos static boolean_t
554*86714001SSerapheim Dimitropoulos dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
555*86714001SSerapheim Dimitropoulos {
556*86714001SSerapheim Dimitropoulos 	spa_t *spa = dp->dp_spa;
557*86714001SSerapheim Dimitropoulos 	vdev_t *rvd = spa->spa_root_vdev;
558*86714001SSerapheim Dimitropoulos 
559*86714001SSerapheim Dimitropoulos 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
560*86714001SSerapheim Dimitropoulos 		vdev_t *vd = rvd->vdev_child[c];
561*86714001SSerapheim Dimitropoulos 		txg_list_t *tl = &vd->vdev_ms_list;
562*86714001SSerapheim Dimitropoulos 		metaslab_t *ms;
563*86714001SSerapheim Dimitropoulos 
564*86714001SSerapheim Dimitropoulos 		for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
565*86714001SSerapheim Dimitropoulos 		    ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
566*86714001SSerapheim Dimitropoulos 			VERIFY(range_tree_is_empty(ms->ms_freeing));
567*86714001SSerapheim Dimitropoulos 			VERIFY(range_tree_is_empty(ms->ms_checkpointing));
568*86714001SSerapheim Dimitropoulos 		}
569*86714001SSerapheim Dimitropoulos 	}
570*86714001SSerapheim Dimitropoulos 
571*86714001SSerapheim Dimitropoulos 	return (B_TRUE);
572*86714001SSerapheim Dimitropoulos }
573*86714001SSerapheim Dimitropoulos 
574fa9e4066Sahrens void
575fa9e4066Sahrens dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
576fa9e4066Sahrens {
577c717a561Smaybee 	zio_t *zio;
578fa9e4066Sahrens 	dmu_tx_t *tx;
579c717a561Smaybee 	dsl_dir_t *dd;
580c717a561Smaybee 	dsl_dataset_t *ds;
581503ad85cSMatthew Ahrens 	objset_t *mos = dp->dp_meta_objset;
582ce636f8bSMatthew Ahrens 	list_t synced_datasets;
583ce636f8bSMatthew Ahrens 
584ce636f8bSMatthew Ahrens 	list_create(&synced_datasets, sizeof (dsl_dataset_t),
585ce636f8bSMatthew Ahrens 	    offsetof(dsl_dataset_t, ds_synced_link));
586fa9e4066Sahrens 
587fa9e4066Sahrens 	tx = dmu_tx_create_assigned(dp, txg);
588fa9e4066Sahrens 
589*86714001SSerapheim Dimitropoulos 	/*
590*86714001SSerapheim Dimitropoulos 	 * Run all early sync tasks before writing out any dirty blocks.
591*86714001SSerapheim Dimitropoulos 	 * For more info on early sync tasks see block comment in
592*86714001SSerapheim Dimitropoulos 	 * dsl_early_sync_task().
593*86714001SSerapheim Dimitropoulos 	 */
594*86714001SSerapheim Dimitropoulos 	if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
595*86714001SSerapheim Dimitropoulos 		dsl_sync_task_t *dst;
596*86714001SSerapheim Dimitropoulos 
597*86714001SSerapheim Dimitropoulos 		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
598*86714001SSerapheim Dimitropoulos 		while ((dst =
599*86714001SSerapheim Dimitropoulos 		    txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
600*86714001SSerapheim Dimitropoulos 			ASSERT(dsl_early_sync_task_verify(dp, txg));
601*86714001SSerapheim Dimitropoulos 			dsl_sync_task_sync(dst, tx);
602*86714001SSerapheim Dimitropoulos 		}
603*86714001SSerapheim Dimitropoulos 		ASSERT(dsl_early_sync_task_verify(dp, txg));
604*86714001SSerapheim Dimitropoulos 	}
605*86714001SSerapheim Dimitropoulos 
60669962b56SMatthew Ahrens 	/*
60769962b56SMatthew Ahrens 	 * Write out all dirty blocks of dirty datasets.
60869962b56SMatthew Ahrens 	 */
609c717a561Smaybee 	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
61069962b56SMatthew Ahrens 	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
61114843421SMatthew Ahrens 		/*
61214843421SMatthew Ahrens 		 * We must not sync any non-MOS datasets twice, because
61314843421SMatthew Ahrens 		 * we may have taken a snapshot of them.  However, we
61414843421SMatthew Ahrens 		 * may sync newly-created datasets on pass 2.
61514843421SMatthew Ahrens 		 */
61614843421SMatthew Ahrens 		ASSERT(!list_link_active(&ds->ds_synced_link));
617ce636f8bSMatthew Ahrens 		list_insert_tail(&synced_datasets, ds);
618c717a561Smaybee 		dsl_dataset_sync(ds, zio, tx);
619c717a561Smaybee 	}
62069962b56SMatthew Ahrens 	VERIFY0(zio_wait(zio));
62114843421SMatthew Ahrens 
62269962b56SMatthew Ahrens 	/*
62369962b56SMatthew Ahrens 	 * We have written all of the accounted dirty data, so our
62469962b56SMatthew Ahrens 	 * dp_space_towrite should now be zero.  However, some seldom-used
62569962b56SMatthew Ahrens 	 * code paths do not adhere to this (e.g. dbuf_undirty(); there is also
62669962b56SMatthew Ahrens 	 * rounding error in dbuf_write_physdone()).
62769962b56SMatthew Ahrens 	 * Shore up the accounting of any dirtied space now.
62869962b56SMatthew Ahrens 	 */
62969962b56SMatthew Ahrens 	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
630c717a561Smaybee 
631ff5177eeSAlek Pinchuk 	/*
632ff5177eeSAlek Pinchuk 	 * Update the long range free counter after
633ff5177eeSAlek Pinchuk 	 * we're done syncing user data
634ff5177eeSAlek Pinchuk 	 */
635ff5177eeSAlek Pinchuk 	mutex_enter(&dp->dp_lock);
636ff5177eeSAlek Pinchuk 	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
637ff5177eeSAlek Pinchuk 	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
638ff5177eeSAlek Pinchuk 	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
639ff5177eeSAlek Pinchuk 	mutex_exit(&dp->dp_lock);
640ff5177eeSAlek Pinchuk 
641ce636f8bSMatthew Ahrens 	/*
642ce636f8bSMatthew Ahrens 	 * After the data blocks have been written (ensured by the zio_wait()
64394c2d0ebSMatthew Ahrens 	 * above), update the user/group space accounting.  This happens
64494c2d0ebSMatthew Ahrens 	 * in tasks dispatched to dp_sync_taskq, so wait for them before
64594c2d0ebSMatthew Ahrens 	 * continuing.
646ce636f8bSMatthew Ahrens 	 */
64769962b56SMatthew Ahrens 	for (ds = list_head(&synced_datasets); ds != NULL;
64869962b56SMatthew Ahrens 	    ds = list_next(&synced_datasets, ds)) {
6490a586ceaSMark Shellenbaum 		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
65069962b56SMatthew Ahrens 	}
65194c2d0ebSMatthew Ahrens 	taskq_wait(dp->dp_sync_taskq);
65214843421SMatthew Ahrens 
65314843421SMatthew Ahrens 	/*
65414843421SMatthew Ahrens 	 * Sync the datasets again to push out the changes due to
6553f9d6ad7SLin Ling 	 * userspace updates.  This must be done before we process the
656ce636f8bSMatthew Ahrens 	 * sync tasks, so that any snapshots will have the correct
657ce636f8bSMatthew Ahrens 	 * user accounting information (and we won't get confused
658ce636f8bSMatthew Ahrens 	 * about which blocks are part of the snapshot).
65914843421SMatthew Ahrens 	 */
66014843421SMatthew Ahrens 	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
66169962b56SMatthew Ahrens 	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
66214843421SMatthew Ahrens 		ASSERT(list_link_active(&ds->ds_synced_link));
66314843421SMatthew Ahrens 		dmu_buf_rele(ds->ds_dbuf, ds);
66414843421SMatthew Ahrens 		dsl_dataset_sync(ds, zio, tx);
66514843421SMatthew Ahrens 	}
66669962b56SMatthew Ahrens 	VERIFY0(zio_wait(zio));
66714843421SMatthew Ahrens 
668b24ab676SJeff Bonwick 	/*
669ce636f8bSMatthew Ahrens 	 * Now that the datasets have been completely synced, we can
670ce636f8bSMatthew Ahrens 	 * clean up our in-memory structures accumulated while syncing:
671ce636f8bSMatthew Ahrens 	 *
672ce636f8bSMatthew Ahrens 	 *  - move dead blocks from the pending deadlist to the on-disk deadlist
673ce636f8bSMatthew Ahrens 	 *  - release hold from dsl_dataset_dirty()
674b24ab676SJeff Bonwick 	 */
67569962b56SMatthew Ahrens 	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
676bfaed0b9SAndriy Gapon 		dsl_dataset_sync_done(ds, tx);
677cde58dbcSMatthew Ahrens 	}
67869962b56SMatthew Ahrens 	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
679c717a561Smaybee 		dsl_dir_sync(dd, tx);
68069962b56SMatthew Ahrens 	}
681fa9e4066Sahrens 
682ce636f8bSMatthew Ahrens 	/*
683ce636f8bSMatthew Ahrens 	 * The MOS's space is accounted for in the pool/$MOS
684ce636f8bSMatthew Ahrens 	 * (dp_mos_dir).  We can't modify the mos while we're syncing
685ce636f8bSMatthew Ahrens 	 * (dp_mos_dir).  We can't modify the MOS while we're syncing
686ce636f8bSMatthew Ahrens 	 */
687ce636f8bSMatthew Ahrens 	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
688ce636f8bSMatthew Ahrens 	    dp->dp_mos_uncompressed_delta != 0) {
689ce636f8bSMatthew Ahrens 		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
690ce636f8bSMatthew Ahrens 		    dp->dp_mos_used_delta,
691ce636f8bSMatthew Ahrens 		    dp->dp_mos_compressed_delta,
692ce636f8bSMatthew Ahrens 		    dp->dp_mos_uncompressed_delta, tx);
693ce636f8bSMatthew Ahrens 		dp->dp_mos_used_delta = 0;
694ce636f8bSMatthew Ahrens 		dp->dp_mos_compressed_delta = 0;
695ce636f8bSMatthew Ahrens 		dp->dp_mos_uncompressed_delta = 0;
696ce636f8bSMatthew Ahrens 	}
697ce636f8bSMatthew Ahrens 
69894c2d0ebSMatthew Ahrens 	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
69969962b56SMatthew Ahrens 		dsl_pool_sync_mos(dp, tx);
700fa9e4066Sahrens 	}
701fa9e4066Sahrens 
702ce636f8bSMatthew Ahrens 	/*
703ce636f8bSMatthew Ahrens 	 * If we modify a dataset in the same txg that we want to destroy it,
704ce636f8bSMatthew Ahrens 	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
705ce636f8bSMatthew Ahrens 	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
706ce636f8bSMatthew Ahrens 	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
707ce636f8bSMatthew Ahrens 	 * and clearing the hold on it) before we process the sync_tasks.
708ce636f8bSMatthew Ahrens 	 * The MOS data dirtied by the sync_tasks will be synced on the next
709ce636f8bSMatthew Ahrens 	 * pass.
710ce636f8bSMatthew Ahrens 	 */
711ce636f8bSMatthew Ahrens 	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
7123b2aab18SMatthew Ahrens 		dsl_sync_task_t *dst;
713ce636f8bSMatthew Ahrens 		/*
714ce636f8bSMatthew Ahrens 		 * No more sync tasks should have been added while we
715ce636f8bSMatthew Ahrens 		 * were syncing.
716ce636f8bSMatthew Ahrens 		 */
71769962b56SMatthew Ahrens 		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
71869962b56SMatthew Ahrens 		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
7193b2aab18SMatthew Ahrens 			dsl_sync_task_sync(dst, tx);
720ce636f8bSMatthew Ahrens 	}
721ce636f8bSMatthew Ahrens 
722fa9e4066Sahrens 	dmu_tx_commit(tx);
72305715f94SMark Maybee 
72469962b56SMatthew Ahrens 	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
725fa9e4066Sahrens }
726fa9e4066Sahrens 
727fa9e4066Sahrens void
728b24ab676SJeff Bonwick dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
729fa9e4066Sahrens {
730ce636f8bSMatthew Ahrens 	zilog_t *zilog;
731fa9e4066Sahrens 
73243297f97SGeorge Wilson 	while (zilog = txg_list_head(&dp->dp_dirty_zilogs, txg)) {
73369962b56SMatthew Ahrens 		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
73443297f97SGeorge Wilson 		/*
73543297f97SGeorge Wilson 		 * We don't remove the zilog from the dp_dirty_zilogs
73643297f97SGeorge Wilson 		 * list until after we've cleaned it. This ensures that
73743297f97SGeorge Wilson 		 * callers of zilog_is_dirty() receive an accurate
73843297f97SGeorge Wilson 		 * answer when they are racing with the spa sync thread.
73943297f97SGeorge Wilson 		 */
740ce636f8bSMatthew Ahrens 		zil_clean(zilog, txg);
74143297f97SGeorge Wilson 		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
742ce636f8bSMatthew Ahrens 		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
743ce636f8bSMatthew Ahrens 		dmu_buf_rele(ds->ds_dbuf, zilog);
744fa9e4066Sahrens 	}
745b24ab676SJeff Bonwick 	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
746fa9e4066Sahrens }
747fa9e4066Sahrens 
748c717a561Smaybee /*
749c717a561Smaybee  * TRUE if the current thread is the tx_sync_thread, a dp_sync_taskq worker,
750c717a561Smaybee  * or if we are being called from SPA context during pool initialization.
751c717a561Smaybee  */
752fa9e4066Sahrens int
753fa9e4066Sahrens dsl_pool_sync_context(dsl_pool_t *dp)
754fa9e4066Sahrens {
755fa9e4066Sahrens 	return (curthread == dp->dp_tx.tx_sync_thread ||
75694c2d0ebSMatthew Ahrens 	    spa_is_initializing(dp->dp_spa) ||
75794c2d0ebSMatthew Ahrens 	    taskq_member(dp->dp_sync_taskq, curthread));
758fa9e4066Sahrens }
759fa9e4066Sahrens 
760*86714001SSerapheim Dimitropoulos /*
761*86714001SSerapheim Dimitropoulos  * This function returns the amount of allocatable space in the pool
762*86714001SSerapheim Dimitropoulos  * minus whatever space is currently reserved by ZFS for specific
763*86714001SSerapheim Dimitropoulos  * purposes. Specifically:
764*86714001SSerapheim Dimitropoulos  *
765*86714001SSerapheim Dimitropoulos  * 1] Any reserved SLOP space
766*86714001SSerapheim Dimitropoulos  * 2] Any space used by the checkpoint
767*86714001SSerapheim Dimitropoulos  * 3] Any space used for deferred frees
768*86714001SSerapheim Dimitropoulos  *
769*86714001SSerapheim Dimitropoulos  * The latter 2 are especially important because they are needed to
770*86714001SSerapheim Dimitropoulos  * rectify the SPA's and DMU's different understanding of how much space
771*86714001SSerapheim Dimitropoulos  * is used. Now the DMU is aware of that extra space tracked by the SPA
772*86714001SSerapheim Dimitropoulos  * without having to maintain a separate special dir (e.g. similar to
773*86714001SSerapheim Dimitropoulos  * $MOS, $FREEING, and $LEAKED).
774*86714001SSerapheim Dimitropoulos  *
775*86714001SSerapheim Dimitropoulos  * Note: By deferred frees here, we mean the frees that were deferred
776*86714001SSerapheim Dimitropoulos  * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
777*86714001SSerapheim Dimitropoulos  * segments placed in ms_defer trees during metaslab_sync_done().
778*86714001SSerapheim Dimitropoulos  */
779fa9e4066Sahrens uint64_t
780*86714001SSerapheim Dimitropoulos dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
781fa9e4066Sahrens {
782*86714001SSerapheim Dimitropoulos 	spa_t *spa = dp->dp_spa;
783*86714001SSerapheim Dimitropoulos 	uint64_t space, resv, adjustedsize;
784*86714001SSerapheim Dimitropoulos 	uint64_t spa_deferred_frees =
785*86714001SSerapheim Dimitropoulos 	    spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;
786*86714001SSerapheim Dimitropoulos 
787*86714001SSerapheim Dimitropoulos 	space = spa_get_dspace(spa)
788*86714001SSerapheim Dimitropoulos 	    - spa_get_checkpoint_space(spa) - spa_deferred_frees;
789*86714001SSerapheim Dimitropoulos 	resv = spa_get_slop_space(spa);
790*86714001SSerapheim Dimitropoulos 
791*86714001SSerapheim Dimitropoulos 	switch (slop_policy) {
792*86714001SSerapheim Dimitropoulos 	case ZFS_SPACE_CHECK_NORMAL:
793*86714001SSerapheim Dimitropoulos 		break;
794*86714001SSerapheim Dimitropoulos 	case ZFS_SPACE_CHECK_RESERVED:
795fa9e4066Sahrens 		resv >>= 1;
796*86714001SSerapheim Dimitropoulos 		break;
797*86714001SSerapheim Dimitropoulos 	case ZFS_SPACE_CHECK_EXTRA_RESERVED:
798*86714001SSerapheim Dimitropoulos 		resv >>= 2;
799*86714001SSerapheim Dimitropoulos 		break;
800*86714001SSerapheim Dimitropoulos 	case ZFS_SPACE_CHECK_NONE:
801*86714001SSerapheim Dimitropoulos 		resv = 0;
802*86714001SSerapheim Dimitropoulos 		break;
803*86714001SSerapheim Dimitropoulos 	default:
804*86714001SSerapheim Dimitropoulos 		panic("invalid slop policy value: %d", slop_policy);
805*86714001SSerapheim Dimitropoulos 		break;
806*86714001SSerapheim Dimitropoulos 	}
807*86714001SSerapheim Dimitropoulos 	adjustedsize = (space >= resv) ? (space - resv) : 0;
808fa9e4066Sahrens 
809*86714001SSerapheim Dimitropoulos 	return (adjustedsize);
810*86714001SSerapheim Dimitropoulos }
811*86714001SSerapheim Dimitropoulos 
812*86714001SSerapheim Dimitropoulos uint64_t
813*86714001SSerapheim Dimitropoulos dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
814*86714001SSerapheim Dimitropoulos {
815*86714001SSerapheim Dimitropoulos 	uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
816*86714001SSerapheim Dimitropoulos 	uint64_t deferred =
817*86714001SSerapheim Dimitropoulos 	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
818*86714001SSerapheim Dimitropoulos 	uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
819*86714001SSerapheim Dimitropoulos 	return (quota);
820fa9e4066Sahrens }
8211ab7f2deSmaybee 
82269962b56SMatthew Ahrens boolean_t
82369962b56SMatthew Ahrens dsl_pool_need_dirty_delay(dsl_pool_t *dp)
8241ab7f2deSmaybee {
82569962b56SMatthew Ahrens 	uint64_t delay_min_bytes =
82669962b56SMatthew Ahrens 	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
82769962b56SMatthew Ahrens 	boolean_t rv;
8281ab7f2deSmaybee 
82969962b56SMatthew Ahrens 	mutex_enter(&dp->dp_lock);
83069962b56SMatthew Ahrens 	if (dp->dp_dirty_total > zfs_dirty_data_sync)
83169962b56SMatthew Ahrens 		txg_kick(dp);
83269962b56SMatthew Ahrens 	rv = (dp->dp_dirty_total > delay_min_bytes);
83369962b56SMatthew Ahrens 	mutex_exit(&dp->dp_lock);
83469962b56SMatthew Ahrens 	return (rv);
8351ab7f2deSmaybee }
8361ab7f2deSmaybee 
8371ab7f2deSmaybee void
83869962b56SMatthew Ahrens dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
8391ab7f2deSmaybee {
84069962b56SMatthew Ahrens 	if (space > 0) {
84169962b56SMatthew Ahrens 		mutex_enter(&dp->dp_lock);
84269962b56SMatthew Ahrens 		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
84369962b56SMatthew Ahrens 		dsl_pool_dirty_delta(dp, space);
84469962b56SMatthew Ahrens 		mutex_exit(&dp->dp_lock);
8451ab7f2deSmaybee 	}
8461ab7f2deSmaybee }
8471ab7f2deSmaybee 
8481ab7f2deSmaybee void
84969962b56SMatthew Ahrens dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
8501ab7f2deSmaybee {
85169962b56SMatthew Ahrens 	ASSERT3S(space, >=, 0);
85269962b56SMatthew Ahrens 	if (space == 0)
85369962b56SMatthew Ahrens 		return;
85469962b56SMatthew Ahrens 	mutex_enter(&dp->dp_lock);
85569962b56SMatthew Ahrens 	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
85669962b56SMatthew Ahrens 		/* XXX writing something we didn't dirty? */
85769962b56SMatthew Ahrens 		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
8581ab7f2deSmaybee 	}
85969962b56SMatthew Ahrens 	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
86069962b56SMatthew Ahrens 	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
86169962b56SMatthew Ahrens 	ASSERT3U(dp->dp_dirty_total, >=, space);
86269962b56SMatthew Ahrens 	dsl_pool_dirty_delta(dp, -space);
86369962b56SMatthew Ahrens 	mutex_exit(&dp->dp_lock);
8641ab7f2deSmaybee }
865088f3894Sahrens 
866088f3894Sahrens /* ARGSUSED */
867088f3894Sahrens static int
8683b2aab18SMatthew Ahrens upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
869088f3894Sahrens {
870088f3894Sahrens 	dmu_tx_t *tx = arg;
871088f3894Sahrens 	dsl_dataset_t *ds, *prev = NULL;
872088f3894Sahrens 	int err;
873088f3894Sahrens 
8743b2aab18SMatthew Ahrens 	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
875088f3894Sahrens 	if (err)
876088f3894Sahrens 		return (err);
877088f3894Sahrens 
878c1379625SJustin T. Gibbs 	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
879c1379625SJustin T. Gibbs 		err = dsl_dataset_hold_obj(dp,
880c1379625SJustin T. Gibbs 		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
881088f3894Sahrens 		if (err) {
882088f3894Sahrens 			dsl_dataset_rele(ds, FTAG);
883088f3894Sahrens 			return (err);
884088f3894Sahrens 		}
885088f3894Sahrens 
886c1379625SJustin T. Gibbs 		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
887088f3894Sahrens 			break;
888088f3894Sahrens 		dsl_dataset_rele(ds, FTAG);
889088f3894Sahrens 		ds = prev;
890088f3894Sahrens 		prev = NULL;
891088f3894Sahrens 	}
892088f3894Sahrens 
893088f3894Sahrens 	if (prev == NULL) {
894088f3894Sahrens 		prev = dp->dp_origin_snap;
895088f3894Sahrens 
896088f3894Sahrens 		/*
897088f3894Sahrens 		 * The $ORIGIN can't have any data, or the accounting
898088f3894Sahrens 		 * will be wrong.
899088f3894Sahrens 		 */
900c166b69dSPaul Dagnelie 		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
901c1379625SJustin T. Gibbs 		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
902c166b69dSPaul Dagnelie 		rrw_exit(&ds->ds_bp_rwlock, FTAG);
903088f3894Sahrens 
904088f3894Sahrens 		/* The origin doesn't get attached to itself */
905088f3894Sahrens 		if (ds->ds_object == prev->ds_object) {
906088f3894Sahrens 			dsl_dataset_rele(ds, FTAG);
907088f3894Sahrens 			return (0);
908088f3894Sahrens 		}
909088f3894Sahrens 
910088f3894Sahrens 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
911c1379625SJustin T. Gibbs 		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
912c1379625SJustin T. Gibbs 		dsl_dataset_phys(ds)->ds_prev_snap_txg =
913c1379625SJustin T. Gibbs 		    dsl_dataset_phys(prev)->ds_creation_txg;
914088f3894Sahrens 
915088f3894Sahrens 		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
916c1379625SJustin T. Gibbs 		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;
917088f3894Sahrens 
918088f3894Sahrens 		dmu_buf_will_dirty(prev->ds_dbuf, tx);
919c1379625SJustin T. Gibbs 		dsl_dataset_phys(prev)->ds_num_children++;
920088f3894Sahrens 
921c1379625SJustin T. Gibbs 		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
922088f3894Sahrens 			ASSERT(ds->ds_prev == NULL);
9233b2aab18SMatthew Ahrens 			VERIFY0(dsl_dataset_hold_obj(dp,
924c1379625SJustin T. Gibbs 			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
925c1379625SJustin T. Gibbs 			    ds, &ds->ds_prev));
926088f3894Sahrens 		}
927088f3894Sahrens 	}
928088f3894Sahrens 
929c1379625SJustin T. Gibbs 	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
930c1379625SJustin T. Gibbs 	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);
931088f3894Sahrens 
932c1379625SJustin T. Gibbs 	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
933c33e334fSMatthew Ahrens 		dmu_buf_will_dirty(prev->ds_dbuf, tx);
934c1379625SJustin T. Gibbs 		dsl_dataset_phys(prev)->ds_next_clones_obj =
935088f3894Sahrens 		    zap_create(dp->dp_meta_objset,
936088f3894Sahrens 		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
937088f3894Sahrens 	}
9383b2aab18SMatthew Ahrens 	VERIFY0(zap_add_int(dp->dp_meta_objset,
939c1379625SJustin T. Gibbs 	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));
940088f3894Sahrens 
941088f3894Sahrens 	dsl_dataset_rele(ds, FTAG);
942088f3894Sahrens 	if (prev != dp->dp_origin_snap)
943088f3894Sahrens 		dsl_dataset_rele(prev, FTAG);
944088f3894Sahrens 	return (0);
945088f3894Sahrens }
946088f3894Sahrens 
947088f3894Sahrens void
948088f3894Sahrens dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
949088f3894Sahrens {
950088f3894Sahrens 	ASSERT(dmu_tx_is_syncing(tx));
951088f3894Sahrens 	ASSERT(dp->dp_origin_snap != NULL);
952088f3894Sahrens 
9533b2aab18SMatthew Ahrens 	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
95412380e1eSArne Jansen 	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
955088f3894Sahrens }
956088f3894Sahrens 
957cde58dbcSMatthew Ahrens /* ARGSUSED */
958cde58dbcSMatthew Ahrens static int
9593b2aab18SMatthew Ahrens upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
960cde58dbcSMatthew Ahrens {
961cde58dbcSMatthew Ahrens 	dmu_tx_t *tx = arg;
962cde58dbcSMatthew Ahrens 	objset_t *mos = dp->dp_meta_objset;
963cde58dbcSMatthew Ahrens 
964c1379625SJustin T. Gibbs 	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
965cde58dbcSMatthew Ahrens 		dsl_dataset_t *origin;
966cde58dbcSMatthew Ahrens 
9673b2aab18SMatthew Ahrens 		VERIFY0(dsl_dataset_hold_obj(dp,
968c1379625SJustin T. Gibbs 		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));
969cde58dbcSMatthew Ahrens 
970c1379625SJustin T. Gibbs 		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
971cde58dbcSMatthew Ahrens 			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
972c1379625SJustin T. Gibbs 			dsl_dir_phys(origin->ds_dir)->dd_clones =
973c1379625SJustin T. Gibbs 			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
974c1379625SJustin T. Gibbs 			    0, tx);
975cde58dbcSMatthew Ahrens 		}
976cde58dbcSMatthew Ahrens 
9773b2aab18SMatthew Ahrens 		VERIFY0(zap_add_int(dp->dp_meta_objset,
978c1379625SJustin T. Gibbs 		    dsl_dir_phys(origin->ds_dir)->dd_clones,
979c1379625SJustin T. Gibbs 		    ds->ds_object, tx));
980cde58dbcSMatthew Ahrens 
981cde58dbcSMatthew Ahrens 		dsl_dataset_rele(origin, FTAG);
982cde58dbcSMatthew Ahrens 	}
983cde58dbcSMatthew Ahrens 	return (0);
984cde58dbcSMatthew Ahrens }
985cde58dbcSMatthew Ahrens 
986cde58dbcSMatthew Ahrens void
987cde58dbcSMatthew Ahrens dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
988cde58dbcSMatthew Ahrens {
989cde58dbcSMatthew Ahrens 	ASSERT(dmu_tx_is_syncing(tx));
990cde58dbcSMatthew Ahrens 	uint64_t obj;
991cde58dbcSMatthew Ahrens 
992cde58dbcSMatthew Ahrens 	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
9933b2aab18SMatthew Ahrens 	VERIFY0(dsl_pool_open_special_dir(dp,
994cde58dbcSMatthew Ahrens 	    FREE_DIR_NAME, &dp->dp_free_dir));
995cde58dbcSMatthew Ahrens 
996cde58dbcSMatthew Ahrens 	/*
997cde58dbcSMatthew Ahrens 	 * We can't use bpobj_alloc(), because spa_version() still
998cde58dbcSMatthew Ahrens 	 * returns the old version, and we need a new-version bpobj with
999cde58dbcSMatthew Ahrens 	 * subobj support.  So call dmu_object_alloc() directly.
1000cde58dbcSMatthew Ahrens 	 */
1001cde58dbcSMatthew Ahrens 	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
1002b5152584SMatthew Ahrens 	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
10033b2aab18SMatthew Ahrens 	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1004cde58dbcSMatthew Ahrens 	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
10053b2aab18SMatthew Ahrens 	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
1006cde58dbcSMatthew Ahrens 
10073b2aab18SMatthew Ahrens 	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
100812380e1eSArne Jansen 	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
1009cde58dbcSMatthew Ahrens }
1010cde58dbcSMatthew Ahrens 
1011088f3894Sahrens void
1012088f3894Sahrens dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
1013088f3894Sahrens {
1014088f3894Sahrens 	uint64_t dsobj;
1015088f3894Sahrens 	dsl_dataset_t *ds;
1016088f3894Sahrens 
1017088f3894Sahrens 	ASSERT(dmu_tx_is_syncing(tx));
1018088f3894Sahrens 	ASSERT(dp->dp_origin_snap == NULL);
10193b2aab18SMatthew Ahrens 	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
1020088f3894Sahrens 
1021088f3894Sahrens 	/* create the origin dir, ds, & snap-ds */
1022088f3894Sahrens 	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
1023088f3894Sahrens 	    NULL, 0, kcred, tx);
10243b2aab18SMatthew Ahrens 	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
10253b2aab18SMatthew Ahrens 	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
1026c1379625SJustin T. Gibbs 	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
1027088f3894Sahrens 	    dp, &dp->dp_origin_snap));
1028088f3894Sahrens 	dsl_dataset_rele(ds, FTAG);
1029088f3894Sahrens }
10309d3574bfSNeil Perrin 
10319d3574bfSNeil Perrin taskq_t *
10329d3574bfSNeil Perrin dsl_pool_vnrele_taskq(dsl_pool_t *dp)
10339d3574bfSNeil Perrin {
10349d3574bfSNeil Perrin 	return (dp->dp_vnrele_taskq);
10359d3574bfSNeil Perrin }
1036ca45db41SChris Kirby 
1037ca45db41SChris Kirby /*
1038ca45db41SChris Kirby  * Walk through the pool-wide zap object of temporary snapshot user holds
1039ca45db41SChris Kirby  * and release them.
1040ca45db41SChris Kirby  */
1041ca45db41SChris Kirby void
1042ca45db41SChris Kirby dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
1043ca45db41SChris Kirby {
1044ca45db41SChris Kirby 	zap_attribute_t za;
1045ca45db41SChris Kirby 	zap_cursor_t zc;
1046ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
1047ca45db41SChris Kirby 	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
1048a7a845e4SSteven Hartland 	nvlist_t *holds;
1049ca45db41SChris Kirby 
1050ca45db41SChris Kirby 	if (zapobj == 0)
1051ca45db41SChris Kirby 		return;
1052ca45db41SChris Kirby 	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1053ca45db41SChris Kirby 
1054a7a845e4SSteven Hartland 	holds = fnvlist_alloc();
1055a7a845e4SSteven Hartland 
1056ca45db41SChris Kirby 	for (zap_cursor_init(&zc, mos, zapobj);
1057ca45db41SChris Kirby 	    zap_cursor_retrieve(&zc, &za) == 0;
1058ca45db41SChris Kirby 	    zap_cursor_advance(&zc)) {
1059ca45db41SChris Kirby 		char *htag;
1060a7a845e4SSteven Hartland 		nvlist_t *tags;
1061ca45db41SChris Kirby 
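		/*
		 * Hold entries are named "<dsobj in hex>-<tag>" (see
		 * dsl_pool_user_hold_rele_impl() below), so split the
		 * ZAP attribute name at the first '-' to recover the
		 * per-dataset key and the hold tag.
		 */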
1062ca45db41SChris Kirby 		htag = strchr(za.za_name, '-');
1063ca45db41SChris Kirby 		*htag = '\0';
1064ca45db41SChris Kirby 		++htag;
1065a7a845e4SSteven Hartland 		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
1066a7a845e4SSteven Hartland 			tags = fnvlist_alloc();
1067a7a845e4SSteven Hartland 			fnvlist_add_boolean(tags, htag);
1068a7a845e4SSteven Hartland 			fnvlist_add_nvlist(holds, za.za_name, tags);
1069a7a845e4SSteven Hartland 			fnvlist_free(tags);
1070a7a845e4SSteven Hartland 		} else {
1071a7a845e4SSteven Hartland 			fnvlist_add_boolean(tags, htag);
1072a7a845e4SSteven Hartland 		}
1073ca45db41SChris Kirby 	}
1074a7a845e4SSteven Hartland 	dsl_dataset_user_release_tmp(dp, holds);
1075a7a845e4SSteven Hartland 	fnvlist_free(holds);
1076ca45db41SChris Kirby 	zap_cursor_fini(&zc);
1077ca45db41SChris Kirby }
1078ca45db41SChris Kirby 
1079ca45db41SChris Kirby /*
1080ca45db41SChris Kirby  * Create the pool-wide zap object for storing temporary snapshot holds.
1081ca45db41SChris Kirby  */
1082ca45db41SChris Kirby void
1083ca45db41SChris Kirby dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
1084ca45db41SChris Kirby {
1085ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
1086ca45db41SChris Kirby 
1087ca45db41SChris Kirby 	ASSERT(dp->dp_tmp_userrefs_obj == 0);
1088ca45db41SChris Kirby 	ASSERT(dmu_tx_is_syncing(tx));
1089ca45db41SChris Kirby 
1090ad135b5dSChristopher Siden 	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
1091ad135b5dSChristopher Siden 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
1092ca45db41SChris Kirby }
1093ca45db41SChris Kirby 
1094ca45db41SChris Kirby static int
1095ca45db41SChris Kirby dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
10963b2aab18SMatthew Ahrens     const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
1097ca45db41SChris Kirby {
1098ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
1099ca45db41SChris Kirby 	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
1100ca45db41SChris Kirby 	char *name;
1101ca45db41SChris Kirby 	int error;
1102ca45db41SChris Kirby 
1103ca45db41SChris Kirby 	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1104ca45db41SChris Kirby 	ASSERT(dmu_tx_is_syncing(tx));
1105ca45db41SChris Kirby 
1106ca45db41SChris Kirby 	/*
1107ca45db41SChris Kirby 	 * If the pool was created prior to SPA_VERSION_USERREFS, the
1108ca45db41SChris Kirby 	 * zap object for temporary holds might not exist yet.
1109ca45db41SChris Kirby 	 */
1110ca45db41SChris Kirby 	if (zapobj == 0) {
1111ca45db41SChris Kirby 		if (holding) {
1112ca45db41SChris Kirby 			dsl_pool_user_hold_create_obj(dp, tx);
1113ca45db41SChris Kirby 			zapobj = dp->dp_tmp_userrefs_obj;
1114ca45db41SChris Kirby 		} else {
1115be6fd75aSMatthew Ahrens 			return (SET_ERROR(ENOENT));
1116ca45db41SChris Kirby 		}
1117ca45db41SChris Kirby 	}
1118ca45db41SChris Kirby 
1119ca45db41SChris Kirby 	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
1120ca45db41SChris Kirby 	if (holding)
11213b2aab18SMatthew Ahrens 		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
1122ca45db41SChris Kirby 	else
1123ca45db41SChris Kirby 		error = zap_remove(mos, zapobj, name, tx);
1124ca45db41SChris Kirby 	strfree(name);
1125ca45db41SChris Kirby 
1126ca45db41SChris Kirby 	return (error);
1127ca45db41SChris Kirby }
1128ca45db41SChris Kirby 
1129ca45db41SChris Kirby /*
1130ca45db41SChris Kirby  * Add a temporary hold for the given dataset object and tag.
1131ca45db41SChris Kirby  */
1132ca45db41SChris Kirby int
1133ca45db41SChris Kirby dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
11343b2aab18SMatthew Ahrens     uint64_t now, dmu_tx_t *tx)
1135ca45db41SChris Kirby {
113615508ac0SChris Kirby 	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
1137ca45db41SChris Kirby }
1138ca45db41SChris Kirby 
1139ca45db41SChris Kirby /*
1140ca45db41SChris Kirby  * Release a temporary hold for the given dataset object and tag.
1141ca45db41SChris Kirby  */
1142ca45db41SChris Kirby int
1143ca45db41SChris Kirby dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
1144ca45db41SChris Kirby     dmu_tx_t *tx)
1145ca45db41SChris Kirby {
1146ca45db41SChris Kirby 	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
1147ca45db41SChris Kirby 	    tx, B_FALSE));
1148ca45db41SChris Kirby }
11493b2aab18SMatthew Ahrens 
11503b2aab18SMatthew Ahrens /*
11513b2aab18SMatthew Ahrens  * DSL Pool Configuration Lock
11523b2aab18SMatthew Ahrens  *
11533b2aab18SMatthew Ahrens  * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
11543b2aab18SMatthew Ahrens  * creation / destruction / rename / property setting).  It must be held for
11553b2aab18SMatthew Ahrens  * read to hold a dataset or dsl_dir.  I.e. you must call
11563b2aab18SMatthew Ahrens  * dsl_pool_config_enter() or dsl_pool_hold() before calling
11573b2aab18SMatthew Ahrens  * dsl_{dataset,dir}_hold{_obj}.  In most circumstances, the dp_config_rwlock
11583b2aab18SMatthew Ahrens  * must be held continuously until all datasets and dsl_dirs are released.
11593b2aab18SMatthew Ahrens  *
11603b2aab18SMatthew Ahrens  * The only exception to this rule is that if a "long hold" is placed on
11613b2aab18SMatthew Ahrens  * a dataset, then the dp_config_rwlock may be dropped while the dataset
11623b2aab18SMatthew Ahrens  * is still held.  The long hold will prevent the dataset from being
11633b2aab18SMatthew Ahrens  * destroyed -- the destroy will fail with EBUSY.  A long hold can be
11643b2aab18SMatthew Ahrens  * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
11653b2aab18SMatthew Ahrens  * (by calling dsl_{dataset,objset}_{try}own{_obj}).
11663b2aab18SMatthew Ahrens  *
11673b2aab18SMatthew Ahrens  * Legitimate long-holders (including owners) should be long-running, cancelable
11683b2aab18SMatthew Ahrens  * tasks that should cause "zfs destroy" to fail.  This includes DMU
11693b2aab18SMatthew Ahrens  * consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
11703b2aab18SMatthew Ahrens  * "zfs send", and "zfs diff".  There are several other long-holders whose
11713b2aab18SMatthew Ahrens  * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
11723b2aab18SMatthew Ahrens  *
11733b2aab18SMatthew Ahrens  * The usual formula for long-holding would be:
11743b2aab18SMatthew Ahrens  * dsl_pool_hold()
11753b2aab18SMatthew Ahrens  * dsl_dataset_hold()
11763b2aab18SMatthew Ahrens  * ... perform checks ...
11773b2aab18SMatthew Ahrens  * dsl_dataset_long_hold()
11783b2aab18SMatthew Ahrens  * dsl_pool_rele()
11793b2aab18SMatthew Ahrens  * ... perform long-running task ...
11803b2aab18SMatthew Ahrens  * dsl_dataset_long_rele()
11813b2aab18SMatthew Ahrens  * dsl_dataset_rele()
11823b2aab18SMatthew Ahrens  *
11833b2aab18SMatthew Ahrens  * Note that when the long hold is released, the dataset is still held but
11843b2aab18SMatthew Ahrens  * the pool is not held.  The dataset may change arbitrarily during this time
11853b2aab18SMatthew Ahrens  * (e.g. it could be destroyed).  Therefore you shouldn't do anything to the
11863b2aab18SMatthew Ahrens  * dataset except release it.
11873b2aab18SMatthew Ahrens  *
11883b2aab18SMatthew Ahrens  * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
11893b2aab18SMatthew Ahrens  * or modifying operations.
11903b2aab18SMatthew Ahrens  *
11913b2aab18SMatthew Ahrens  * Modifying operations should generally use dsl_sync_task().  The synctask
11923b2aab18SMatthew Ahrens  * infrastructure enforces proper locking strategy with respect to the
11933b2aab18SMatthew Ahrens  * dp_config_rwlock.  See the comment above dsl_sync_task() for details.
11943b2aab18SMatthew Ahrens  *
11953b2aab18SMatthew Ahrens  * Read-only operations will manually hold the pool, then the dataset, obtain
11963b2aab18SMatthew Ahrens  * information from the dataset, then release the pool and dataset.
11973b2aab18SMatthew Ahrens  * dmu_objset_{hold,rele}() are convenience routines that also do the pool
11983b2aab18SMatthew Ahrens  * hold/rele.
11993b2aab18SMatthew Ahrens  */
12003b2aab18SMatthew Ahrens 
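/*
 * Illustrative sketch (not part of the upstream source): a caller that
 * follows the long-hold formula described above.  The function name and
 * the DSL_POOL_EXAMPLE guard are hypothetical; dsl_pool_hold(),
 * dsl_dataset_hold(), dsl_dataset_long_hold() and the matching rele
 * routines are the real interfaces this comment documents.
 */
#ifdef DSL_POOL_EXAMPLE		/* never defined; example only */
static int
example_long_running_task(const char *dsname)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, FTAG, &dp);
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}
	/* ... perform checks while dp_config_rwlock is held for read ... */
	dsl_dataset_long_hold(ds, FTAG);	/* "zfs destroy" now fails with EBUSY */
	dsl_pool_rele(dp, FTAG);		/* drop dp_config_rwlock */

	/* ... perform the long-running task ... */

	dsl_dataset_long_rele(ds, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
#endif	/* DSL_POOL_EXAMPLE */
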
12013b2aab18SMatthew Ahrens int
12023b2aab18SMatthew Ahrens dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
12033b2aab18SMatthew Ahrens {
12043b2aab18SMatthew Ahrens 	spa_t *spa;
12053b2aab18SMatthew Ahrens 	int error;
12063b2aab18SMatthew Ahrens 
12073b2aab18SMatthew Ahrens 	error = spa_open(name, &spa, tag);
12083b2aab18SMatthew Ahrens 	if (error == 0) {
12093b2aab18SMatthew Ahrens 		*dp = spa_get_dsl(spa);
12103b2aab18SMatthew Ahrens 		dsl_pool_config_enter(*dp, tag);
12113b2aab18SMatthew Ahrens 	}
12123b2aab18SMatthew Ahrens 	return (error);
12133b2aab18SMatthew Ahrens }
12143b2aab18SMatthew Ahrens 
12153b2aab18SMatthew Ahrens void
12163b2aab18SMatthew Ahrens dsl_pool_rele(dsl_pool_t *dp, void *tag)
12173b2aab18SMatthew Ahrens {
12183b2aab18SMatthew Ahrens 	dsl_pool_config_exit(dp, tag);
12193b2aab18SMatthew Ahrens 	spa_close(dp->dp_spa, tag);
12203b2aab18SMatthew Ahrens }
12213b2aab18SMatthew Ahrens 
12223b2aab18SMatthew Ahrens void
12233b2aab18SMatthew Ahrens dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
12243b2aab18SMatthew Ahrens {
12253b2aab18SMatthew Ahrens 	/*
12263b2aab18SMatthew Ahrens 	 * We use a "reentrant" reader-writer lock, but not reentrantly.
12273b2aab18SMatthew Ahrens 	 *
12283b2aab18SMatthew Ahrens 	 * The rrwlock can (with the track_all flag) track all reading threads,
12293b2aab18SMatthew Ahrens 	 * which is very useful for debugging which code path failed to release
12303b2aab18SMatthew Ahrens 	 * the lock, and for verifying that the *current* thread does hold
12313b2aab18SMatthew Ahrens 	 * the lock.
12323b2aab18SMatthew Ahrens 	 *
12333b2aab18SMatthew Ahrens 	 * (Unlike a rwlock, which knows that N threads hold it for
12343b2aab18SMatthew Ahrens 	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
12353b2aab18SMatthew Ahrens 	 * if any thread holds it for read, even if this thread doesn't).
12363b2aab18SMatthew Ahrens 	 */
12373b2aab18SMatthew Ahrens 	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
12383b2aab18SMatthew Ahrens 	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
12393b2aab18SMatthew Ahrens }
12403b2aab18SMatthew Ahrens 
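/*
 * Illustrative sketch (not part of the upstream source): because the
 * rrwlock tracks per-thread read holds, DSL code can assert that the
 * current thread entered the configuration lock before touching pool
 * state.  The helper and the DSL_POOL_EXAMPLE guard are hypothetical;
 * dsl_pool_config_held() below is the real interface.
 */
#ifdef DSL_POOL_EXAMPLE		/* never defined; example only */
static uint64_t
example_read_root_dir_obj(dsl_pool_t *dp)
{
	/*
	 * The caller must have called dsl_pool_config_enter() or
	 * dsl_pool_hold() on this pool.
	 */
	ASSERT(dsl_pool_config_held(dp));
	return (dp->dp_root_dir_obj);
}
#endif	/* DSL_POOL_EXAMPLE */
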
12411d3f896fSArne Jansen void
12421d3f896fSArne Jansen dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
12431d3f896fSArne Jansen {
12441d3f896fSArne Jansen 	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
12451d3f896fSArne Jansen 	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
12461d3f896fSArne Jansen }
12471d3f896fSArne Jansen 
12483b2aab18SMatthew Ahrens void
12493b2aab18SMatthew Ahrens dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
12503b2aab18SMatthew Ahrens {
12513b2aab18SMatthew Ahrens 	rrw_exit(&dp->dp_config_rwlock, tag);
12523b2aab18SMatthew Ahrens }
12533b2aab18SMatthew Ahrens 
12543b2aab18SMatthew Ahrens boolean_t
12553b2aab18SMatthew Ahrens dsl_pool_config_held(dsl_pool_t *dp)
12563b2aab18SMatthew Ahrens {
12573b2aab18SMatthew Ahrens 	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
12583b2aab18SMatthew Ahrens }
125912380e1eSArne Jansen 
126012380e1eSArne Jansen boolean_t
126112380e1eSArne Jansen dsl_pool_config_held_writer(dsl_pool_t *dp)
126212380e1eSArne Jansen {
126312380e1eSArne Jansen 	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
126412380e1eSArne Jansen }
1265