/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example,
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */
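/*
 * A worked example with the defaults below (illustrative numbers only):
 * on a system with 32GB of physical memory, zfs_dirty_data_max defaults
 * to 10% of memory (3.2GB, below the 4GB cap). A txg sync is ensured
 * once 64MB of dirty data accumulates, transactions begin to be delayed
 * at 60% of the limit (about 1.9GB), and new writes block entirely once
 * the full 3.2GB limit is exceeded.
 */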
/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden in /etc/system.
 */
uint64_t zfs_dirty_data_max;
uint64_t zfs_dirty_data_max_max = 4ULL * 1024 * 1024 * 1024;
int zfs_dirty_data_max_percent = 10;

/*
 * If there is at least this much dirty data, push out a txg.
 */
uint64_t zfs_dirty_data_sync = 64 * 1024 * 1024;

/*
 * Once there is this percentage of dirty data, dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
uint64_t zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
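/*
 * Worked arithmetic for the default above: 1000 * 1000 * 1000 / 2000 =
 * 500,000, i.e. the default is tuned for a workload limit of roughly
 * 2,000 operations per second, and should therefore behave smoothly
 * between about 200 and 20,000 operations per second.
 */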
/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea is that we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
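/*
 * Illustrative sketch of the dispatch pattern described above, as the
 * comment implies it appears in zil_clean() (simplified; see zil.c for
 * the authoritative code):
 *
 *	if (taskq_dispatch(dp->dp_zil_clean_taskq,
 *	    (task_func_t *)zil_itxg_clean, itxs, TQ_NOSLEEP) == 0)
 *		zil_itxg_clean(itxs);	<-- synchronous fallback
 *
 * Note that because dp_zil_clean_taskq is created with
 * TASKQ_THREADS_CPU_PCT (see dsl_pool_open_impl() below),
 * zfs_zil_clean_taskq_nthr_pct is interpreted as a percentage of the
 * available CPUs rather than as an absolute thread count.
 */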
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}
int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	/*
	 * Note: errors ignored, because the leak dir will not exist if we
	 * have not encountered a leak yet.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}
void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_vnrele_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}
/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(), also
	 * rounding error in dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * Update the long range free counter after
	 * we're done syncing user data.
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk
	 *    deadlist
	 *  - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		dsl_dataset_sync_done(ds, tx);
	}
	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}
625ce636f8bSMatthew Ahrens */ 62669962b56SMatthew Ahrens ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1); 62769962b56SMatthew Ahrens while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL) 6283b2aab18SMatthew Ahrens dsl_sync_task_sync(dst, tx); 629ce636f8bSMatthew Ahrens } 630ce636f8bSMatthew Ahrens 631fa9e4066Sahrens dmu_tx_commit(tx); 63205715f94SMark Maybee 63369962b56SMatthew Ahrens DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg); 634fa9e4066Sahrens } 635fa9e4066Sahrens 636fa9e4066Sahrens void 637b24ab676SJeff Bonwick dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg) 638fa9e4066Sahrens { 639ce636f8bSMatthew Ahrens zilog_t *zilog; 640fa9e4066Sahrens 64143297f97SGeorge Wilson while (zilog = txg_list_head(&dp->dp_dirty_zilogs, txg)) { 64269962b56SMatthew Ahrens dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); 64343297f97SGeorge Wilson /* 64443297f97SGeorge Wilson * We don't remove the zilog from the dp_dirty_zilogs 64543297f97SGeorge Wilson * list until after we've cleaned it. This ensures that 64643297f97SGeorge Wilson * callers of zilog_is_dirty() receive an accurate 64743297f97SGeorge Wilson * answer when they are racing with the spa sync thread. 64843297f97SGeorge Wilson */ 649ce636f8bSMatthew Ahrens zil_clean(zilog, txg); 65043297f97SGeorge Wilson (void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg); 651ce636f8bSMatthew Ahrens ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg)); 652ce636f8bSMatthew Ahrens dmu_buf_rele(ds->ds_dbuf, zilog); 653fa9e4066Sahrens } 654b24ab676SJeff Bonwick ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg)); 655fa9e4066Sahrens } 656fa9e4066Sahrens 657c717a561Smaybee /* 658c717a561Smaybee * TRUE if the current thread is the tx_sync_thread or if we 659c717a561Smaybee * are being called from SPA context during pool initialization. 660c717a561Smaybee */ 661fa9e4066Sahrens int 662fa9e4066Sahrens dsl_pool_sync_context(dsl_pool_t *dp) 663fa9e4066Sahrens { 664fa9e4066Sahrens return (curthread == dp->dp_tx.tx_sync_thread || 66594c2d0ebSMatthew Ahrens spa_is_initializing(dp->dp_spa) || 66694c2d0ebSMatthew Ahrens taskq_member(dp->dp_sync_taskq, curthread)); 667fa9e4066Sahrens } 668fa9e4066Sahrens 669fa9e4066Sahrens uint64_t 670fa9e4066Sahrens dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree) 671fa9e4066Sahrens { 672fa9e4066Sahrens uint64_t space, resv; 673fa9e4066Sahrens 674fa9e4066Sahrens /* 675fa9e4066Sahrens * If we're trying to assess whether it's OK to do a free, 676fa9e4066Sahrens * cut the reservation in half to allow forward progress 677fa9e4066Sahrens * (e.g. make it possible to rm(1) files from a full pool). 
boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > zfs_dirty_data_sync)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;
	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}
/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}
void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	uint64_t obj;

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}
void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_vnrele_taskq(dsl_pool_t *dp)
{
	return (dp->dp_vnrele_taskq);
}
/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}
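/*
 * Temporary holds are stored in the ZAP as "<dsobj-in-hex>-<tag>"
 * entries whose 8-byte value is the time the hold was taken. For
 * example, a hold with tag "send" on dataset object 0x36 would be
 * stored under the name "36-send" (hypothetical values, shown only to
 * illustrate the format built by kmem_asprintf() below).
 */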
static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running,
 * cancelable tasks that should cause "zfs destroy" to fail. This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 *	dsl_pool_hold()
 *	dsl_dataset_hold()
 *	... perform checks ...
 *	dsl_dataset_long_hold()
 *	dsl_pool_rele()
 *	... perform long-running task ...
 *	dsl_dataset_long_rele()
 *	dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */
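/*
 * A minimal sketch of the read-only pattern described above (illustrative
 * only; error handling abbreviated):
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *	int error;
 *
 *	error = dsl_pool_hold(name, FTAG, &dp);
 *	if (error != 0)
 *		return (error);
 *	error = dsl_dataset_hold(dp, name, FTAG, &ds);
 *	if (error != 0) {
 *		dsl_pool_rele(dp, FTAG);
 *		return (error);
 *	}
 *	... read the information of interest from *ds ...
 *	dsl_dataset_rele(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 */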
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}