/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-txg
 * (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of dirty space
 * used; dsl_pool_undirty_space() decrements those values as data is synced
 * out from dsl_pool_sync(). While only the poolwide value is relevant, the
 * per-txg value is useful for debugging. The tunable zfs_dirty_data_max
 * determines the dirty space limit. Once that value is exceeded, new writes
 * are halted until space frees up.
 *
 * The zfs_dirty_data_sync_pct tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden in /etc/system.
 */
uint64_t zfs_dirty_data_max;
uint64_t zfs_dirty_data_max_max = 4ULL * 1024 * 1024 * 1024;
int zfs_dirty_data_max_percent = 10;

/*
 * If there's at least this much dirty data (as a percentage of
 * zfs_dirty_data_max), push out a txg. This should be less than
 * zfs_vdev_async_write_active_min_dirty_percent.
 */
uint64_t zfs_dirty_data_sync_pct = 20;

/*
 * Once there is this amount of dirty data, the dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
uint64_t zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
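
/*
 * Added illustration (not part of the original source; the figures are
 * hypothetical): on a machine where zfs_dirty_data_max resolves to the
 * 4GB zfs_dirty_data_max_max cap, the tunables above translate to roughly:
 *
 *	txg push	(zfs_dirty_data_sync_pct     =  20%)	~0.8GB dirty
 *	tx delay	(zfs_delay_min_dirty_percent =  60%)	~2.4GB dirty
 *	write block	(zfs_dirty_data_max          = 100%)	~4.0GB dirty
 *
 * The default zfs_delay_scale of 10^9 / 2000 corresponds to a workload of
 * about 2000 operations per second at the delay knee. As noted above, the
 * values can be overridden from /etc/system, e.g. (example values only):
 *
 *	set zfs:zfs_dirty_data_max = 0x40000000
 *	set zfs:zfs_delay_min_dirty_percent = 70
 */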

/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc").
 *
 * The idea being, we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));
	txg_list_create(&dp->dp_early_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err == 0) {
			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
			    dp->dp_meta_objset, obj));
		} else if (err == ENOENT) {
			/*
			 * We might not have created the remap bpobj yet.
			 */
			err = 0;
		} else {
			goto out;
		}
	}

	/*
	 * Note: errors ignored, because these special dirs, used for
	 * space accounting, are only created on demand.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_vnrele_taskq);
	if (dp->dp_blkstats != NULL)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;
	/*
	 * Currently, we only create the obsolete_bpobj where there are
	 * indirect vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
	/* create and open the obsolete_bpobj */
	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}

void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	VERIFY0(zap_remove(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, tx));
	bpobj_free(dp->dp_meta_objset,
	    dp->dp_obsolete_bpobj.bpo_object, tx);
	bpobj_close(&dp->dp_obsolete_bpobj);
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
#ifdef _KERNEL
	{
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		os = dmu_objset_create_impl(dp->dp_spa, ds,
		    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		zfs_create_fs(os, kcred, zplprops, tx);
	}
#endif
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
	spa_t *spa = dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		txg_list_t *tl = &vd->vdev_ms_list;
		metaslab_t *ms;

		for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
		    ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
			VERIFY(range_tree_is_empty(ms->ms_freeing));
			VERIFY(range_tree_is_empty(ms->ms_checkpointing));
		}
	}

	return (B_TRUE);
}
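
/*
 * Added orientation note (a summary of the phases performed below; see
 * the inline comments for the details and the reasons behind the ordering):
 *
 *  1. Run the early sync tasks before any dirty blocks are written
 *     (see dsl_early_sync_task()).
 *  2. Sync the dirty datasets (pass 1) and shore up the per-txg
 *     dirty-space accounting.
 *  3. Dispatch user/group space accounting updates and wait for
 *     dp_sync_taskq to drain.
 *  4. Sync the datasets dirtied by those updates (pass 2).
 *  5. Run per-dataset cleanup (dsl_dataset_sync_done()) and sync the
 *     dirty dsl_dirs.
 *  6. Apply the deferred $MOS space deltas and, if needed, sync the
 *     MOS itself.
 *  7. Run the remaining sync tasks against the freshly synced MOS.
 */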

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Run all early sync tasks before writing out any dirty blocks.
	 * For more info on early sync tasks see block comment in
	 * dsl_early_sync_task().
	 */
	if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
		dsl_sync_task_t *dst;

		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst =
		    txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
			ASSERT(dsl_early_sync_task_verify(dp, txg));
			dsl_sync_task_sync(dst, tx);
		}
		ASSERT(dsl_early_sync_task_verify(dp, txg));
	}

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them.  However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our per-txg
	 * dp_dirty_pertxg[] count should now be zero.  However, some
	 * seldom-used code paths do not adhere to this (e.g. dbuf_undirty(),
	 * also rounding error in dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * Update the long range free counter after
	 * we're done syncing user data
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting.  This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates.  This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		objset_t *os = ds->ds_objset;

		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);

		/*
		 * Release any key mappings created by calls to
		 * dsl_dataset_dirty() from the userquota accounting
		 * code paths.
		 */
		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk deadlist
	 *  - release hold from dsl_dataset_dirty()
	 *  - release key mapping hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		objset_t *os = ds->ds_objset;

		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}

		dsl_dataset_sync_done(ds, tx);
	}
	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir).  We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while (zilog = txg_list_head(&dp->dp_dirty_zilogs, txg)) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread, is a member of
 * dp_sync_taskq, or if we are being called from SPA context during pool
 * initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

/*
 * This function returns the amount of allocatable space in the pool
 * minus whatever space is currently reserved by ZFS for specific
 * purposes. Specifically:
 *
 * 1] Any reserved SLOP space
 * 2] Any space used by the checkpoint
 * 3] Any space used for deferred frees
 *
 * The latter 2 are especially important because they are needed to
 * rectify the SPA's and DMU's different understanding of how much space
 * is used. Now the DMU is aware of that extra space tracked by the SPA
 * without having to maintain a separate special dir (e.g. similar to
 * $MOS, $FREEING, and $LEAKED).
 *
 * Note: By deferred frees here, we mean the frees that were deferred
 * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
 * segments placed in ms_defer trees during metaslab_sync_done().
 */
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	spa_t *spa = dp->dp_spa;
	uint64_t space, resv, adjustedsize;
	uint64_t spa_deferred_frees =
	    spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;

	space = spa_get_dspace(spa)
	    - spa_get_checkpoint_space(spa) - spa_deferred_frees;
	resv = spa_get_slop_space(spa);

	switch (slop_policy) {
	case ZFS_SPACE_CHECK_NORMAL:
		break;
	case ZFS_SPACE_CHECK_RESERVED:
		resv >>= 1;
		break;
	case ZFS_SPACE_CHECK_EXTRA_RESERVED:
		resv >>= 2;
		break;
	case ZFS_SPACE_CHECK_NONE:
		resv = 0;
		break;
	default:
		panic("invalid slop policy value: %d", slop_policy);
		break;
	}
	adjustedsize = (space >= resv) ? (space - resv) : 0;

	return (adjustedsize);
}

uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
	uint64_t deferred =
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	uint64_t quota = (poolsize >= deferred) ?
	    (poolsize - deferred) : 0;
	return (quota);
}
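
/*
 * Added worked example (not part of the original source; the figures are
 * hypothetical): on a pool where spa_get_dspace() reports 100GB, the
 * checkpoint accounts for 2GB, 1GB of frees sit in spa_deferred_bpobj,
 * and spa_get_slop_space() returns 3.2GB, dsl_pool_adjustedsize() yields:
 *
 *	ZFS_SPACE_CHECK_NORMAL:		(100 - 2 - 1) - 3.2	= 93.8GB
 *	ZFS_SPACE_CHECK_RESERVED:	(100 - 2 - 1) - 1.6	= 95.4GB
 *	ZFS_SPACE_CHECK_EXTRA_RESERVED:	(100 - 2 - 1) - 0.8	= 96.2GB
 *	ZFS_SPACE_CHECK_NONE:		(100 - 2 - 1)		= 97.0GB
 *
 * dsl_pool_unreserved_space() then additionally subtracts whatever the
 * normal metaslab class still carries in its ms_defer trees.
 */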

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_pct / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > dirty_min_bytes)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;
	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	uint64_t obj;

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support.  So call dmu_object_alloc() directly.
1043cde58dbcSMatthew Ahrens 	 */
1044cde58dbcSMatthew Ahrens 	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
1045b5152584SMatthew Ahrens 	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
10463b2aab18SMatthew Ahrens 	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1047cde58dbcSMatthew Ahrens 	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
10483b2aab18SMatthew Ahrens 	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
1049cde58dbcSMatthew Ahrens 
10503b2aab18SMatthew Ahrens 	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
105112380e1eSArne Jansen 	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
1052cde58dbcSMatthew Ahrens }
1053cde58dbcSMatthew Ahrens 
1054088f3894Sahrens void
1055088f3894Sahrens dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
1056088f3894Sahrens {
1057088f3894Sahrens 	uint64_t dsobj;
1058088f3894Sahrens 	dsl_dataset_t *ds;
1059088f3894Sahrens 
1060088f3894Sahrens 	ASSERT(dmu_tx_is_syncing(tx));
1061088f3894Sahrens 	ASSERT(dp->dp_origin_snap == NULL);
10623b2aab18SMatthew Ahrens 	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
1063088f3894Sahrens 
1064088f3894Sahrens 	/* create the origin dir, ds, & snap-ds */
1065088f3894Sahrens 	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
1066*eb633035STom Caputi 	    NULL, 0, kcred, NULL, tx);
10673b2aab18SMatthew Ahrens 	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
10683b2aab18SMatthew Ahrens 	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
1069c1379625SJustin T. Gibbs 	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
1070088f3894Sahrens 	    dp, &dp->dp_origin_snap));
1071088f3894Sahrens 	dsl_dataset_rele(ds, FTAG);
1072088f3894Sahrens }
10739d3574bfSNeil Perrin 
10749d3574bfSNeil Perrin taskq_t *
10759d3574bfSNeil Perrin dsl_pool_vnrele_taskq(dsl_pool_t *dp)
10769d3574bfSNeil Perrin {
10779d3574bfSNeil Perrin 	return (dp->dp_vnrele_taskq);
10789d3574bfSNeil Perrin }
1079ca45db41SChris Kirby 
1080ca45db41SChris Kirby /*
1081ca45db41SChris Kirby  * Walk through the pool-wide zap object of temporary snapshot user holds
1082ca45db41SChris Kirby  * and release them.
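 * Each entry is named "<dataset object number in hex>-<tag>" (see
 * dsl_pool_user_hold_rele_impl() below); the name is split at the '-' to
 * recover the dataset and the hold tag to release.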
1083ca45db41SChris Kirby  */
1084ca45db41SChris Kirby void
1085ca45db41SChris Kirby dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
1086ca45db41SChris Kirby {
1087ca45db41SChris Kirby 	zap_attribute_t za;
1088ca45db41SChris Kirby 	zap_cursor_t zc;
1089ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
1090ca45db41SChris Kirby 	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
1091a7a845e4SSteven Hartland 	nvlist_t *holds;
1092ca45db41SChris Kirby 
1093ca45db41SChris Kirby 	if (zapobj == 0)
1094ca45db41SChris Kirby 		return;
1095ca45db41SChris Kirby 	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1096ca45db41SChris Kirby 
1097a7a845e4SSteven Hartland 	holds = fnvlist_alloc();
1098a7a845e4SSteven Hartland 
1099ca45db41SChris Kirby 	for (zap_cursor_init(&zc, mos, zapobj);
1100ca45db41SChris Kirby 	    zap_cursor_retrieve(&zc, &za) == 0;
1101ca45db41SChris Kirby 	    zap_cursor_advance(&zc)) {
1102ca45db41SChris Kirby 		char *htag;
1103a7a845e4SSteven Hartland 		nvlist_t *tags;
1104ca45db41SChris Kirby 
1105ca45db41SChris Kirby 		htag = strchr(za.za_name, '-');
1106ca45db41SChris Kirby 		*htag = '\0';
1107ca45db41SChris Kirby 		++htag;
1108a7a845e4SSteven Hartland 		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
1109a7a845e4SSteven Hartland 			tags = fnvlist_alloc();
1110a7a845e4SSteven Hartland 			fnvlist_add_boolean(tags, htag);
1111a7a845e4SSteven Hartland 			fnvlist_add_nvlist(holds, za.za_name, tags);
1112a7a845e4SSteven Hartland 			fnvlist_free(tags);
1113a7a845e4SSteven Hartland 		} else {
1114a7a845e4SSteven Hartland 			fnvlist_add_boolean(tags, htag);
1115a7a845e4SSteven Hartland 		}
1116ca45db41SChris Kirby 	}
1117a7a845e4SSteven Hartland 	dsl_dataset_user_release_tmp(dp, holds);
1118a7a845e4SSteven Hartland 	fnvlist_free(holds);
1119ca45db41SChris Kirby 	zap_cursor_fini(&zc);
1120ca45db41SChris Kirby }
1121ca45db41SChris Kirby 
1122ca45db41SChris Kirby /*
1123ca45db41SChris Kirby  * Create the pool-wide zap object for storing temporary snapshot holds.
1124ca45db41SChris Kirby  */
1125ca45db41SChris Kirby void
1126ca45db41SChris Kirby dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
1127ca45db41SChris Kirby {
1128ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
1129ca45db41SChris Kirby 
1130ca45db41SChris Kirby 	ASSERT(dp->dp_tmp_userrefs_obj == 0);
1131ca45db41SChris Kirby 	ASSERT(dmu_tx_is_syncing(tx));
1132ca45db41SChris Kirby 
1133ad135b5dSChristopher Siden 	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
1134ad135b5dSChristopher Siden 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
1135ca45db41SChris Kirby }
1136ca45db41SChris Kirby 
1137ca45db41SChris Kirby static int
1138ca45db41SChris Kirby dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
11393b2aab18SMatthew Ahrens     const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
1140ca45db41SChris Kirby {
1141ca45db41SChris Kirby 	objset_t *mos = dp->dp_meta_objset;
1142ca45db41SChris Kirby 	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
1143ca45db41SChris Kirby 	char *name;
1144ca45db41SChris Kirby 	int error;
1145ca45db41SChris Kirby 
1146ca45db41SChris Kirby 	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1147ca45db41SChris Kirby 	ASSERT(dmu_tx_is_syncing(tx));
1148ca45db41SChris Kirby 
1149ca45db41SChris Kirby 	/*
1150ca45db41SChris Kirby 	 * If the pool was created prior to SPA_VERSION_USERREFS, the
1151ca45db41SChris Kirby 	 * zap object for temporary holds might not exist yet.
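	 * Create it on demand when adding a hold; a release against a pool
	 * that never had the object simply fails with ENOENT.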
1152ca45db41SChris Kirby 	 */
1153ca45db41SChris Kirby 	if (zapobj == 0) {
1154ca45db41SChris Kirby 		if (holding) {
1155ca45db41SChris Kirby 			dsl_pool_user_hold_create_obj(dp, tx);
1156ca45db41SChris Kirby 			zapobj = dp->dp_tmp_userrefs_obj;
1157ca45db41SChris Kirby 		} else {
1158be6fd75aSMatthew Ahrens 			return (SET_ERROR(ENOENT));
1159ca45db41SChris Kirby 		}
1160ca45db41SChris Kirby 	}
1161ca45db41SChris Kirby 
1162ca45db41SChris Kirby 	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
1163ca45db41SChris Kirby 	if (holding)
11643b2aab18SMatthew Ahrens 		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
1165ca45db41SChris Kirby 	else
1166ca45db41SChris Kirby 		error = zap_remove(mos, zapobj, name, tx);
1167ca45db41SChris Kirby 	strfree(name);
1168ca45db41SChris Kirby 
1169ca45db41SChris Kirby 	return (error);
1170ca45db41SChris Kirby }
1171ca45db41SChris Kirby 
1172ca45db41SChris Kirby /*
1173ca45db41SChris Kirby  * Add a temporary hold for the given dataset object and tag.
1174ca45db41SChris Kirby  */
1175ca45db41SChris Kirby int
1176ca45db41SChris Kirby dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
11773b2aab18SMatthew Ahrens     uint64_t now, dmu_tx_t *tx)
1178ca45db41SChris Kirby {
117915508ac0SChris Kirby 	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
1180ca45db41SChris Kirby }
1181ca45db41SChris Kirby 
1182ca45db41SChris Kirby /*
1183ca45db41SChris Kirby  * Release a temporary hold for the given dataset object and tag.
1184ca45db41SChris Kirby  */
1185ca45db41SChris Kirby int
1186ca45db41SChris Kirby dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
1187ca45db41SChris Kirby     dmu_tx_t *tx)
1188ca45db41SChris Kirby {
1189dd328bf6SToomas Soome 	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0, tx, B_FALSE));
1190ca45db41SChris Kirby }
11913b2aab18SMatthew Ahrens 
11923b2aab18SMatthew Ahrens /*
11933b2aab18SMatthew Ahrens  * DSL Pool Configuration Lock
11943b2aab18SMatthew Ahrens  *
11953b2aab18SMatthew Ahrens  * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
11963b2aab18SMatthew Ahrens  * creation / destruction / rename / property setting).  It must be held for
11973b2aab18SMatthew Ahrens  * read to hold a dataset or dsl_dir.  I.e. you must call
11983b2aab18SMatthew Ahrens  * dsl_pool_config_enter() or dsl_pool_hold() before calling
11993b2aab18SMatthew Ahrens  * dsl_{dataset,dir}_hold{_obj}.  In most circumstances, the dp_config_rwlock
12003b2aab18SMatthew Ahrens  * must be held continuously until all datasets and dsl_dirs are released.
12013b2aab18SMatthew Ahrens  *
12023b2aab18SMatthew Ahrens  * The only exception to this rule is that if a "long hold" is placed on
12033b2aab18SMatthew Ahrens  * a dataset, then the dp_config_rwlock may be dropped while the dataset
12043b2aab18SMatthew Ahrens  * is still held.  The long hold will prevent the dataset from being
12053b2aab18SMatthew Ahrens  * destroyed -- the destroy will fail with EBUSY.  A long hold can be
12063b2aab18SMatthew Ahrens  * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
12073b2aab18SMatthew Ahrens  * (by calling dsl_{dataset,objset}_{try}own{_obj}).
12083b2aab18SMatthew Ahrens  *
12093b2aab18SMatthew Ahrens  * Legitimate long-holders (including owners) should be long-running, cancelable
12103b2aab18SMatthew Ahrens  * tasks that should cause "zfs destroy" to fail.  This includes DMU
12113b2aab18SMatthew Ahrens  * consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
12123b2aab18SMatthew Ahrens  * "zfs send", and "zfs diff".  There are several other long-holders whose
12133b2aab18SMatthew Ahrens  * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
12143b2aab18SMatthew Ahrens  *
12153b2aab18SMatthew Ahrens  * The usual formula for long-holding would be:
12163b2aab18SMatthew Ahrens  * dsl_pool_hold()
12173b2aab18SMatthew Ahrens  * dsl_dataset_hold()
12183b2aab18SMatthew Ahrens  * ... perform checks ...
12193b2aab18SMatthew Ahrens  * dsl_dataset_long_hold()
12203b2aab18SMatthew Ahrens  * dsl_pool_rele()
12213b2aab18SMatthew Ahrens  * ... perform long-running task ...
12223b2aab18SMatthew Ahrens  * dsl_dataset_long_rele()
12233b2aab18SMatthew Ahrens  * dsl_dataset_rele()
12243b2aab18SMatthew Ahrens  *
12253b2aab18SMatthew Ahrens  * Note that when the long hold is released, the dataset is still held but
12263b2aab18SMatthew Ahrens  * the pool is not held.  The dataset may change arbitrarily during this time
12273b2aab18SMatthew Ahrens  * (e.g. it could be destroyed).  Therefore you shouldn't do anything to the
12283b2aab18SMatthew Ahrens  * dataset except release it.
12293b2aab18SMatthew Ahrens  *
12303b2aab18SMatthew Ahrens  * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
12313b2aab18SMatthew Ahrens  * or modifying operations.
12323b2aab18SMatthew Ahrens  *
12333b2aab18SMatthew Ahrens  * Modifying operations should generally use dsl_sync_task().  The synctask
12343b2aab18SMatthew Ahrens  * infrastructure enforces proper locking strategy with respect to the
12353b2aab18SMatthew Ahrens  * dp_config_rwlock.  See the comment above dsl_sync_task() for details.
12363b2aab18SMatthew Ahrens  *
12373b2aab18SMatthew Ahrens  * Read-only operations will manually hold the pool, then the dataset, obtain
12383b2aab18SMatthew Ahrens  * information from the dataset, then release the pool and dataset.
12393b2aab18SMatthew Ahrens  * dmu_objset_{hold,rele}() are convenience routines that also do the pool
12403b2aab18SMatthew Ahrens  * hold/rele.
12413b2aab18SMatthew Ahrens  */
12423b2aab18SMatthew Ahrens 
12433b2aab18SMatthew Ahrens int
12443b2aab18SMatthew Ahrens dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
12453b2aab18SMatthew Ahrens {
12463b2aab18SMatthew Ahrens 	spa_t *spa;
12473b2aab18SMatthew Ahrens 	int error;
12483b2aab18SMatthew Ahrens 
12493b2aab18SMatthew Ahrens 	error = spa_open(name, &spa, tag);
12503b2aab18SMatthew Ahrens 	if (error == 0) {
12513b2aab18SMatthew Ahrens 		*dp = spa_get_dsl(spa);
12523b2aab18SMatthew Ahrens 		dsl_pool_config_enter(*dp, tag);
12533b2aab18SMatthew Ahrens 	}
12543b2aab18SMatthew Ahrens 	return (error);
12553b2aab18SMatthew Ahrens }
12563b2aab18SMatthew Ahrens 
12573b2aab18SMatthew Ahrens void
12583b2aab18SMatthew Ahrens dsl_pool_rele(dsl_pool_t *dp, void *tag)
12593b2aab18SMatthew Ahrens {
12603b2aab18SMatthew Ahrens 	dsl_pool_config_exit(dp, tag);
12613b2aab18SMatthew Ahrens 	spa_close(dp->dp_spa, tag);
12623b2aab18SMatthew Ahrens }
12633b2aab18SMatthew Ahrens 
12643b2aab18SMatthew Ahrens void
12653b2aab18SMatthew Ahrens dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
12663b2aab18SMatthew Ahrens {
12673b2aab18SMatthew Ahrens 	/*
12683b2aab18SMatthew Ahrens 	 * We use a "reentrant" reader-writer lock, but not reentrantly.
12693b2aab18SMatthew Ahrens 	 *
12703b2aab18SMatthew Ahrens 	 * The rrwlock can (with the track_all flag) track all reading threads,
12713b2aab18SMatthew Ahrens 	 * which is very useful for debugging which code path failed to release
12723b2aab18SMatthew Ahrens 	 * the lock, and for verifying that the *current* thread does hold
12733b2aab18SMatthew Ahrens 	 * the lock.
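	 * That is what the ASSERT below relies on: it verifies that this
	 * thread does not already hold the lock for read before entering it.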
12743b2aab18SMatthew Ahrens 	 *
12753b2aab18SMatthew Ahrens 	 * (Unlike a rwlock, which knows that N threads hold it for
12763b2aab18SMatthew Ahrens 	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
12773b2aab18SMatthew Ahrens 	 * if any thread holds it for read, even if this thread doesn't).
12783b2aab18SMatthew Ahrens 	 */
12793b2aab18SMatthew Ahrens 	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
12803b2aab18SMatthew Ahrens 	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
12813b2aab18SMatthew Ahrens }
12823b2aab18SMatthew Ahrens 
12831d3f896fSArne Jansen void
12841d3f896fSArne Jansen dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
12851d3f896fSArne Jansen {
12861d3f896fSArne Jansen 	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
12871d3f896fSArne Jansen 	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
12881d3f896fSArne Jansen }
12891d3f896fSArne Jansen 
12903b2aab18SMatthew Ahrens void
12913b2aab18SMatthew Ahrens dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
12923b2aab18SMatthew Ahrens {
12933b2aab18SMatthew Ahrens 	rrw_exit(&dp->dp_config_rwlock, tag);
12943b2aab18SMatthew Ahrens }
12953b2aab18SMatthew Ahrens 
12963b2aab18SMatthew Ahrens boolean_t
12973b2aab18SMatthew Ahrens dsl_pool_config_held(dsl_pool_t *dp)
12983b2aab18SMatthew Ahrens {
12993b2aab18SMatthew Ahrens 	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
13003b2aab18SMatthew Ahrens }
130112380e1eSArne Jansen 
130212380e1eSArne Jansen boolean_t
130312380e1eSArne Jansen dsl_pool_config_held_writer(dsl_pool_t *dp)
130412380e1eSArne Jansen {
130512380e1eSArne Jansen 	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
130612380e1eSArne Jansen }
1307
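
/*
 * Illustrative sketch (not compiled in; the function name and the abbreviated
 * error handling are hypothetical): the long-hold formula described in the
 * DSL Pool Configuration Lock comment above, spelled out for a read-mostly
 * consumer that performs a long-running, cancelable task.
 */
#if 0
static int
example_long_hold_usage(const char *dsname, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, tag, &dp);
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, tag, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}
	/* ... perform checks on ds while the pool is still held ... */
	dsl_dataset_long_hold(ds, tag);
	dsl_pool_rele(dp, tag);

	/* ... perform the long-running task; "zfs destroy" now fails with EBUSY ... */

	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);
	return (0);
}
#endif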