/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include "zfeature_common.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t. The
 *	spa_t cannot be destroyed or freed while this is non-zero. Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU. Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */

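/*
 * Illustrative sketch (not lifted from any particular caller): a reader of
 * the vdev tree takes one of the config locks with a tag and releases it
 * with the same tag, while heavyweight changes take SCL_ALL as writer via
 * the spa_vdev_enter()/spa_vdev_exit() wrappers below.  For example:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	uint64_t txg = spa_last_synced_txg(spa);	(config-dependent work)
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */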

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *   (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;

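/*
 * Worked out with the usual values of the constants above (assuming
 * VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3, noted here purely
 * as an illustration): (3 + 1) * 3 * 2 == 24.
 */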

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

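/*
 * Illustrative sketch (hypothetical caller) of how these primitives compose:
 * spa_config_tryenter() for an opportunistic grab, spa_config_held() in
 * assertions:
 *
 *	if (spa_config_tryenter(spa, SCL_ZIO, FTAG, RW_READER)) {
 *		ASSERT(spa_config_held(spa, SCL_ZIO, RW_READER));
 *		spa_config_exit(spa, SCL_ZIO, FTAG);
 *	}
 */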

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

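/*
 * Illustrative sketch (hypothetical caller): spa_lookup() is only meaningful
 * while spa_namespace_lock is held; "tank" below is just a placeholder name:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	boolean_t exists = (spa != NULL);
 *	mutex_exit(&spa_namespace_lock);
 */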

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
Requires 636fa9e4066Sahrens * spa_namespace_lock. This is called only after the spa_t has been closed and 637fa9e4066Sahrens * deactivated. 638fa9e4066Sahrens */ 639fa9e4066Sahrens void 640fa9e4066Sahrens spa_remove(spa_t *spa) 641fa9e4066Sahrens { 642c5904d13Seschrock spa_config_dirent_t *dp; 643c5904d13Seschrock 644fa9e4066Sahrens ASSERT(MUTEX_HELD(&spa_namespace_lock)); 645fa9e4066Sahrens ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 646fa9e4066Sahrens 6471195e687SMark J Musante nvlist_free(spa->spa_config_splitting); 6481195e687SMark J Musante 649fa9e4066Sahrens avl_remove(&spa_namespace_avl, spa); 650fa9e4066Sahrens cv_broadcast(&spa_namespace_cv); 651fa9e4066Sahrens 6520373e76bSbonwick if (spa->spa_root) { 653fa9e4066Sahrens spa_strfree(spa->spa_root); 6540373e76bSbonwick spa_active_count--; 6550373e76bSbonwick } 656fa9e4066Sahrens 657c5904d13Seschrock while ((dp = list_head(&spa->spa_config_list)) != NULL) { 658c5904d13Seschrock list_remove(&spa->spa_config_list, dp); 659c5904d13Seschrock if (dp->scd_path != NULL) 660c5904d13Seschrock spa_strfree(dp->scd_path); 661c5904d13Seschrock kmem_free(dp, sizeof (spa_config_dirent_t)); 662c5904d13Seschrock } 663c5904d13Seschrock 664c5904d13Seschrock list_destroy(&spa->spa_config_list); 6652f8aaab3Seschrock 666ad135b5dSChristopher Siden nvlist_free(spa->spa_label_features); 6674b964adaSGeorge Wilson nvlist_free(spa->spa_load_info); 668fa9e4066Sahrens spa_config_set(spa, NULL); 669fa9e4066Sahrens 670283b8460SGeorge.Wilson mutex_enter(&cpu_lock); 671283b8460SGeorge.Wilson if (spa->spa_deadman_cycid != CYCLIC_NONE) 672283b8460SGeorge.Wilson cyclic_remove(spa->spa_deadman_cycid); 673283b8460SGeorge.Wilson mutex_exit(&cpu_lock); 674283b8460SGeorge.Wilson spa->spa_deadman_cycid = CYCLIC_NONE; 675283b8460SGeorge.Wilson 676fa9e4066Sahrens refcount_destroy(&spa->spa_refcount); 67791ebeef5Sahrens 678e14bb325SJeff Bonwick spa_config_lock_destroy(spa); 679fa9e4066Sahrens 680c3a66015SMatthew Ahrens kstat_delete(spa->spa_iokstat); 681c3a66015SMatthew Ahrens spa->spa_iokstat = NULL; 682c3a66015SMatthew Ahrens 683b24ab676SJeff Bonwick for (int t = 0; t < TXG_SIZE; t++) 684cde58dbcSMatthew Ahrens bplist_destroy(&spa->spa_free_bplist[t]); 685b24ab676SJeff Bonwick 686c25056deSgw cv_destroy(&spa->spa_async_cv); 68735a5a358SJonathan Adams cv_destroy(&spa->spa_proc_cv); 688c25056deSgw cv_destroy(&spa->spa_scrub_io_cv); 689e14bb325SJeff Bonwick cv_destroy(&spa->spa_suspend_cv); 690c25056deSgw 6915ad82045Snd mutex_destroy(&spa->spa_async_lock); 692c25056deSgw mutex_destroy(&spa->spa_errlist_lock); 69335a5a358SJonathan Adams mutex_destroy(&spa->spa_errlog_lock); 69406eeb2adSek mutex_destroy(&spa->spa_history_lock); 69535a5a358SJonathan Adams mutex_destroy(&spa->spa_proc_lock); 696b1b8ab34Slling mutex_destroy(&spa->spa_props_lock); 69735a5a358SJonathan Adams mutex_destroy(&spa->spa_scrub_lock); 698e14bb325SJeff Bonwick mutex_destroy(&spa->spa_suspend_lock); 699a1521560SJeff Bonwick mutex_destroy(&spa->spa_vdev_top_lock); 700c3a66015SMatthew Ahrens mutex_destroy(&spa->spa_iokstat_lock); 7015ad82045Snd 702fa9e4066Sahrens kmem_free(spa, sizeof (spa_t)); 703fa9e4066Sahrens } 704fa9e4066Sahrens 705fa9e4066Sahrens /* 706fa9e4066Sahrens * Given a pool, return the next pool in the namespace, or NULL if there is 707fa9e4066Sahrens * none. If 'prev' is NULL, return the first pool. 

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

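/*
 * Illustrative sketch (hypothetical caller): consumers bracket their use of
 * a spa_t with spa_open_ref()/spa_close(), and export/destroy paths use
 * spa_refcount_zero() under the namespace lock to decide whether the pool
 * is still busy:
 *
 *	spa_open_ref(spa, FTAG);
 *	(... use the spa_t ...)
 *	spa_close(spa, FTAG);
 */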

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 * 	- A spare may be part of multiple pools.
 * 	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 * 	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

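/*
 * Illustrative sketch (hypothetical caller, 'guid' assumed to be a candidate
 * vdev guid): asking whether a guid is a known spare and whether some pool
 * currently has it active:
 *
 *	uint64_t pool = 0;
 *	int refcnt = 0;
 *	boolean_t is_spare = spa_spare_exists(guid, &pool, &refcnt);
 *	boolean_t in_use = (is_spare && pool != 0ULL);
 */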

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

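/*
 * Illustrative sketch (hypothetical caller): the usual shape of a vdev
 * configuration change, mirroring the description at the top of this file;
 * 'error' stands in for whatever the tree manipulation returns:
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	int error = 0;		(the vdev tree would be modified here)
 *	return (spa_vdev_exit(spa, NULL, txg, error));
 */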

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

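/*
 * Illustrative sketch (hypothetical caller, 'vd' assumed to be the vdev
 * whose state is being changed): state-only changes use these lighter-weight
 * wrappers rather than spa_vdev_enter()/spa_vdev_exit():
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	(... adjust vd's state ...)
 *	return (spa_vdev_state_exit(spa, vd, 0));
 */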
This ensures that, 11608ad4d6ddSJeff Bonwick * from the system administrator's perspective, zpool(1M) commands 11618ad4d6ddSJeff Bonwick * are synchronous. This is important for things like zpool offline: 11628ad4d6ddSJeff Bonwick * when the command completes, you expect no further I/O from ZFS. 11638ad4d6ddSJeff Bonwick */ 11648ad4d6ddSJeff Bonwick if (vd != NULL) 11658ad4d6ddSJeff Bonwick txg_wait_synced(spa->spa_dsl_pool, 0); 11668ad4d6ddSJeff Bonwick 1167c6065d0fSGeorge Wilson /* 1168c6065d0fSGeorge Wilson * If the config changed, update the config cache. 1169c6065d0fSGeorge Wilson */ 1170c6065d0fSGeorge Wilson if (config_changed) { 1171c6065d0fSGeorge Wilson mutex_enter(&spa_namespace_lock); 1172c6065d0fSGeorge Wilson spa_config_sync(spa, B_FALSE, B_TRUE); 1173c6065d0fSGeorge Wilson mutex_exit(&spa_namespace_lock); 1174c6065d0fSGeorge Wilson } 1175c6065d0fSGeorge Wilson 1176e14bb325SJeff Bonwick return (error); 1177e14bb325SJeff Bonwick } 1178e14bb325SJeff Bonwick 1179fa9e4066Sahrens /* 1180fa9e4066Sahrens * ========================================================================== 1181fa9e4066Sahrens * Miscellaneous functions 1182fa9e4066Sahrens * ========================================================================== 1183fa9e4066Sahrens */ 1184fa9e4066Sahrens 1185ad135b5dSChristopher Siden void 118643466aaeSMax Grossman spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1187ad135b5dSChristopher Siden { 11882acef22dSMatthew Ahrens if (!nvlist_exists(spa->spa_label_features, feature)) { 11892acef22dSMatthew Ahrens fnvlist_add_boolean(spa->spa_label_features, feature); 119043466aaeSMax Grossman /* 119143466aaeSMax Grossman * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 119243466aaeSMax Grossman * dirty the vdev config because lock SCL_CONFIG is not held. 119343466aaeSMax Grossman * Thankfully, in this case we don't need to dirty the config 119443466aaeSMax Grossman * because it will be written out anyway when we finish 119543466aaeSMax Grossman * creating the pool. 119643466aaeSMax Grossman */ 119743466aaeSMax Grossman if (tx->tx_txg != TXG_INITIAL) 119843466aaeSMax Grossman vdev_config_dirty(spa->spa_root_vdev); 11992acef22dSMatthew Ahrens } 1200ad135b5dSChristopher Siden } 1201ad135b5dSChristopher Siden 1202ad135b5dSChristopher Siden void 1203ad135b5dSChristopher Siden spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1204ad135b5dSChristopher Siden { 12052acef22dSMatthew Ahrens if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 12062acef22dSMatthew Ahrens vdev_config_dirty(spa->spa_root_vdev); 1207ad135b5dSChristopher Siden } 1208ad135b5dSChristopher Siden 1209fa9e4066Sahrens /* 1210fa9e4066Sahrens * Rename a spa_t. 1211fa9e4066Sahrens */ 1212fa9e4066Sahrens int 1213fa9e4066Sahrens spa_rename(const char *name, const char *newname) 1214fa9e4066Sahrens { 1215fa9e4066Sahrens spa_t *spa; 1216fa9e4066Sahrens int err; 1217fa9e4066Sahrens 1218fa9e4066Sahrens /* 1219fa9e4066Sahrens * Lookup the spa_t and grab the config lock for writing. We need to 1220fa9e4066Sahrens * actually open the pool so that we can sync out the necessary labels. 1221fa9e4066Sahrens * It's OK to call spa_open() with the namespace lock held because we 1222ea8dc4b6Seschrock * allow recursive calls for other reasons. 
1223fa9e4066Sahrens */ 1224fa9e4066Sahrens mutex_enter(&spa_namespace_lock); 1225fa9e4066Sahrens if ((err = spa_open(name, &spa, FTAG)) != 0) { 1226fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1227fa9e4066Sahrens return (err); 1228fa9e4066Sahrens } 1229fa9e4066Sahrens 1230e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1231fa9e4066Sahrens 1232fa9e4066Sahrens avl_remove(&spa_namespace_avl, spa); 1233e14bb325SJeff Bonwick (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name)); 1234fa9e4066Sahrens avl_add(&spa_namespace_avl, spa); 1235fa9e4066Sahrens 1236fa9e4066Sahrens /* 1237fa9e4066Sahrens * Sync all labels to disk with the new names by marking the root vdev 1238fa9e4066Sahrens * dirty and waiting for it to sync. It will pick up the new pool name 1239fa9e4066Sahrens * during the sync. 1240fa9e4066Sahrens */ 1241fa9e4066Sahrens vdev_config_dirty(spa->spa_root_vdev); 1242fa9e4066Sahrens 1243e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 1244fa9e4066Sahrens 12450373e76bSbonwick txg_wait_synced(spa->spa_dsl_pool, 0); 1246fa9e4066Sahrens 1247fa9e4066Sahrens /* 1248fa9e4066Sahrens * Sync the updated config cache. 1249fa9e4066Sahrens */ 1250c5904d13Seschrock spa_config_sync(spa, B_FALSE, B_TRUE); 1251fa9e4066Sahrens 1252fa9e4066Sahrens spa_close(spa, FTAG); 1253fa9e4066Sahrens 1254fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1255fa9e4066Sahrens 1256fa9e4066Sahrens return (0); 1257fa9e4066Sahrens } 1258fa9e4066Sahrens 1259fa9e4066Sahrens /* 1260f9af39baSGeorge Wilson * Return the spa_t associated with given pool_guid, if it exists. If 1261f9af39baSGeorge Wilson * device_guid is non-zero, determine whether the pool exists *and* contains 1262f9af39baSGeorge Wilson * a device with the specified device_guid. 1263fa9e4066Sahrens */ 1264f9af39baSGeorge Wilson spa_t * 1265f9af39baSGeorge Wilson spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1266fa9e4066Sahrens { 1267fa9e4066Sahrens spa_t *spa; 1268fa9e4066Sahrens avl_tree_t *t = &spa_namespace_avl; 1269fa9e4066Sahrens 1270ea8dc4b6Seschrock ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1271fa9e4066Sahrens 1272fa9e4066Sahrens for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1273fa9e4066Sahrens if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1274fa9e4066Sahrens continue; 1275fa9e4066Sahrens if (spa->spa_root_vdev == NULL) 1276fa9e4066Sahrens continue; 127739c23413Seschrock if (spa_guid(spa) == pool_guid) { 127839c23413Seschrock if (device_guid == 0) 127939c23413Seschrock break; 128039c23413Seschrock 128139c23413Seschrock if (vdev_lookup_by_guid(spa->spa_root_vdev, 128239c23413Seschrock device_guid) != NULL) 128339c23413Seschrock break; 128439c23413Seschrock 128539c23413Seschrock /* 12868654d025Sperrin * Check any devices we may be in the process of adding. 128739c23413Seschrock */ 128839c23413Seschrock if (spa->spa_pending_vdev) { 128939c23413Seschrock if (vdev_lookup_by_guid(spa->spa_pending_vdev, 129039c23413Seschrock device_guid) != NULL) 129139c23413Seschrock break; 129239c23413Seschrock } 129339c23413Seschrock } 1294fa9e4066Sahrens } 1295fa9e4066Sahrens 1296f9af39baSGeorge Wilson return (spa); 1297f9af39baSGeorge Wilson } 1298f9af39baSGeorge Wilson 1299f9af39baSGeorge Wilson /* 1300f9af39baSGeorge Wilson * Determine whether a pool with the given pool_guid exists. 
1301f9af39baSGeorge Wilson */ 1302f9af39baSGeorge Wilson boolean_t 1303f9af39baSGeorge Wilson spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1304f9af39baSGeorge Wilson { 1305f9af39baSGeorge Wilson return (spa_by_guid(pool_guid, device_guid) != NULL); 1306fa9e4066Sahrens } 1307fa9e4066Sahrens 1308fa9e4066Sahrens char * 1309fa9e4066Sahrens spa_strdup(const char *s) 1310fa9e4066Sahrens { 1311fa9e4066Sahrens size_t len; 1312fa9e4066Sahrens char *new; 1313fa9e4066Sahrens 1314fa9e4066Sahrens len = strlen(s); 1315fa9e4066Sahrens new = kmem_alloc(len + 1, KM_SLEEP); 1316fa9e4066Sahrens bcopy(s, new, len); 1317fa9e4066Sahrens new[len] = '\0'; 1318fa9e4066Sahrens 1319fa9e4066Sahrens return (new); 1320fa9e4066Sahrens } 1321fa9e4066Sahrens 1322fa9e4066Sahrens void 1323fa9e4066Sahrens spa_strfree(char *s) 1324fa9e4066Sahrens { 1325fa9e4066Sahrens kmem_free(s, strlen(s) + 1); 1326fa9e4066Sahrens } 1327fa9e4066Sahrens 1328fa9e4066Sahrens uint64_t 1329fa9e4066Sahrens spa_get_random(uint64_t range) 1330fa9e4066Sahrens { 1331fa9e4066Sahrens uint64_t r; 1332fa9e4066Sahrens 1333fa9e4066Sahrens ASSERT(range != 0); 1334fa9e4066Sahrens 1335fa9e4066Sahrens (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); 1336fa9e4066Sahrens 1337fa9e4066Sahrens return (r % range); 1338fa9e4066Sahrens } 1339fa9e4066Sahrens 13401195e687SMark J Musante uint64_t 13411195e687SMark J Musante spa_generate_guid(spa_t *spa) 13421195e687SMark J Musante { 13431195e687SMark J Musante uint64_t guid = spa_get_random(-1ULL); 13441195e687SMark J Musante 13451195e687SMark J Musante if (spa != NULL) { 13461195e687SMark J Musante while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)) 13471195e687SMark J Musante guid = spa_get_random(-1ULL); 13481195e687SMark J Musante } else { 13491195e687SMark J Musante while (guid == 0 || spa_guid_exists(guid, 0)) 13501195e687SMark J Musante guid = spa_get_random(-1ULL); 13511195e687SMark J Musante } 13521195e687SMark J Musante 13531195e687SMark J Musante return (guid); 13541195e687SMark J Musante } 13551195e687SMark J Musante 1356fa9e4066Sahrens void 135743466aaeSMax Grossman snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1358fa9e4066Sahrens { 1359ad135b5dSChristopher Siden char type[256]; 1360f0ba89beSJeff Bonwick char *checksum = NULL; 1361f0ba89beSJeff Bonwick char *compress = NULL; 1362f0ba89beSJeff Bonwick 1363f0ba89beSJeff Bonwick if (bp != NULL) { 1364ad135b5dSChristopher Siden if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1365ad135b5dSChristopher Siden dmu_object_byteswap_t bswap = 1366ad135b5dSChristopher Siden DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1367ad135b5dSChristopher Siden (void) snprintf(type, sizeof (type), "bswap %s %s", 1368ad135b5dSChristopher Siden DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 
1369ad135b5dSChristopher Siden "metadata" : "data", 1370ad135b5dSChristopher Siden dmu_ot_byteswap[bswap].ob_name); 1371ad135b5dSChristopher Siden } else { 1372ad135b5dSChristopher Siden (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1373ad135b5dSChristopher Siden sizeof (type)); 1374ad135b5dSChristopher Siden } 13755d7b4d43SMatthew Ahrens if (!BP_IS_EMBEDDED(bp)) { 13765d7b4d43SMatthew Ahrens checksum = 13775d7b4d43SMatthew Ahrens zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 13785d7b4d43SMatthew Ahrens } 1379f0ba89beSJeff Bonwick compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1380f0ba89beSJeff Bonwick } 1381fa9e4066Sahrens 138243466aaeSMax Grossman SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 138343466aaeSMax Grossman compress); 1384fa9e4066Sahrens } 1385fa9e4066Sahrens 1386fa9e4066Sahrens void 1387fa9e4066Sahrens spa_freeze(spa_t *spa) 1388fa9e4066Sahrens { 1389fa9e4066Sahrens uint64_t freeze_txg = 0; 1390fa9e4066Sahrens 1391e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1392fa9e4066Sahrens if (spa->spa_freeze_txg == UINT64_MAX) { 1393fa9e4066Sahrens freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1394fa9e4066Sahrens spa->spa_freeze_txg = freeze_txg; 1395fa9e4066Sahrens } 1396e14bb325SJeff Bonwick spa_config_exit(spa, SCL_ALL, FTAG); 1397fa9e4066Sahrens if (freeze_txg != 0) 1398fa9e4066Sahrens txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1399fa9e4066Sahrens } 1400fa9e4066Sahrens 14010125049cSahrens void 14020125049cSahrens zfs_panic_recover(const char *fmt, ...) 14030125049cSahrens { 14040125049cSahrens va_list adx; 14050125049cSahrens 14060125049cSahrens va_start(adx, fmt); 14070125049cSahrens vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 14080125049cSahrens va_end(adx); 14090125049cSahrens } 14100125049cSahrens 14113f9d6ad7SLin Ling /* 14123f9d6ad7SLin Ling * This is a stripped-down version of strtoull, suitable only for converting 1413f7170741SWill Andrews * lowercase hexadecimal numbers that don't overflow. 
14143f9d6ad7SLin Ling */ 14153f9d6ad7SLin Ling uint64_t 14163f9d6ad7SLin Ling strtonum(const char *str, char **nptr) 14173f9d6ad7SLin Ling { 14183f9d6ad7SLin Ling uint64_t val = 0; 14193f9d6ad7SLin Ling char c; 14203f9d6ad7SLin Ling int digit; 14213f9d6ad7SLin Ling 14223f9d6ad7SLin Ling while ((c = *str) != '\0') { 14233f9d6ad7SLin Ling if (c >= '0' && c <= '9') 14243f9d6ad7SLin Ling digit = c - '0'; 14253f9d6ad7SLin Ling else if (c >= 'a' && c <= 'f') 14263f9d6ad7SLin Ling digit = 10 + c - 'a'; 14273f9d6ad7SLin Ling else 14283f9d6ad7SLin Ling break; 14293f9d6ad7SLin Ling 14303f9d6ad7SLin Ling val *= 16; 14313f9d6ad7SLin Ling val += digit; 14323f9d6ad7SLin Ling 14333f9d6ad7SLin Ling str++; 14343f9d6ad7SLin Ling } 14353f9d6ad7SLin Ling 14363f9d6ad7SLin Ling if (nptr) 14373f9d6ad7SLin Ling *nptr = (char *)str; 14383f9d6ad7SLin Ling 14393f9d6ad7SLin Ling return (val); 14403f9d6ad7SLin Ling } 14413f9d6ad7SLin Ling 1442fa9e4066Sahrens /* 1443fa9e4066Sahrens * ========================================================================== 1444fa9e4066Sahrens * Accessor functions 1445fa9e4066Sahrens * ========================================================================== 1446fa9e4066Sahrens */ 1447fa9e4066Sahrens 1448088f3894Sahrens boolean_t 144988b7b0f2SMatthew Ahrens spa_shutting_down(spa_t *spa) 1450fa9e4066Sahrens { 145188b7b0f2SMatthew Ahrens return (spa->spa_async_suspended); 1452fa9e4066Sahrens } 1453fa9e4066Sahrens 1454fa9e4066Sahrens dsl_pool_t * 1455fa9e4066Sahrens spa_get_dsl(spa_t *spa) 1456fa9e4066Sahrens { 1457fa9e4066Sahrens return (spa->spa_dsl_pool); 1458fa9e4066Sahrens } 1459fa9e4066Sahrens 1460ad135b5dSChristopher Siden boolean_t 1461ad135b5dSChristopher Siden spa_is_initializing(spa_t *spa) 1462ad135b5dSChristopher Siden { 1463ad135b5dSChristopher Siden return (spa->spa_is_initializing); 1464ad135b5dSChristopher Siden } 1465ad135b5dSChristopher Siden 1466fa9e4066Sahrens blkptr_t * 1467fa9e4066Sahrens spa_get_rootblkptr(spa_t *spa) 1468fa9e4066Sahrens { 1469fa9e4066Sahrens return (&spa->spa_ubsync.ub_rootbp); 1470fa9e4066Sahrens } 1471fa9e4066Sahrens 1472fa9e4066Sahrens void 1473fa9e4066Sahrens spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1474fa9e4066Sahrens { 1475fa9e4066Sahrens spa->spa_uberblock.ub_rootbp = *bp; 1476fa9e4066Sahrens } 1477fa9e4066Sahrens 1478fa9e4066Sahrens void 1479fa9e4066Sahrens spa_altroot(spa_t *spa, char *buf, size_t buflen) 1480fa9e4066Sahrens { 1481fa9e4066Sahrens if (spa->spa_root == NULL) 1482fa9e4066Sahrens buf[0] = '\0'; 1483fa9e4066Sahrens else 1484fa9e4066Sahrens (void) strncpy(buf, spa->spa_root, buflen); 1485fa9e4066Sahrens } 1486fa9e4066Sahrens 1487fa9e4066Sahrens int 1488fa9e4066Sahrens spa_sync_pass(spa_t *spa) 1489fa9e4066Sahrens { 1490fa9e4066Sahrens return (spa->spa_sync_pass); 1491fa9e4066Sahrens } 1492fa9e4066Sahrens 1493fa9e4066Sahrens char * 1494fa9e4066Sahrens spa_name(spa_t *spa) 1495fa9e4066Sahrens { 1496fa9e4066Sahrens return (spa->spa_name); 1497fa9e4066Sahrens } 1498fa9e4066Sahrens 1499fa9e4066Sahrens uint64_t 1500fa9e4066Sahrens spa_guid(spa_t *spa) 1501fa9e4066Sahrens { 1502dfbb9432SGeorge Wilson dsl_pool_t *dp = spa_get_dsl(spa); 1503dfbb9432SGeorge Wilson uint64_t guid; 1504dfbb9432SGeorge Wilson 1505b5989ec7Seschrock /* 1506b5989ec7Seschrock * If we fail to parse the config during spa_load(), we can go through 1507b5989ec7Seschrock * the error path (which posts an ereport) and end up here with no root 1508e9103aaeSGarrett D'Amore * vdev. 
We stash the original pool guid in 'spa_config_guid' to handle 1509b5989ec7Seschrock * this case. 1510b5989ec7Seschrock */ 1511dfbb9432SGeorge Wilson if (spa->spa_root_vdev == NULL) 1512dfbb9432SGeorge Wilson return (spa->spa_config_guid); 1513dfbb9432SGeorge Wilson 1514dfbb9432SGeorge Wilson guid = spa->spa_last_synced_guid != 0 ? 1515dfbb9432SGeorge Wilson spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1516dfbb9432SGeorge Wilson 1517dfbb9432SGeorge Wilson /* 1518dfbb9432SGeorge Wilson * Return the most recently synced out guid unless we're 1519dfbb9432SGeorge Wilson * in syncing context. 1520dfbb9432SGeorge Wilson */ 1521dfbb9432SGeorge Wilson if (dp && dsl_pool_sync_context(dp)) 1522b5989ec7Seschrock return (spa->spa_root_vdev->vdev_guid); 1523b5989ec7Seschrock else 1524dfbb9432SGeorge Wilson return (guid); 1525e9103aaeSGarrett D'Amore } 1526e9103aaeSGarrett D'Amore 1527e9103aaeSGarrett D'Amore uint64_t 1528e9103aaeSGarrett D'Amore spa_load_guid(spa_t *spa) 1529e9103aaeSGarrett D'Amore { 1530e9103aaeSGarrett D'Amore /* 1531e9103aaeSGarrett D'Amore * This is a GUID that exists solely as a reference for the 1532e9103aaeSGarrett D'Amore * purposes of the arc. It is generated at load time, and 1533e9103aaeSGarrett D'Amore * is never written to persistent storage. 1534e9103aaeSGarrett D'Amore */ 1535e9103aaeSGarrett D'Amore return (spa->spa_load_guid); 1536fa9e4066Sahrens } 1537fa9e4066Sahrens 1538fa9e4066Sahrens uint64_t 1539fa9e4066Sahrens spa_last_synced_txg(spa_t *spa) 1540fa9e4066Sahrens { 1541fa9e4066Sahrens return (spa->spa_ubsync.ub_txg); 1542fa9e4066Sahrens } 1543fa9e4066Sahrens 1544fa9e4066Sahrens uint64_t 1545fa9e4066Sahrens spa_first_txg(spa_t *spa) 1546fa9e4066Sahrens { 1547fa9e4066Sahrens return (spa->spa_first_txg); 1548fa9e4066Sahrens } 1549fa9e4066Sahrens 1550b24ab676SJeff Bonwick uint64_t 1551b24ab676SJeff Bonwick spa_syncing_txg(spa_t *spa) 1552b24ab676SJeff Bonwick { 1553b24ab676SJeff Bonwick return (spa->spa_syncing_txg); 1554b24ab676SJeff Bonwick } 1555b24ab676SJeff Bonwick 155688b7b0f2SMatthew Ahrens pool_state_t 1557fa9e4066Sahrens spa_state(spa_t *spa) 1558fa9e4066Sahrens { 1559fa9e4066Sahrens return (spa->spa_state); 1560fa9e4066Sahrens } 1561fa9e4066Sahrens 1562b16da2e2SGeorge Wilson spa_load_state_t 1563b16da2e2SGeorge Wilson spa_load_state(spa_t *spa) 1564b16da2e2SGeorge Wilson { 1565b16da2e2SGeorge Wilson return (spa->spa_load_state); 1566b16da2e2SGeorge Wilson } 1567b16da2e2SGeorge Wilson 1568fa9e4066Sahrens uint64_t 1569fa9e4066Sahrens spa_freeze_txg(spa_t *spa) 1570fa9e4066Sahrens { 1571fa9e4066Sahrens return (spa->spa_freeze_txg); 1572fa9e4066Sahrens } 1573fa9e4066Sahrens 1574fa9e4066Sahrens /* ARGSUSED */ 1575fa9e4066Sahrens uint64_t 1576fa9e4066Sahrens spa_get_asize(spa_t *spa, uint64_t lsize) 1577fa9e4066Sahrens { 157869962b56SMatthew Ahrens return (lsize * spa_asize_inflation); 157944cd46caSbillm } 158044cd46caSbillm 1581485bbbf5SGeorge Wilson uint64_t 1582485bbbf5SGeorge Wilson spa_get_dspace(spa_t *spa) 1583485bbbf5SGeorge Wilson { 1584485bbbf5SGeorge Wilson return (spa->spa_dspace); 1585485bbbf5SGeorge Wilson } 1586485bbbf5SGeorge Wilson 1587485bbbf5SGeorge Wilson void 1588485bbbf5SGeorge Wilson spa_update_dspace(spa_t *spa) 1589485bbbf5SGeorge Wilson { 1590485bbbf5SGeorge Wilson spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 1591485bbbf5SGeorge Wilson ddt_get_dedup_dspace(spa); 1592485bbbf5SGeorge Wilson } 1593485bbbf5SGeorge Wilson 15940a4e9518Sgw /* 15950a4e9518Sgw * Return the failure mode that has been set to 
this pool. The default 15960a4e9518Sgw * behavior will be to block all I/Os when a complete failure occurs. 15970a4e9518Sgw */ 15980a4e9518Sgw uint8_t 15990a4e9518Sgw spa_get_failmode(spa_t *spa) 16000a4e9518Sgw { 16010a4e9518Sgw return (spa->spa_failmode); 16020a4e9518Sgw } 16030a4e9518Sgw 1604e14bb325SJeff Bonwick boolean_t 1605e14bb325SJeff Bonwick spa_suspended(spa_t *spa) 1606e14bb325SJeff Bonwick { 1607e14bb325SJeff Bonwick return (spa->spa_suspended); 1608e14bb325SJeff Bonwick } 1609e14bb325SJeff Bonwick 161044cd46caSbillm uint64_t 161144cd46caSbillm spa_version(spa_t *spa) 161244cd46caSbillm { 161344cd46caSbillm return (spa->spa_ubsync.ub_version); 161444cd46caSbillm } 161544cd46caSbillm 1616b24ab676SJeff Bonwick boolean_t 1617b24ab676SJeff Bonwick spa_deflate(spa_t *spa) 1618b24ab676SJeff Bonwick { 1619b24ab676SJeff Bonwick return (spa->spa_deflate); 1620b24ab676SJeff Bonwick } 1621b24ab676SJeff Bonwick 1622b24ab676SJeff Bonwick metaslab_class_t * 1623b24ab676SJeff Bonwick spa_normal_class(spa_t *spa) 1624b24ab676SJeff Bonwick { 1625b24ab676SJeff Bonwick return (spa->spa_normal_class); 1626b24ab676SJeff Bonwick } 1627b24ab676SJeff Bonwick 1628b24ab676SJeff Bonwick metaslab_class_t * 1629b24ab676SJeff Bonwick spa_log_class(spa_t *spa) 1630b24ab676SJeff Bonwick { 1631b24ab676SJeff Bonwick return (spa->spa_log_class); 1632b24ab676SJeff Bonwick } 1633b24ab676SJeff Bonwick 163444cd46caSbillm int 163544cd46caSbillm spa_max_replication(spa_t *spa) 163644cd46caSbillm { 163744cd46caSbillm /* 1638e7437265Sahrens * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 163944cd46caSbillm * handle BPs with more than one DVA allocated. Set our max 164044cd46caSbillm * replication level accordingly. 1641fa9e4066Sahrens */ 1642e7437265Sahrens if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 164344cd46caSbillm return (1); 164444cd46caSbillm return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 1645fa9e4066Sahrens } 1646fa9e4066Sahrens 16473f9d6ad7SLin Ling int 16483f9d6ad7SLin Ling spa_prev_software_version(spa_t *spa) 16493f9d6ad7SLin Ling { 16503f9d6ad7SLin Ling return (spa->spa_prev_software_version); 16513f9d6ad7SLin Ling } 16523f9d6ad7SLin Ling 1653283b8460SGeorge.Wilson uint64_t 1654283b8460SGeorge.Wilson spa_deadman_synctime(spa_t *spa) 1655283b8460SGeorge.Wilson { 1656283b8460SGeorge.Wilson return (spa->spa_deadman_synctime); 1657283b8460SGeorge.Wilson } 1658283b8460SGeorge.Wilson 165999653d4eSeschrock uint64_t 1660b24ab676SJeff Bonwick dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 166199653d4eSeschrock { 1662b24ab676SJeff Bonwick uint64_t asize = DVA_GET_ASIZE(dva); 1663b24ab676SJeff Bonwick uint64_t dsize = asize; 166499653d4eSeschrock 1665b24ab676SJeff Bonwick ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 166699653d4eSeschrock 1667b24ab676SJeff Bonwick if (asize != 0 && spa->spa_deflate) { 1668b24ab676SJeff Bonwick vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 1669b24ab676SJeff Bonwick dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio; 167099653d4eSeschrock } 1671b24ab676SJeff Bonwick 1672b24ab676SJeff Bonwick return (dsize); 1673b24ab676SJeff Bonwick } 1674b24ab676SJeff Bonwick 1675b24ab676SJeff Bonwick uint64_t 1676b24ab676SJeff Bonwick bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 1677b24ab676SJeff Bonwick { 1678b24ab676SJeff Bonwick uint64_t dsize = 0; 1679b24ab676SJeff Bonwick 16805d7b4d43SMatthew Ahrens for (int d = 0; d < BP_GET_NDVAS(bp); d++) 1681b24ab676SJeff Bonwick dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 
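	/*
	 * Each dva_get_dsize_sync() call above scales the DVA's allocated
	 * size by the owning top-level vdev's deflate ratio (when the pool
	 * has spa_deflate set), so 'dsize' now holds the block's total
	 * deflated size across all of its DVAs.
	 */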
1682b24ab676SJeff Bonwick 1683b24ab676SJeff Bonwick return (dsize); 1684b24ab676SJeff Bonwick } 1685b24ab676SJeff Bonwick 1686b24ab676SJeff Bonwick uint64_t 1687b24ab676SJeff Bonwick bp_get_dsize(spa_t *spa, const blkptr_t *bp) 1688b24ab676SJeff Bonwick { 1689b24ab676SJeff Bonwick uint64_t dsize = 0; 1690b24ab676SJeff Bonwick 1691b24ab676SJeff Bonwick spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 1692b24ab676SJeff Bonwick 16935d7b4d43SMatthew Ahrens for (int d = 0; d < BP_GET_NDVAS(bp); d++) 1694b24ab676SJeff Bonwick dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 1695b24ab676SJeff Bonwick 1696e14bb325SJeff Bonwick spa_config_exit(spa, SCL_VDEV, FTAG); 1697b24ab676SJeff Bonwick 1698b24ab676SJeff Bonwick return (dsize); 169999653d4eSeschrock } 170099653d4eSeschrock 1701fa9e4066Sahrens /* 1702fa9e4066Sahrens * ========================================================================== 1703fa9e4066Sahrens * Initialization and Termination 1704fa9e4066Sahrens * ========================================================================== 1705fa9e4066Sahrens */ 1706fa9e4066Sahrens 1707fa9e4066Sahrens static int 1708fa9e4066Sahrens spa_name_compare(const void *a1, const void *a2) 1709fa9e4066Sahrens { 1710fa9e4066Sahrens const spa_t *s1 = a1; 1711fa9e4066Sahrens const spa_t *s2 = a2; 1712fa9e4066Sahrens int s; 1713fa9e4066Sahrens 1714fa9e4066Sahrens s = strcmp(s1->spa_name, s2->spa_name); 1715fa9e4066Sahrens if (s > 0) 1716fa9e4066Sahrens return (1); 1717fa9e4066Sahrens if (s < 0) 1718fa9e4066Sahrens return (-1); 1719fa9e4066Sahrens return (0); 1720fa9e4066Sahrens } 1721fa9e4066Sahrens 17220373e76bSbonwick int 17230373e76bSbonwick spa_busy(void) 17240373e76bSbonwick { 17250373e76bSbonwick return (spa_active_count); 17260373e76bSbonwick } 17270373e76bSbonwick 1728e7cbe64fSgw void 1729e7cbe64fSgw spa_boot_init() 1730e7cbe64fSgw { 1731e7cbe64fSgw spa_config_load(); 1732e7cbe64fSgw } 1733e7cbe64fSgw 1734fa9e4066Sahrens void 1735fa9e4066Sahrens spa_init(int mode) 1736fa9e4066Sahrens { 1737fa9e4066Sahrens mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 1738c25056deSgw mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 1739fa94a07fSbrendan mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 1740fa9e4066Sahrens cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 1741fa9e4066Sahrens 1742fa9e4066Sahrens avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 1743fa9e4066Sahrens offsetof(spa_t, spa_avl)); 1744fa9e4066Sahrens 1745fa94a07fSbrendan avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 1746fa94a07fSbrendan offsetof(spa_aux_t, aux_avl)); 1747fa94a07fSbrendan 1748fa94a07fSbrendan avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 1749fa94a07fSbrendan offsetof(spa_aux_t, aux_avl)); 175099653d4eSeschrock 17518ad4d6ddSJeff Bonwick spa_mode_global = mode; 1752fa9e4066Sahrens 1753283b8460SGeorge.Wilson #ifdef _KERNEL 1754283b8460SGeorge.Wilson spa_arch_init(); 1755283b8460SGeorge.Wilson #else 1756cd1c8b85SMatthew Ahrens if (spa_mode_global != FREAD && dprintf_find_string("watch")) { 1757cd1c8b85SMatthew Ahrens arc_procfd = open("/proc/self/ctl", O_WRONLY); 1758cd1c8b85SMatthew Ahrens if (arc_procfd == -1) { 1759cd1c8b85SMatthew Ahrens perror("could not enable watchpoints: " 1760cd1c8b85SMatthew Ahrens "opening /proc/self/ctl failed: "); 1761cd1c8b85SMatthew Ahrens } else { 1762cd1c8b85SMatthew Ahrens arc_watch = B_TRUE; 1763cd1c8b85SMatthew Ahrens } 1764cd1c8b85SMatthew Ahrens } 1765cd1c8b85SMatthew Ahrens #endif 1766cd1c8b85SMatthew 
Ahrens 1767fa9e4066Sahrens refcount_init(); 1768fa9e4066Sahrens unique_init(); 17690713e232SGeorge Wilson range_tree_init(); 1770fa9e4066Sahrens zio_init(); 1771fa9e4066Sahrens dmu_init(); 1772fa9e4066Sahrens zil_init(); 177387db74c1Sek vdev_cache_stat_init(); 177491ebeef5Sahrens zfs_prop_init(); 1775990b4856Slling zpool_prop_init(); 1776ad135b5dSChristopher Siden zpool_feature_init(); 1777fa9e4066Sahrens spa_config_load(); 1778e14bb325SJeff Bonwick l2arc_start(); 1779fa9e4066Sahrens } 1780fa9e4066Sahrens 1781fa9e4066Sahrens void 1782fa9e4066Sahrens spa_fini(void) 1783fa9e4066Sahrens { 1784e14bb325SJeff Bonwick l2arc_stop(); 1785e14bb325SJeff Bonwick 1786fa9e4066Sahrens spa_evict_all(); 1787fa9e4066Sahrens 178887db74c1Sek vdev_cache_stat_fini(); 1789fa9e4066Sahrens zil_fini(); 1790fa9e4066Sahrens dmu_fini(); 1791fa9e4066Sahrens zio_fini(); 17920713e232SGeorge Wilson range_tree_fini(); 179391ebeef5Sahrens unique_fini(); 1794fa9e4066Sahrens refcount_fini(); 1795fa9e4066Sahrens 1796fa9e4066Sahrens avl_destroy(&spa_namespace_avl); 179799653d4eSeschrock avl_destroy(&spa_spare_avl); 1798fa94a07fSbrendan avl_destroy(&spa_l2cache_avl); 1799fa9e4066Sahrens 1800fa9e4066Sahrens cv_destroy(&spa_namespace_cv); 1801fa9e4066Sahrens mutex_destroy(&spa_namespace_lock); 1802c25056deSgw mutex_destroy(&spa_spare_lock); 1803fa94a07fSbrendan mutex_destroy(&spa_l2cache_lock); 1804fa9e4066Sahrens } 18056ce0521aSperrin 18066ce0521aSperrin /* 18076ce0521aSperrin * Return whether this pool has slogs. No locking needed. 18086ce0521aSperrin * It's not a problem if the wrong answer is returned as it's only for 18096ce0521aSperrin * performance and not correctness 18106ce0521aSperrin */ 18116ce0521aSperrin boolean_t 18126ce0521aSperrin spa_has_slogs(spa_t *spa) 18136ce0521aSperrin { 18146ce0521aSperrin return (spa->spa_log_class->mc_rotor != NULL); 18156ce0521aSperrin } 1816bf82a41bSeschrock 1817b24ab676SJeff Bonwick spa_log_state_t 1818b24ab676SJeff Bonwick spa_get_log_state(spa_t *spa) 1819b24ab676SJeff Bonwick { 1820b24ab676SJeff Bonwick return (spa->spa_log_state); 1821b24ab676SJeff Bonwick } 1822b24ab676SJeff Bonwick 1823b24ab676SJeff Bonwick void 1824b24ab676SJeff Bonwick spa_set_log_state(spa_t *spa, spa_log_state_t state) 1825b24ab676SJeff Bonwick { 1826b24ab676SJeff Bonwick spa->spa_log_state = state; 1827b24ab676SJeff Bonwick } 1828b24ab676SJeff Bonwick 1829bf82a41bSeschrock boolean_t 1830bf82a41bSeschrock spa_is_root(spa_t *spa) 1831bf82a41bSeschrock { 1832bf82a41bSeschrock return (spa->spa_is_root); 1833bf82a41bSeschrock } 18348ad4d6ddSJeff Bonwick 18358ad4d6ddSJeff Bonwick boolean_t 18368ad4d6ddSJeff Bonwick spa_writeable(spa_t *spa) 18378ad4d6ddSJeff Bonwick { 18388ad4d6ddSJeff Bonwick return (!!(spa->spa_mode & FWRITE)); 18398ad4d6ddSJeff Bonwick } 18408ad4d6ddSJeff Bonwick 18418ad4d6ddSJeff Bonwick int 18428ad4d6ddSJeff Bonwick spa_mode(spa_t *spa) 18438ad4d6ddSJeff Bonwick { 18448ad4d6ddSJeff Bonwick return (spa->spa_mode); 18458ad4d6ddSJeff Bonwick } 1846b24ab676SJeff Bonwick 1847b24ab676SJeff Bonwick uint64_t 1848b24ab676SJeff Bonwick spa_bootfs(spa_t *spa) 1849b24ab676SJeff Bonwick { 1850b24ab676SJeff Bonwick return (spa->spa_bootfs); 1851b24ab676SJeff Bonwick } 1852b24ab676SJeff Bonwick 1853b24ab676SJeff Bonwick uint64_t 1854b24ab676SJeff Bonwick spa_delegation(spa_t *spa) 1855b24ab676SJeff Bonwick { 1856b24ab676SJeff Bonwick return (spa->spa_delegation); 1857b24ab676SJeff Bonwick } 1858b24ab676SJeff Bonwick 1859b24ab676SJeff Bonwick objset_t * 1860b24ab676SJeff Bonwick spa_meta_objset(spa_t *spa) 
1861b24ab676SJeff Bonwick { 1862b24ab676SJeff Bonwick return (spa->spa_meta_objset); 1863b24ab676SJeff Bonwick } 1864b24ab676SJeff Bonwick 1865b24ab676SJeff Bonwick enum zio_checksum 1866b24ab676SJeff Bonwick spa_dedup_checksum(spa_t *spa) 1867b24ab676SJeff Bonwick { 1868b24ab676SJeff Bonwick return (spa->spa_dedup_checksum); 1869b24ab676SJeff Bonwick } 18703f9d6ad7SLin Ling 18713f9d6ad7SLin Ling /* 18723f9d6ad7SLin Ling * Reset pool scan stat per scan pass (or reboot). 18733f9d6ad7SLin Ling */ 18743f9d6ad7SLin Ling void 18753f9d6ad7SLin Ling spa_scan_stat_init(spa_t *spa) 18763f9d6ad7SLin Ling { 18773f9d6ad7SLin Ling /* data not stored on disk */ 18783f9d6ad7SLin Ling spa->spa_scan_pass_start = gethrestime_sec(); 18793f9d6ad7SLin Ling spa->spa_scan_pass_exam = 0; 18803f9d6ad7SLin Ling vdev_scan_stat_init(spa->spa_root_vdev); 18813f9d6ad7SLin Ling } 18823f9d6ad7SLin Ling 18833f9d6ad7SLin Ling /* 18843f9d6ad7SLin Ling * Get scan stats for zpool status reports 18853f9d6ad7SLin Ling */ 18863f9d6ad7SLin Ling int 18873f9d6ad7SLin Ling spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 18883f9d6ad7SLin Ling { 18893f9d6ad7SLin Ling dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 18903f9d6ad7SLin Ling 18913f9d6ad7SLin Ling if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) 1892be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 18933f9d6ad7SLin Ling bzero(ps, sizeof (pool_scan_stat_t)); 18943f9d6ad7SLin Ling 18953f9d6ad7SLin Ling /* data stored on disk */ 18963f9d6ad7SLin Ling ps->pss_func = scn->scn_phys.scn_func; 18973f9d6ad7SLin Ling ps->pss_start_time = scn->scn_phys.scn_start_time; 18983f9d6ad7SLin Ling ps->pss_end_time = scn->scn_phys.scn_end_time; 18993f9d6ad7SLin Ling ps->pss_to_examine = scn->scn_phys.scn_to_examine; 19003f9d6ad7SLin Ling ps->pss_examined = scn->scn_phys.scn_examined; 19013f9d6ad7SLin Ling ps->pss_to_process = scn->scn_phys.scn_to_process; 19023f9d6ad7SLin Ling ps->pss_processed = scn->scn_phys.scn_processed; 19033f9d6ad7SLin Ling ps->pss_errors = scn->scn_phys.scn_errors; 19043f9d6ad7SLin Ling ps->pss_state = scn->scn_phys.scn_state; 19053f9d6ad7SLin Ling 19063f9d6ad7SLin Ling /* data not stored on disk */ 19073f9d6ad7SLin Ling ps->pss_pass_start = spa->spa_scan_pass_start; 19083f9d6ad7SLin Ling ps->pss_pass_exam = spa->spa_scan_pass_exam; 19093f9d6ad7SLin Ling 19103f9d6ad7SLin Ling return (0); 19113f9d6ad7SLin Ling } 191209c9d376SGeorge Wilson 191309c9d376SGeorge Wilson boolean_t 191409c9d376SGeorge Wilson spa_debug_enabled(spa_t *spa) 191509c9d376SGeorge Wilson { 191609c9d376SGeorge Wilson return (spa->spa_debug); 191709c9d376SGeorge Wilson } 1918
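
/*
 * ==========================================================================
 * Editorial usage sketches (added by the editor; not part of the original
 * illumos source)
 * ==========================================================================
 */

/*
 * EXAMPLE (editorial sketch): the calling pattern implied by
 * spa_vdev_config_enter()/spa_vdev_config_exit() above.  The helper name and
 * the elided configuration change are hypothetical; the point is that the
 * caller already holds spa_namespace_lock (normally via the spa_vdev_enter()/
 * spa_vdev_exit() wrappers), carries the returned txg through to the exit
 * routine, and simply threads its error code along.
 */
static int
spa_example_vdev_config_change(spa_t *spa)
{
	uint64_t txg;
	int error = 0;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	txg = spa_vdev_config_enter(spa);

	/* ... modify the vdev configuration for 'txg' here ... */

	spa_vdev_config_exit(spa, NULL, txg, error, FTAG);

	return (error);
}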
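
/*
 * EXAMPLE (editorial sketch): the calling pattern for spa_vdev_state_enter()/
 * spa_vdev_state_exit() above.  The helper is hypothetical and the actual
 * state change is elided; SCL_NONE is assumed to be the usual "no additional
 * locks" argument from spa.h.
 */
static int
spa_example_vdev_state_change(spa_t *spa, uint64_t guid)
{
	vdev_t *vd;

	spa_vdev_state_enter(spa, SCL_NONE);

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));

	/* ... change the state of 'vd' here ... */

	return (spa_vdev_state_exit(spa, vd, 0));
}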
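
/*
 * EXAMPLE (editorial sketch): spa_by_guid() and spa_guid_exists() above
 * assert that spa_namespace_lock is held, and a device_guid of 0 means
 * "match on the pool guid alone".  A hypothetical caller would look like
 * this.
 */
static boolean_t
spa_example_pool_exists(uint64_t pool_guid)
{
	boolean_t exists;

	mutex_enter(&spa_namespace_lock);
	exists = spa_guid_exists(pool_guid, 0);
	mutex_exit(&spa_namespace_lock);

	return (exists);
}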
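
/*
 * EXAMPLE (editorial sketch): formatting a block pointer with
 * snprintf_blkptr() above.  BP_SPRINTF_LEN is assumed to be the buffer-size
 * macro that spa.h provides for this purpose.
 */
static void
spa_example_print_bp(const blkptr_t *bp)
{
	char blkbuf[BP_SPRINTF_LEN];

	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
	cmn_err(CE_NOTE, "blkptr: %s", blkbuf);
}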
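
/*
 * EXAMPLE (editorial sketch): a hypothetical consistency check using
 * zfs_panic_recover() above, which panics the system unless the zfs_recover
 * tunable is set, in which case it only warns.
 */
static void
spa_example_check_segment(uint64_t offset, uint64_t size, uint64_t vdev_asize)
{
	if (offset + size > vdev_asize) {
		zfs_panic_recover("segment [%llu, %llu) exceeds vdev size "
		    "%llu", (u_longlong_t)offset,
		    (u_longlong_t)(offset + size), (u_longlong_t)vdev_asize);
	}
}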
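
/*
 * EXAMPLE (editorial sketch): behavior of strtonum() above.  It accepts only
 * the characters 0-9 and a-f (no "0x" prefix, no uppercase) and stops at the
 * first character outside that set, so "1a2b" parses fully while "12zz"
 * stops at the 'z'.
 */
static void
spa_example_strtonum(void)
{
	char *end;
	uint64_t val;

	val = strtonum("1a2b", &end);
	ASSERT3U(val, ==, 0x1a2b);		/* 6699 decimal */
	ASSERT(*end == '\0');

	val = strtonum("12zz", &end);
	ASSERT3U(val, ==, 0x12);		/* parsing stops at 'z' */
	ASSERT(*end == 'z');
}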
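
/*
 * EXAMPLE (editorial sketch): consuming spa_scan_get_stats() above.  ENOENT
 * simply means no scan has ever been run on the pool; the field names come
 * from pool_scan_stat_t as used in that function.
 */
static void
spa_example_report_scan(spa_t *spa)
{
	pool_scan_stat_t ps;

	if (spa_scan_get_stats(spa, &ps) != 0)
		return;		/* no scan has ever run on this pool */

	cmn_err(CE_NOTE, "scan func %llu: examined %llu of %llu bytes, "
	    "%llu errors", (u_longlong_t)ps.pss_func,
	    (u_longlong_t)ps.pss_examined, (u_longlong_t)ps.pss_to_examine,
	    (u_longlong_t)ps.pss_errors);
}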