xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa_misc.c (revision 73527f44)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
229842588bSGeorge Wilson  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
232a104a52SAlex Reece  * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24e9103aaeSGarrett D'Amore  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens #include <sys/zfs_context.h>
28fa9e4066Sahrens #include <sys/spa_impl.h>
29283b8460SGeorge.Wilson #include <sys/spa_boot.h>
30fa9e4066Sahrens #include <sys/zio.h>
31fa9e4066Sahrens #include <sys/zio_checksum.h>
32fa9e4066Sahrens #include <sys/zio_compress.h>
33fa9e4066Sahrens #include <sys/dmu.h>
34fa9e4066Sahrens #include <sys/dmu_tx.h>
35fa9e4066Sahrens #include <sys/zap.h>
36fa9e4066Sahrens #include <sys/zil.h>
37fa9e4066Sahrens #include <sys/vdev_impl.h>
38fa9e4066Sahrens #include <sys/metaslab.h>
39fa9e4066Sahrens #include <sys/uberblock_impl.h>
40fa9e4066Sahrens #include <sys/txg.h>
41fa9e4066Sahrens #include <sys/avl.h>
42fa9e4066Sahrens #include <sys/unique.h>
43fa9e4066Sahrens #include <sys/dsl_pool.h>
44fa9e4066Sahrens #include <sys/dsl_dir.h>
45fa9e4066Sahrens #include <sys/dsl_prop.h>
463f9d6ad7SLin Ling #include <sys/dsl_scan.h>
47fa9e4066Sahrens #include <sys/fs/zfs.h>
486ce0521aSperrin #include <sys/metaslab_impl.h>
49e14bb325SJeff Bonwick #include <sys/arc.h>
50485bbbf5SGeorge Wilson #include <sys/ddt.h>
5191ebeef5Sahrens #include "zfs_prop.h"
52ad135b5dSChristopher Siden #include "zfeature_common.h"
53fa9e4066Sahrens 
54fa9e4066Sahrens /*
55fa9e4066Sahrens  * SPA locking
56fa9e4066Sahrens  *
57fa9e4066Sahrens  * There are four basic locks for managing spa_t structures:
58fa9e4066Sahrens  *
59fa9e4066Sahrens  * spa_namespace_lock (global mutex)
60fa9e4066Sahrens  *
6144cd46caSbillm  *	This lock must be acquired to do any of the following:
62fa9e4066Sahrens  *
6344cd46caSbillm  *		- Lookup a spa_t by name
6444cd46caSbillm  *		- Add or remove a spa_t from the namespace
6544cd46caSbillm  *		- Increase spa_refcount from zero
6644cd46caSbillm  *		- Check if spa_refcount is zero
6744cd46caSbillm  *		- Rename a spa_t
68ea8dc4b6Seschrock  *		- add/remove/attach/detach devices
6944cd46caSbillm  *		- Held for the duration of create/destroy/import/export
70fa9e4066Sahrens  *
7144cd46caSbillm  *	It does not need to handle recursion.  A create or destroy may
7244cd46caSbillm  *	reference objects (files or zvols) in other pools, but by
7344cd46caSbillm  *	definition they must have an existing reference, and will never need
7444cd46caSbillm  *	to lookup a spa_t by name.
75fa9e4066Sahrens  *
76fa9e4066Sahrens  * spa_refcount (per-spa refcount_t protected by mutex)
77fa9e4066Sahrens  *
7844cd46caSbillm  *	This reference count keeps track of any active users of the spa_t.  The
7944cd46caSbillm  *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
8044cd46caSbillm  *	the refcount is never really 'zero' - opening a pool implicitly keeps
81088f3894Sahrens  *	some references in the DMU.  Internally we check against spa_minref, but
8244cd46caSbillm  *	present the image of a zero/non-zero value to consumers.
83fa9e4066Sahrens  *
84e14bb325SJeff Bonwick  * spa_config_lock[] (per-spa array of rwlocks)
85fa9e4066Sahrens  *
8691ebeef5Sahrens  *	This protects the spa_t from config changes, and must be held in
8791ebeef5Sahrens  *	the following circumstances:
88fa9e4066Sahrens  *
8944cd46caSbillm  *		- RW_READER to perform I/O to the spa
9044cd46caSbillm  *		- RW_WRITER to change the vdev config
91fa9e4066Sahrens  *
92fa9e4066Sahrens  * The locking order is fairly straightforward:
93fa9e4066Sahrens  *
9444cd46caSbillm  *		spa_namespace_lock	->	spa_refcount
95fa9e4066Sahrens  *
9644cd46caSbillm  *	The namespace lock must be acquired to increase the refcount from 0
9744cd46caSbillm  *	or to check if it is zero.
98fa9e4066Sahrens  *
99e14bb325SJeff Bonwick  *		spa_refcount		->	spa_config_lock[]
100fa9e4066Sahrens  *
10144cd46caSbillm  *	There must be at least one valid reference on the spa_t to acquire
10244cd46caSbillm  *	the config lock.
103fa9e4066Sahrens  *
104e14bb325SJeff Bonwick  *		spa_namespace_lock	->	spa_config_lock[]
105fa9e4066Sahrens  *
10644cd46caSbillm  *	The namespace lock must always be taken before the config lock.
107fa9e4066Sahrens  *
108fa9e4066Sahrens  *
109e14bb325SJeff Bonwick  * The spa_namespace_lock can be acquired directly and is globally visible.
110fa9e4066Sahrens  *
111e14bb325SJeff Bonwick  * The namespace is manipulated using the following functions, all of which
112e14bb325SJeff Bonwick  * require the spa_namespace_lock to be held.
113fa9e4066Sahrens  *
11444cd46caSbillm  *	spa_lookup()		Lookup a spa_t by name.
115fa9e4066Sahrens  *
11644cd46caSbillm  *	spa_add()		Create a new spa_t in the namespace.
117fa9e4066Sahrens  *
11844cd46caSbillm  *	spa_remove()		Remove a spa_t from the namespace.  This also
11944cd46caSbillm  *				frees up any memory associated with the spa_t.
120fa9e4066Sahrens  *
12144cd46caSbillm  *	spa_next()		Returns the next spa_t in the system, or the
12244cd46caSbillm  *				first if NULL is passed.
123fa9e4066Sahrens  *
12444cd46caSbillm  *	spa_evict_all()		Shutdown and remove all spa_t structures in
12544cd46caSbillm  *				the system.
126fa9e4066Sahrens  *
127ea8dc4b6Seschrock  *	spa_guid_exists()	Determine whether a pool/device guid exists.
128fa9e4066Sahrens  *
129fa9e4066Sahrens  * The spa_refcount is manipulated using the following functions:
130fa9e4066Sahrens  *
13144cd46caSbillm  *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
13244cd46caSbillm  *				called with spa_namespace_lock held if the
13344cd46caSbillm  *				refcount is currently zero.
134fa9e4066Sahrens  *
13544cd46caSbillm  *	spa_close()		Remove a reference from the spa_t.  This will
13644cd46caSbillm  *				not free the spa_t or remove it from the
13744cd46caSbillm  *				namespace.  No locking is required.
138fa9e4066Sahrens  *
13944cd46caSbillm  *	spa_refcount_zero()	Returns true if the refcount is currently
14044cd46caSbillm  *				zero.  Must be called with spa_namespace_lock
14144cd46caSbillm  *				held.
142fa9e4066Sahrens  *
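 *	For illustration only, a minimal sketch of this interface (the pool
 *	name "tank" and the FTAG tag are assumptions; error handling is
 *	omitted):
 *
 *		mutex_enter(&spa_namespace_lock);
 *		spa_t *spa = spa_lookup("tank");
 *		if (spa != NULL)
 *			spa_open_ref(spa, FTAG);
 *		mutex_exit(&spa_namespace_lock);
 *		...
 *		spa_close(spa, FTAG);
 *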
143e14bb325SJeff Bonwick  * The spa_config_lock[] is an array of rwlocks, ordered as follows:
144e14bb325SJeff Bonwick  * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
145e14bb325SJeff Bonwick  * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
146e14bb325SJeff Bonwick  *
147e14bb325SJeff Bonwick  * To read the configuration, it suffices to hold one of these locks as reader.
148e14bb325SJeff Bonwick  * To modify the configuration, you must hold all locks as writer.  To modify
149e14bb325SJeff Bonwick  * vdev state without altering the vdev tree's topology (e.g. online/offline),
150e14bb325SJeff Bonwick  * you must hold SCL_STATE and SCL_ZIO as writer.
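 *
 * For illustration only, a minimal read-side sketch (the choice of SCL_VDEV
 * and the FTAG tag are assumptions):
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... read the vdev tree or other configuration state ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);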
151e14bb325SJeff Bonwick  *
152e14bb325SJeff Bonwick  * We use these distinct config locks to avoid recursive lock entry.
153e14bb325SJeff Bonwick  * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
154e14bb325SJeff Bonwick  * block allocations (SCL_ALLOC), which may require reading space maps
155e14bb325SJeff Bonwick  * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
156e14bb325SJeff Bonwick  *
157e14bb325SJeff Bonwick  * The spa config locks cannot be normal rwlocks because we need the
158e14bb325SJeff Bonwick  * ability to hand off ownership.  For example, SCL_ZIO is acquired
159e14bb325SJeff Bonwick  * by the issuing thread and later released by an interrupt thread.
160e14bb325SJeff Bonwick  * They do, however, obey the usual write-wanted semantics to prevent
161e14bb325SJeff Bonwick  * writer (i.e. system administrator) starvation.
162e14bb325SJeff Bonwick  *
163e14bb325SJeff Bonwick  * The lock acquisition rules are as follows:
164e14bb325SJeff Bonwick  *
165e14bb325SJeff Bonwick  * SCL_CONFIG
166e14bb325SJeff Bonwick  *	Protects changes to the vdev tree topology, such as vdev
167e14bb325SJeff Bonwick  *	add/remove/attach/detach.  Protects the dirty config list
168e14bb325SJeff Bonwick  *	(spa_config_dirty_list) and the set of spares and l2arc devices.
169e14bb325SJeff Bonwick  *
170e14bb325SJeff Bonwick  * SCL_STATE
171e14bb325SJeff Bonwick  *	Protects changes to pool state and vdev state, such as vdev
172e14bb325SJeff Bonwick  *	online/offline/fault/degrade/clear.  Protects the dirty state list
173e14bb325SJeff Bonwick  *	(spa_state_dirty_list) and global pool state (spa_state).
174e14bb325SJeff Bonwick  *
175e14bb325SJeff Bonwick  * SCL_ALLOC
176e14bb325SJeff Bonwick  *	Protects changes to metaslab groups and classes.
177e14bb325SJeff Bonwick  *	Held as reader by metaslab_alloc() and metaslab_claim().
178e14bb325SJeff Bonwick  *
179e14bb325SJeff Bonwick  * SCL_ZIO
180e14bb325SJeff Bonwick  *	Held by bp-level zios (those which have no io_vd upon entry)
181e14bb325SJeff Bonwick  *	to prevent changes to the vdev tree.  The bp-level zio implicitly
182e14bb325SJeff Bonwick  *	protects all of its vdev child zios, which do not hold SCL_ZIO.
183e14bb325SJeff Bonwick  *
184e14bb325SJeff Bonwick  * SCL_FREE
185e14bb325SJeff Bonwick  *	Protects changes to metaslab groups and classes.
186e14bb325SJeff Bonwick  *	Held as reader by metaslab_free().  SCL_FREE is distinct from
187e14bb325SJeff Bonwick  *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
188e14bb325SJeff Bonwick  *	blocks in zio_done() while another i/o that holds either
189e14bb325SJeff Bonwick  *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
190e14bb325SJeff Bonwick  *
191e14bb325SJeff Bonwick  * SCL_VDEV
192e14bb325SJeff Bonwick  *	Held as reader to prevent changes to the vdev tree during trivial
193b24ab676SJeff Bonwick  *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
194e14bb325SJeff Bonwick  *	other locks, and lower than all of them, to ensure that it's safe
195e14bb325SJeff Bonwick  *	to acquire regardless of caller context.
196e14bb325SJeff Bonwick  *
197e14bb325SJeff Bonwick  * In addition, the following rules apply:
198e14bb325SJeff Bonwick  *
199e14bb325SJeff Bonwick  * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
200e14bb325SJeff Bonwick  *	The lock ordering is SCL_CONFIG > spa_props_lock.
201e14bb325SJeff Bonwick  *
202e14bb325SJeff Bonwick  * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
203e14bb325SJeff Bonwick  *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
204e14bb325SJeff Bonwick  *	or zio_write_phys() -- the caller must ensure that the config cannot
205e14bb325SJeff Bonwick  *	change in the interim, and that the vdev cannot be reopened.
206e14bb325SJeff Bonwick  *	SCL_STATE as reader suffices for both.
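 *
 *	For illustration only (arguments elided; the FTAG tag is an
 *	assumption), the shape of such a leaf-vdev I/O:
 *
 *		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
 *		... issue zio_read_phys()/zio_write_phys() against vd and
 *		    wait for completion ...
 *		spa_config_exit(spa, SCL_STATE, FTAG);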
207fa9e4066Sahrens  *
208ea8dc4b6Seschrock  * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
209fa9e4066Sahrens  *
21044cd46caSbillm  *	spa_vdev_enter()	Acquire the namespace lock and the config lock
211ea8dc4b6Seschrock  *				for writing.
212fa9e4066Sahrens  *
21344cd46caSbillm  *	spa_vdev_exit()		Release the config lock, wait for all I/O
21444cd46caSbillm  *				to complete, sync the updated configs to the
215ea8dc4b6Seschrock  *				cache, and release the namespace lock.
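 *
 *	For illustration only, the typical shape of a vdev change (error
 *	handling elided):
 *
 *		uint64_t txg = spa_vdev_enter(spa);
 *		... add, remove, attach or detach vdevs ...
 *		return (spa_vdev_exit(spa, vd, txg, error));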
216fa9e4066Sahrens  *
217e14bb325SJeff Bonwick  * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
218e14bb325SJeff Bonwick  * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
219e14bb325SJeff Bonwick  * locking is always based on spa_namespace_lock and spa_config_lock[].
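 *
 * For illustration only, the analogous state-change pattern (SCL_NONE is
 * what callers pass when no additional locks are needed):
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... online, offline, fault or clear a vdev ...
 *	return (spa_vdev_state_exit(spa, vd, error));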
220e14bb325SJeff Bonwick  *
221ad135b5dSChristopher Siden  * spa_rename() is also implemented within this file since it requires
222e14bb325SJeff Bonwick  * manipulation of the namespace.
223fa9e4066Sahrens  */
224fa9e4066Sahrens 
225fa9e4066Sahrens static avl_tree_t spa_namespace_avl;
226fa9e4066Sahrens kmutex_t spa_namespace_lock;
227fa9e4066Sahrens static kcondvar_t spa_namespace_cv;
2280373e76bSbonwick static int spa_active_count;
229416e0cd8Sek int spa_max_replication_override = SPA_DVAS_PER_BP;
230fa9e4066Sahrens 
23199653d4eSeschrock static kmutex_t spa_spare_lock;
23239c23413Seschrock static avl_tree_t spa_spare_avl;
233fa94a07fSbrendan static kmutex_t spa_l2cache_lock;
234fa94a07fSbrendan static avl_tree_t spa_l2cache_avl;
23599653d4eSeschrock 
236fa9e4066Sahrens kmem_cache_t *spa_buffer_pool;
2378ad4d6ddSJeff Bonwick int spa_mode_global;
238fa9e4066Sahrens 
239fa9e4066Sahrens #ifdef ZFS_DEBUG
2403b2aab18SMatthew Ahrens /* Everything except dprintf and spa is on by default in debug builds */
2413b2aab18SMatthew Ahrens int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
242fa9e4066Sahrens #else
243fa9e4066Sahrens int zfs_flags = 0;
244fa9e4066Sahrens #endif
245fa9e4066Sahrens 
2460125049cSahrens /*
2470125049cSahrens  * zfs_recover can be set to nonzero to attempt to recover from
2480125049cSahrens  * otherwise-fatal errors, typically caused by on-disk corruption.  When
2490125049cSahrens  * set, calls to zfs_panic_recover() will turn into warning messages.
2508b36997aSMatthew Ahrens  * This should only be used as a last resort, as it typically results
2518b36997aSMatthew Ahrens  * in leaked space, or worse.
2520125049cSahrens  */
2537fd05ac4SMatthew Ahrens boolean_t zfs_recover = B_FALSE;
2547fd05ac4SMatthew Ahrens 
2557fd05ac4SMatthew Ahrens /*
2567fd05ac4SMatthew Ahrens  * If destroy encounters an EIO while reading metadata (e.g. indirect
2577fd05ac4SMatthew Ahrens  * blocks), space referenced by the missing metadata cannot be freed.
2587fd05ac4SMatthew Ahrens  * Normally this causes the background destroy to become "stalled", as
2597fd05ac4SMatthew Ahrens  * it is unable to make forward progress.  While in this stalled state,
2607fd05ac4SMatthew Ahrens  * all remaining space to free from the error-encountering filesystem is
2617fd05ac4SMatthew Ahrens  * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
2627fd05ac4SMatthew Ahrens  * permanently leak the space from indirect blocks that cannot be read,
2637fd05ac4SMatthew Ahrens  * and continue to free everything else that it can.
2647fd05ac4SMatthew Ahrens  *
2657fd05ac4SMatthew Ahrens  * The default, "stalling" behavior is useful if the storage partially
2667fd05ac4SMatthew Ahrens  * fails (i.e. some but not all i/os fail), and then later recovers.  In
2677fd05ac4SMatthew Ahrens  * this case, we will be able to continue pool operations while it is
2687fd05ac4SMatthew Ahrens  * partially failed, and when it recovers, we can continue to free the
2697fd05ac4SMatthew Ahrens  * space, with no leaks.  However, note that this case is actually
2707fd05ac4SMatthew Ahrens  * fairly rare.
2717fd05ac4SMatthew Ahrens  *
2727fd05ac4SMatthew Ahrens  * Typically pools either (a) fail completely (but perhaps temporarily,
2737fd05ac4SMatthew Ahrens  * e.g. a top-level vdev going offline), or (b) have localized,
2747fd05ac4SMatthew Ahrens  * permanent errors (e.g. disk returns the wrong data due to bit flip or
2757fd05ac4SMatthew Ahrens  * firmware bug).  In case (a), this setting does not matter because the
2767fd05ac4SMatthew Ahrens  * pool will be suspended and the sync thread will not be able to make
2777fd05ac4SMatthew Ahrens  * forward progress regardless.  In case (b), because the error is
2787fd05ac4SMatthew Ahrens  * permanent, the best we can do is leak the minimum amount of space,
2797fd05ac4SMatthew Ahrens  * which is what setting this flag will do.  Therefore, it is reasonable
2807fd05ac4SMatthew Ahrens  * for this flag to normally be set, but we chose the more conservative
2817fd05ac4SMatthew Ahrens  * approach of not setting it, so that there is no possibility of
2827fd05ac4SMatthew Ahrens  * leaking space in the "partial temporary" failure case.
2837fd05ac4SMatthew Ahrens  */
2847fd05ac4SMatthew Ahrens boolean_t zfs_free_leak_on_eio = B_FALSE;
2850125049cSahrens 
28669962b56SMatthew Ahrens /*
28769962b56SMatthew Ahrens  * Expiration time in milliseconds. This value has two meanings. First it is
28869962b56SMatthew Ahrens  * used to determine when the spa_deadman() logic should fire. By default the
28969962b56SMatthew Ahrens  * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
29069962b56SMatthew Ahrens  * Secondly, the value determines if an I/O is considered "hung". Any I/O that
29169962b56SMatthew Ahrens  * has not completed in zfs_deadman_synctime_ms is considered "hung", resulting
29269962b56SMatthew Ahrens  * in a system panic.
29369962b56SMatthew Ahrens  */
29469962b56SMatthew Ahrens uint64_t zfs_deadman_synctime_ms = 1000000ULL;
295283b8460SGeorge.Wilson 
296283b8460SGeorge.Wilson /*
29769962b56SMatthew Ahrens  * Check time in milliseconds. This defines the frequency at which we check
29869962b56SMatthew Ahrens  * for hung I/O.
299283b8460SGeorge.Wilson  */
30069962b56SMatthew Ahrens uint64_t zfs_deadman_checktime_ms = 5000ULL;
301283b8460SGeorge.Wilson 
302283b8460SGeorge.Wilson /*
303283b8460SGeorge.Wilson  * Override the zfs deadman behavior via /etc/system. By default the
304283b8460SGeorge.Wilson  * deadman is enabled except on VMware and sparc deployments.
305283b8460SGeorge.Wilson  */
306283b8460SGeorge.Wilson int zfs_deadman_enabled = -1;
307283b8460SGeorge.Wilson 
30869962b56SMatthew Ahrens /*
30969962b56SMatthew Ahrens  * The worst case is single-sector max-parity RAID-Z blocks, in which
31069962b56SMatthew Ahrens  * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
31169962b56SMatthew Ahrens  * times the size; so just assume that.  Add to this the fact that
31269962b56SMatthew Ahrens  * we can have up to 3 DVAs per bp, and one more factor of 2 because
31369962b56SMatthew Ahrens  * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
31469962b56SMatthew Ahrens  * the worst case is:
31569962b56SMatthew Ahrens  *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 4 * 3 * 2 == 24
31669962b56SMatthew Ahrens  */
31769962b56SMatthew Ahrens int spa_asize_inflation = 24;
318fa9e4066Sahrens 
3197d46dc6cSMatthew Ahrens /*
3207d46dc6cSMatthew Ahrens  * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
3217d46dc6cSMatthew Ahrens  * the pool to be consumed.  This ensures that we don't run the pool
3227d46dc6cSMatthew Ahrens  * completely out of space, due to unaccounted changes (e.g. to the MOS).
3237d46dc6cSMatthew Ahrens  * It also limits the worst-case time to allocate space.  If we have
3247d46dc6cSMatthew Ahrens  * less than this amount of free space, most ZPL operations (e.g. write,
3257d46dc6cSMatthew Ahrens  * create) will return ENOSPC.
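 * (For illustration: with the default spa_slop_shift of 5, the reserved
 * fraction is 1/(2^5) = 1/32 of the pool, i.e. the ~3.2% noted above.)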
3267d46dc6cSMatthew Ahrens  *
3277d46dc6cSMatthew Ahrens  * Certain operations (e.g. file removal, most administrative actions) can
3287d46dc6cSMatthew Ahrens  * use half the slop space.  They will only return ENOSPC if less than half
3297d46dc6cSMatthew Ahrens  * the slop space is free.  Typically, once the pool has less than the slop
3307d46dc6cSMatthew Ahrens  * space free, the user will use these operations to free up space in the pool.
3317d46dc6cSMatthew Ahrens  * These are the operations that call dsl_pool_adjustedsize() with the netfree
3327d46dc6cSMatthew Ahrens  * argument set to TRUE.
3337d46dc6cSMatthew Ahrens  *
3347d46dc6cSMatthew Ahrens  * A very restricted set of operations are always permitted, regardless of
3357d46dc6cSMatthew Ahrens  * the amount of free space.  These are the operations that call
3367d46dc6cSMatthew Ahrens  * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
3377d46dc6cSMatthew Ahrens  * operations result in a net increase in the amount of space used,
3387d46dc6cSMatthew Ahrens  * it is possible to run the pool completely out of space, causing it to
3397d46dc6cSMatthew Ahrens  * be permanently read-only.
3407d46dc6cSMatthew Ahrens  *
3417d46dc6cSMatthew Ahrens  * See also the comments in zfs_space_check_t.
3427d46dc6cSMatthew Ahrens  */
3437d46dc6cSMatthew Ahrens int spa_slop_shift = 5;
3447d46dc6cSMatthew Ahrens 
345e05725b1Sbonwick /*
346e05725b1Sbonwick  * ==========================================================================
347e05725b1Sbonwick  * SPA config locking
348e05725b1Sbonwick  * ==========================================================================
349e05725b1Sbonwick  */
350e05725b1Sbonwick static void
351e14bb325SJeff Bonwick spa_config_lock_init(spa_t *spa)
352e14bb325SJeff Bonwick {
353e14bb325SJeff Bonwick 	for (int i = 0; i < SCL_LOCKS; i++) {
354e14bb325SJeff Bonwick 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
355e14bb325SJeff Bonwick 		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
356e14bb325SJeff Bonwick 		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
3573b2aab18SMatthew Ahrens 		refcount_create_untracked(&scl->scl_count);
358e14bb325SJeff Bonwick 		scl->scl_writer = NULL;
359e14bb325SJeff Bonwick 		scl->scl_write_wanted = 0;
360e14bb325SJeff Bonwick 	}
361e05725b1Sbonwick }
362e05725b1Sbonwick 
363e05725b1Sbonwick static void
364e14bb325SJeff Bonwick spa_config_lock_destroy(spa_t *spa)
365e14bb325SJeff Bonwick {
366e14bb325SJeff Bonwick 	for (int i = 0; i < SCL_LOCKS; i++) {
367e14bb325SJeff Bonwick 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
368e14bb325SJeff Bonwick 		mutex_destroy(&scl->scl_lock);
369e14bb325SJeff Bonwick 		cv_destroy(&scl->scl_cv);
370e14bb325SJeff Bonwick 		refcount_destroy(&scl->scl_count);
371e14bb325SJeff Bonwick 		ASSERT(scl->scl_writer == NULL);
372e14bb325SJeff Bonwick 		ASSERT(scl->scl_write_wanted == 0);
373e14bb325SJeff Bonwick 	}
374e14bb325SJeff Bonwick }
375e14bb325SJeff Bonwick 
376e14bb325SJeff Bonwick int
377e14bb325SJeff Bonwick spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
378e05725b1Sbonwick {
379e14bb325SJeff Bonwick 	for (int i = 0; i < SCL_LOCKS; i++) {
380e14bb325SJeff Bonwick 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
381e14bb325SJeff Bonwick 		if (!(locks & (1 << i)))
382e14bb325SJeff Bonwick 			continue;
383e14bb325SJeff Bonwick 		mutex_enter(&scl->scl_lock);
384e14bb325SJeff Bonwick 		if (rw == RW_READER) {
385e14bb325SJeff Bonwick 			if (scl->scl_writer || scl->scl_write_wanted) {
386e14bb325SJeff Bonwick 				mutex_exit(&scl->scl_lock);
387e14bb325SJeff Bonwick 				spa_config_exit(spa, locks ^ (1 << i), tag);
388e14bb325SJeff Bonwick 				return (0);
389e14bb325SJeff Bonwick 			}
390e14bb325SJeff Bonwick 		} else {
391e14bb325SJeff Bonwick 			ASSERT(scl->scl_writer != curthread);
392e14bb325SJeff Bonwick 			if (!refcount_is_zero(&scl->scl_count)) {
393e14bb325SJeff Bonwick 				mutex_exit(&scl->scl_lock);
394e14bb325SJeff Bonwick 				spa_config_exit(spa, locks ^ (1 << i), tag);
395e14bb325SJeff Bonwick 				return (0);
396e14bb325SJeff Bonwick 			}
397e14bb325SJeff Bonwick 			scl->scl_writer = curthread;
398e14bb325SJeff Bonwick 		}
399e14bb325SJeff Bonwick 		(void) refcount_add(&scl->scl_count, tag);
400e14bb325SJeff Bonwick 		mutex_exit(&scl->scl_lock);
401e14bb325SJeff Bonwick 	}
402e14bb325SJeff Bonwick 	return (1);
403e05725b1Sbonwick }
404e05725b1Sbonwick 
405e05725b1Sbonwick void
406e14bb325SJeff Bonwick spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
407e05725b1Sbonwick {
408f64c0e34SEric Taylor 	int wlocks_held = 0;
409f64c0e34SEric Taylor 
4103b2aab18SMatthew Ahrens 	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
4113b2aab18SMatthew Ahrens 
412e14bb325SJeff Bonwick 	for (int i = 0; i < SCL_LOCKS; i++) {
413e14bb325SJeff Bonwick 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
414f64c0e34SEric Taylor 		if (scl->scl_writer == curthread)
415f64c0e34SEric Taylor 			wlocks_held |= (1 << i);
416e14bb325SJeff Bonwick 		if (!(locks & (1 << i)))
417e14bb325SJeff Bonwick 			continue;
418e14bb325SJeff Bonwick 		mutex_enter(&scl->scl_lock);
419e14bb325SJeff Bonwick 		if (rw == RW_READER) {
420e14bb325SJeff Bonwick 			while (scl->scl_writer || scl->scl_write_wanted) {
421e14bb325SJeff Bonwick 				cv_wait(&scl->scl_cv, &scl->scl_lock);
422e14bb325SJeff Bonwick 			}
423e14bb325SJeff Bonwick 		} else {
424e14bb325SJeff Bonwick 			ASSERT(scl->scl_writer != curthread);
425e14bb325SJeff Bonwick 			while (!refcount_is_zero(&scl->scl_count)) {
426e14bb325SJeff Bonwick 				scl->scl_write_wanted++;
427e14bb325SJeff Bonwick 				cv_wait(&scl->scl_cv, &scl->scl_lock);
428e14bb325SJeff Bonwick 				scl->scl_write_wanted--;
429e14bb325SJeff Bonwick 			}
430e14bb325SJeff Bonwick 			scl->scl_writer = curthread;
431e14bb325SJeff Bonwick 		}
432e14bb325SJeff Bonwick 		(void) refcount_add(&scl->scl_count, tag);
433e14bb325SJeff Bonwick 		mutex_exit(&scl->scl_lock);
434e05725b1Sbonwick 	}
435f64c0e34SEric Taylor 	ASSERT(wlocks_held <= locks);
436e05725b1Sbonwick }
437e05725b1Sbonwick 
438e05725b1Sbonwick void
439e14bb325SJeff Bonwick spa_config_exit(spa_t *spa, int locks, void *tag)
440e05725b1Sbonwick {
441e14bb325SJeff Bonwick 	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
442e14bb325SJeff Bonwick 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
443e14bb325SJeff Bonwick 		if (!(locks & (1 << i)))
444e14bb325SJeff Bonwick 			continue;
445e14bb325SJeff Bonwick 		mutex_enter(&scl->scl_lock);
446e14bb325SJeff Bonwick 		ASSERT(!refcount_is_zero(&scl->scl_count));
447e14bb325SJeff Bonwick 		if (refcount_remove(&scl->scl_count, tag) == 0) {
448e14bb325SJeff Bonwick 			ASSERT(scl->scl_writer == NULL ||
449e14bb325SJeff Bonwick 			    scl->scl_writer == curthread);
450e14bb325SJeff Bonwick 			scl->scl_writer = NULL;	/* OK in either case */
451e14bb325SJeff Bonwick 			cv_broadcast(&scl->scl_cv);
452e14bb325SJeff Bonwick 		}
453e14bb325SJeff Bonwick 		mutex_exit(&scl->scl_lock);
454e05725b1Sbonwick 	}
455e05725b1Sbonwick }
456e05725b1Sbonwick 
457e14bb325SJeff Bonwick int
458e14bb325SJeff Bonwick spa_config_held(spa_t *spa, int locks, krw_t rw)
459e05725b1Sbonwick {
460e14bb325SJeff Bonwick 	int locks_held = 0;
461e05725b1Sbonwick 
462e14bb325SJeff Bonwick 	for (int i = 0; i < SCL_LOCKS; i++) {
463e14bb325SJeff Bonwick 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
464e14bb325SJeff Bonwick 		if (!(locks & (1 << i)))
465e14bb325SJeff Bonwick 			continue;
466e14bb325SJeff Bonwick 		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
467e14bb325SJeff Bonwick 		    (rw == RW_WRITER && scl->scl_writer == curthread))
468e14bb325SJeff Bonwick 			locks_held |= 1 << i;
469e14bb325SJeff Bonwick 	}
470e14bb325SJeff Bonwick 
471e14bb325SJeff Bonwick 	return (locks_held);
472e05725b1Sbonwick }
473e05725b1Sbonwick 
474fa9e4066Sahrens /*
475fa9e4066Sahrens  * ==========================================================================
476fa9e4066Sahrens  * SPA namespace functions
477fa9e4066Sahrens  * ==========================================================================
478fa9e4066Sahrens  */
479fa9e4066Sahrens 
480fa9e4066Sahrens /*
481fa9e4066Sahrens  * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
482fa9e4066Sahrens  * Returns NULL if no matching spa_t is found.
483fa9e4066Sahrens  */
484fa9e4066Sahrens spa_t *
485fa9e4066Sahrens spa_lookup(const char *name)
486fa9e4066Sahrens {
487e14bb325SJeff Bonwick 	static spa_t search;	/* spa_t is large; don't allocate on stack */
488e14bb325SJeff Bonwick 	spa_t *spa;
489fa9e4066Sahrens 	avl_index_t where;
49040feaa91Sahrens 	char *cp;
491fa9e4066Sahrens 
492fa9e4066Sahrens 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
493fa9e4066Sahrens 
4943b2aab18SMatthew Ahrens 	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
4953b2aab18SMatthew Ahrens 
49640feaa91Sahrens 	/*
49740feaa91Sahrens 	 * If it's a full dataset name, figure out the pool name and
49840feaa91Sahrens 	 * just use that.
49940feaa91Sahrens 	 */
50078f17100SMatthew Ahrens 	cp = strpbrk(search.spa_name, "/@#");
5013b2aab18SMatthew Ahrens 	if (cp != NULL)
50240feaa91Sahrens 		*cp = '\0';
50340feaa91Sahrens 
504fa9e4066Sahrens 	spa = avl_find(&spa_namespace_avl, &search, &where);
505fa9e4066Sahrens 
506fa9e4066Sahrens 	return (spa);
507fa9e4066Sahrens }
508fa9e4066Sahrens 
509283b8460SGeorge.Wilson /*
510283b8460SGeorge.Wilson  * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
511283b8460SGeorge.Wilson  * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
512283b8460SGeorge.Wilson  * looking for potentially hung I/Os.
513283b8460SGeorge.Wilson  */
514283b8460SGeorge.Wilson void
515283b8460SGeorge.Wilson spa_deadman(void *arg)
516283b8460SGeorge.Wilson {
517283b8460SGeorge.Wilson 	spa_t *spa = arg;
518283b8460SGeorge.Wilson 
5190713e232SGeorge Wilson 	/*
5200713e232SGeorge Wilson 	 * Disable the deadman timer if the pool is suspended.
5210713e232SGeorge Wilson 	 */
5220713e232SGeorge Wilson 	if (spa_suspended(spa)) {
5230713e232SGeorge Wilson 		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
5240713e232SGeorge Wilson 		return;
5250713e232SGeorge Wilson 	}
5260713e232SGeorge Wilson 
527283b8460SGeorge.Wilson 	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
528283b8460SGeorge.Wilson 	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
529283b8460SGeorge.Wilson 	    ++spa->spa_deadman_calls);
530283b8460SGeorge.Wilson 	if (zfs_deadman_enabled)
531283b8460SGeorge.Wilson 		vdev_deadman(spa->spa_root_vdev);
532283b8460SGeorge.Wilson }
533283b8460SGeorge.Wilson 
534fa9e4066Sahrens /*
535fa9e4066Sahrens  * Create an uninitialized spa_t with the given name.  Requires
536fa9e4066Sahrens  * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
537fa9e4066Sahrens  * exist by calling spa_lookup() first.
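 *
 * For illustration only (the pool name "tank" and config are assumptions):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup("tank") == NULL)
 *		spa = spa_add("tank", config, NULL);
 *	mutex_exit(&spa_namespace_lock);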
538fa9e4066Sahrens  */
539fa9e4066Sahrens spa_t *
540468c413aSTim Haley spa_add(const char *name, nvlist_t *config, const char *altroot)
541fa9e4066Sahrens {
542fa9e4066Sahrens 	spa_t *spa;
543c5904d13Seschrock 	spa_config_dirent_t *dp;
544283b8460SGeorge.Wilson 	cyc_handler_t hdlr;
545283b8460SGeorge.Wilson 	cyc_time_t when;
546fa9e4066Sahrens 
547fa9e4066Sahrens 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
548fa9e4066Sahrens 
549fa9e4066Sahrens 	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
550fa9e4066Sahrens 
551c25056deSgw 	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
552c25056deSgw 	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
55335a5a358SJonathan Adams 	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
554c25056deSgw 	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
55535a5a358SJonathan Adams 	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
556c25056deSgw 	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
55735a5a358SJonathan Adams 	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
558a1521560SJeff Bonwick 	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
559a1521560SJeff Bonwick 	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
560c3a66015SMatthew Ahrens 	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
561c25056deSgw 
562c25056deSgw 	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
56335a5a358SJonathan Adams 	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
564c25056deSgw 	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
565e14bb325SJeff Bonwick 	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
566c25056deSgw 
567b24ab676SJeff Bonwick 	for (int t = 0; t < TXG_SIZE; t++)
568cde58dbcSMatthew Ahrens 		bplist_create(&spa->spa_free_bplist[t]);
569b24ab676SJeff Bonwick 
570e14bb325SJeff Bonwick 	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
571fa9e4066Sahrens 	spa->spa_state = POOL_STATE_UNINITIALIZED;
572fa9e4066Sahrens 	spa->spa_freeze_txg = UINT64_MAX;
5730373e76bSbonwick 	spa->spa_final_txg = UINT64_MAX;
574468c413aSTim Haley 	spa->spa_load_max_txg = UINT64_MAX;
57535a5a358SJonathan Adams 	spa->spa_proc = &p0;
57635a5a358SJonathan Adams 	spa->spa_proc_state = SPA_PROC_NONE;
577fa9e4066Sahrens 
578283b8460SGeorge.Wilson 	hdlr.cyh_func = spa_deadman;
579283b8460SGeorge.Wilson 	hdlr.cyh_arg = spa;
580283b8460SGeorge.Wilson 	hdlr.cyh_level = CY_LOW_LEVEL;
581283b8460SGeorge.Wilson 
58269962b56SMatthew Ahrens 	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
583283b8460SGeorge.Wilson 
584283b8460SGeorge.Wilson 	/*
585283b8460SGeorge.Wilson 	 * This determines how often we need to check for hung I/Os after
586283b8460SGeorge.Wilson 	 * the cyclic has already fired. Since checking for hung I/Os is
587283b8460SGeorge.Wilson 	 * an expensive operation, we don't want to check too frequently.
58869962b56SMatthew Ahrens 	 * Instead, wait for 5 seconds before checking again.
589283b8460SGeorge.Wilson 	 */
59069962b56SMatthew Ahrens 	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
591283b8460SGeorge.Wilson 	when.cyt_when = CY_INFINITY;
592283b8460SGeorge.Wilson 	mutex_enter(&cpu_lock);
593283b8460SGeorge.Wilson 	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
594283b8460SGeorge.Wilson 	mutex_exit(&cpu_lock);
595283b8460SGeorge.Wilson 
596fa9e4066Sahrens 	refcount_create(&spa->spa_refcount);
597e14bb325SJeff Bonwick 	spa_config_lock_init(spa);
598fa9e4066Sahrens 
599fa9e4066Sahrens 	avl_add(&spa_namespace_avl, spa);
600fa9e4066Sahrens 
6010373e76bSbonwick 	/*
6020373e76bSbonwick 	 * Set the alternate root, if there is one.
6030373e76bSbonwick 	 */
6040373e76bSbonwick 	if (altroot) {
6050373e76bSbonwick 		spa->spa_root = spa_strdup(altroot);
6060373e76bSbonwick 		spa_active_count++;
6070373e76bSbonwick 	}
6080373e76bSbonwick 
609c5904d13Seschrock 	/*
610c5904d13Seschrock 	 * Every pool starts with the default cachefile
611c5904d13Seschrock 	 */
612c5904d13Seschrock 	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
613c5904d13Seschrock 	    offsetof(spa_config_dirent_t, scd_link));
614c5904d13Seschrock 
615c5904d13Seschrock 	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
616ef912c80STim Haley 	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
617c5904d13Seschrock 	list_insert_head(&spa->spa_config_list, dp);
618c5904d13Seschrock 
6194b964adaSGeorge Wilson 	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
6204b964adaSGeorge Wilson 	    KM_SLEEP) == 0);
6214b964adaSGeorge Wilson 
622ad135b5dSChristopher Siden 	if (config != NULL) {
623ad135b5dSChristopher Siden 		nvlist_t *features;
624ad135b5dSChristopher Siden 
625ad135b5dSChristopher Siden 		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
626ad135b5dSChristopher Siden 		    &features) == 0) {
627ad135b5dSChristopher Siden 			VERIFY(nvlist_dup(features, &spa->spa_label_features,
628ad135b5dSChristopher Siden 			    0) == 0);
629ad135b5dSChristopher Siden 		}
630ad135b5dSChristopher Siden 
631468c413aSTim Haley 		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
632ad135b5dSChristopher Siden 	}
633ad135b5dSChristopher Siden 
634ad135b5dSChristopher Siden 	if (spa->spa_label_features == NULL) {
635ad135b5dSChristopher Siden 		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
636ad135b5dSChristopher Siden 		    KM_SLEEP) == 0);
637ad135b5dSChristopher Siden 	}
638468c413aSTim Haley 
639c3a66015SMatthew Ahrens 	spa->spa_iokstat = kstat_create("zfs", 0, name,
640c3a66015SMatthew Ahrens 	    "disk", KSTAT_TYPE_IO, 1, 0);
641c3a66015SMatthew Ahrens 	if (spa->spa_iokstat) {
642c3a66015SMatthew Ahrens 		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
643c3a66015SMatthew Ahrens 		kstat_install(spa->spa_iokstat);
644c3a66015SMatthew Ahrens 	}
645c3a66015SMatthew Ahrens 
6463b2aab18SMatthew Ahrens 	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);
6473b2aab18SMatthew Ahrens 
64843466aaeSMax Grossman 	/*
64943466aaeSMax Grossman 	 * As a pool is being created, treat all features as disabled by
65043466aaeSMax Grossman 	 * setting SPA_FEATURE_DISABLED for all entries in the feature
65143466aaeSMax Grossman 	 * refcount cache.
65243466aaeSMax Grossman 	 */
65343466aaeSMax Grossman 	for (int i = 0; i < SPA_FEATURES; i++) {
65443466aaeSMax Grossman 		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
65543466aaeSMax Grossman 	}
65643466aaeSMax Grossman 
657fa9e4066Sahrens 	return (spa);
658fa9e4066Sahrens }
659fa9e4066Sahrens 
660fa9e4066Sahrens /*
661fa9e4066Sahrens  * Removes a spa_t from the namespace, freeing up any memory used.  Requires
662fa9e4066Sahrens  * spa_namespace_lock.  This is called only after the spa_t has been closed and
663fa9e4066Sahrens  * deactivated.
664fa9e4066Sahrens  */
665fa9e4066Sahrens void
666fa9e4066Sahrens spa_remove(spa_t *spa)
667fa9e4066Sahrens {
668c5904d13Seschrock 	spa_config_dirent_t *dp;
669c5904d13Seschrock 
670fa9e4066Sahrens 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
671fa9e4066Sahrens 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
672fa9e4066Sahrens 
6731195e687SMark J Musante 	nvlist_free(spa->spa_config_splitting);
6741195e687SMark J Musante 
675fa9e4066Sahrens 	avl_remove(&spa_namespace_avl, spa);
676fa9e4066Sahrens 	cv_broadcast(&spa_namespace_cv);
677fa9e4066Sahrens 
6780373e76bSbonwick 	if (spa->spa_root) {
679fa9e4066Sahrens 		spa_strfree(spa->spa_root);
6800373e76bSbonwick 		spa_active_count--;
6810373e76bSbonwick 	}
682fa9e4066Sahrens 
683c5904d13Seschrock 	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
684c5904d13Seschrock 		list_remove(&spa->spa_config_list, dp);
685c5904d13Seschrock 		if (dp->scd_path != NULL)
686c5904d13Seschrock 			spa_strfree(dp->scd_path);
687c5904d13Seschrock 		kmem_free(dp, sizeof (spa_config_dirent_t));
688c5904d13Seschrock 	}
689c5904d13Seschrock 
690c5904d13Seschrock 	list_destroy(&spa->spa_config_list);
6912f8aaab3Seschrock 
692ad135b5dSChristopher Siden 	nvlist_free(spa->spa_label_features);
6934b964adaSGeorge Wilson 	nvlist_free(spa->spa_load_info);
694fa9e4066Sahrens 	spa_config_set(spa, NULL);
695fa9e4066Sahrens 
696283b8460SGeorge.Wilson 	mutex_enter(&cpu_lock);
697283b8460SGeorge.Wilson 	if (spa->spa_deadman_cycid != CYCLIC_NONE)
698283b8460SGeorge.Wilson 		cyclic_remove(spa->spa_deadman_cycid);
699283b8460SGeorge.Wilson 	mutex_exit(&cpu_lock);
700283b8460SGeorge.Wilson 	spa->spa_deadman_cycid = CYCLIC_NONE;
701283b8460SGeorge.Wilson 
702fa9e4066Sahrens 	refcount_destroy(&spa->spa_refcount);
70391ebeef5Sahrens 
704e14bb325SJeff Bonwick 	spa_config_lock_destroy(spa);
705fa9e4066Sahrens 
706c3a66015SMatthew Ahrens 	kstat_delete(spa->spa_iokstat);
707c3a66015SMatthew Ahrens 	spa->spa_iokstat = NULL;
708c3a66015SMatthew Ahrens 
709b24ab676SJeff Bonwick 	for (int t = 0; t < TXG_SIZE; t++)
710cde58dbcSMatthew Ahrens 		bplist_destroy(&spa->spa_free_bplist[t]);
711b24ab676SJeff Bonwick 
712c25056deSgw 	cv_destroy(&spa->spa_async_cv);
71335a5a358SJonathan Adams 	cv_destroy(&spa->spa_proc_cv);
714c25056deSgw 	cv_destroy(&spa->spa_scrub_io_cv);
715e14bb325SJeff Bonwick 	cv_destroy(&spa->spa_suspend_cv);
716c25056deSgw 
7175ad82045Snd 	mutex_destroy(&spa->spa_async_lock);
718c25056deSgw 	mutex_destroy(&spa->spa_errlist_lock);
71935a5a358SJonathan Adams 	mutex_destroy(&spa->spa_errlog_lock);
72006eeb2adSek 	mutex_destroy(&spa->spa_history_lock);
72135a5a358SJonathan Adams 	mutex_destroy(&spa->spa_proc_lock);
722b1b8ab34Slling 	mutex_destroy(&spa->spa_props_lock);
72335a5a358SJonathan Adams 	mutex_destroy(&spa->spa_scrub_lock);
724e14bb325SJeff Bonwick 	mutex_destroy(&spa->spa_suspend_lock);
725a1521560SJeff Bonwick 	mutex_destroy(&spa->spa_vdev_top_lock);
726c3a66015SMatthew Ahrens 	mutex_destroy(&spa->spa_iokstat_lock);
7275ad82045Snd 
728fa9e4066Sahrens 	kmem_free(spa, sizeof (spa_t));
729fa9e4066Sahrens }
730fa9e4066Sahrens 
731fa9e4066Sahrens /*
732fa9e4066Sahrens  * Given a pool, return the next pool in the namespace, or NULL if there is
733fa9e4066Sahrens  * none.  If 'prev' is NULL, return the first pool.
734fa9e4066Sahrens  */
735fa9e4066Sahrens spa_t *
736fa9e4066Sahrens spa_next(spa_t *prev)
737fa9e4066Sahrens {
738fa9e4066Sahrens 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
739fa9e4066Sahrens 
740fa9e4066Sahrens 	if (prev)
741fa9e4066Sahrens 		return (AVL_NEXT(&spa_namespace_avl, prev));
742fa9e4066Sahrens 	else
743fa9e4066Sahrens 		return (avl_first(&spa_namespace_avl));
744fa9e4066Sahrens }
745fa9e4066Sahrens 
746fa9e4066Sahrens /*
747fa9e4066Sahrens  * ==========================================================================
748fa9e4066Sahrens  * SPA refcount functions
749fa9e4066Sahrens  * ==========================================================================
750fa9e4066Sahrens  */
751fa9e4066Sahrens 
752fa9e4066Sahrens /*
753fa9e4066Sahrens  * Add a reference to the given spa_t.  Must have at least one reference, or
754fa9e4066Sahrens  * have the namespace lock held.
755fa9e4066Sahrens  */
756fa9e4066Sahrens void
757fa9e4066Sahrens spa_open_ref(spa_t *spa, void *tag)
758fa9e4066Sahrens {
759088f3894Sahrens 	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
760fa9e4066Sahrens 	    MUTEX_HELD(&spa_namespace_lock));
761fa9e4066Sahrens 	(void) refcount_add(&spa->spa_refcount, tag);
762fa9e4066Sahrens }
763fa9e4066Sahrens 
764fa9e4066Sahrens /*
765fa9e4066Sahrens  * Remove a reference to the given spa_t.  Must have at least one reference, or
766fa9e4066Sahrens  * have the namespace lock held.
767fa9e4066Sahrens  */
768fa9e4066Sahrens void
769fa9e4066Sahrens spa_close(spa_t *spa, void *tag)
770fa9e4066Sahrens {
771088f3894Sahrens 	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
772fa9e4066Sahrens 	    MUTEX_HELD(&spa_namespace_lock));
773fa9e4066Sahrens 	(void) refcount_remove(&spa->spa_refcount, tag);
774fa9e4066Sahrens }
775fa9e4066Sahrens 
776fa9e4066Sahrens /*
777fa9e4066Sahrens  * Check to see if the spa refcount is zero.  Must be called with
778088f3894Sahrens  * spa_namespace_lock held.  We really compare against spa_minref, which is the
779fa9e4066Sahrens  * number of references acquired when opening a pool.
780fa9e4066Sahrens  */
781fa9e4066Sahrens boolean_t
782fa9e4066Sahrens spa_refcount_zero(spa_t *spa)
783fa9e4066Sahrens {
784fa9e4066Sahrens 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
785fa9e4066Sahrens 
786088f3894Sahrens 	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
787fa9e4066Sahrens }
788fa9e4066Sahrens 
78999653d4eSeschrock /*
79099653d4eSeschrock  * ==========================================================================
791fa94a07fSbrendan  * SPA spare and l2cache tracking
79299653d4eSeschrock  * ==========================================================================
79399653d4eSeschrock  */
79499653d4eSeschrock 
795fa94a07fSbrendan /*
796fa94a07fSbrendan  * Hot spares and cache devices are tracked using the same code below,
797fa94a07fSbrendan  * for 'auxiliary' devices.
798fa94a07fSbrendan  */
799fa94a07fSbrendan 
800fa94a07fSbrendan typedef struct spa_aux {
801fa94a07fSbrendan 	uint64_t	aux_guid;
802fa94a07fSbrendan 	uint64_t	aux_pool;
803fa94a07fSbrendan 	avl_node_t	aux_avl;
804fa94a07fSbrendan 	int		aux_count;
805fa94a07fSbrendan } spa_aux_t;
806fa94a07fSbrendan 
807fa94a07fSbrendan static int
808fa94a07fSbrendan spa_aux_compare(const void *a, const void *b)
809fa94a07fSbrendan {
810fa94a07fSbrendan 	const spa_aux_t *sa = a;
811fa94a07fSbrendan 	const spa_aux_t *sb = b;
812fa94a07fSbrendan 
813fa94a07fSbrendan 	if (sa->aux_guid < sb->aux_guid)
814fa94a07fSbrendan 		return (-1);
815fa94a07fSbrendan 	else if (sa->aux_guid > sb->aux_guid)
816fa94a07fSbrendan 		return (1);
817fa94a07fSbrendan 	else
818fa94a07fSbrendan 		return (0);
819fa94a07fSbrendan }
820fa94a07fSbrendan 
821fa94a07fSbrendan void
822fa94a07fSbrendan spa_aux_add(vdev_t *vd, avl_tree_t *avl)
823fa94a07fSbrendan {
824fa94a07fSbrendan 	avl_index_t where;
825fa94a07fSbrendan 	spa_aux_t search;
826fa94a07fSbrendan 	spa_aux_t *aux;
827fa94a07fSbrendan 
828fa94a07fSbrendan 	search.aux_guid = vd->vdev_guid;
829fa94a07fSbrendan 	if ((aux = avl_find(avl, &search, &where)) != NULL) {
830fa94a07fSbrendan 		aux->aux_count++;
831fa94a07fSbrendan 	} else {
832fa94a07fSbrendan 		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
833fa94a07fSbrendan 		aux->aux_guid = vd->vdev_guid;
834fa94a07fSbrendan 		aux->aux_count = 1;
835fa94a07fSbrendan 		avl_insert(avl, aux, where);
836fa94a07fSbrendan 	}
837fa94a07fSbrendan }
838fa94a07fSbrendan 
839fa94a07fSbrendan void
840fa94a07fSbrendan spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
841fa94a07fSbrendan {
842fa94a07fSbrendan 	spa_aux_t search;
843fa94a07fSbrendan 	spa_aux_t *aux;
844fa94a07fSbrendan 	avl_index_t where;
845fa94a07fSbrendan 
846fa94a07fSbrendan 	search.aux_guid = vd->vdev_guid;
847fa94a07fSbrendan 	aux = avl_find(avl, &search, &where);
848fa94a07fSbrendan 
849fa94a07fSbrendan 	ASSERT(aux != NULL);
850fa94a07fSbrendan 
851fa94a07fSbrendan 	if (--aux->aux_count == 0) {
852fa94a07fSbrendan 		avl_remove(avl, aux);
853fa94a07fSbrendan 		kmem_free(aux, sizeof (spa_aux_t));
854fa94a07fSbrendan 	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
855fa94a07fSbrendan 		aux->aux_pool = 0ULL;
856fa94a07fSbrendan 	}
857fa94a07fSbrendan }
858fa94a07fSbrendan 
859fa94a07fSbrendan boolean_t
86089a89ebfSlling spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
861fa94a07fSbrendan {
862fa94a07fSbrendan 	spa_aux_t search, *found;
863fa94a07fSbrendan 
864fa94a07fSbrendan 	search.aux_guid = guid;
86589a89ebfSlling 	found = avl_find(avl, &search, NULL);
866fa94a07fSbrendan 
867fa94a07fSbrendan 	if (pool) {
868fa94a07fSbrendan 		if (found)
869fa94a07fSbrendan 			*pool = found->aux_pool;
870fa94a07fSbrendan 		else
871fa94a07fSbrendan 			*pool = 0ULL;
872fa94a07fSbrendan 	}
873fa94a07fSbrendan 
87489a89ebfSlling 	if (refcnt) {
87589a89ebfSlling 		if (found)
87689a89ebfSlling 			*refcnt = found->aux_count;
87789a89ebfSlling 		else
87889a89ebfSlling 			*refcnt = 0;
87989a89ebfSlling 	}
88089a89ebfSlling 
881fa94a07fSbrendan 	return (found != NULL);
882fa94a07fSbrendan }
883fa94a07fSbrendan 
884fa94a07fSbrendan void
885fa94a07fSbrendan spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
886fa94a07fSbrendan {
887fa94a07fSbrendan 	spa_aux_t search, *found;
888fa94a07fSbrendan 	avl_index_t where;
889fa94a07fSbrendan 
890fa94a07fSbrendan 	search.aux_guid = vd->vdev_guid;
891fa94a07fSbrendan 	found = avl_find(avl, &search, &where);
892fa94a07fSbrendan 	ASSERT(found != NULL);
893fa94a07fSbrendan 	ASSERT(found->aux_pool == 0ULL);
894fa94a07fSbrendan 
895fa94a07fSbrendan 	found->aux_pool = spa_guid(vd->vdev_spa);
896fa94a07fSbrendan }
897fa94a07fSbrendan 
89899653d4eSeschrock /*
89939c23413Seschrock  * Spares are tracked globally due to the following constraints:
90039c23413Seschrock  *
90139c23413Seschrock  * 	- A spare may be part of multiple pools.
90239c23413Seschrock  * 	- A spare may be added to a pool even if it's actively in use within
90339c23413Seschrock  *	  another pool.
90439c23413Seschrock  * 	- A spare in use in any pool can only be the source of a replacement if
90539c23413Seschrock  *	  the target is a spare in the same pool.
90639c23413Seschrock  *
90739c23413Seschrock  * We keep track of all spares on the system through the use of a reference
90839c23413Seschrock  * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
90939c23413Seschrock  * spare, then we bump the reference count in the AVL tree.  In addition, we set
91039c23413Seschrock  * the 'vdev_isspare' member to indicate that the device is a spare (active or
91139c23413Seschrock  * inactive).  When a spare is made active (used to replace a device in the
91239c23413Seschrock  * pool), we also keep track of which pool it's been made a part of.
91339c23413Seschrock  *
91439c23413Seschrock  * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
91539c23413Seschrock  * called under the spa_namespace lock as part of vdev reconfiguration.  The
91639c23413Seschrock  * separate spare lock exists for the status query path, which does not need to
91739c23413Seschrock  * be completely consistent with respect to other vdev configuration changes.
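 *
 * For illustration only, an assumed sequence as seen through this interface
 * when a device is used as a spare ('pool' and 'refcnt' are hypothetical
 * locals):
 *
 *	spa_spare_add(vd);
 *	spa_spare_activate(vd);
 *	(void) spa_spare_exists(vd->vdev_guid, &pool, &refcnt);
 *	spa_spare_remove(vd);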
91899653d4eSeschrock  */
91939c23413Seschrock 
92099653d4eSeschrock static int
92199653d4eSeschrock spa_spare_compare(const void *a, const void *b)
92299653d4eSeschrock {
923fa94a07fSbrendan 	return (spa_aux_compare(a, b));
92499653d4eSeschrock }
92599653d4eSeschrock 
92699653d4eSeschrock void
92739c23413Seschrock spa_spare_add(vdev_t *vd)
92899653d4eSeschrock {
92999653d4eSeschrock 	mutex_enter(&spa_spare_lock);
93039c23413Seschrock 	ASSERT(!vd->vdev_isspare);
931fa94a07fSbrendan 	spa_aux_add(vd, &spa_spare_avl);
93239c23413Seschrock 	vd->vdev_isspare = B_TRUE;
93399653d4eSeschrock 	mutex_exit(&spa_spare_lock);
93499653d4eSeschrock }
93599653d4eSeschrock 
93699653d4eSeschrock void
93739c23413Seschrock spa_spare_remove(vdev_t *vd)
93899653d4eSeschrock {
93999653d4eSeschrock 	mutex_enter(&spa_spare_lock);
94039c23413Seschrock 	ASSERT(vd->vdev_isspare);
941fa94a07fSbrendan 	spa_aux_remove(vd, &spa_spare_avl);
94239c23413Seschrock 	vd->vdev_isspare = B_FALSE;
94399653d4eSeschrock 	mutex_exit(&spa_spare_lock);
94499653d4eSeschrock }
94599653d4eSeschrock 
94699653d4eSeschrock boolean_t
94789a89ebfSlling spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
94899653d4eSeschrock {
949fa94a07fSbrendan 	boolean_t found;
95099653d4eSeschrock 
95199653d4eSeschrock 	mutex_enter(&spa_spare_lock);
95289a89ebfSlling 	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
95399653d4eSeschrock 	mutex_exit(&spa_spare_lock);
95499653d4eSeschrock 
955fa94a07fSbrendan 	return (found);
95639c23413Seschrock }
95739c23413Seschrock 
95839c23413Seschrock void
95939c23413Seschrock spa_spare_activate(vdev_t *vd)
96039c23413Seschrock {
96139c23413Seschrock 	mutex_enter(&spa_spare_lock);
96239c23413Seschrock 	ASSERT(vd->vdev_isspare);
963fa94a07fSbrendan 	spa_aux_activate(vd, &spa_spare_avl);
964fa94a07fSbrendan 	mutex_exit(&spa_spare_lock);
965fa94a07fSbrendan }
96639c23413Seschrock 
967fa94a07fSbrendan /*
968fa94a07fSbrendan  * Level 2 ARC devices are tracked globally for the same reasons as spares.
969fa94a07fSbrendan  * Cache devices currently only support one pool per cache device, and so
970fa94a07fSbrendan  * for these devices the aux reference count is currently unused beyond 1.
971fa94a07fSbrendan  */
97239c23413Seschrock 
973fa94a07fSbrendan static int
974fa94a07fSbrendan spa_l2cache_compare(const void *a, const void *b)
975fa94a07fSbrendan {
976fa94a07fSbrendan 	return (spa_aux_compare(a, b));
977fa94a07fSbrendan }
978fa94a07fSbrendan 
979fa94a07fSbrendan void
980fa94a07fSbrendan spa_l2cache_add(vdev_t *vd)
981fa94a07fSbrendan {
982fa94a07fSbrendan 	mutex_enter(&spa_l2cache_lock);
983fa94a07fSbrendan 	ASSERT(!vd->vdev_isl2cache);
984fa94a07fSbrendan 	spa_aux_add(vd, &spa_l2cache_avl);
985fa94a07fSbrendan 	vd->vdev_isl2cache = B_TRUE;
986fa94a07fSbrendan 	mutex_exit(&spa_l2cache_lock);
987fa94a07fSbrendan }
988fa94a07fSbrendan 
989fa94a07fSbrendan void
990fa94a07fSbrendan spa_l2cache_remove(vdev_t *vd)
991fa94a07fSbrendan {
992fa94a07fSbrendan 	mutex_enter(&spa_l2cache_lock);
993fa94a07fSbrendan 	ASSERT(vd->vdev_isl2cache);
994fa94a07fSbrendan 	spa_aux_remove(vd, &spa_l2cache_avl);
995fa94a07fSbrendan 	vd->vdev_isl2cache = B_FALSE;
996fa94a07fSbrendan 	mutex_exit(&spa_l2cache_lock);
997fa94a07fSbrendan }
998fa94a07fSbrendan 
999fa94a07fSbrendan boolean_t
1000fa94a07fSbrendan spa_l2cache_exists(uint64_t guid, uint64_t *pool)
1001fa94a07fSbrendan {
1002fa94a07fSbrendan 	boolean_t found;
1003fa94a07fSbrendan 
1004fa94a07fSbrendan 	mutex_enter(&spa_l2cache_lock);
100589a89ebfSlling 	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
1006fa94a07fSbrendan 	mutex_exit(&spa_l2cache_lock);
1007fa94a07fSbrendan 
1008fa94a07fSbrendan 	return (found);
1009fa94a07fSbrendan }
1010fa94a07fSbrendan 
1011fa94a07fSbrendan void
1012fa94a07fSbrendan spa_l2cache_activate(vdev_t *vd)
1013fa94a07fSbrendan {
1014fa94a07fSbrendan 	mutex_enter(&spa_l2cache_lock);
1015fa94a07fSbrendan 	ASSERT(vd->vdev_isl2cache);
1016fa94a07fSbrendan 	spa_aux_activate(vd, &spa_l2cache_avl);
1017fa94a07fSbrendan 	mutex_exit(&spa_l2cache_lock);
1018fa94a07fSbrendan }
1019fa94a07fSbrendan 
1020fa9e4066Sahrens /*
1021fa9e4066Sahrens  * ==========================================================================
1022fa9e4066Sahrens  * SPA vdev locking
1023fa9e4066Sahrens  * ==========================================================================
1024fa9e4066Sahrens  */
1025fa9e4066Sahrens 
1026fa9e4066Sahrens /*
1027ea8dc4b6Seschrock  * Lock the given spa_t for the purpose of adding or removing a vdev.
1028ea8dc4b6Seschrock  * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1029fa9e4066Sahrens  * It returns the next transaction group for the spa_t.
1030fa9e4066Sahrens  */
1031fa9e4066Sahrens uint64_t
1032fa9e4066Sahrens spa_vdev_enter(spa_t *spa)
1033fa9e4066Sahrens {
1034a1521560SJeff Bonwick 	mutex_enter(&spa->spa_vdev_top_lock);
1035bbfd46c4SJeff Bonwick 	mutex_enter(&spa_namespace_lock);
103688ecc943SGeorge Wilson 	return (spa_vdev_config_enter(spa));
103788ecc943SGeorge Wilson }
103888ecc943SGeorge Wilson 
103988ecc943SGeorge Wilson /*
104088ecc943SGeorge Wilson  * Internal implementation for spa_vdev_enter().  Used when a vdev
104188ecc943SGeorge Wilson  * operation requires multiple syncs (e.g. removing a device) while
104288ecc943SGeorge Wilson  * keeping the spa_namespace_lock held.
104388ecc943SGeorge Wilson  */
104488ecc943SGeorge Wilson uint64_t
104588ecc943SGeorge Wilson spa_vdev_config_enter(spa_t *spa)
104688ecc943SGeorge Wilson {
104788ecc943SGeorge Wilson 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
10483d7072f8Seschrock 
1049e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1050fa9e4066Sahrens 
1051fa9e4066Sahrens 	return (spa_last_synced_txg(spa) + 1);
1052fa9e4066Sahrens }
1053fa9e4066Sahrens 
1054fa9e4066Sahrens /*
105588ecc943SGeorge Wilson  * Used in combination with spa_vdev_config_enter() to allow the syncing
105688ecc943SGeorge Wilson  * of multiple transactions without releasing the spa_namespace_lock.
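 *
 * For illustration only, a sketch of a multi-txg operation (the FTAG tag and
 * the number of passes are assumptions; spa_namespace_lock is held
 * throughout):
 *
 *	uint64_t txg = spa_vdev_config_enter(spa);
 *	... first stage of the change ...
 *	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
 *	txg = spa_vdev_config_enter(spa);
 *	... second stage of the change ...
 *	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);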
1057fa9e4066Sahrens  */
105888ecc943SGeorge Wilson void
105988ecc943SGeorge Wilson spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
1060fa9e4066Sahrens {
106188ecc943SGeorge Wilson 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
106288ecc943SGeorge Wilson 
10630e34b6a7Sbonwick 	int config_changed = B_FALSE;
1064ea8dc4b6Seschrock 
10650373e76bSbonwick 	ASSERT(txg > spa_last_synced_txg(spa));
10660e34b6a7Sbonwick 
1067e14bb325SJeff Bonwick 	spa->spa_pending_vdev = NULL;
1068e14bb325SJeff Bonwick 
10690e34b6a7Sbonwick 	/*
10700e34b6a7Sbonwick 	 * Reassess the DTLs.
10710e34b6a7Sbonwick 	 */
10720373e76bSbonwick 	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
10730e34b6a7Sbonwick 
1074e14bb325SJeff Bonwick 	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
10750e34b6a7Sbonwick 		config_changed = B_TRUE;
10768f18d1faSGeorge Wilson 		spa->spa_config_generation++;
10770e34b6a7Sbonwick 	}
1078ea8dc4b6Seschrock 
107988ecc943SGeorge Wilson 	/*
108088ecc943SGeorge Wilson 	 * Verify the metaslab classes.
108188ecc943SGeorge Wilson 	 */
1082b24ab676SJeff Bonwick 	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
1083b24ab676SJeff Bonwick 	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
108488ecc943SGeorge Wilson 
1085e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, spa);
1086fa9e4066Sahrens 
108788ecc943SGeorge Wilson 	/*
108888ecc943SGeorge Wilson 	 * Panic the system if the specified tag requires it.  This
108988ecc943SGeorge Wilson 	 * is useful for ensuring that configurations are updated
109088ecc943SGeorge Wilson 	 * transactionally.
109188ecc943SGeorge Wilson 	 */
109288ecc943SGeorge Wilson 	if (zio_injection_enabled)
10931195e687SMark J Musante 		zio_handle_panic_injection(spa, tag, 0);
109488ecc943SGeorge Wilson 
1095fa9e4066Sahrens 	/*
1096fa9e4066Sahrens 	 * Note: this txg_wait_synced() is important because it ensures
1097fa9e4066Sahrens 	 * that there won't be more than one config change per txg.
1098fa9e4066Sahrens 	 * This allows us to use the txg as the generation number.
1099fa9e4066Sahrens 	 */
1100fa9e4066Sahrens 	if (error == 0)
1101fa9e4066Sahrens 		txg_wait_synced(spa->spa_dsl_pool, txg);
1102fa9e4066Sahrens 
1103fa9e4066Sahrens 	if (vd != NULL) {
11040713e232SGeorge Wilson 		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
11058ad4d6ddSJeff Bonwick 		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1106fa9e4066Sahrens 		vdev_free(vd);
11078ad4d6ddSJeff Bonwick 		spa_config_exit(spa, SCL_ALL, spa);
1108fa9e4066Sahrens 	}
1109fa9e4066Sahrens 
1110fa9e4066Sahrens 	/*
11110e34b6a7Sbonwick 	 * If the config changed, update the config cache.
1112fa9e4066Sahrens 	 */
11130e34b6a7Sbonwick 	if (config_changed)
1114c5904d13Seschrock 		spa_config_sync(spa, B_FALSE, B_TRUE);
111588ecc943SGeorge Wilson }
1116ea8dc4b6Seschrock 
111788ecc943SGeorge Wilson /*
111888ecc943SGeorge Wilson  * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
111988ecc943SGeorge Wilson  * locking of spa_vdev_enter(), we also want to make sure the transactions have
112088ecc943SGeorge Wilson  * synced to disk, and then update the global configuration cache with the new
112188ecc943SGeorge Wilson  * information.
112288ecc943SGeorge Wilson  */
112388ecc943SGeorge Wilson int
112488ecc943SGeorge Wilson spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
112588ecc943SGeorge Wilson {
112688ecc943SGeorge Wilson 	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1127ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1128bbfd46c4SJeff Bonwick 	mutex_exit(&spa->spa_vdev_top_lock);
1129fa9e4066Sahrens 
1130fa9e4066Sahrens 	return (error);
1131fa9e4066Sahrens }
1132fa9e4066Sahrens 
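/*
 * Editorial sketch (not part of the original file): a minimal illustration
 * of the spa_vdev_enter()/spa_vdev_exit() call pattern used by top-level
 * vdev reconfiguration such as spa_vdev_add().  The function name and the
 * placeholder reconfiguration step are hypothetical.
 */
static int
example_vdev_reconfig(spa_t *spa)
{
	uint64_t txg;
	int error = 0;

	/* Grabs spa_vdev_top_lock, spa_namespace_lock, and SCL_ALL. */
	txg = spa_vdev_enter(spa);

	/* ... modify the vdev tree here, setting error on failure ... */

	/*
	 * Drops the locks, waits for txg to sync when error == 0, and
	 * updates the config cache; error is returned unchanged.
	 */
	return (spa_vdev_exit(spa, NULL, txg, error));
}
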
1133e14bb325SJeff Bonwick /*
1134e14bb325SJeff Bonwick  * Lock the given spa_t for the purpose of changing vdev state.
1135e14bb325SJeff Bonwick  */
1136e14bb325SJeff Bonwick void
11378f18d1faSGeorge Wilson spa_vdev_state_enter(spa_t *spa, int oplocks)
1138e14bb325SJeff Bonwick {
11398f18d1faSGeorge Wilson 	int locks = SCL_STATE_ALL | oplocks;
11408f18d1faSGeorge Wilson 
1141dcba9f3fSGeorge Wilson 	/*
1142dcba9f3fSGeorge Wilson 	 * Root pools may need to read from the underlying devfs filesystem
1143dcba9f3fSGeorge Wilson 	 * when opening up a vdev.  Unfortunately, if we're holding the
1144dcba9f3fSGeorge Wilson 	 * SCL_ZIO lock it will result in a deadlock when we try to issue
1145dcba9f3fSGeorge Wilson 	 * the read from the root filesystem.  Instead we "prefetch"
1146dcba9f3fSGeorge Wilson 	 * the associated vnodes that we need prior to opening the
1147dcba9f3fSGeorge Wilson 	 * underlying devices and cache them so that we can prevent
1148dcba9f3fSGeorge Wilson 	 * any I/O when we are doing the actual open.
1149dcba9f3fSGeorge Wilson 	 */
1150dcba9f3fSGeorge Wilson 	if (spa_is_root(spa)) {
11519842588bSGeorge Wilson 		int low = locks & ~(SCL_ZIO - 1);
11529842588bSGeorge Wilson 		int high = locks & ~low;
11539842588bSGeorge Wilson 
11549842588bSGeorge Wilson 		spa_config_enter(spa, high, spa, RW_WRITER);
1155dcba9f3fSGeorge Wilson 		vdev_hold(spa->spa_root_vdev);
11569842588bSGeorge Wilson 		spa_config_enter(spa, low, spa, RW_WRITER);
1157dcba9f3fSGeorge Wilson 	} else {
1158dcba9f3fSGeorge Wilson 		spa_config_enter(spa, locks, spa, RW_WRITER);
1159dcba9f3fSGeorge Wilson 	}
11608f18d1faSGeorge Wilson 	spa->spa_vdev_locks = locks;
1161e14bb325SJeff Bonwick }
1162e14bb325SJeff Bonwick 
1163e14bb325SJeff Bonwick int
1164e14bb325SJeff Bonwick spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1165e14bb325SJeff Bonwick {
1166c6065d0fSGeorge Wilson 	boolean_t config_changed = B_FALSE;
1167c6065d0fSGeorge Wilson 
1168b24ab676SJeff Bonwick 	if (vd != NULL || error == 0)
1169b24ab676SJeff Bonwick 		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
1170b24ab676SJeff Bonwick 		    0, 0, B_FALSE);
1171b24ab676SJeff Bonwick 
11728f18d1faSGeorge Wilson 	if (vd != NULL) {
1173e14bb325SJeff Bonwick 		vdev_state_dirty(vd->vdev_top);
1174c6065d0fSGeorge Wilson 		config_changed = B_TRUE;
11758f18d1faSGeorge Wilson 		spa->spa_config_generation++;
11768f18d1faSGeorge Wilson 	}
1177e14bb325SJeff Bonwick 
1178dcba9f3fSGeorge Wilson 	if (spa_is_root(spa))
1179dcba9f3fSGeorge Wilson 		vdev_rele(spa->spa_root_vdev);
1180dcba9f3fSGeorge Wilson 
11818f18d1faSGeorge Wilson 	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
11828f18d1faSGeorge Wilson 	spa_config_exit(spa, spa->spa_vdev_locks, spa);
1183e14bb325SJeff Bonwick 
11848ad4d6ddSJeff Bonwick 	/*
11858ad4d6ddSJeff Bonwick 	 * If anything changed, wait for it to sync.  This ensures that,
11868ad4d6ddSJeff Bonwick 	 * from the system administrator's perspective, zpool(1M) commands
11878ad4d6ddSJeff Bonwick 	 * are synchronous.  This is important for things like zpool offline:
11888ad4d6ddSJeff Bonwick 	 * when the command completes, you expect no further I/O from ZFS.
11898ad4d6ddSJeff Bonwick 	 */
11908ad4d6ddSJeff Bonwick 	if (vd != NULL)
11918ad4d6ddSJeff Bonwick 		txg_wait_synced(spa->spa_dsl_pool, 0);
11928ad4d6ddSJeff Bonwick 
1193c6065d0fSGeorge Wilson 	/*
1194c6065d0fSGeorge Wilson 	 * If the config changed, update the config cache.
1195c6065d0fSGeorge Wilson 	 */
1196c6065d0fSGeorge Wilson 	if (config_changed) {
1197c6065d0fSGeorge Wilson 		mutex_enter(&spa_namespace_lock);
1198c6065d0fSGeorge Wilson 		spa_config_sync(spa, B_FALSE, B_TRUE);
1199c6065d0fSGeorge Wilson 		mutex_exit(&spa_namespace_lock);
1200c6065d0fSGeorge Wilson 	}
1201c6065d0fSGeorge Wilson 
1202e14bb325SJeff Bonwick 	return (error);
1203e14bb325SJeff Bonwick }
1204e14bb325SJeff Bonwick 
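/*
 * Editorial sketch (not part of the original file): the per-vdev state
 * change pattern built on spa_vdev_state_enter()/spa_vdev_state_exit(),
 * roughly as used by operations like vdev_online()/vdev_offline().  The
 * function name and the state manipulation step are hypothetical; SCL_NONE
 * simply requests no locks beyond SCL_STATE_ALL.
 */
static int
example_vdev_state_change(spa_t *spa, vdev_t *vd)
{
	/* Takes SCL_STATE_ALL as writer (prefetching devfs vnodes if root). */
	spa_vdev_state_enter(spa, SCL_NONE);

	/* ... change vd's state here ... */

	/*
	 * Dirties vd's top-level vdev, drops the locks, waits for the
	 * change to sync, and updates the config cache if needed.
	 */
	return (spa_vdev_state_exit(spa, vd, 0));
}
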
1205fa9e4066Sahrens /*
1206fa9e4066Sahrens  * ==========================================================================
1207fa9e4066Sahrens  * Miscellaneous functions
1208fa9e4066Sahrens  * ==========================================================================
1209fa9e4066Sahrens  */
1210fa9e4066Sahrens 
1211ad135b5dSChristopher Siden void
121243466aaeSMax Grossman spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
1213ad135b5dSChristopher Siden {
12142acef22dSMatthew Ahrens 	if (!nvlist_exists(spa->spa_label_features, feature)) {
12152acef22dSMatthew Ahrens 		fnvlist_add_boolean(spa->spa_label_features, feature);
121643466aaeSMax Grossman 		/*
121743466aaeSMax Grossman 		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
121843466aaeSMax Grossman 		 * dirty the vdev config because the SCL_CONFIG lock is not held.
121943466aaeSMax Grossman 		 * Thankfully, in this case we don't need to dirty the config
122043466aaeSMax Grossman 		 * because it will be written out anyway when we finish
122143466aaeSMax Grossman 		 * creating the pool.
122243466aaeSMax Grossman 		 */
122343466aaeSMax Grossman 		if (tx->tx_txg != TXG_INITIAL)
122443466aaeSMax Grossman 			vdev_config_dirty(spa->spa_root_vdev);
12252acef22dSMatthew Ahrens 	}
1226ad135b5dSChristopher Siden }
1227ad135b5dSChristopher Siden 
1228ad135b5dSChristopher Siden void
1229ad135b5dSChristopher Siden spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1230ad135b5dSChristopher Siden {
12312acef22dSMatthew Ahrens 	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
12322acef22dSMatthew Ahrens 		vdev_config_dirty(spa->spa_root_vdev);
1233ad135b5dSChristopher Siden }
1234ad135b5dSChristopher Siden 
1235fa9e4066Sahrens /*
1236fa9e4066Sahrens  * Rename a spa_t.
1237fa9e4066Sahrens  */
1238fa9e4066Sahrens int
1239fa9e4066Sahrens spa_rename(const char *name, const char *newname)
1240fa9e4066Sahrens {
1241fa9e4066Sahrens 	spa_t *spa;
1242fa9e4066Sahrens 	int err;
1243fa9e4066Sahrens 
1244fa9e4066Sahrens 	/*
1245fa9e4066Sahrens 	 * Look up the spa_t and grab the config lock for writing.  We need to
1246fa9e4066Sahrens 	 * actually open the pool so that we can sync out the necessary labels.
1247fa9e4066Sahrens 	 * It's OK to call spa_open() with the namespace lock held because we
1248ea8dc4b6Seschrock 	 * allow recursive calls for other reasons.
1249fa9e4066Sahrens 	 */
1250fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
1251fa9e4066Sahrens 	if ((err = spa_open(name, &spa, FTAG)) != 0) {
1252fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1253fa9e4066Sahrens 		return (err);
1254fa9e4066Sahrens 	}
1255fa9e4066Sahrens 
1256e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1257fa9e4066Sahrens 
1258fa9e4066Sahrens 	avl_remove(&spa_namespace_avl, spa);
1259e14bb325SJeff Bonwick 	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
1260fa9e4066Sahrens 	avl_add(&spa_namespace_avl, spa);
1261fa9e4066Sahrens 
1262fa9e4066Sahrens 	/*
1263fa9e4066Sahrens 	 * Sync all labels to disk with the new names by marking the root vdev
1264fa9e4066Sahrens 	 * dirty and waiting for it to sync.  It will pick up the new pool name
1265fa9e4066Sahrens 	 * during the sync.
1266fa9e4066Sahrens 	 */
1267fa9e4066Sahrens 	vdev_config_dirty(spa->spa_root_vdev);
1268fa9e4066Sahrens 
1269e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
1270fa9e4066Sahrens 
12710373e76bSbonwick 	txg_wait_synced(spa->spa_dsl_pool, 0);
1272fa9e4066Sahrens 
1273fa9e4066Sahrens 	/*
1274fa9e4066Sahrens 	 * Sync the updated config cache.
1275fa9e4066Sahrens 	 */
1276c5904d13Seschrock 	spa_config_sync(spa, B_FALSE, B_TRUE);
1277fa9e4066Sahrens 
1278fa9e4066Sahrens 	spa_close(spa, FTAG);
1279fa9e4066Sahrens 
1280fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
1281fa9e4066Sahrens 
1282fa9e4066Sahrens 	return (0);
1283fa9e4066Sahrens }
1284fa9e4066Sahrens 
1285fa9e4066Sahrens /*
1286f9af39baSGeorge Wilson  * Return the spa_t associated with the given pool_guid, if it exists.  If
1287f9af39baSGeorge Wilson  * device_guid is non-zero, determine whether the pool exists *and* contains
1288f9af39baSGeorge Wilson  * a device with the specified device_guid.
1289fa9e4066Sahrens  */
1290f9af39baSGeorge Wilson spa_t *
1291f9af39baSGeorge Wilson spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1292fa9e4066Sahrens {
1293fa9e4066Sahrens 	spa_t *spa;
1294fa9e4066Sahrens 	avl_tree_t *t = &spa_namespace_avl;
1295fa9e4066Sahrens 
1296ea8dc4b6Seschrock 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1297fa9e4066Sahrens 
1298fa9e4066Sahrens 	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1299fa9e4066Sahrens 		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1300fa9e4066Sahrens 			continue;
1301fa9e4066Sahrens 		if (spa->spa_root_vdev == NULL)
1302fa9e4066Sahrens 			continue;
130339c23413Seschrock 		if (spa_guid(spa) == pool_guid) {
130439c23413Seschrock 			if (device_guid == 0)
130539c23413Seschrock 				break;
130639c23413Seschrock 
130739c23413Seschrock 			if (vdev_lookup_by_guid(spa->spa_root_vdev,
130839c23413Seschrock 			    device_guid) != NULL)
130939c23413Seschrock 				break;
131039c23413Seschrock 
131139c23413Seschrock 			/*
13128654d025Sperrin 			 * Check any devices we may be in the process of adding.
131339c23413Seschrock 			 */
131439c23413Seschrock 			if (spa->spa_pending_vdev) {
131539c23413Seschrock 				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
131639c23413Seschrock 				    device_guid) != NULL)
131739c23413Seschrock 					break;
131839c23413Seschrock 			}
131939c23413Seschrock 		}
1320fa9e4066Sahrens 	}
1321fa9e4066Sahrens 
1322f9af39baSGeorge Wilson 	return (spa);
1323f9af39baSGeorge Wilson }
1324f9af39baSGeorge Wilson 
1325f9af39baSGeorge Wilson /*
1326f9af39baSGeorge Wilson  * Determine whether a pool with the given pool_guid exists.
1327f9af39baSGeorge Wilson  */
1328f9af39baSGeorge Wilson boolean_t
1329f9af39baSGeorge Wilson spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1330f9af39baSGeorge Wilson {
1331f9af39baSGeorge Wilson 	return (spa_by_guid(pool_guid, device_guid) != NULL);
1332fa9e4066Sahrens }
1333fa9e4066Sahrens 
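/*
 * Editorial sketch (not part of the original file): spa_by_guid() and
 * spa_guid_exists() assert that spa_namespace_lock is held, so a caller
 * outside this file looks roughly like the hypothetical helper below.
 */
static boolean_t
example_pool_has_device(uint64_t pool_guid, uint64_t device_guid)
{
	boolean_t found;

	mutex_enter(&spa_namespace_lock);
	found = spa_guid_exists(pool_guid, device_guid);
	mutex_exit(&spa_namespace_lock);

	return (found);
}
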
1334fa9e4066Sahrens char *
1335fa9e4066Sahrens spa_strdup(const char *s)
1336fa9e4066Sahrens {
1337fa9e4066Sahrens 	size_t len;
1338fa9e4066Sahrens 	char *new;
1339fa9e4066Sahrens 
1340fa9e4066Sahrens 	len = strlen(s);
1341fa9e4066Sahrens 	new = kmem_alloc(len + 1, KM_SLEEP);
1342fa9e4066Sahrens 	bcopy(s, new, len);
1343fa9e4066Sahrens 	new[len] = '\0';
1344fa9e4066Sahrens 
1345fa9e4066Sahrens 	return (new);
1346fa9e4066Sahrens }
1347fa9e4066Sahrens 
1348fa9e4066Sahrens void
1349fa9e4066Sahrens spa_strfree(char *s)
1350fa9e4066Sahrens {
1351fa9e4066Sahrens 	kmem_free(s, strlen(s) + 1);
1352fa9e4066Sahrens }
1353fa9e4066Sahrens 
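/*
 * Editorial note (not part of the original file): spa_strdup() allocates
 * strlen(s) + 1 bytes with KM_SLEEP, so copies must be released with
 * spa_strfree() rather than a hand-computed kmem_free().  A hypothetical
 * usage:
 */
static void
example_strdup_usage(const char *poolname)
{
	char *copy = spa_strdup(poolname);

	/* ... use copy ... */

	spa_strfree(copy);
}
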
1354fa9e4066Sahrens uint64_t
1355fa9e4066Sahrens spa_get_random(uint64_t range)
1356fa9e4066Sahrens {
1357fa9e4066Sahrens 	uint64_t r;
1358fa9e4066Sahrens 
1359fa9e4066Sahrens 	ASSERT(range != 0);
1360fa9e4066Sahrens 
1361fa9e4066Sahrens 	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
1362fa9e4066Sahrens 
1363fa9e4066Sahrens 	return (r % range);
1364fa9e4066Sahrens }
1365fa9e4066Sahrens 
13661195e687SMark J Musante uint64_t
13671195e687SMark J Musante spa_generate_guid(spa_t *spa)
13681195e687SMark J Musante {
13691195e687SMark J Musante 	uint64_t guid = spa_get_random(-1ULL);
13701195e687SMark J Musante 
13711195e687SMark J Musante 	if (spa != NULL) {
13721195e687SMark J Musante 		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
13731195e687SMark J Musante 			guid = spa_get_random(-1ULL);
13741195e687SMark J Musante 	} else {
13751195e687SMark J Musante 		while (guid == 0 || spa_guid_exists(guid, 0))
13761195e687SMark J Musante 			guid = spa_get_random(-1ULL);
13771195e687SMark J Musante 	}
13781195e687SMark J Musante 
13791195e687SMark J Musante 	return (guid);
13801195e687SMark J Musante }
13811195e687SMark J Musante 
1382fa9e4066Sahrens void
138343466aaeSMax Grossman snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1384fa9e4066Sahrens {
1385ad135b5dSChristopher Siden 	char type[256];
1386f0ba89beSJeff Bonwick 	char *checksum = NULL;
1387f0ba89beSJeff Bonwick 	char *compress = NULL;
1388f0ba89beSJeff Bonwick 
1389f0ba89beSJeff Bonwick 	if (bp != NULL) {
1390ad135b5dSChristopher Siden 		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1391ad135b5dSChristopher Siden 			dmu_object_byteswap_t bswap =
1392ad135b5dSChristopher Siden 			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1393ad135b5dSChristopher Siden 			(void) snprintf(type, sizeof (type), "bswap %s %s",
1394ad135b5dSChristopher Siden 			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1395ad135b5dSChristopher Siden 			    "metadata" : "data",
1396ad135b5dSChristopher Siden 			    dmu_ot_byteswap[bswap].ob_name);
1397ad135b5dSChristopher Siden 		} else {
1398ad135b5dSChristopher Siden 			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1399ad135b5dSChristopher Siden 			    sizeof (type));
1400ad135b5dSChristopher Siden 		}
14015d7b4d43SMatthew Ahrens 		if (!BP_IS_EMBEDDED(bp)) {
14025d7b4d43SMatthew Ahrens 			checksum =
14035d7b4d43SMatthew Ahrens 			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
14045d7b4d43SMatthew Ahrens 		}
1405f0ba89beSJeff Bonwick 		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1406f0ba89beSJeff Bonwick 	}
1407fa9e4066Sahrens 
140843466aaeSMax Grossman 	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
140943466aaeSMax Grossman 	    compress);
1410fa9e4066Sahrens }
1411fa9e4066Sahrens 
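/*
 * Editorial sketch (not part of the original file): callers format block
 * pointers into a BP_SPRINTF_LEN-sized buffer (from sys/spa.h).  The helper
 * name and the zfs_dbgmsg() call are just for illustration.
 */
static void
example_print_blkptr(const blkptr_t *bp)
{
	char blkbuf[BP_SPRINTF_LEN];

	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
	zfs_dbgmsg("bp = %s", blkbuf);
}
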
1412fa9e4066Sahrens void
1413fa9e4066Sahrens spa_freeze(spa_t *spa)
1414fa9e4066Sahrens {
1415fa9e4066Sahrens 	uint64_t freeze_txg = 0;
1416fa9e4066Sahrens 
1417e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1418fa9e4066Sahrens 	if (spa->spa_freeze_txg == UINT64_MAX) {
1419fa9e4066Sahrens 		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1420fa9e4066Sahrens 		spa->spa_freeze_txg = freeze_txg;
1421fa9e4066Sahrens 	}
1422e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
1423fa9e4066Sahrens 	if (freeze_txg != 0)
1424fa9e4066Sahrens 		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1425fa9e4066Sahrens }
1426fa9e4066Sahrens 
14270125049cSahrens void
14280125049cSahrens zfs_panic_recover(const char *fmt, ...)
14290125049cSahrens {
14300125049cSahrens 	va_list adx;
14310125049cSahrens 
14320125049cSahrens 	va_start(adx, fmt);
14330125049cSahrens 	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
14340125049cSahrens 	va_end(adx);
14350125049cSahrens }
14360125049cSahrens 
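/*
 * Editorial sketch (not part of the original file): zfs_panic_recover() is
 * called where on-disk inconsistencies are detected; with zfs_recover set
 * it warns instead of panicking.  The check and message below are made up
 * for illustration.
 */
static void
example_check_birth(const blkptr_t *bp, uint64_t last_synced_txg)
{
	if (bp->blk_birth > last_synced_txg)
		zfs_panic_recover("blkptr birth %llu is ahead of the last "
		    "synced txg %llu",
		    (u_longlong_t)bp->blk_birth,
		    (u_longlong_t)last_synced_txg);
}
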
14373f9d6ad7SLin Ling /*
14383f9d6ad7SLin Ling  * This is a stripped-down version of strtoull, suitable only for converting
1439f7170741SWill Andrews  * lowercase hexadecimal numbers that don't overflow.
14403f9d6ad7SLin Ling  */
14413f9d6ad7SLin Ling uint64_t
14423f9d6ad7SLin Ling strtonum(const char *str, char **nptr)
14433f9d6ad7SLin Ling {
14443f9d6ad7SLin Ling 	uint64_t val = 0;
14453f9d6ad7SLin Ling 	char c;
14463f9d6ad7SLin Ling 	int digit;
14473f9d6ad7SLin Ling 
14483f9d6ad7SLin Ling 	while ((c = *str) != '\0') {
14493f9d6ad7SLin Ling 		if (c >= '0' && c <= '9')
14503f9d6ad7SLin Ling 			digit = c - '0';
14513f9d6ad7SLin Ling 		else if (c >= 'a' && c <= 'f')
14523f9d6ad7SLin Ling 			digit = 10 + c - 'a';
14533f9d6ad7SLin Ling 		else
14543f9d6ad7SLin Ling 			break;
14553f9d6ad7SLin Ling 
14563f9d6ad7SLin Ling 		val *= 16;
14573f9d6ad7SLin Ling 		val += digit;
14583f9d6ad7SLin Ling 
14593f9d6ad7SLin Ling 		str++;
14603f9d6ad7SLin Ling 	}
14613f9d6ad7SLin Ling 
14623f9d6ad7SLin Ling 	if (nptr)
14633f9d6ad7SLin Ling 		*nptr = (char *)str;
14643f9d6ad7SLin Ling 
14653f9d6ad7SLin Ling 	return (val);
14663f9d6ad7SLin Ling }
14673f9d6ad7SLin Ling 
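/*
 * Editorial sketch (not part of the original file): strtonum() consumes
 * leading lowercase hex digits and reports, via nptr, where parsing
 * stopped.  The helper below is hypothetical.
 */
static uint64_t
example_strtonum_usage(void)
{
	char *end;
	uint64_t val;

	val = strtonum("1a2b3c:0", &end);

	/* Here val == 0x1a2b3c and end points at the ':' character. */
	return (val);
}
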
1468fa9e4066Sahrens /*
1469fa9e4066Sahrens  * ==========================================================================
1470fa9e4066Sahrens  * Accessor functions
1471fa9e4066Sahrens  * ==========================================================================
1472fa9e4066Sahrens  */
1473fa9e4066Sahrens 
1474088f3894Sahrens boolean_t
147588b7b0f2SMatthew Ahrens spa_shutting_down(spa_t *spa)
1476fa9e4066Sahrens {
147788b7b0f2SMatthew Ahrens 	return (spa->spa_async_suspended);
1478fa9e4066Sahrens }
1479fa9e4066Sahrens 
1480fa9e4066Sahrens dsl_pool_t *
1481fa9e4066Sahrens spa_get_dsl(spa_t *spa)
1482fa9e4066Sahrens {
1483fa9e4066Sahrens 	return (spa->spa_dsl_pool);
1484fa9e4066Sahrens }
1485fa9e4066Sahrens 
1486ad135b5dSChristopher Siden boolean_t
1487ad135b5dSChristopher Siden spa_is_initializing(spa_t *spa)
1488ad135b5dSChristopher Siden {
1489ad135b5dSChristopher Siden 	return (spa->spa_is_initializing);
1490ad135b5dSChristopher Siden }
1491ad135b5dSChristopher Siden 
1492fa9e4066Sahrens blkptr_t *
1493fa9e4066Sahrens spa_get_rootblkptr(spa_t *spa)
1494fa9e4066Sahrens {
1495fa9e4066Sahrens 	return (&spa->spa_ubsync.ub_rootbp);
1496fa9e4066Sahrens }
1497fa9e4066Sahrens 
1498fa9e4066Sahrens void
1499fa9e4066Sahrens spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1500fa9e4066Sahrens {
1501fa9e4066Sahrens 	spa->spa_uberblock.ub_rootbp = *bp;
1502fa9e4066Sahrens }
1503fa9e4066Sahrens 
1504fa9e4066Sahrens void
1505fa9e4066Sahrens spa_altroot(spa_t *spa, char *buf, size_t buflen)
1506fa9e4066Sahrens {
1507fa9e4066Sahrens 	if (spa->spa_root == NULL)
1508fa9e4066Sahrens 		buf[0] = '\0';
1509fa9e4066Sahrens 	else
1510fa9e4066Sahrens 		(void) strncpy(buf, spa->spa_root, buflen);
1511fa9e4066Sahrens }
1512fa9e4066Sahrens 
1513fa9e4066Sahrens int
1514fa9e4066Sahrens spa_sync_pass(spa_t *spa)
1515fa9e4066Sahrens {
1516fa9e4066Sahrens 	return (spa->spa_sync_pass);
1517fa9e4066Sahrens }
1518fa9e4066Sahrens 
1519fa9e4066Sahrens char *
1520fa9e4066Sahrens spa_name(spa_t *spa)
1521fa9e4066Sahrens {
1522fa9e4066Sahrens 	return (spa->spa_name);
1523fa9e4066Sahrens }
1524fa9e4066Sahrens 
1525fa9e4066Sahrens uint64_t
1526fa9e4066Sahrens spa_guid(spa_t *spa)
1527fa9e4066Sahrens {
1528dfbb9432SGeorge Wilson 	dsl_pool_t *dp = spa_get_dsl(spa);
1529dfbb9432SGeorge Wilson 	uint64_t guid;
1530dfbb9432SGeorge Wilson 
1531b5989ec7Seschrock 	/*
1532b5989ec7Seschrock 	 * If we fail to parse the config during spa_load(), we can go through
1533b5989ec7Seschrock 	 * the error path (which posts an ereport) and end up here with no root
1534e9103aaeSGarrett D'Amore 	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
1535b5989ec7Seschrock 	 * this case.
1536b5989ec7Seschrock 	 */
1537dfbb9432SGeorge Wilson 	if (spa->spa_root_vdev == NULL)
1538dfbb9432SGeorge Wilson 		return (spa->spa_config_guid);
1539dfbb9432SGeorge Wilson 
1540dfbb9432SGeorge Wilson 	guid = spa->spa_last_synced_guid != 0 ?
1541dfbb9432SGeorge Wilson 	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1542dfbb9432SGeorge Wilson 
1543dfbb9432SGeorge Wilson 	/*
1544dfbb9432SGeorge Wilson 	 * Return the most recently synced out guid unless we're
1545dfbb9432SGeorge Wilson 	 * in syncing context.
1546dfbb9432SGeorge Wilson 	 */
1547dfbb9432SGeorge Wilson 	if (dp && dsl_pool_sync_context(dp))
1548b5989ec7Seschrock 		return (spa->spa_root_vdev->vdev_guid);
1549b5989ec7Seschrock 	else
1550dfbb9432SGeorge Wilson 		return (guid);
1551e9103aaeSGarrett D'Amore }
1552e9103aaeSGarrett D'Amore 
1553e9103aaeSGarrett D'Amore uint64_t
1554e9103aaeSGarrett D'Amore spa_load_guid(spa_t *spa)
1555e9103aaeSGarrett D'Amore {
1556e9103aaeSGarrett D'Amore 	/*
1557e9103aaeSGarrett D'Amore 	 * This is a GUID that exists solely as a reference for the
1558e9103aaeSGarrett D'Amore 	 * purposes of the ARC.  It is generated at load time, and
1559e9103aaeSGarrett D'Amore 	 * is never written to persistent storage.
1560e9103aaeSGarrett D'Amore 	 */
1561e9103aaeSGarrett D'Amore 	return (spa->spa_load_guid);
1562fa9e4066Sahrens }
1563fa9e4066Sahrens 
1564fa9e4066Sahrens uint64_t
1565fa9e4066Sahrens spa_last_synced_txg(spa_t *spa)
1566fa9e4066Sahrens {
1567fa9e4066Sahrens 	return (spa->spa_ubsync.ub_txg);
1568fa9e4066Sahrens }
1569fa9e4066Sahrens 
1570fa9e4066Sahrens uint64_t
1571fa9e4066Sahrens spa_first_txg(spa_t *spa)
1572fa9e4066Sahrens {
1573fa9e4066Sahrens 	return (spa->spa_first_txg);
1574fa9e4066Sahrens }
1575fa9e4066Sahrens 
1576b24ab676SJeff Bonwick uint64_t
1577b24ab676SJeff Bonwick spa_syncing_txg(spa_t *spa)
1578b24ab676SJeff Bonwick {
1579b24ab676SJeff Bonwick 	return (spa->spa_syncing_txg);
1580b24ab676SJeff Bonwick }
1581b24ab676SJeff Bonwick 
158288b7b0f2SMatthew Ahrens pool_state_t
1583fa9e4066Sahrens spa_state(spa_t *spa)
1584fa9e4066Sahrens {
1585fa9e4066Sahrens 	return (spa->spa_state);
1586fa9e4066Sahrens }
1587fa9e4066Sahrens 
1588b16da2e2SGeorge Wilson spa_load_state_t
1589b16da2e2SGeorge Wilson spa_load_state(spa_t *spa)
1590b16da2e2SGeorge Wilson {
1591b16da2e2SGeorge Wilson 	return (spa->spa_load_state);
1592b16da2e2SGeorge Wilson }
1593b16da2e2SGeorge Wilson 
1594fa9e4066Sahrens uint64_t
1595fa9e4066Sahrens spa_freeze_txg(spa_t *spa)
1596fa9e4066Sahrens {
1597fa9e4066Sahrens 	return (spa->spa_freeze_txg);
1598fa9e4066Sahrens }
1599fa9e4066Sahrens 
1600fa9e4066Sahrens /* ARGSUSED */
1601fa9e4066Sahrens uint64_t
1602fa9e4066Sahrens spa_get_asize(spa_t *spa, uint64_t lsize)
1603fa9e4066Sahrens {
160469962b56SMatthew Ahrens 	return (lsize * spa_asize_inflation);
160544cd46caSbillm }
160644cd46caSbillm 
16077d46dc6cSMatthew Ahrens /*
16087d46dc6cSMatthew Ahrens  * Return the amount of slop space in bytes.  It is 1/32 of the pool (about 3.1%),
16097d46dc6cSMatthew Ahrens  * or at least 32MB.
16107d46dc6cSMatthew Ahrens  *
16117d46dc6cSMatthew Ahrens  * See the comment above spa_slop_shift for details.
16127d46dc6cSMatthew Ahrens  */
16137d46dc6cSMatthew Ahrens uint64_t
16147d46dc6cSMatthew Ahrens spa_get_slop_space(spa_t *spa)
16147d46dc6cSMatthew Ahrens {
16157d46dc6cSMatthew Ahrens 	uint64_t space = spa_get_dspace(spa);
16167d46dc6cSMatthew Ahrens 	return (MAX(space >> spa_slop_shift, SPA_MINDEVSIZE >> 1));
16177d46dc6cSMatthew Ahrens }
16187d46dc6cSMatthew Ahrens 
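/*
 * Editorial worked example (not part of the original file), assuming the
 * default spa_slop_shift of 5:
 *
 *	1 TB pool:    (1ULL << 40) >> 5 = 32 GB of slop
 *	512 MB pool:  (512ULL << 20) >> 5 = 16 MB, which is below the
 *	              32 MB floor (SPA_MINDEVSIZE >> 1), so 32 MB is used.
 */
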
1619485bbbf5SGeorge Wilson uint64_t
1620485bbbf5SGeorge Wilson spa_get_dspace(spa_t *spa)
1621485bbbf5SGeorge Wilson {
1622485bbbf5SGeorge Wilson 	return (spa->spa_dspace);
1623485bbbf5SGeorge Wilson }
1624485bbbf5SGeorge Wilson 
1625485bbbf5SGeorge Wilson void
1626485bbbf5SGeorge Wilson spa_update_dspace(spa_t *spa)
1627485bbbf5SGeorge Wilson {
1628485bbbf5SGeorge Wilson 	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1629485bbbf5SGeorge Wilson 	    ddt_get_dedup_dspace(spa);
1630485bbbf5SGeorge Wilson }
1631485bbbf5SGeorge Wilson 
16320a4e9518Sgw /*
16330a4e9518Sgw  * Return the failure mode that has been set for this pool.  The default
16340a4e9518Sgw  * behavior will be to block all I/Os when a complete failure occurs.
16350a4e9518Sgw  */
16360a4e9518Sgw uint8_t
16370a4e9518Sgw spa_get_failmode(spa_t *spa)
16380a4e9518Sgw {
16390a4e9518Sgw 	return (spa->spa_failmode);
16400a4e9518Sgw }
16410a4e9518Sgw 
1642e14bb325SJeff Bonwick boolean_t
1643e14bb325SJeff Bonwick spa_suspended(spa_t *spa)
1644e14bb325SJeff Bonwick {
1645e14bb325SJeff Bonwick 	return (spa->spa_suspended);
1646e14bb325SJeff Bonwick }
1647e14bb325SJeff Bonwick 
164844cd46caSbillm uint64_t
164944cd46caSbillm spa_version(spa_t *spa)
165044cd46caSbillm {
165144cd46caSbillm 	return (spa->spa_ubsync.ub_version);
165244cd46caSbillm }
165344cd46caSbillm 
1654b24ab676SJeff Bonwick boolean_t
1655b24ab676SJeff Bonwick spa_deflate(spa_t *spa)
1656b24ab676SJeff Bonwick {
1657b24ab676SJeff Bonwick 	return (spa->spa_deflate);
1658b24ab676SJeff Bonwick }
1659b24ab676SJeff Bonwick 
1660b24ab676SJeff Bonwick metaslab_class_t *
1661b24ab676SJeff Bonwick spa_normal_class(spa_t *spa)
1662b24ab676SJeff Bonwick {
1663b24ab676SJeff Bonwick 	return (spa->spa_normal_class);
1664b24ab676SJeff Bonwick }
1665b24ab676SJeff Bonwick 
1666b24ab676SJeff Bonwick metaslab_class_t *
1667b24ab676SJeff Bonwick spa_log_class(spa_t *spa)
1668b24ab676SJeff Bonwick {
1669b24ab676SJeff Bonwick 	return (spa->spa_log_class);
1670b24ab676SJeff Bonwick }
1671b24ab676SJeff Bonwick 
167244cd46caSbillm int
167344cd46caSbillm spa_max_replication(spa_t *spa)
167444cd46caSbillm {
167544cd46caSbillm 	/*
1676e7437265Sahrens 	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
167744cd46caSbillm 	 * handle BPs with more than one DVA allocated.  Set our max
167844cd46caSbillm 	 * replication level accordingly.
1679fa9e4066Sahrens 	 */
1680e7437265Sahrens 	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
168144cd46caSbillm 		return (1);
168244cd46caSbillm 	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1683fa9e4066Sahrens }
1684fa9e4066Sahrens 
16853f9d6ad7SLin Ling int
16863f9d6ad7SLin Ling spa_prev_software_version(spa_t *spa)
16873f9d6ad7SLin Ling {
16883f9d6ad7SLin Ling 	return (spa->spa_prev_software_version);
16893f9d6ad7SLin Ling }
16903f9d6ad7SLin Ling 
1691283b8460SGeorge.Wilson uint64_t
1692283b8460SGeorge.Wilson spa_deadman_synctime(spa_t *spa)
1693283b8460SGeorge.Wilson {
1694283b8460SGeorge.Wilson 	return (spa->spa_deadman_synctime);
1695283b8460SGeorge.Wilson }
1696283b8460SGeorge.Wilson 
169799653d4eSeschrock uint64_t
1698b24ab676SJeff Bonwick dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
169999653d4eSeschrock {
1700b24ab676SJeff Bonwick 	uint64_t asize = DVA_GET_ASIZE(dva);
1701b24ab676SJeff Bonwick 	uint64_t dsize = asize;
170299653d4eSeschrock 
1703b24ab676SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
170499653d4eSeschrock 
1705b24ab676SJeff Bonwick 	if (asize != 0 && spa->spa_deflate) {
1706b24ab676SJeff Bonwick 		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
1707b24ab676SJeff Bonwick 		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
170899653d4eSeschrock 	}
1709b24ab676SJeff Bonwick 
1710b24ab676SJeff Bonwick 	return (dsize);
1711b24ab676SJeff Bonwick }
1712b24ab676SJeff Bonwick 
1713b24ab676SJeff Bonwick uint64_t
1714b24ab676SJeff Bonwick bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
1715b24ab676SJeff Bonwick {
1716b24ab676SJeff Bonwick 	uint64_t dsize = 0;
1717b24ab676SJeff Bonwick 
17185d7b4d43SMatthew Ahrens 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1719b24ab676SJeff Bonwick 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1720b24ab676SJeff Bonwick 
1721b24ab676SJeff Bonwick 	return (dsize);
1722b24ab676SJeff Bonwick }
1723b24ab676SJeff Bonwick 
1724b24ab676SJeff Bonwick uint64_t
1725b24ab676SJeff Bonwick bp_get_dsize(spa_t *spa, const blkptr_t *bp)
1726b24ab676SJeff Bonwick {
1727b24ab676SJeff Bonwick 	uint64_t dsize = 0;
1728b24ab676SJeff Bonwick 
1729b24ab676SJeff Bonwick 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1730b24ab676SJeff Bonwick 
17315d7b4d43SMatthew Ahrens 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1732b24ab676SJeff Bonwick 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1733b24ab676SJeff Bonwick 
1734e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_VDEV, FTAG);
1735b24ab676SJeff Bonwick 
1736b24ab676SJeff Bonwick 	return (dsize);
173799653d4eSeschrock }
173899653d4eSeschrock 
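/*
 * Editorial worked example (not part of the original file): for a plain
 * (non-parity) top-level vdev the deflate ratio works out to 512, so
 *
 *	dsize = (asize >> SPA_MINBLOCKSHIFT) * 512 = asize
 *
 * i.e. no deflation.  Parity-based vdevs (raidz) have a smaller ratio, so
 * the same asize maps to a smaller deflated size.
 */
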
1739fa9e4066Sahrens /*
1740fa9e4066Sahrens  * ==========================================================================
1741fa9e4066Sahrens  * Initialization and Termination
1742fa9e4066Sahrens  * ==========================================================================
1743fa9e4066Sahrens  */
1744fa9e4066Sahrens 
1745fa9e4066Sahrens static int
1746fa9e4066Sahrens spa_name_compare(const void *a1, const void *a2)
1747fa9e4066Sahrens {
1748fa9e4066Sahrens 	const spa_t *s1 = a1;
1749fa9e4066Sahrens 	const spa_t *s2 = a2;
1750fa9e4066Sahrens 	int s;
1751fa9e4066Sahrens 
1752fa9e4066Sahrens 	s = strcmp(s1->spa_name, s2->spa_name);
1753fa9e4066Sahrens 	if (s > 0)
1754fa9e4066Sahrens 		return (1);
1755fa9e4066Sahrens 	if (s < 0)
1756fa9e4066Sahrens 		return (-1);
1757fa9e4066Sahrens 	return (0);
1758fa9e4066Sahrens }
1759fa9e4066Sahrens 
17600373e76bSbonwick int
17610373e76bSbonwick spa_busy(void)
17620373e76bSbonwick {
17630373e76bSbonwick 	return (spa_active_count);
17640373e76bSbonwick }
17650373e76bSbonwick 
1766e7cbe64fSgw void
1767e7cbe64fSgw spa_boot_init()
1768e7cbe64fSgw {
1769e7cbe64fSgw 	spa_config_load();
1770e7cbe64fSgw }
1771e7cbe64fSgw 
1772fa9e4066Sahrens void
1773fa9e4066Sahrens spa_init(int mode)
1774fa9e4066Sahrens {
1775fa9e4066Sahrens 	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
1776c25056deSgw 	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
1777fa94a07fSbrendan 	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
1778fa9e4066Sahrens 	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
1779fa9e4066Sahrens 
1780fa9e4066Sahrens 	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
1781fa9e4066Sahrens 	    offsetof(spa_t, spa_avl));
1782fa9e4066Sahrens 
1783fa94a07fSbrendan 	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
1784fa94a07fSbrendan 	    offsetof(spa_aux_t, aux_avl));
1785fa94a07fSbrendan 
1786fa94a07fSbrendan 	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
1787fa94a07fSbrendan 	    offsetof(spa_aux_t, aux_avl));
178899653d4eSeschrock 
17898ad4d6ddSJeff Bonwick 	spa_mode_global = mode;
1790fa9e4066Sahrens 
1791283b8460SGeorge.Wilson #ifdef _KERNEL
1792283b8460SGeorge.Wilson 	spa_arch_init();
1793283b8460SGeorge.Wilson #else
1794cd1c8b85SMatthew Ahrens 	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
1795cd1c8b85SMatthew Ahrens 		arc_procfd = open("/proc/self/ctl", O_WRONLY);
1796cd1c8b85SMatthew Ahrens 		if (arc_procfd == -1) {
1797cd1c8b85SMatthew Ahrens 			perror("could not enable watchpoints: "
1798cd1c8b85SMatthew Ahrens 			    "opening /proc/self/ctl failed: ");
1799cd1c8b85SMatthew Ahrens 		} else {
1800cd1c8b85SMatthew Ahrens 			arc_watch = B_TRUE;
1801cd1c8b85SMatthew Ahrens 		}
1802cd1c8b85SMatthew Ahrens 	}
1803cd1c8b85SMatthew Ahrens #endif
1804cd1c8b85SMatthew Ahrens 
1805fa9e4066Sahrens 	refcount_init();
1806fa9e4066Sahrens 	unique_init();
18070713e232SGeorge Wilson 	range_tree_init();
1808fa9e4066Sahrens 	zio_init();
1809fa9e4066Sahrens 	dmu_init();
1810fa9e4066Sahrens 	zil_init();
181187db74c1Sek 	vdev_cache_stat_init();
181291ebeef5Sahrens 	zfs_prop_init();
1813990b4856Slling 	zpool_prop_init();
1814ad135b5dSChristopher Siden 	zpool_feature_init();
1815fa9e4066Sahrens 	spa_config_load();
1816e14bb325SJeff Bonwick 	l2arc_start();
1817fa9e4066Sahrens }
1818fa9e4066Sahrens 
1819fa9e4066Sahrens void
1820fa9e4066Sahrens spa_fini(void)
1821fa9e4066Sahrens {
1822e14bb325SJeff Bonwick 	l2arc_stop();
1823e14bb325SJeff Bonwick 
1824fa9e4066Sahrens 	spa_evict_all();
1825fa9e4066Sahrens 
182687db74c1Sek 	vdev_cache_stat_fini();
1827fa9e4066Sahrens 	zil_fini();
1828fa9e4066Sahrens 	dmu_fini();
1829fa9e4066Sahrens 	zio_fini();
18300713e232SGeorge Wilson 	range_tree_fini();
183191ebeef5Sahrens 	unique_fini();
1832fa9e4066Sahrens 	refcount_fini();
1833fa9e4066Sahrens 
1834fa9e4066Sahrens 	avl_destroy(&spa_namespace_avl);
183599653d4eSeschrock 	avl_destroy(&spa_spare_avl);
1836fa94a07fSbrendan 	avl_destroy(&spa_l2cache_avl);
1837fa9e4066Sahrens 
1838fa9e4066Sahrens 	cv_destroy(&spa_namespace_cv);
1839fa9e4066Sahrens 	mutex_destroy(&spa_namespace_lock);
1840c25056deSgw 	mutex_destroy(&spa_spare_lock);
1841fa94a07fSbrendan 	mutex_destroy(&spa_l2cache_lock);
1842fa9e4066Sahrens }
18436ce0521aSperrin 
18446ce0521aSperrin /*
18456ce0521aSperrin  * Return whether this pool has slogs. No locking needed.
18466ce0521aSperrin  * It's not a problem if the wrong answer is returned, as it's only for
18476ce0521aSperrin  * performance and not correctness.
18486ce0521aSperrin  */
18496ce0521aSperrin boolean_t
18506ce0521aSperrin spa_has_slogs(spa_t *spa)
18516ce0521aSperrin {
18526ce0521aSperrin 	return (spa->spa_log_class->mc_rotor != NULL);
18536ce0521aSperrin }
1854bf82a41bSeschrock 
1855b24ab676SJeff Bonwick spa_log_state_t
1856b24ab676SJeff Bonwick spa_get_log_state(spa_t *spa)
1857b24ab676SJeff Bonwick {
1858b24ab676SJeff Bonwick 	return (spa->spa_log_state);
1859b24ab676SJeff Bonwick }
1860b24ab676SJeff Bonwick 
1861b24ab676SJeff Bonwick void
1862b24ab676SJeff Bonwick spa_set_log_state(spa_t *spa, spa_log_state_t state)
1863b24ab676SJeff Bonwick {
1864b24ab676SJeff Bonwick 	spa->spa_log_state = state;
1865b24ab676SJeff Bonwick }
1866b24ab676SJeff Bonwick 
1867bf82a41bSeschrock boolean_t
1868bf82a41bSeschrock spa_is_root(spa_t *spa)
1869bf82a41bSeschrock {
1870bf82a41bSeschrock 	return (spa->spa_is_root);
1871bf82a41bSeschrock }
18728ad4d6ddSJeff Bonwick 
18738ad4d6ddSJeff Bonwick boolean_t
18748ad4d6ddSJeff Bonwick spa_writeable(spa_t *spa)
18758ad4d6ddSJeff Bonwick {
18768ad4d6ddSJeff Bonwick 	return (!!(spa->spa_mode & FWRITE));
18778ad4d6ddSJeff Bonwick }
18788ad4d6ddSJeff Bonwick 
1879*73527f44SAlex Reece /*
1880*73527f44SAlex Reece  * Returns true if there is a pending sync task in any of the current
1881*73527f44SAlex Reece  * syncing txg, the current quiescing txg, or the current open txg.
1882*73527f44SAlex Reece  */
1883*73527f44SAlex Reece boolean_t
1884*73527f44SAlex Reece spa_has_pending_synctask(spa_t *spa)
1885*73527f44SAlex Reece {
1886*73527f44SAlex Reece 	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
1887*73527f44SAlex Reece }
1888*73527f44SAlex Reece 
18898ad4d6ddSJeff Bonwick int
18908ad4d6ddSJeff Bonwick spa_mode(spa_t *spa)
18918ad4d6ddSJeff Bonwick {
18928ad4d6ddSJeff Bonwick 	return (spa->spa_mode);
18938ad4d6ddSJeff Bonwick }
1894b24ab676SJeff Bonwick 
1895b24ab676SJeff Bonwick uint64_t
1896b24ab676SJeff Bonwick spa_bootfs(spa_t *spa)
1897b24ab676SJeff Bonwick {
1898b24ab676SJeff Bonwick 	return (spa->spa_bootfs);
1899b24ab676SJeff Bonwick }
1900b24ab676SJeff Bonwick 
1901b24ab676SJeff Bonwick uint64_t
1902b24ab676SJeff Bonwick spa_delegation(spa_t *spa)
1903b24ab676SJeff Bonwick {
1904b24ab676SJeff Bonwick 	return (spa->spa_delegation);
1905b24ab676SJeff Bonwick }
1906b24ab676SJeff Bonwick 
1907b24ab676SJeff Bonwick objset_t *
1908b24ab676SJeff Bonwick spa_meta_objset(spa_t *spa)
1909b24ab676SJeff Bonwick {
1910b24ab676SJeff Bonwick 	return (spa->spa_meta_objset);
1911b24ab676SJeff Bonwick }
1912b24ab676SJeff Bonwick 
1913b24ab676SJeff Bonwick enum zio_checksum
1914b24ab676SJeff Bonwick spa_dedup_checksum(spa_t *spa)
1915b24ab676SJeff Bonwick {
1916b24ab676SJeff Bonwick 	return (spa->spa_dedup_checksum);
1917b24ab676SJeff Bonwick }
19183f9d6ad7SLin Ling 
19193f9d6ad7SLin Ling /*
19203f9d6ad7SLin Ling  * Reset pool scan stat per scan pass (or reboot).
19213f9d6ad7SLin Ling  */
19223f9d6ad7SLin Ling void
19233f9d6ad7SLin Ling spa_scan_stat_init(spa_t *spa)
19243f9d6ad7SLin Ling {
19253f9d6ad7SLin Ling 	/* data not stored on disk */
19263f9d6ad7SLin Ling 	spa->spa_scan_pass_start = gethrestime_sec();
19273f9d6ad7SLin Ling 	spa->spa_scan_pass_exam = 0;
19283f9d6ad7SLin Ling 	vdev_scan_stat_init(spa->spa_root_vdev);
19293f9d6ad7SLin Ling }
19303f9d6ad7SLin Ling 
19313f9d6ad7SLin Ling /*
19323f9d6ad7SLin Ling  * Get scan stats for zpool status reports
19333f9d6ad7SLin Ling  */
19343f9d6ad7SLin Ling int
19353f9d6ad7SLin Ling spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
19363f9d6ad7SLin Ling {
19373f9d6ad7SLin Ling 	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
19383f9d6ad7SLin Ling 
19393f9d6ad7SLin Ling 	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
1940be6fd75aSMatthew Ahrens 		return (SET_ERROR(ENOENT));
19413f9d6ad7SLin Ling 	bzero(ps, sizeof (pool_scan_stat_t));
19423f9d6ad7SLin Ling 
19433f9d6ad7SLin Ling 	/* data stored on disk */
19443f9d6ad7SLin Ling 	ps->pss_func = scn->scn_phys.scn_func;
19453f9d6ad7SLin Ling 	ps->pss_start_time = scn->scn_phys.scn_start_time;
19463f9d6ad7SLin Ling 	ps->pss_end_time = scn->scn_phys.scn_end_time;
19473f9d6ad7SLin Ling 	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
19483f9d6ad7SLin Ling 	ps->pss_examined = scn->scn_phys.scn_examined;
19493f9d6ad7SLin Ling 	ps->pss_to_process = scn->scn_phys.scn_to_process;
19503f9d6ad7SLin Ling 	ps->pss_processed = scn->scn_phys.scn_processed;
19513f9d6ad7SLin Ling 	ps->pss_errors = scn->scn_phys.scn_errors;
19523f9d6ad7SLin Ling 	ps->pss_state = scn->scn_phys.scn_state;
19533f9d6ad7SLin Ling 
19543f9d6ad7SLin Ling 	/* data not stored on disk */
19553f9d6ad7SLin Ling 	ps->pss_pass_start = spa->spa_scan_pass_start;
19563f9d6ad7SLin Ling 	ps->pss_pass_exam = spa->spa_scan_pass_exam;
19573f9d6ad7SLin Ling 
19583f9d6ad7SLin Ling 	return (0);
19593f9d6ad7SLin Ling }
196009c9d376SGeorge Wilson 
196109c9d376SGeorge Wilson boolean_t
196209c9d376SGeorge Wilson spa_debug_enabled(spa_t *spa)
196309c9d376SGeorge Wilson {
196409c9d376SGeorge Wilson 	return (spa->spa_debug);
196509c9d376SGeorge Wilson }
1966