/* spa_misc.c, at revision f78cdc34af236a6199dd9e21376f4a46348c0d56 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to look up a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  We check against spa_minref internally, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
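
/*
 * A minimal usage sketch (illustrative, not part of the original file),
 * following the rules above.  FTAG is the usual ZFS convention for tagging
 * a hold with the current function's name.
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	vdev_t *rvd = spa->spa_root_vdev;	(topology is now stable)
 *	... trivial read-only inquiry against rvd ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */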

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
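
/*
 * Worked example (illustrative, not part of the original file): with
 * VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3, the factor is
 * (3 + 1) * 3 * 2 == 24, so a 128K logical write may be charged up to
 * 24 * 128K == 3M of worst-case allocation.
 */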

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;
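
/*
 * Worked example (illustrative, not part of the original file): with
 * spa_slop_shift == 5, a 10T pool reserves 10T / 32 == 320G of slop;
 * a 1G pool would compute only 32M, so it instead reserves
 * MIN(spa_min_slop, poolsize / 2) == MIN(128M, 512M) == 128M.
 */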

int spa_allocators = 4;

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
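
/*
 * Usage sketch (illustrative, not part of the original file): both helpers
 * take printf-style arguments and route them to zfs_dbgmsg(), prefixed
 * with the pool name and config trust state, along the lines of:
 *
 *	spa_load_note(spa, "LOADING");
 *	spa_load_failed(spa, "cannot open vdev tree [error=%d]", error);
 */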

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}
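
/*
 * Try to take the given subset of config locks (a bitmask of SCL_* bits)
 * without blocking.  On contention, the locks already taken by this call
 * are dropped via spa_config_exit() and 0 is returned; otherwise all
 * requested locks are held on return and 1 is returned.
 */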
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}
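
/*
 * Acquire the given subset of config locks, blocking as needed.  Readers
 * wait out active and wanted writers; a writer waits until the hold count
 * drops to zero, flagging scl_write_wanted so that new readers queue
 * behind it (the anti-starvation behavior described in the block comment
 * at the top of this file).
 */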
void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}
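
/*
 * Release the given subset of config locks.  When the last hold on a lock
 * is dropped, writer ownership (if any) is cleared and all waiters are
 * woken.  Because lock ownership can be handed off between threads, the
 * releasing thread need not be the acquiring thread.
 */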
void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}
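
/*
 * Return the subset of 'locks' currently held in mode 'rw', as a bitmask.
 * A sketch of typical use (illustrative, not from the original file) is in
 * assertions, e.g.:
 *
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER));
 */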
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
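
/*
 * Example (illustrative, not part of the original file): because
 * spa_lookup() truncates at the first '/', '@' or '#', a full dataset name
 * such as "tank/home@snap" finds the spa_t for the pool "tank".
 */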

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation, we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	spa->spa_alloc_count = spa_allocators;
	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (kmutex_t), KM_SLEEP);
	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (avl_tree_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}
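
/*
 * Usage sketch (illustrative, not part of the original file): callers such
 * as pool create/import hold the namespace lock across the lookup-then-add
 * sequence so the name cannot race into existence:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) != NULL) {
 *		mutex_exit(&spa_namespace_lock);
 *		return (EEXIST);
 *	}
 *	spa = spa_add(name, config, altroot);
 *	...
 *	mutex_exit(&spa_namespace_lock);
 */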

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_alloc_trees[i]);
		mutex_destroy(&spa->spa_alloc_locks[i]);
	}
	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
	    sizeof (kmutex_t));
	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
	    sizeof (avl_tree_t));

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
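
/*
 * Example (illustrative, not part of the original file): iterating over
 * every pool in the namespace uses the standard iterator pattern:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	for (spa_t *spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
 *		... per-pool work, with the namespace lock held ...
 *	mutex_exit(&spa_namespace_lock);
 */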

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}
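
/*
 * Pairing sketch (illustrative, not part of the original file): holds are
 * tagged so refcount tracking can match each release to its acquirer.
 * FTAG is the usual ZFS convention for the current function's tag.
 *
 *	spa_open_ref(spa, FTAG);
 *	... use the spa_t; it cannot be freed while the hold is active ...
 *	spa_close(spa, FTAG);
 */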

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t