spa_misc.c revision e914ace2e9d9bf2dbf9a1f1ce81cb776022096f5
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros []
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config
 *	cannot change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */
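
/*
 * Illustrative reader-side use of the config locks (hypothetical
 * caller): take SCL_VDEV to keep the vdev tree stable across a
 * trivial inquiry, per the rules above.
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... inspect the vdev tree ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */
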
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata cannot be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that cannot be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
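
/*
 * Worked example (arithmetic only): the default of 1000000 ms is
 * 1000 seconds, so with these defaults the deadman fires, and an I/O
 * is declared "hung", only after roughly 16.7 minutes without
 * completion.
 */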

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
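
/*
 * Worked arithmetic for the bound above, assuming the stock values
 * VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3:
 * (3 + 1) * 3 * 2 == 24, matching the assignment above.
 */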

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;

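/*
 * Worked example of the slop policy above (illustrative only): with
 * spa_slop_shift == 5, a 10TB pool reserves 10TB >> 5 == 320GB as slop;
 * a 1GB pool would compute only 32MB, so it is raised to spa_min_slop
 * (128MB), subject to the overall cap of half the pool size.
 */
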
int spa_allocators = 4;

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

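/*
 * Illustrative call site (hypothetical): spa_load_note(spa, "LOADING")
 * would log "spa_load(tank, config trusted): LOADING" through
 * zfs_dbgmsg() for a pool named "tank" whose config is trusted.
 */
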
/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		zfs_refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		zfs_refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!zfs_refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}
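
/*
 * Illustrative use of spa_config_tryenter() (hypothetical caller):
 *
 *	if (spa_config_tryenter(spa, SCL_ALL, FTAG, RW_WRITER)) {
 *		... perform the config change ...
 *		spa_config_exit(spa, SCL_ALL, FTAG);
 *	}
 *
 * On contention it backs out any locks already taken and returns 0.
 */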

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!zfs_refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
		if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER &&
		    !zfs_refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
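
/*
 * spa_config_held() is typically used in assertions (hypothetical
 * caller):
 *
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) != 0);
 */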

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
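
/*
 * Illustrative lookup under the namespace lock (hypothetical caller):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 */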

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
static void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);

	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	spa->spa_alloc_count = spa_allocators;
	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (kmutex_t), KM_SLEEP);
	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (avl_tree_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}
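
/*
 * Illustrative creation sequence (hypothetical caller), following the
 * requirement above that spa_lookup() be consulted first:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) == NULL)
 *		spa = spa_add(name, config, altroot);
 *	mutex_exit(&spa_namespace_lock);
 */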

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_alloc_trees[i]);
		mutex_destroy(&spa->spa_alloc_locks[i]);
	}
	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
	    sizeof (kmutex_t));
	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
	    sizeof (avl_tree_t));

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
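
/*
 * Illustrative iteration over every pool (hypothetical caller; the
 * namespace lock must be held across the walk):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	for (spa_t *spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
 *		... inspect spa ...
 *	mutex_exit(&spa_namespace_lock);
 */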

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
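
/*
 * References taken with spa_open_ref() are dropped by spa_close() using
 * the same tag (hypothetical caller):
 *
 *	spa_open_ref(spa, FTAG);
 *	... use the spa_t ...
 *	spa_close(spa, FTAG);
 */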

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}