spa_misc.c revision e914ace2e9d9bf2dbf9a1f1ce81cb776022096f5
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
24 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
25 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26 * Copyright 2013 Saso Kiselkov. All rights reserved.
27 * Copyright (c) 2014 Integros [integros.com]
28 * Copyright (c) 2017 Datto Inc.
29 */
30
31#include <sys/zfs_context.h>
32#include <sys/spa_impl.h>
33#include <sys/spa_boot.h>
34#include <sys/zio.h>
35#include <sys/zio_checksum.h>
36#include <sys/zio_compress.h>
37#include <sys/dmu.h>
38#include <sys/dmu_tx.h>
39#include <sys/zap.h>
40#include <sys/zil.h>
41#include <sys/vdev_impl.h>
42#include <sys/vdev_initialize.h>
43#include <sys/metaslab.h>
44#include <sys/uberblock_impl.h>
45#include <sys/txg.h>
46#include <sys/avl.h>
47#include <sys/unique.h>
48#include <sys/dsl_pool.h>
49#include <sys/dsl_dir.h>
50#include <sys/dsl_prop.h>
51#include <sys/dsl_scan.h>
52#include <sys/fs/zfs.h>
53#include <sys/metaslab_impl.h>
54#include <sys/arc.h>
55#include <sys/ddt.h>
56#include "zfs_prop.h"
57#include <sys/zfeature.h>
58
59/*
60 * SPA locking
61 *
62 * There are four basic locks for managing spa_t structures:
63 *
64 * spa_namespace_lock (global mutex)
65 *
66 *	This lock must be acquired to do any of the following:
67 *
68 *		- Lookup a spa_t by name
69 *		- Add or remove a spa_t from the namespace
70 *		- Increase spa_refcount from non-zero
71 *		- Check if spa_refcount is zero
72 *		- Rename a spa_t
73 *		- add/remove/attach/detach devices
74 *		- Held for the duration of create/destroy/import/export
75 *
76 *	It does not need to handle recursion.  A create or destroy may
77 *	reference objects (files or zvols) in other pools, but by
78 *	definition they must have an existing reference, and will never need
79 *	to lookup a spa_t by name.
80 *
81 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
82 *
83 *	This reference count keep track of any active users of the spa_t.  The
84 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
85 *	the refcount is never really 'zero' - opening a pool implicitly keeps
86 *	some references in the DMU.  Internally we check against spa_minref, but
87 *	present the image of a zero/non-zero value to consumers.
88 *
89 * spa_config_lock[] (per-spa array of rwlocks)
90 *
91 *	This protects the spa_t from config changes, and must be held in
92 *	the following circumstances:
93 *
94 *		- RW_READER to perform I/O to the spa
95 *		- RW_WRITER to change the vdev config
96 *
97 * The locking order is fairly straightforward:
98 *
99 *		spa_namespace_lock	->	spa_refcount
100 *
101 *	The namespace lock must be acquired to increase the refcount from 0
102 *	or to check if it is zero.
103 *
104 *		spa_refcount		->	spa_config_lock[]
105 *
106 *	There must be at least one valid reference on the spa_t to acquire
107 *	the config lock.
108 *
109 *		spa_namespace_lock	->	spa_config_lock[]
110 *
111 *	The namespace lock must always be taken before the config lock.
112 *
113 *
114 * The spa_namespace_lock can be acquired directly and is globally visible.
115 *
116 * The namespace is manipulated using the following functions, all of which
117 * require the spa_namespace_lock to be held.
118 *
119 *	spa_lookup()		Lookup a spa_t by name.
120 *
121 *	spa_add()		Create a new spa_t in the namespace.
122 *
123 *	spa_remove()		Remove a spa_t from the namespace.  This also
124 *				frees up any memory associated with the spa_t.
125 *
126 *	spa_next()		Returns the next spa_t in the system, or the
127 *				first if NULL is passed.
128 *
129 *	spa_evict_all()		Shutdown and remove all spa_t structures in
130 *				the system.
131 *
132 *	spa_guid_exists()	Determine whether a pool/device guid exists.
133 *
134 * The spa_refcount is manipulated using the following functions:
135 *
136 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
137 *				called with spa_namespace_lock held if the
138 *				refcount is currently zero.
139 *
140 *	spa_close()		Remove a reference from the spa_t.  This will
141 *				not free the spa_t or remove it from the
142 *				namespace.  No locking is required.
143 *
144 *	spa_refcount_zero()	Returns true if the refcount is currently
145 *				zero.  Must be called with spa_namespace_lock
146 *				held.
147 *
148 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
149 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
150 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
151 *
152 * To read the configuration, it suffices to hold one of these locks as reader.
153 * To modify the configuration, you must hold all locks as writer.  To modify
154 * vdev state without altering the vdev tree's topology (e.g. online/offline),
155 * you must hold SCL_STATE and SCL_ZIO as writer.
156 *
157 * We use these distinct config locks to avoid recursive lock entry.
158 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
159 * block allocations (SCL_ALLOC), which may require reading space maps
160 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
161 *
162 * The spa config locks cannot be normal rwlocks because we need the
163 * ability to hand off ownership.  For example, SCL_ZIO is acquired
164 * by the issuing thread and later released by an interrupt thread.
165 * They do, however, obey the usual write-wanted semantics to prevent
166 * writer (i.e. system administrator) starvation.
167 *
168 * The lock acquisition rules are as follows:
169 *
170 * SCL_CONFIG
171 *	Protects changes to the vdev tree topology, such as vdev
172 *	add/remove/attach/detach.  Protects the dirty config list
173 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
174 *
175 * SCL_STATE
176 *	Protects changes to pool state and vdev state, such as vdev
177 *	online/offline/fault/degrade/clear.  Protects the dirty state list
178 *	(spa_state_dirty_list) and global pool state (spa_state).
179 *
180 * SCL_ALLOC
181 *	Protects changes to metaslab groups and classes.
182 *	Held as reader by metaslab_alloc() and metaslab_claim().
183 *
184 * SCL_ZIO
185 *	Held by bp-level zios (those which have no io_vd upon entry)
186 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
187 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
188 *
189 * SCL_FREE
190 *	Protects changes to metaslab groups and classes.
191 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
192 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
193 *	blocks in zio_done() while another i/o that holds either
194 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
195 *
196 * SCL_VDEV
197 *	Held as reader to prevent changes to the vdev tree during trivial
198 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
199 *	other locks, and lower than all of them, to ensure that it's safe
200 *	to acquire regardless of caller context.
201 *
202 * In addition, the following rules apply:
203 *
204 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
205 *	The lock ordering is SCL_CONFIG > spa_props_lock.
206 *
207 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
208 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
209 *	or zio_write_phys() -- the caller must ensure that the config cannot
210 *	cannot change in the interim, and that the vdev cannot be reopened.
211 *	SCL_STATE as reader suffices for both.
212 *
213 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
214 *
215 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
216 *				for writing.
217 *
218 *	spa_vdev_exit()		Release the config lock, wait for all I/O
219 *				to complete, sync the updated configs to the
220 *				cache, and release the namespace lock.
221 *
222 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
223 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
224 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
225 */
226
227static avl_tree_t spa_namespace_avl;
228kmutex_t spa_namespace_lock;
229static kcondvar_t spa_namespace_cv;
230static int spa_active_count;
231int spa_max_replication_override = SPA_DVAS_PER_BP;
232
233static kmutex_t spa_spare_lock;
234static avl_tree_t spa_spare_avl;
235static kmutex_t spa_l2cache_lock;
236static avl_tree_t spa_l2cache_avl;
237
238kmem_cache_t *spa_buffer_pool;
239int spa_mode_global;
240
241#ifdef ZFS_DEBUG
242/*
243 * Everything except dprintf, spa, and indirect_remap is on by default
244 * in debug builds.
245 */
246int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_INDIRECT_REMAP);
247#else
248int zfs_flags = 0;
249#endif
250
251/*
252 * zfs_recover can be set to nonzero to attempt to recover from
253 * otherwise-fatal errors, typically caused by on-disk corruption.  When
254 * set, calls to zfs_panic_recover() will turn into warning messages.
255 * This should only be used as a last resort, as it typically results
256 * in leaked space, or worse.
257 */
258boolean_t zfs_recover = B_FALSE;
259
260/*
261 * If destroy encounters an EIO while reading metadata (e.g. indirect
262 * blocks), space referenced by the missing metadata can not be freed.
263 * Normally this causes the background destroy to become "stalled", as
264 * it is unable to make forward progress.  While in this stalled state,
265 * all remaining space to free from the error-encountering filesystem is
266 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
267 * permanently leak the space from indirect blocks that can not be read,
268 * and continue to free everything else that it can.
269 *
270 * The default, "stalling" behavior is useful if the storage partially
271 * fails (i.e. some but not all i/os fail), and then later recovers.  In
272 * this case, we will be able to continue pool operations while it is
273 * partially failed, and when it recovers, we can continue to free the
274 * space, with no leaks.  However, note that this case is actually
275 * fairly rare.
276 *
277 * Typically pools either (a) fail completely (but perhaps temporarily,
278 * e.g. a top-level vdev going offline), or (b) have localized,
279 * permanent errors (e.g. disk returns the wrong data due to bit flip or
280 * firmware bug).  In case (a), this setting does not matter because the
281 * pool will be suspended and the sync thread will not be able to make
282 * forward progress regardless.  In case (b), because the error is
283 * permanent, the best we can do is leak the minimum amount of space,
284 * which is what setting this flag will do.  Therefore, it is reasonable
285 * for this flag to normally be set, but we chose the more conservative
286 * approach of not setting it, so that there is no possibility of
287 * leaking space in the "partial temporary" failure case.
288 */
289boolean_t zfs_free_leak_on_eio = B_FALSE;
290
291/*
292 * Expiration time in milliseconds. This value has two meanings. First it is
293 * used to determine when the spa_deadman() logic should fire. By default the
294 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
295 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
296 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
297 * in a system panic.
298 */
299uint64_t zfs_deadman_synctime_ms = 1000000ULL;
300
301/*
302 * Check time in milliseconds. This defines the frequency at which we check
303 * for hung I/O.
304 */
305uint64_t zfs_deadman_checktime_ms = 5000ULL;
306
307/*
308 * Override the zfs deadman behavior via /etc/system. By default the
309 * deadman is enabled except on VMware and sparc deployments.
310 */
311int zfs_deadman_enabled = -1;
312
313/*
314 * The worst case is single-sector max-parity RAID-Z blocks, in which
315 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
316 * times the size; so just assume that.  Add to this the fact that
317 * we can have up to 3 DVAs per bp, and one more factor of 2 because
318 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
319 * the worst case is:
320 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
321 */
322int spa_asize_inflation = 24;
323
324/*
325 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
326 * the pool to be consumed.  This ensures that we don't run the pool
327 * completely out of space, due to unaccounted changes (e.g. to the MOS).
328 * It also limits the worst-case time to allocate space.  If we have
329 * less than this amount of free space, most ZPL operations (e.g. write,
330 * create) will return ENOSPC.
331 *
332 * Certain operations (e.g. file removal, most administrative actions) can
333 * use half the slop space.  They will only return ENOSPC if less than half
334 * the slop space is free.  Typically, once the pool has less than the slop
335 * space free, the user will use these operations to free up space in the pool.
336 * These are the operations that call dsl_pool_adjustedsize() with the netfree
337 * argument set to TRUE.
338 *
339 * Operations that are almost guaranteed to free up space in the absence of
340 * a pool checkpoint can use up to three quarters of the slop space
341 * (e.g zfs destroy).
342 *
343 * A very restricted set of operations are always permitted, regardless of
344 * the amount of free space.  These are the operations that call
345 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
346 * increase in the amount of space used, it is possible to run the pool
347 * completely out of space, causing it to be permanently read-only.
348 *
349 * Note that on very small pools, the slop space will be larger than
350 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
351 * but we never allow it to be more than half the pool size.
352 *
353 * See also the comments in zfs_space_check_t.
354 */
355int spa_slop_shift = 5;
356uint64_t spa_min_slop = 128 * 1024 * 1024;
357
358int spa_allocators = 4;
359
360/*PRINTFLIKE2*/
361void
362spa_load_failed(spa_t *spa, const char *fmt, ...)
363{
364	va_list adx;
365	char buf[256];
366
367	va_start(adx, fmt);
368	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
369	va_end(adx);
370
371	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
372	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
373}
374
375/*PRINTFLIKE2*/
376void
377spa_load_note(spa_t *spa, const char *fmt, ...)
378{
379	va_list adx;
380	char buf[256];
381
382	va_start(adx, fmt);
383	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
384	va_end(adx);
385
386	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
387	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
388}
389
390/*
391 * ==========================================================================
392 * SPA config locking
393 * ==========================================================================
394 */
395static void
396spa_config_lock_init(spa_t *spa)
397{
398	for (int i = 0; i < SCL_LOCKS; i++) {
399		spa_config_lock_t *scl = &spa->spa_config_lock[i];
400		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
401		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
402		zfs_refcount_create_untracked(&scl->scl_count);
403		scl->scl_writer = NULL;
404		scl->scl_write_wanted = 0;
405	}
406}
407
408static void
409spa_config_lock_destroy(spa_t *spa)
410{
411	for (int i = 0; i < SCL_LOCKS; i++) {
412		spa_config_lock_t *scl = &spa->spa_config_lock[i];
413		mutex_destroy(&scl->scl_lock);
414		cv_destroy(&scl->scl_cv);
415		zfs_refcount_destroy(&scl->scl_count);
416		ASSERT(scl->scl_writer == NULL);
417		ASSERT(scl->scl_write_wanted == 0);
418	}
419}
420
421int
422spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
423{
424	for (int i = 0; i < SCL_LOCKS; i++) {
425		spa_config_lock_t *scl = &spa->spa_config_lock[i];
426		if (!(locks & (1 << i)))
427			continue;
428		mutex_enter(&scl->scl_lock);
429		if (rw == RW_READER) {
430			if (scl->scl_writer || scl->scl_write_wanted) {
431				mutex_exit(&scl->scl_lock);
432				spa_config_exit(spa, locks & ((1 << i) - 1),
433				    tag);
434				return (0);
435			}
436		} else {
437			ASSERT(scl->scl_writer != curthread);
438			if (!zfs_refcount_is_zero(&scl->scl_count)) {
439				mutex_exit(&scl->scl_lock);
440				spa_config_exit(spa, locks & ((1 << i) - 1),
441				    tag);
442				return (0);
443			}
444			scl->scl_writer = curthread;
445		}
446		(void) zfs_refcount_add(&scl->scl_count, tag);
447		mutex_exit(&scl->scl_lock);
448	}
449	return (1);
450}
451
452void
453spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
454{
455	int wlocks_held = 0;
456
457	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
458
459	for (int i = 0; i < SCL_LOCKS; i++) {
460		spa_config_lock_t *scl = &spa->spa_config_lock[i];
461		if (scl->scl_writer == curthread)
462			wlocks_held |= (1 << i);
463		if (!(locks & (1 << i)))
464			continue;
465		mutex_enter(&scl->scl_lock);
466		if (rw == RW_READER) {
467			while (scl->scl_writer || scl->scl_write_wanted) {
468				cv_wait(&scl->scl_cv, &scl->scl_lock);
469			}
470		} else {
471			ASSERT(scl->scl_writer != curthread);
472			while (!zfs_refcount_is_zero(&scl->scl_count)) {
473				scl->scl_write_wanted++;
474				cv_wait(&scl->scl_cv, &scl->scl_lock);
475				scl->scl_write_wanted--;
476			}
477			scl->scl_writer = curthread;
478		}
479		(void) zfs_refcount_add(&scl->scl_count, tag);
480		mutex_exit(&scl->scl_lock);
481	}
482	ASSERT3U(wlocks_held, <=, locks);
483}
484
485void
486spa_config_exit(spa_t *spa, int locks, void *tag)
487{
488	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
489		spa_config_lock_t *scl = &spa->spa_config_lock[i];
490		if (!(locks & (1 << i)))
491			continue;
492		mutex_enter(&scl->scl_lock);
493		ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
494		if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
495			ASSERT(scl->scl_writer == NULL ||
496			    scl->scl_writer == curthread);
497			scl->scl_writer = NULL;	/* OK in either case */
498			cv_broadcast(&scl->scl_cv);
499		}
500		mutex_exit(&scl->scl_lock);
501	}
502}
503
504int
505spa_config_held(spa_t *spa, int locks, krw_t rw)
506{
507	int locks_held = 0;
508
509	for (int i = 0; i < SCL_LOCKS; i++) {
510		spa_config_lock_t *scl = &spa->spa_config_lock[i];
511		if (!(locks & (1 << i)))
512			continue;
513		if ((rw == RW_READER &&
514		    !zfs_refcount_is_zero(&scl->scl_count)) ||
515		    (rw == RW_WRITER && scl->scl_writer == curthread))
516			locks_held |= 1 << i;
517	}
518
519	return (locks_held);
520}
521
522/*
523 * ==========================================================================
524 * SPA namespace functions
525 * ==========================================================================
526 */
527
528/*
529 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
530 * Returns NULL if no matching spa_t is found.
531 */
532spa_t *
533spa_lookup(const char *name)
534{
535	static spa_t search;	/* spa_t is large; don't allocate on stack */
536	spa_t *spa;
537	avl_index_t where;
538	char *cp;
539
540	ASSERT(MUTEX_HELD(&spa_namespace_lock));
541
542	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
543
544	/*
545	 * If it's a full dataset name, figure out the pool name and
546	 * just use that.
547	 */
548	cp = strpbrk(search.spa_name, "/@#");
549	if (cp != NULL)
550		*cp = '\0';
551
552	spa = avl_find(&spa_namespace_avl, &search, &where);
553
554	return (spa);
555}
556
557/*
558 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
559 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
560 * looking for potentially hung I/Os.
561 */
562void
563spa_deadman(void *arg)
564{
565	spa_t *spa = arg;
566
567	/*
568	 * Disable the deadman timer if the pool is suspended.
569	 */
570	if (spa_suspended(spa)) {
571		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
572		return;
573	}
574
575	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
576	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
577	    ++spa->spa_deadman_calls);
578	if (zfs_deadman_enabled)
579		vdev_deadman(spa->spa_root_vdev);
580}
581
582/*
583 * Create an uninitialized spa_t with the given name.  Requires
584 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
585 * exist by calling spa_lookup() first.
586 */
587spa_t *
588spa_add(const char *name, nvlist_t *config, const char *altroot)
589{
590	spa_t *spa;
591	spa_config_dirent_t *dp;
592	cyc_handler_t hdlr;
593	cyc_time_t when;
594
595	ASSERT(MUTEX_HELD(&spa_namespace_lock));
596
597	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
598
599	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
600	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
601	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
602	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
603	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
604	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
605	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
606	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
607	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
608	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
609	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
610	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
611
612	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
613	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
614	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
615	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
616	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
617
618	for (int t = 0; t < TXG_SIZE; t++)
619		bplist_create(&spa->spa_free_bplist[t]);
620
621	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
622	spa->spa_state = POOL_STATE_UNINITIALIZED;
623	spa->spa_freeze_txg = UINT64_MAX;
624	spa->spa_final_txg = UINT64_MAX;
625	spa->spa_load_max_txg = UINT64_MAX;
626	spa->spa_proc = &p0;
627	spa->spa_proc_state = SPA_PROC_NONE;
628	spa->spa_trust_config = B_TRUE;
629
630	hdlr.cyh_func = spa_deadman;
631	hdlr.cyh_arg = spa;
632	hdlr.cyh_level = CY_LOW_LEVEL;
633
634	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
635
636	/*
637	 * This determines how often we need to check for hung I/Os after
638	 * the cyclic has already fired. Since checking for hung I/Os is
639	 * an expensive operation we don't want to check too frequently.
640	 * Instead wait for 5 seconds before checking again.
641	 */
642	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
643	when.cyt_when = CY_INFINITY;
644	mutex_enter(&cpu_lock);
645	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
646	mutex_exit(&cpu_lock);
647
648	zfs_refcount_create(&spa->spa_refcount);
649	spa_config_lock_init(spa);
650
651	avl_add(&spa_namespace_avl, spa);
652
653	/*
654	 * Set the alternate root, if there is one.
655	 */
656	if (altroot) {
657		spa->spa_root = spa_strdup(altroot);
658		spa_active_count++;
659	}
660
661	spa->spa_alloc_count = spa_allocators;
662	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
663	    sizeof (kmutex_t), KM_SLEEP);
664	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
665	    sizeof (avl_tree_t), KM_SLEEP);
666	for (int i = 0; i < spa->spa_alloc_count; i++) {
667		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
668		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
669		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
670	}
671
672	/*
673	 * Every pool starts with the default cachefile
674	 */
675	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
676	    offsetof(spa_config_dirent_t, scd_link));
677
678	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
679	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
680	list_insert_head(&spa->spa_config_list, dp);
681
682	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
683	    KM_SLEEP) == 0);
684
685	if (config != NULL) {
686		nvlist_t *features;
687
688		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
689		    &features) == 0) {
690			VERIFY(nvlist_dup(features, &spa->spa_label_features,
691			    0) == 0);
692		}
693
694		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
695	}
696
697	if (spa->spa_label_features == NULL) {
698		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
699		    KM_SLEEP) == 0);
700	}
701
702	spa->spa_iokstat = kstat_create("zfs", 0, name,
703	    "disk", KSTAT_TYPE_IO, 1, 0);
704	if (spa->spa_iokstat) {
705		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
706		kstat_install(spa->spa_iokstat);
707	}
708
709	spa->spa_min_ashift = INT_MAX;
710	spa->spa_max_ashift = 0;
711
712	/*
713	 * As a pool is being created, treat all features as disabled by
714	 * setting SPA_FEATURE_DISABLED for all entries in the feature
715	 * refcount cache.
716	 */
717	for (int i = 0; i < SPA_FEATURES; i++) {
718		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
719	}
720
721	return (spa);
722}
723
724/*
725 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
726 * spa_namespace_lock.  This is called only after the spa_t has been closed and
727 * deactivated.
728 */
729void
730spa_remove(spa_t *spa)
731{
732	spa_config_dirent_t *dp;
733
734	ASSERT(MUTEX_HELD(&spa_namespace_lock));
735	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
736	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
737
738	nvlist_free(spa->spa_config_splitting);
739
740	avl_remove(&spa_namespace_avl, spa);
741	cv_broadcast(&spa_namespace_cv);
742
743	if (spa->spa_root) {
744		spa_strfree(spa->spa_root);
745		spa_active_count--;
746	}
747
748	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
749		list_remove(&spa->spa_config_list, dp);
750		if (dp->scd_path != NULL)
751			spa_strfree(dp->scd_path);
752		kmem_free(dp, sizeof (spa_config_dirent_t));
753	}
754
755	for (int i = 0; i < spa->spa_alloc_count; i++) {
756		avl_destroy(&spa->spa_alloc_trees[i]);
757		mutex_destroy(&spa->spa_alloc_locks[i]);
758	}
759	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
760	    sizeof (kmutex_t));
761	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
762	    sizeof (avl_tree_t));
763
764	list_destroy(&spa->spa_config_list);
765
766	nvlist_free(spa->spa_label_features);
767	nvlist_free(spa->spa_load_info);
768	spa_config_set(spa, NULL);
769
770	mutex_enter(&cpu_lock);
771	if (spa->spa_deadman_cycid != CYCLIC_NONE)
772		cyclic_remove(spa->spa_deadman_cycid);
773	mutex_exit(&cpu_lock);
774	spa->spa_deadman_cycid = CYCLIC_NONE;
775
776	zfs_refcount_destroy(&spa->spa_refcount);
777
778	spa_config_lock_destroy(spa);
779
780	kstat_delete(spa->spa_iokstat);
781	spa->spa_iokstat = NULL;
782
783	for (int t = 0; t < TXG_SIZE; t++)
784		bplist_destroy(&spa->spa_free_bplist[t]);
785
786	zio_checksum_templates_free(spa);
787
788	cv_destroy(&spa->spa_async_cv);
789	cv_destroy(&spa->spa_evicting_os_cv);
790	cv_destroy(&spa->spa_proc_cv);
791	cv_destroy(&spa->spa_scrub_io_cv);
792	cv_destroy(&spa->spa_suspend_cv);
793
794	mutex_destroy(&spa->spa_async_lock);
795	mutex_destroy(&spa->spa_errlist_lock);
796	mutex_destroy(&spa->spa_errlog_lock);
797	mutex_destroy(&spa->spa_evicting_os_lock);
798	mutex_destroy(&spa->spa_history_lock);
799	mutex_destroy(&spa->spa_proc_lock);
800	mutex_destroy(&spa->spa_props_lock);
801	mutex_destroy(&spa->spa_cksum_tmpls_lock);
802	mutex_destroy(&spa->spa_scrub_lock);
803	mutex_destroy(&spa->spa_suspend_lock);
804	mutex_destroy(&spa->spa_vdev_top_lock);
805	mutex_destroy(&spa->spa_iokstat_lock);
806
807	kmem_free(spa, sizeof (spa_t));
808}
809
810/*
811 * Given a pool, return the next pool in the namespace, or NULL if there is
812 * none.  If 'prev' is NULL, return the first pool.
813 */
814spa_t *
815spa_next(spa_t *prev)
816{
817	ASSERT(MUTEX_HELD(&spa_namespace_lock));
818
819	if (prev)
820		return (AVL_NEXT(&spa_namespace_avl, prev));
821	else
822		return (avl_first(&spa_namespace_avl));
823}
824
825/*
826 * ==========================================================================
827 * SPA refcount functions
828 * ==========================================================================
829 */
830
831/*
832 * Add a reference to the given spa_t.  Must have at least one reference, or
833 * have the namespace lock held.
834 */
835void
836spa_open_ref(spa_t *spa, void *tag)
837{
838	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
839	    MUTEX_HELD(&spa_namespace_lock));
840	(void) zfs_refcount_add(&spa->spa_refcount, tag);
841}
842
843/*
844 * Remove a reference to the given spa_t.  Must have at least one reference, or
845 * have the namespace lock held.
846 */
847void
848spa_close(spa_t *spa, void *tag)
849{
850	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
851	    MUTEX_HELD(&spa_namespace_lock));
852	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
853}
854
855/*
856 * Remove a reference to the given spa_t held by a dsl dir that is
857 * being asynchronously released.  Async releases occur from a taskq
858 * performing eviction of dsl datasets and dirs.  The namespace lock
859 * isn't held and the hold by the object being evicted may contribute to
860 * spa_minref (e.g. dataset or directory released during pool export),
861 * so the asserts in spa_close() do not apply.
862 */
863void
864spa_async_close(spa_t *spa, void *tag)
865{
866	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
867}
868
869/*
870 * Check to see if the spa refcount is zero.  Must be called with
871 * spa_namespace_lock held.  We really compare against spa_minref, which is the
872 * number of references acquired when opening a pool
873 */
874boolean_t
875spa_refcount_zero(spa_t *spa)
876{
877	ASSERT(MUTEX_HELD(&spa_namespace_lock));
878
879	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
880}
881
882/*
883 * ==========================================================================
884 * SPA spare and l2cache tracking
885 * ==========================================================================
886 */
887
888/*
889 * Hot spares and cache devices are tracked using the same code below,
890 * for 'auxiliary' devices.
891 */
892
893typedef struct spa_aux {
894	uint64_t	aux_guid;
895	uint64_t	aux_pool;
896	avl_node_t	aux_avl;
897	int		aux_count;
898} spa_aux_t;
899
900static int
901spa_aux_compare(const void *a, const void *b)
902{
903	const spa_aux_t *sa = a;
904	const spa_aux_t *sb = b;
905
906	if (sa->aux_guid < sb->aux_guid)
907		return (-1);
908	else if (sa->aux_guid > sb->aux_guid)
909		return (1);
910	else
911		return (0);
912}
913
914void
915spa_aux_add(vdev_t *vd, avl_tree_t *avl)
916{
917	avl_index_t where;
918	spa_aux_t search;
919	spa_aux_t *aux;
920
921	search.aux_guid = vd->vdev_guid;
922	if ((aux = avl_find(avl, &search, &where)) != NULL) {
923		aux->aux_count++;
924	} else {
925		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
926		aux->aux_guid = vd->vdev_guid;
927		aux->aux_count = 1;
928		avl_insert(avl, aux, where);
929	}
930}
931
932void
933spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
934{
935	spa_aux_t search;
936	spa_aux_t *aux;
937	avl_index_t where;
938
939	search.aux_guid = vd->vdev_guid;
940	aux = avl_find(avl, &search, &where);
941
942	ASSERT(aux != NULL);
943
944	if (--aux->aux_count == 0) {
945		avl_remove(avl, aux);
946		kmem_free(aux, sizeof (spa_aux_t));
947	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
948		aux->aux_pool = 0ULL;
949	}
950}
951
952boolean_t
953spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
954{
955	spa_aux_t search, *found;
956
957	search.aux_guid = guid;
958	found = avl_find(avl, &search, NULL);
959
960	if (pool) {
961		if (found)
962			*pool = found->aux_pool;
963		else
964			*pool = 0ULL;
965	}
966
967	if (refcnt) {
968		if (found)
969			*refcnt = found->aux_count;
970		else
971			*refcnt = 0;
972	}
973
974	return (found != NULL);
975}
976
977void
978spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
979{
980	spa_aux_t search, *found;
981	avl_index_t where;
982
983	search.aux_guid = vd->vdev_guid;
984	found = avl_find(avl, &search, &where);
985	ASSERT(found != NULL);
986	ASSERT(found->aux_pool == 0ULL);
987
988	found->aux_pool = spa_guid(vd->vdev_spa);
989}
990
991/*
992 * Spares are tracked globally due to the following constraints:
993 *
994 *	- A spare may be part of multiple pools.
995 *	- A spare may be added to a pool even if it's actively in use within
996 *	  another pool.
997 *	- A spare in use in any pool can only be the source of a replacement if
998 *	  the target is a spare in the same pool.
999 *
1000 * We keep track of all spares on the system through the use of a reference
1001 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
1002 * spare, then we bump the reference count in the AVL tree.  In addition, we set
1003 * the 'vdev_isspare' member to indicate that the device is a spare (active or
1004 * inactive).  When a spare is made active (used to replace a device in the
1005 * pool), we also keep track of which pool its been made a part of.
1006 *
1007 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
1008 * called under the spa_namespace lock as part of vdev reconfiguration.  The
1009 * separate spare lock exists for the status query path, which does not need to
1010 * be completely consistent with respect to other vdev configuration changes.
1011 */
1012
1013static int
1014spa_spare_compare(const void *a, const void *b)
1015{
1016	return (spa_aux_compare(a, b));
1017}
1018
1019void
1020spa_spare_add(vdev_t *vd)
1021{
1022	mutex_enter(&spa_spare_lock);
1023	ASSERT(!vd->vdev_isspare);
1024	spa_aux_add(vd, &spa_spare_avl);
1025	vd->vdev_isspare = B_TRUE;
1026	mutex_exit(&spa_spare_lock);
1027}
1028
1029void
1030spa_spare_remove(vdev_t *vd)
1031{
1032	mutex_enter(&spa_spare_lock);
1033	ASSERT(vd->vdev_isspare);
1034	spa_aux_remove(vd, &spa_spare_avl);
1035	vd->vdev_isspare = B_FALSE;
1036	mutex_exit(&spa_spare_lock);
1037}
1038
1039boolean_t
1040spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
1041{
1042	boolean_t found;
1043
1044	mutex_enter(&spa_spare_lock);
1045	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
1046	mutex_exit(&spa_spare_lock);
1047
1048	return (found);
1049}
1050
1051void
1052spa_spare_activate(vdev_t *vd)
1053{
1054	mutex_enter(&spa_spare_lock);
1055	ASSERT(vd->vdev_isspare);
1056	spa_aux_activate(vd, &spa_spare_avl);
1057	mutex_exit(&spa_spare_lock);
1058}
1059
1060/*
1061 * Level 2 ARC devices are tracked globally for the same reasons as spares.
1062 * Cache devices currently only support one pool per cache device, and so
1063 * for these devices the aux reference count is currently unused beyond 1.
1064 */
1065
1066static int
1067spa_l2cache_compare(const void *a, const void *b)
1068{
1069	return (spa_aux_compare(a, b));
1070}
1071
1072void
1073spa_l2cache_add(vdev_t *vd)
1074{
1075	mutex_enter(&spa_l2cache_lock);
1076	ASSERT(!vd->vdev_isl2cache);
1077	spa_aux_add(vd, &spa_l2cache_avl);
1078	vd->vdev_isl2cache = B_TRUE;
1079	mutex_exit(&spa_l2cache_lock);
1080}
1081
1082void
1083spa_l2cache_remove(vdev_t *vd)
1084{
1085	mutex_enter(&spa_l2cache_lock);
1086	ASSERT(vd->vdev_isl2cache);
1087	spa_aux_remove(vd, &spa_l2cache_avl);
1088	vd->vdev_isl2cache = B_FALSE;
1089	mutex_exit(&spa_l2cache_lock);
1090}
1091
1092boolean_t
1093spa_l2cache_exists(uint64_t guid, uint64_t *pool)
1094{
1095	boolean_t found;
1096
1097	mutex_enter(&spa_l2cache_lock);
1098	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
1099	mutex_exit(&spa_l2cache_lock);
1100
1101	return (found);
1102}
1103
1104void
1105spa_l2cache_activate(vdev_t *vd)
1106{
1107	mutex_enter(&spa_l2cache_lock);
1108	ASSERT(vd->vdev_isl2cache);
1109	spa_aux_activate(vd, &spa_l2cache_avl);
1110	mutex_exit(&spa_l2cache_lock);
1111}
1112
1113/*
1114 * ==========================================================================
1115 * SPA vdev locking
1116 * ==========================================================================
1117 */
1118
1119/*
1120 * Lock the given spa_t for the purpose of adding or removing a vdev.
1121 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1122 * It returns the next transaction group for the spa_t.
1123 */
1124uint64_t
1125spa_vdev_enter(spa_t *spa)
1126{
1127	mutex_enter(&spa->spa_vdev_top_lock);
1128	mutex_enter(&spa_namespace_lock);
1129	return (spa_vdev_config_enter(spa));
1130}
1131
1132/*
1133 * Internal implementation for spa_vdev_enter().  Used when a vdev
1134 * operation requires multiple syncs (i.e. removing a device) while
1135 * keeping the spa_namespace_lock held.
1136 */
1137uint64_t
1138spa_vdev_config_enter(spa_t *spa)
1139{
1140	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1141
1142	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1143
1144	return (spa_last_synced_txg(spa) + 1);
1145}
1146
1147/*
1148 * Used in combination with spa_vdev_config_enter() to allow the syncing
1149 * of multiple transactions without releasing the spa_namespace_lock.
1150 */
1151void
1152spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
1153{
1154	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1155
1156	int config_changed = B_FALSE;
1157
1158	ASSERT(txg > spa_last_synced_txg(spa));
1159
1160	spa->spa_pending_vdev = NULL;
1161
1162	/*
1163	 * Reassess the DTLs.
1164	 */
1165	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
1166
1167	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
1168		config_changed = B_TRUE;
1169		spa->spa_config_generation++;
1170	}
1171
1172	/*
1173	 * Verify the metaslab classes.
1174	 */
1175	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
1176	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
1177
1178	spa_config_exit(spa, SCL_ALL, spa);
1179
1180	/*
1181	 * Panic the system if the specified tag requires it.  This
1182	 * is useful for ensuring that configurations are updated
1183	 * transactionally.
1184	 */
1185	if (zio_injection_enabled)
1186		zio_handle_panic_injection(spa, tag, 0);
1187
1188	/*
1189	 * Note: this txg_wait_synced() is important because it ensures
1190	 * that there won't be more than one config change per txg.
1191	 * This allows us to use the txg as the generation number.
1192	 */
1193	if (error == 0)
1194		txg_wait_synced(spa->spa_dsl_pool, txg);
1195
1196	if (vd != NULL) {
1197		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1198		if (vd->vdev_ops->vdev_op_leaf) {
1199			mutex_enter(&vd->vdev_initialize_lock);
1200			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED);
1201			mutex_exit(&vd->vdev_initialize_lock);
1202		}
1203
1204		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1205		vdev_free(vd);
1206		spa_config_exit(spa, SCL_ALL, spa);
1207	}
1208
1209	/*
1210	 * If the config changed, update the config cache.
1211	 */
1212	if (config_changed)
1213		spa_write_cachefile(spa, B_FALSE, B_TRUE);
1214}
1215
1216/*
1217 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
1218 * locking of spa_vdev_enter(), we also want make sure the transactions have
1219 * synced to disk, and then update the global configuration cache with the new
1220 * information.
1221 */
1222int
1223spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1224{
1225	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1226	mutex_exit(&spa_namespace_lock);
1227	mutex_exit(&spa->spa_vdev_top_lock);
1228
1229	return (error);
1230}
1231
1232/*
1233 * Lock the given spa_t for the purpose of changing vdev state.
1234 */
1235void
1236spa_vdev_state_enter(spa_t *spa, int oplocks)
1237{
1238	int locks = SCL_STATE_ALL | oplocks;
1239
1240	/*
1241	 * Root pools may need to read of the underlying devfs filesystem
1242	 * when opening up a vdev.  Unfortunately if we're holding the
1243	 * SCL_ZIO lock it will result in a deadlock when we try to issue
1244	 * the read from the root filesystem.  Instead we "prefetch"
1245	 * the associated vnodes that we need prior to opening the
1246	 * underlying devices and cache them so that we can prevent
1247	 * any I/O when we are doing the actual open.
1248	 */
1249	if (spa_is_root(spa)) {
1250		int low = locks & ~(SCL_ZIO - 1);
1251		int high = locks & ~low;
1252
1253		spa_config_enter(spa, high, spa, RW_WRITER);
1254		vdev_hold(spa->spa_root_vdev);
1255		spa_config_enter(spa, low, spa, RW_WRITER);
1256	} else {
1257		spa_config_enter(spa, locks, spa, RW_WRITER);
1258	}
1259	spa->spa_vdev_locks = locks;
1260}
1261
1262int
1263spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1264{
1265	boolean_t config_changed = B_FALSE;
1266
1267	if (vd != NULL || error == 0)
1268		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
1269		    0, 0, B_FALSE);
1270
1271	if (vd != NULL) {
1272		vdev_state_dirty(vd->vdev_top);
1273		config_changed = B_TRUE;
1274		spa->spa_config_generation++;
1275	}
1276
1277	if (spa_is_root(spa))
1278		vdev_rele(spa->spa_root_vdev);
1279
1280	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1281	spa_config_exit(spa, spa->spa_vdev_locks, spa);
1282
1283	/*
1284	 * If anything changed, wait for it to sync.  This ensures that,
1285	 * from the system administrator's perspective, zpool(1M) commands
1286	 * are synchronous.  This is important for things like zpool offline:
1287	 * when the command completes, you expect no further I/O from ZFS.
1288	 */
1289	if (vd != NULL)
1290		txg_wait_synced(spa->spa_dsl_pool, 0);
1291
1292	/*
1293	 * If the config changed, update the config cache.
1294	 */
1295	if (config_changed) {
1296		mutex_enter(&spa_namespace_lock);
1297		spa_write_cachefile(spa, B_FALSE, B_TRUE);
1298		mutex_exit(&spa_namespace_lock);
1299	}
1300
1301	return (error);
1302}
1303
1304/*
1305 * ==========================================================================
1306 * Miscellaneous functions
1307 * ==========================================================================
1308 */
1309
1310void
1311spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
1312{
1313	if (!nvlist_exists(spa->spa_label_features, feature)) {
1314		fnvlist_add_boolean(spa->spa_label_features, feature);
1315		/*
1316		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1317		 * dirty the vdev config because lock SCL_CONFIG is not held.
1318		 * Thankfully, in this case we don't need to dirty the config
1319		 * because it will be written out anyway when we finish
1320		 * creating the pool.
1321		 */
1322		if (tx->tx_txg != TXG_INITIAL)
1323			vdev_config_dirty(spa->spa_root_vdev);
1324	}
1325}
1326
1327void
1328spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1329{
1330	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
1331		vdev_config_dirty(spa->spa_root_vdev);
1332}
1333
1334/*
1335 * Return the spa_t associated with given pool_guid, if it exists.  If
1336 * device_guid is non-zero, determine whether the pool exists *and* contains
1337 * a device with the specified device_guid.
1338 */
1339spa_t *
1340spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1341{
1342	spa_t *spa;
1343	avl_tree_t *t = &spa_namespace_avl;
1344
1345	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1346
1347	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1348		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1349			continue;
1350		if (spa->spa_root_vdev == NULL)
1351			continue;
1352		if (spa_guid(spa) == pool_guid) {
1353			if (device_guid == 0)
1354				break;
1355
1356			if (vdev_lookup_by_guid(spa->spa_root_vdev,
1357			    device_guid) != NULL)
1358				break;
1359
1360			/*
1361			 * Check any devices we may be in the process of adding.
1362			 */
1363			if (spa->spa_pending_vdev) {
1364				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1365				    device_guid) != NULL)
1366					break;
1367			}
1368		}
1369	}
1370
1371	return (spa);
1372}
1373
1374/*
1375 * Determine whether a pool with the given pool_guid exists.
1376 */
1377boolean_t
1378spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1379{
1380	return (spa_by_guid(pool_guid, device_guid) != NULL);
1381}
1382
1383char *
1384spa_strdup(const char *s)
1385{
1386	size_t len;
1387	char *new;
1388
1389	len = strlen(s);
1390	new = kmem_alloc(len + 1, KM_SLEEP);
1391	bcopy(s, new, len);
1392	new[len] = '\0';
1393
1394	return (new);
1395}
1396
1397void
1398spa_strfree(char *s)
1399{
1400	kmem_free(s, strlen(s) + 1);
1401}
1402
1403uint64_t
1404spa_get_random(uint64_t range)
1405{
1406	uint64_t r;
1407
1408	ASSERT(range != 0);
1409
1410	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
1411
1412	return (r % range);
1413}
1414
1415uint64_t
1416spa_generate_guid(spa_t *spa)
1417{
1418	uint64_t guid = spa_get_random(-1ULL);
1419
1420	if (spa != NULL) {
1421		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
1422			guid = spa_get_random(-1ULL);
1423	} else {
1424		while (guid == 0 || spa_guid_exists(guid, 0))
1425			guid = spa_get_random(-1ULL);
1426	}
1427
1428	return (guid);
1429}
1430
1431void
1432snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1433{
1434	char type[256];
1435	char *checksum = NULL;
1436	char *compress = NULL;
1437
1438	if (bp != NULL) {
1439		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1440			dmu_object_byteswap_t bswap =
1441			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1442			(void) snprintf(type, sizeof (type), "bswap %s %s",
1443			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1444			    "metadata" : "data",
1445			    dmu_ot_byteswap[bswap].ob_name);
1446		} else {
1447			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1448			    sizeof (type));
1449		}
1450		if (!BP_IS_EMBEDDED(bp)) {
1451			checksum =
1452			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1453		}
1454		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1455	}
1456
1457	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
1458	    compress);
1459}
1460
1461void
1462spa_freeze(spa_t *spa)
1463{
1464	uint64_t freeze_txg = 0;
1465
1466	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1467	if (spa->spa_freeze_txg == UINT64_MAX) {
1468		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1469		spa->spa_freeze_txg = freeze_txg;
1470	}
1471	spa_config_exit(spa, SCL_ALL, FTAG);
1472	if (freeze_txg != 0)
1473		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1474}
1475
1476void
1477zfs_panic_recover(const char *fmt, ...)
1478{
1479	va_list adx;
1480
1481	va_start(adx, fmt);
1482	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
1483	va_end(adx);
1484}
1485
1486/*
1487 * This is a stripped-down version of strtoull, suitable only for converting
1488 * lowercase hexadecimal numbers that don't overflow.
1489 */
1490uint64_t
1491zfs_strtonum(const char *str, char **nptr)
1492{
1493	uint64_t val = 0;
1494	char c;
1495	int digit;
1496
1497	while ((c = *str) != '\0') {
1498		if (c >= '0' && c <= '9')
1499			digit = c - '0';
1500		else if (c >= 'a' && c <= 'f')
1501			digit = 10 + c - 'a';
1502		else
1503			break;
1504
1505		val *= 16;
1506		val += digit;
1507
1508		str++;
1509	}
1510
1511	if (nptr)
1512		*nptr = (char *)str;
1513
1514	return (val);
1515}
1516
1517/*
1518 * ==========================================================================
1519 * Accessor functions
1520 * ==========================================================================
1521 */
1522
1523boolean_t
1524spa_shutting_down(spa_t *spa)
1525{
1526	return (spa->spa_async_suspended);
1527}
1528
1529dsl_pool_t *
1530spa_get_dsl(spa_t *spa)
1531{
1532	return (spa->spa_dsl_pool);
1533}
1534
1535boolean_t
1536spa_is_initializing(spa_t *spa)
1537{
1538	return (spa->spa_is_initializing);
1539}
1540
1541boolean_t
1542spa_indirect_vdevs_loaded(spa_t *spa)
1543{
1544	return (spa->spa_indirect_vdevs_loaded);
1545}
1546
1547blkptr_t *
1548spa_get_rootblkptr(spa_t *spa)
1549{
1550	return (&spa->spa_ubsync.ub_rootbp);
1551}
1552
1553void
1554spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1555{
1556	spa->spa_uberblock.ub_rootbp = *bp;
1557}
1558
1559void
1560spa_altroot(spa_t *spa, char *buf, size_t buflen)
1561{
1562	if (spa->spa_root == NULL)
1563		buf[0] = '\0';
1564	else
1565		(void) strncpy(buf, spa->spa_root, buflen);
1566}
1567
1568int
1569spa_sync_pass(spa_t *spa)
1570{
1571	return (spa->spa_sync_pass);
1572}
1573
1574char *
1575spa_name(spa_t *spa)
1576{
1577	return (spa->spa_name);
1578}
1579
1580uint64_t
1581spa_guid(spa_t *spa)
1582{
1583	dsl_pool_t *dp = spa_get_dsl(spa);
1584	uint64_t guid;
1585
1586	/*
1587	 * If we fail to parse the config during spa_load(), we can go through
1588	 * the error path (which posts an ereport) and end up here with no root
1589	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
1590	 * this case.
1591	 */
1592	if (spa->spa_root_vdev == NULL)
1593		return (spa->spa_config_guid);
1594
1595	guid = spa->spa_last_synced_guid != 0 ?
1596	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1597
1598	/*
1599	 * Return the most recently synced out guid unless we're
1600	 * in syncing context.
1601	 */
1602	if (dp && dsl_pool_sync_context(dp))
1603		return (spa->spa_root_vdev->vdev_guid);
1604	else
1605		return (guid);
1606}
1607
1608uint64_t
1609spa_load_guid(spa_t *spa)
1610{
1611	/*
1612	 * This is a GUID that exists solely as a reference for the
1613	 * purposes of the arc.  It is generated at load time, and
1614	 * is never written to persistent storage.
1615	 */
1616	return (spa->spa_load_guid);
1617}
1618
1619uint64_t
1620spa_last_synced_txg(spa_t *spa)
1621{
1622	return (spa->spa_ubsync.ub_txg);
1623}
1624
1625uint64_t
1626spa_first_txg(spa_t *spa)
1627{
1628	return (spa->spa_first_txg);
1629}
1630
1631uint64_t
1632spa_syncing_txg(spa_t *spa)
1633{
1634	return (spa->spa_syncing_txg);
1635}
1636
1637/*
1638 * Return the last txg where data can be dirtied. The final txgs
1639 * will be used to just clear out any deferred frees that remain.
1640 */
1641uint64_t
1642spa_final_dirty_txg(spa_t *spa)
1643{
1644	return (spa->spa_final_txg - TXG_DEFER_SIZE);
1645}
1646
1647pool_state_t
1648spa_state(spa_t *spa)
1649{
1650	return (spa->spa_state);
1651}
1652
1653spa_load_state_t
1654spa_load_state(spa_t *spa)
1655{
1656	return (spa->spa_load_state);
1657}
1658
1659uint64_t
1660spa_freeze_txg(spa_t *spa)
1661{
1662	return (spa->spa_freeze_txg);
1663}
1664
1665/* ARGSUSED */
1666uint64_t
1667spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
1668{
1669	return (lsize * spa_asize_inflation);
1670}
1671
1672/*
1673 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
1674 * or at least 128MB, unless that would cause it to be more than half the
1675 * pool size.
1676 *
1677 * See the comment above spa_slop_shift for details.
1678 */
1679uint64_t
1680spa_get_slop_space(spa_t *spa)
1681{
1682	uint64_t space = spa_get_dspace(spa);
1683	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
1684}
1685
1686uint64_t
1687spa_get_dspace(spa_t *spa)
1688{
1689	return (spa->spa_dspace);
1690}
1691
1692uint64_t
1693spa_get_checkpoint_space(spa_t *spa)
1694{
1695	return (spa->spa_checkpoint_info.sci_dspace);
1696}
1697
1698void
1699spa_update_dspace(spa_t *spa)
1700{
1701	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1702	    ddt_get_dedup_dspace(spa);
1703	if (spa->spa_vdev_removal != NULL) {
1704		/*
1705		 * We can't allocate from the removing device, so
1706		 * subtract its size.  This prevents the DMU/DSL from
1707		 * filling up the (now smaller) pool while we are in the
1708		 * middle of removing the device.
1709		 *
1710		 * Note that the DMU/DSL doesn't actually know or care
1711		 * how much space is allocated (it does its own tracking
1712		 * of how much space has been logically used).  So it
1713		 * doesn't matter that the data we are moving may be
1714		 * allocated twice (on the old device and the new
1715		 * device).
1716		 */
1717		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1718		vdev_t *vd =
1719		    vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
1720		spa->spa_dspace -= spa_deflate(spa) ?
1721		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
1722		spa_config_exit(spa, SCL_VDEV, FTAG);
1723	}
1724}
1725
1726/*
1727 * Return the failure mode that has been set to this pool. The default
1728 * behavior will be to block all I/Os when a complete failure occurs.
1729 */
1730uint8_t
1731spa_get_failmode(spa_t *spa)
1732{
1733	return (spa->spa_failmode);
1734}
1735
1736boolean_t
1737spa_suspended(spa_t *spa)
1738{
1739	return (spa->spa_suspended);
1740}
1741
1742uint64_t
1743spa_version(spa_t *spa)
1744{
1745	return (spa->spa_ubsync.ub_version);
1746}
1747
1748boolean_t
1749spa_deflate(spa_t *spa)
1750{
1751	return (spa->spa_deflate);
1752}
1753
1754metaslab_class_t *
1755spa_normal_class(spa_t *spa)
1756{
1757	return (spa->spa_normal_class);
1758}
1759
1760metaslab_class_t *
1761spa_log_class(spa_t *spa)
1762{
1763	return (spa->spa_log_class);
1764}
1765
1766void
1767spa_evicting_os_register(spa_t *spa, objset_t *os)
1768{
1769	mutex_enter(&spa->spa_evicting_os_lock);
1770	list_insert_head(&spa->spa_evicting_os_list, os);
1771	mutex_exit(&spa->spa_evicting_os_lock);
1772}
1773
1774void
1775spa_evicting_os_deregister(spa_t *spa, objset_t *os)
1776{
1777	mutex_enter(&spa->spa_evicting_os_lock);
1778	list_remove(&spa->spa_evicting_os_list, os);
1779	cv_broadcast(&spa->spa_evicting_os_cv);
1780	mutex_exit(&spa->spa_evicting_os_lock);
1781}
1782
1783void
1784spa_evicting_os_wait(spa_t *spa)
1785{
1786	mutex_enter(&spa->spa_evicting_os_lock);
1787	while (!list_is_empty(&spa->spa_evicting_os_list))
1788		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
1789	mutex_exit(&spa->spa_evicting_os_lock);
1790
1791	dmu_buf_user_evict_wait();
1792}
1793
1794int
1795spa_max_replication(spa_t *spa)
1796{
1797	/*
1798	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
1799	 * handle BPs with more than one DVA allocated.  Set our max
1800	 * replication level accordingly.
1801	 */
1802	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
1803		return (1);
1804	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1805}
1806
1807int
1808spa_prev_software_version(spa_t *spa)
1809{
1810	return (spa->spa_prev_software_version);
1811}
1812
1813uint64_t
1814spa_deadman_synctime(spa_t *spa)
1815{
1816	return (spa->spa_deadman_synctime);
1817}
1818
1819uint64_t
1820dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
1821{
1822	uint64_t asize = DVA_GET_ASIZE(dva);
1823	uint64_t dsize = asize;
1824
1825	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1826
1827	if (asize != 0 && spa->spa_deflate) {
1828		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
1829		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
1830	}
1831
1832	return (dsize);
1833}
1834
1835uint64_t
1836bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
1837{
1838	uint64_t dsize = 0;
1839
1840	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1841		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1842
1843	return (dsize);
1844}
1845
1846uint64_t
1847bp_get_dsize(spa_t *spa, const blkptr_t *bp)
1848{
1849	uint64_t dsize = 0;
1850
1851	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1852
1853	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1854		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1855
1856	spa_config_exit(spa, SCL_VDEV, FTAG);
1857
1858	return (dsize);
1859}
1860
1861uint64_t
1862spa_dirty_data(spa_t *spa)
1863{
1864	return (spa->spa_dsl_pool->dp_dirty_total);
1865}
1866
1867/*
1868 * ==========================================================================
1869 * Initialization and Termination
1870 * ==========================================================================
1871 */
1872
1873static int
1874spa_name_compare(const void *a1, const void *a2)
1875{
1876	const spa_t *s1 = a1;
1877	const spa_t *s2 = a2;
1878	int s;
1879
1880	s = strcmp(s1->spa_name, s2->spa_name);
1881	if (s > 0)
1882		return (1);
1883	if (s < 0)
1884		return (-1);
1885	return (0);
1886}
1887
1888int
1889spa_busy(void)
1890{
1891	return (spa_active_count);
1892}
1893
1894void
1895spa_boot_init()
1896{
1897	spa_config_load();
1898}
1899
1900void
1901spa_init(int mode)
1902{
1903	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
1904	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
1905	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
1906	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
1907
1908	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
1909	    offsetof(spa_t, spa_avl));
1910
1911	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
1912	    offsetof(spa_aux_t, aux_avl));
1913
1914	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
1915	    offsetof(spa_aux_t, aux_avl));
1916
1917	spa_mode_global = mode;
1918
1919#ifdef _KERNEL
1920	spa_arch_init();
1921#else
1922	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
1923		arc_procfd = open("/proc/self/ctl", O_WRONLY);
1924		if (arc_procfd == -1) {
1925			perror("could not enable watchpoints: "
1926			    "opening /proc/self/ctl failed: ");
1927		} else {
1928			arc_watch = B_TRUE;
1929		}
1930	}
1931#endif
1932
1933	zfs_refcount_init();
1934	unique_init();
1935	range_tree_init();
1936	metaslab_alloc_trace_init();
1937	zio_init();
1938	dmu_init();
1939	zil_init();
1940	vdev_cache_stat_init();
1941	zfs_prop_init();
1942	zpool_prop_init();
1943	zpool_feature_init();
1944	spa_config_load();
1945	l2arc_start();
1946}
1947
1948void
1949spa_fini(void)
1950{
1951	l2arc_stop();
1952
1953	spa_evict_all();
1954
1955	vdev_cache_stat_fini();
1956	zil_fini();
1957	dmu_fini();
1958	zio_fini();
1959	metaslab_alloc_trace_fini();
1960	range_tree_fini();
1961	unique_fini();
1962	zfs_refcount_fini();
1963
1964	avl_destroy(&spa_namespace_avl);
1965	avl_destroy(&spa_spare_avl);
1966	avl_destroy(&spa_l2cache_avl);
1967
1968	cv_destroy(&spa_namespace_cv);
1969	mutex_destroy(&spa_namespace_lock);
1970	mutex_destroy(&spa_spare_lock);
1971	mutex_destroy(&spa_l2cache_lock);
1972}
1973
1974/*
1975 * Return whether this pool has slogs. No locking needed.
1976 * It's not a problem if the wrong answer is returned as it's only for
1977 * performance and not correctness
1978 */
1979boolean_t
1980spa_has_slogs(spa_t *spa)
1981{
1982	return (spa->spa_log_class->mc_rotor != NULL);
1983}
1984
1985spa_log_state_t
1986spa_get_log_state(spa_t *spa)
1987{
1988	return (spa->spa_log_state);
1989}
1990
1991void
1992spa_set_log_state(spa_t *spa, spa_log_state_t state)
1993{
1994	spa->spa_log_state = state;
1995}
1996
1997boolean_t
1998spa_is_root(spa_t *spa)
1999{
2000	return (spa->spa_is_root);
2001}
2002
2003boolean_t
2004spa_writeable(spa_t *spa)
2005{
2006	return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
2007}
2008
2009/*
2010 * Returns true if there is a pending sync task in any of the current
2011 * syncing txg, the current quiescing txg, or the current open txg.
2012 */
2013boolean_t
2014spa_has_pending_synctask(spa_t *spa)
2015{
2016	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
2017	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
2018}
2019
2020int
2021spa_mode(spa_t *spa)
2022{
2023	return (spa->spa_mode);
2024}
2025
2026uint64_t
2027spa_bootfs(spa_t *spa)
2028{
2029	return (spa->spa_bootfs);
2030}
2031
2032uint64_t
2033spa_delegation(spa_t *spa)
2034{
2035	return (spa->spa_delegation);
2036}
2037
2038objset_t *
2039spa_meta_objset(spa_t *spa)
2040{
2041	return (spa->spa_meta_objset);
2042}
2043
2044enum zio_checksum
2045spa_dedup_checksum(spa_t *spa)
2046{
2047	return (spa->spa_dedup_checksum);
2048}
2049
2050/*
2051 * Reset pool scan stat per scan pass (or reboot).
2052 */
2053void
2054spa_scan_stat_init(spa_t *spa)
2055{
2056	/* data not stored on disk */
2057	spa->spa_scan_pass_start = gethrestime_sec();
2058	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2059		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2060	else
2061		spa->spa_scan_pass_scrub_pause = 0;
2062	spa->spa_scan_pass_scrub_spent_paused = 0;
2063	spa->spa_scan_pass_exam = 0;
2064	vdev_scan_stat_init(spa->spa_root_vdev);
2065}
2066
2067/*
2068 * Get scan stats for zpool status reports
2069 */
2070int
2071spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2072{
2073	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2074
2075	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
2076		return (SET_ERROR(ENOENT));
2077	bzero(ps, sizeof (pool_scan_stat_t));
2078
2079	/* data stored on disk */
2080	ps->pss_func = scn->scn_phys.scn_func;
2081	ps->pss_start_time = scn->scn_phys.scn_start_time;
2082	ps->pss_end_time = scn->scn_phys.scn_end_time;
2083	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2084	ps->pss_examined = scn->scn_phys.scn_examined;
2085	ps->pss_to_process = scn->scn_phys.scn_to_process;
2086	ps->pss_processed = scn->scn_phys.scn_processed;
2087	ps->pss_errors = scn->scn_phys.scn_errors;
2088	ps->pss_state = scn->scn_phys.scn_state;
2089
2090	/* data not stored on disk */
2091	ps->pss_pass_start = spa->spa_scan_pass_start;
2092	ps->pss_pass_exam = spa->spa_scan_pass_exam;
2093	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2094	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2095
2096	return (0);
2097}
2098
2099int
2100spa_maxblocksize(spa_t *spa)
2101{
2102	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
2103		return (SPA_MAXBLOCKSIZE);
2104	else
2105		return (SPA_OLD_MAXBLOCKSIZE);
2106}
2107
2108int
2109spa_maxdnodesize(spa_t *spa)
2110{
2111	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
2112		return (DNODE_MAX_SIZE);
2113	else
2114		return (DNODE_MIN_SIZE);
2115}
2116
2117/*
2118 * Returns the txg that the last device removal completed. No indirect mappings
2119 * have been added since this txg.
2120 */
2121uint64_t
2122spa_get_last_removal_txg(spa_t *spa)
2123{
2124	uint64_t vdevid;
2125	uint64_t ret = -1ULL;
2126
2127	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2128	/*
2129	 * sr_prev_indirect_vdev is only modified while holding all the
2130	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
2131	 * examining it.
2132	 */
2133	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
2134
2135	while (vdevid != -1ULL) {
2136		vdev_t *vd = vdev_lookup_top(spa, vdevid);
2137		vdev_indirect_births_t *vib = vd->vdev_indirect_births;
2138
2139		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2140
2141		/*
2142		 * If the removal did not remap any data, we don't care.
2143		 */
2144		if (vdev_indirect_births_count(vib) != 0) {
2145			ret = vdev_indirect_births_last_entry_txg(vib);
2146			break;
2147		}
2148
2149		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
2150	}
2151	spa_config_exit(spa, SCL_VDEV, FTAG);
2152
2153	IMPLY(ret != -1ULL,
2154	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
2155
2156	return (ret);
2157}
2158
2159boolean_t
2160spa_trust_config(spa_t *spa)
2161{
2162	return (spa->spa_trust_config);
2163}
2164
2165uint64_t
2166spa_missing_tvds_allowed(spa_t *spa)
2167{
2168	return (spa->spa_missing_tvds_allowed);
2169}
2170
2171void
2172spa_set_missing_tvds(spa_t *spa, uint64_t missing)
2173{
2174	spa->spa_missing_tvds = missing;
2175}
2176
2177boolean_t
2178spa_top_vdevs_spacemap_addressable(spa_t *spa)
2179{
2180	vdev_t *rvd = spa->spa_root_vdev;
2181	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2182		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2183			return (B_FALSE);
2184	}
2185	return (B_TRUE);
2186}
2187
2188boolean_t
2189spa_has_checkpoint(spa_t *spa)
2190{
2191	return (spa->spa_checkpoint_txg != 0);
2192}
2193
2194boolean_t
2195spa_importing_readonly_checkpoint(spa_t *spa)
2196{
2197	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2198	    spa->spa_mode == FREAD);
2199}
2200
2201uint64_t
2202spa_min_claim_txg(spa_t *spa)
2203{
2204	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2205
2206	if (checkpoint_txg != 0)
2207		return (checkpoint_txg + 1);
2208
2209	return (spa->spa_first_txg);
2210}
2211
2212/*
2213 * If there is a checkpoint, async destroys may consume more space from
2214 * the pool instead of freeing it. In an attempt to save the pool from
2215 * getting suspended when it is about to run out of space, we stop
2216 * processing async destroys.
2217 */
2218boolean_t
2219spa_suspend_async_destroy(spa_t *spa)
2220{
2221	dsl_pool_t *dp = spa_get_dsl(spa);
2222
2223	uint64_t unreserved = dsl_pool_unreserved_space(dp,
2224	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
2225	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
2226	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
2227
2228	if (spa_has_checkpoint(spa) && avail == 0)
2229		return (B_TRUE);
2230
2231	return (B_FALSE);
2232}
2233