/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
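
/*
 * To make the ordering rules above concrete, here is a minimal sketch of a
 * read-side caller (illustrative only, not part of the interfaces above;
 * "name" is a hypothetical pool name and error handling is elided):
 *
 *	mutex_enter(&spa_namespace_lock);		   (namespace lock first)
 *	spa = spa_lookup(name);
 *	spa_open_ref(spa, FTAG);			   (then spa_refcount)
 *	mutex_exit(&spa_namespace_lock);
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);  (config lock last)
 *	... trivial vdev tree inquiries ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *	spa_close(spa, FTAG);
 */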

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
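
/*
 * A concrete instance of the bound above (illustrative arithmetic): for a
 * 128K logical write, spa_get_worst_case_asize() reserves 128K * 24 = 3M
 * of space, covering max-parity RAID-Z expansion, all three DVAs, and
 * dittoing by ddt_sync().
 */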

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;
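
/*
 * A worked example of the slop calculation (illustrative arithmetic; see
 * spa_get_slop_space() below): on a 100G pool, space >> 5 is ~3.1G, which
 * exceeds MIN(space / 2, spa_min_slop) = 128M, so ~3.1G is reserved.  On a
 * 1G pool, space >> 5 is only 32M, so the slop is raised to
 * MIN(512M, 128M) = 128M.
 */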

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
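
/*
 * For example, spa_lookup("tank/home@yesterday") truncates its search key
 * at the first '/', '@', or '#' and returns the spa_t for the pool "tank"
 * (the dataset name here is hypothetical).
 */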

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	avl_create(&spa->spa_alloc_tree, zio_bookmark_compare,
	    sizeof (zio_t), offsetof(zio_t, io_alloc_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	avl_destroy(&spa->spa_alloc_tree);
	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_alloc_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 * 	- A spare may be part of multiple pools.
 * 	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 * 	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
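
/*
 * A minimal sketch of the canonical calling pattern for the two functions
 * above (illustrative only; the vdev manipulation in the middle is elided):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... add, remove, attach, or detach devices ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */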

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_write_cachefile(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
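
/*
 * For example, zfs_strtonum("4d2", &cp) returns 0x4d2 (1234) and leaves
 * cp pointing at the terminating '\0'; parsing stops at the first
 * character outside [0-9a-f].
 */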

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied. The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
	if (spa->spa_vdev_removal != NULL) {
		/*
		 * We can't allocate from the removing device, so
		 * subtract its size.  This prevents the DMU/DSL from
		 * filling up the (now smaller) pool while we are in the
		 * middle of removing the device.
		 *
		 * Note that the DMU/DSL doesn't actually know or care
		 * how much space is allocated (it does its own tracking
		 * of how much space has been logically used).  So it
		 * doesn't matter that the data we are moving may be
		 * allocated twice (on the old device and the new
		 * device).
		 */
		vdev_t *vd = spa->spa_vdev_removal->svr_vdev;
		spa->spa_dspace -= spa_deflate(spa) ?
		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
	}
}

/*
 * Return the failure mode that has been set for this pool. The default
1747 * behavior will be to block all I/Os when a complete failure occurs.
1748 */
1749uint8_t
1750spa_get_failmode(spa_t *spa)
1751{
1752	return (spa->spa_failmode);
1753}
1754
1755boolean_t
1756spa_suspended(spa_t *spa)
1757{
1758	return (spa->spa_suspended);
1759}
1760
1761uint64_t
1762spa_version(spa_t *spa)
1763{
1764	return (spa->spa_ubsync.ub_version);
1765}
1766
1767boolean_t
1768spa_deflate(spa_t *spa)
1769{
1770	return (spa->spa_deflate);
1771}
1772
1773metaslab_class_t *
1774spa_normal_class(spa_t *spa)
1775{
1776	return (spa->spa_normal_class);
1777}
1778
1779metaslab_class_t *
1780spa_log_class(spa_t *spa)
1781{
1782	return (spa->spa_log_class);
1783}
1784
1785void
1786spa_evicting_os_register(spa_t *spa, objset_t *os)
1787{
1788	mutex_enter(&spa->spa_evicting_os_lock);
1789	list_insert_head(&spa->spa_evicting_os_list, os);
1790	mutex_exit(&spa->spa_evicting_os_lock);
1791}
1792
1793void
1794spa_evicting_os_deregister(spa_t *spa, objset_t *os)
1795{
1796	mutex_enter(&spa->spa_evicting_os_lock);
1797	list_remove(&spa->spa_evicting_os_list, os);
1798	cv_broadcast(&spa->spa_evicting_os_cv);
1799	mutex_exit(&spa->spa_evicting_os_lock);
1800}
1801
1802void
1803spa_evicting_os_wait(spa_t *spa)
1804{
1805	mutex_enter(&spa->spa_evicting_os_lock);
1806	while (!list_is_empty(&spa->spa_evicting_os_list))
1807		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
1808	mutex_exit(&spa->spa_evicting_os_lock);
1809
1810	dmu_buf_user_evict_wait();
1811}
1812
1813int
1814spa_max_replication(spa_t *spa)
1815{
1816	/*
1817	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
1818	 * handle BPs with more than one DVA allocated.  Set our max
1819	 * replication level accordingly.
1820	 */
1821	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
1822		return (1);
1823	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1824}
1825
1826int
1827spa_prev_software_version(spa_t *spa)
1828{
1829	return (spa->spa_prev_software_version);
1830}
1831
1832uint64_t
1833spa_deadman_synctime(spa_t *spa)
1834{
1835	return (spa->spa_deadman_synctime);
1836}
1837
1838uint64_t
1839dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
1840{
1841	uint64_t asize = DVA_GET_ASIZE(dva);
1842	uint64_t dsize = asize;
1843
1844	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1845
1846	if (asize != 0 && spa->spa_deflate) {
1847		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
1848		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
1849	}
1850
1851	return (dsize);
1852}
1853
1854uint64_t
1855bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
1856{
1857	uint64_t dsize = 0;
1858
1859	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1860		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1861
1862	return (dsize);
1863}
1864
1865uint64_t
1866bp_get_dsize(spa_t *spa, const blkptr_t *bp)
1867{
1868	uint64_t dsize = 0;
1869
1870	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1871
1872	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1873		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1874
1875	spa_config_exit(spa, SCL_VDEV, FTAG);
1876
1877	return (dsize);
1878}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

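/*
 * Comparator for spa_namespace_avl.  avl_create() requires a comparator
 * that returns exactly -1, 0, or 1, hence the normalization of the
 * strcmp() result below.
 */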
static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init(void)
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	refcount_init();
	unique_init();
	range_tree_init();
	metaslab_alloc_trace_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}
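
/*
 * Note that spa_fini() below tears down the subsystems that have a fini
 * counterpart in the reverse of the order they are initialized here.
 */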

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	metaslab_alloc_trace_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking is needed; a stale
 * answer is harmless here because the result is used only for
 * performance, not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
}

/*
 * Returns true if there is a pending sync task in the current syncing
 * txg, the currently quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset the per-pass pool scan statistics.  Called at the start of each
 * scan pass (and after a reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;
	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for 'zpool status' reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;

	return (0);
}
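
/*
 * Sketch of a hypothetical caller (the kernel ioctl path behind
 * 'zpool status' does something similar):
 *
 *	pool_scan_stat_t ps;
 *
 *	if (spa_scan_get_stats(spa, &ps) == 0)
 *		report(ps.pss_examined, ps.pss_to_examine);
 *
 * where report() stands in for whatever consumes the stats; ENOENT
 * simply means no scan has ever been started on this pool.
 */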

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

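/*
 * With the large_blocks feature enabled the limit is SPA_MAXBLOCKSIZE
 * (16M); otherwise it is the historical SPA_OLD_MAXBLOCKSIZE (128K).
 */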
int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}

/*
 * Returns the txg in which the last device removal completed.  No
 * indirect mappings have been added since that txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}
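
/*
 * The loop above walks the chain of removed top-level vdevs from newest
 * to oldest:
 *
 *	sr_prev_indirect_vdev -> vic_prev_indirect_vdev -> ... -> -1ULL
 *
 * and stops at the first removal that actually remapped data.
 */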

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}
