xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa_misc.c (revision 86714001)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
24  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26  * Copyright 2013 Saso Kiselkov. All rights reserved.
27  * Copyright (c) 2014 Integros [integros.com]
28  * Copyright (c) 2017 Datto Inc.
29  */
30 
31 #include <sys/zfs_context.h>
32 #include <sys/spa_impl.h>
33 #include <sys/spa_boot.h>
34 #include <sys/zio.h>
35 #include <sys/zio_checksum.h>
36 #include <sys/zio_compress.h>
37 #include <sys/dmu.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/zap.h>
40 #include <sys/zil.h>
41 #include <sys/vdev_impl.h>
42 #include <sys/metaslab.h>
43 #include <sys/uberblock_impl.h>
44 #include <sys/txg.h>
45 #include <sys/avl.h>
46 #include <sys/unique.h>
47 #include <sys/dsl_pool.h>
48 #include <sys/dsl_dir.h>
49 #include <sys/dsl_prop.h>
50 #include <sys/dsl_scan.h>
51 #include <sys/fs/zfs.h>
52 #include <sys/metaslab_impl.h>
53 #include <sys/arc.h>
54 #include <sys/ddt.h>
55 #include "zfs_prop.h"
56 #include <sys/zfeature.h>
57 
58 /*
59  * SPA locking
60  *
61  * There are four basic locks for managing spa_t structures:
62  *
63  * spa_namespace_lock (global mutex)
64  *
65  *	This lock must be acquired to do any of the following:
66  *
67  *		- Lookup a spa_t by name
68  *		- Add or remove a spa_t from the namespace
69  *		- Increase spa_refcount from zero
70  *		- Check if spa_refcount is zero
71  *		- Rename a spa_t
72  *		- add/remove/attach/detach devices
73  *		- Held for the duration of create/destroy/import/export
74  *
75  *	It does not need to handle recursion.  A create or destroy may
76  *	reference objects (files or zvols) in other pools, but by
77  *	definition they must have an existing reference, and will never need
78  *	to lookup a spa_t by name.
79  *
80  * spa_refcount (per-spa refcount_t protected by mutex)
81  *
82  *	This reference count keeps track of any active users of the spa_t.  The
83  *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
84  *	the refcount is never really 'zero' - opening a pool implicitly keeps
85  *	some references in the DMU.  Internally we check against spa_minref, but
86  *	present the image of a zero/non-zero value to consumers.
87  *
88  * spa_config_lock[] (per-spa array of rwlocks)
89  *
90  *	This protects the spa_t from config changes, and must be held in
91  *	the following circumstances:
92  *
93  *		- RW_READER to perform I/O to the spa
94  *		- RW_WRITER to change the vdev config
95  *
96  * The locking order is fairly straightforward:
97  *
98  *		spa_namespace_lock	->	spa_refcount
99  *
100  *	The namespace lock must be acquired to increase the refcount from 0
101  *	or to check if it is zero.
102  *
103  *		spa_refcount		->	spa_config_lock[]
104  *
105  *	There must be at least one valid reference on the spa_t to acquire
106  *	the config lock.
107  *
108  *		spa_namespace_lock	->	spa_config_lock[]
109  *
110  *	The namespace lock must always be taken before the config lock.
111  *
112  *
113  * The spa_namespace_lock can be acquired directly and is globally visible.
114  *
115  * The namespace is manipulated using the following functions, all of which
116  * require the spa_namespace_lock to be held.
117  *
118  *	spa_lookup()		Lookup a spa_t by name.
119  *
120  *	spa_add()		Create a new spa_t in the namespace.
121  *
122  *	spa_remove()		Remove a spa_t from the namespace.  This also
123  *				frees up any memory associated with the spa_t.
124  *
125  *	spa_next()		Returns the next spa_t in the system, or the
126  *				first if NULL is passed.
127  *
128  *	spa_evict_all()		Shutdown and remove all spa_t structures in
129  *				the system.
130  *
131  *	spa_guid_exists()	Determine whether a pool/device guid exists.
132  *
133  * The spa_refcount is manipulated using the following functions:
134  *
135  *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
136  *				called with spa_namespace_lock held if the
137  *				refcount is currently zero.
138  *
139  *	spa_close()		Remove a reference from the spa_t.  This will
140  *				not free the spa_t or remove it from the
141  *				namespace.  No locking is required.
142  *
143  *	spa_refcount_zero()	Returns true if the refcount is currently
144  *				zero.  Must be called with spa_namespace_lock
145  *				held.
146  *
147  * The spa_config_lock[] is an array of rwlocks, ordered as follows:
148  * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
149  * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
150  *
151  * To read the configuration, it suffices to hold one of these locks as reader.
152  * To modify the configuration, you must hold all locks as writer.  To modify
153  * vdev state without altering the vdev tree's topology (e.g. online/offline),
154  * you must hold SCL_STATE and SCL_ZIO as writer.
155  *
156  * We use these distinct config locks to avoid recursive lock entry.
157  * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
158  * block allocations (SCL_ALLOC), which may require reading space maps
159  * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
160  *
161  * The spa config locks cannot be normal rwlocks because we need the
162  * ability to hand off ownership.  For example, SCL_ZIO is acquired
163  * by the issuing thread and later released by an interrupt thread.
164  * They do, however, obey the usual write-wanted semantics to prevent
165  * writer (i.e. system administrator) starvation.
166  *
167  * The lock acquisition rules are as follows:
168  *
169  * SCL_CONFIG
170  *	Protects changes to the vdev tree topology, such as vdev
171  *	add/remove/attach/detach.  Protects the dirty config list
172  *	(spa_config_dirty_list) and the set of spares and l2arc devices.
173  *
174  * SCL_STATE
175  *	Protects changes to pool state and vdev state, such as vdev
176  *	online/offline/fault/degrade/clear.  Protects the dirty state list
177  *	(spa_state_dirty_list) and global pool state (spa_state).
178  *
179  * SCL_ALLOC
180  *	Protects changes to metaslab groups and classes.
181  *	Held as reader by metaslab_alloc() and metaslab_claim().
182  *
183  * SCL_ZIO
184  *	Held by bp-level zios (those which have no io_vd upon entry)
185  *	to prevent changes to the vdev tree.  The bp-level zio implicitly
186  *	protects all of its vdev child zios, which do not hold SCL_ZIO.
187  *
188  * SCL_FREE
189  *	Protects changes to metaslab groups and classes.
190  *	Held as reader by metaslab_free().  SCL_FREE is distinct from
191  *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
192  *	blocks in zio_done() while another i/o that holds either
193  *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
194  *
195  * SCL_VDEV
196  *	Held as reader to prevent changes to the vdev tree during trivial
197  *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
198  *	other locks, and lower than all of them, to ensure that it's safe
199  *	to acquire regardless of caller context.
200  *
201  * In addition, the following rules apply:
202  *
203  * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
204  *	The lock ordering is SCL_CONFIG > spa_props_lock.
205  *
206  * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
207  *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
208  *	or zio_write_phys() -- the caller must ensure that the config cannot
209  *	change in the interim, and that the vdev cannot be reopened.
210  *	SCL_STATE as reader suffices for both.
211  *
212  * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
213  *
214  *	spa_vdev_enter()	Acquire the namespace lock and the config lock
215  *				for writing.
216  *
217  *	spa_vdev_exit()		Release the config lock, wait for all I/O
218  *				to complete, sync the updated configs to the
219  *				cache, and release the namespace lock.
220  *
221  * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
222  * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
223  * locking is, always, based on spa_namespace_lock and spa_config_lock[].
224  *
225  * spa_rename() is also implemented within this file since it requires
226  * manipulation of the namespace.
227  */
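/*
 * Illustrative sketch (not part of this file): a consumer that only needs
 * to inspect the vdev tree of a named pool follows the ordering described
 * above -- the namespace lock to find and reference the spa_t, then a
 * config lock as reader for the actual work ("tank" is a hypothetical
 * pool name):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *
 *	if (spa != NULL) {
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		(inspect spa->spa_root_vdev here)
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *		spa_close(spa, FTAG);
 *	}
 */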
228 
229 static avl_tree_t spa_namespace_avl;
230 kmutex_t spa_namespace_lock;
231 static kcondvar_t spa_namespace_cv;
232 static int spa_active_count;
233 int spa_max_replication_override = SPA_DVAS_PER_BP;
234 
235 static kmutex_t spa_spare_lock;
236 static avl_tree_t spa_spare_avl;
237 static kmutex_t spa_l2cache_lock;
238 static avl_tree_t spa_l2cache_avl;
239 
240 kmem_cache_t *spa_buffer_pool;
241 int spa_mode_global;
242 
243 #ifdef ZFS_DEBUG
244 /*
245  * Everything except dprintf, spa, and indirect_remap is on by default
246  * in debug builds.
247  */
248 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA | ZFS_DEBUG_INDIRECT_REMAP);
249 #else
250 int zfs_flags = 0;
251 #endif
252 
253 /*
254  * zfs_recover can be set to nonzero to attempt to recover from
255  * otherwise-fatal errors, typically caused by on-disk corruption.  When
256  * set, calls to zfs_panic_recover() will turn into warning messages.
257  * This should only be used as a last resort, as it typically results
258  * in leaked space, or worse.
259  */
260 boolean_t zfs_recover = B_FALSE;
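/*
 * Illustrative sketch (assumed /etc/system syntax for a zfs module
 * tunable): enabling recovery so that zfs_panic_recover(), defined later
 * in this file, warns instead of panicking.
 *
 *	set zfs:zfs_recover = 1
 */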
261 
262 /*
263  * If destroy encounters an EIO while reading metadata (e.g. indirect
264  * blocks), space referenced by the missing metadata can not be freed.
265  * Normally this causes the background destroy to become "stalled", as
266  * it is unable to make forward progress.  While in this stalled state,
267  * all remaining space to free from the error-encountering filesystem is
268  * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
269  * permanently leak the space from indirect blocks that can not be read,
270  * and continue to free everything else that it can.
271  *
272  * The default, "stalling" behavior is useful if the storage partially
273  * fails (i.e. some but not all i/os fail), and then later recovers.  In
274  * this case, we will be able to continue pool operations while it is
275  * partially failed, and when it recovers, we can continue to free the
276  * space, with no leaks.  However, note that this case is actually
277  * fairly rare.
278  *
279  * Typically pools either (a) fail completely (but perhaps temporarily,
280  * e.g. a top-level vdev going offline), or (b) have localized,
281  * permanent errors (e.g. disk returns the wrong data due to bit flip or
282  * firmware bug).  In case (a), this setting does not matter because the
283  * pool will be suspended and the sync thread will not be able to make
284  * forward progress regardless.  In case (b), because the error is
285  * permanent, the best we can do is leak the minimum amount of space,
286  * which is what setting this flag will do.  Therefore, it is reasonable
287  * for this flag to normally be set, but we chose the more conservative
288  * approach of not setting it, so that there is no possibility of
289  * leaking space in the "partial temporary" failure case.
290  */
291 boolean_t zfs_free_leak_on_eio = B_FALSE;
292 
293 /*
294  * Expiration time in milliseconds. This value has two meanings. First, it is
295  * used to determine when the spa_deadman() logic should fire. By default,
296  * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
297  * Second, the value determines whether an I/O is considered "hung". Any I/O
298  * that has not completed in zfs_deadman_synctime_ms is considered "hung",
299  * resulting in a system panic.
300  */
301 uint64_t zfs_deadman_synctime_ms = 1000000ULL;
302 
303 /*
304  * Check time in milliseconds. This defines the frequency at which we check
305  * for hung I/O.
306  */
307 uint64_t zfs_deadman_checktime_ms = 5000ULL;
308 
309 /*
310  * Override the zfs deadman behavior via /etc/system. By default the
311  * deadman is enabled except on VMware and sparc deployments.
312  */
313 int zfs_deadman_enabled = -1;
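/*
 * Illustrative sketch (assumed /etc/system syntax): forcing the deadman
 * on and lowering the sync expiration to ten minutes.
 *
 *	set zfs:zfs_deadman_enabled = 1
 *	set zfs:zfs_deadman_synctime_ms = 600000
 */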
314 
315 /*
316  * The worst case is single-sector max-parity RAID-Z blocks, in which
317  * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
318  * times the size; so just assume that.  Add to this the fact that
319  * we can have up to 3 DVAs per bp, and one more factor of 2 because
320  * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
321  * the worst case is:
322  *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
323  */
324 int spa_asize_inflation = 24;
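/*
 * Worked example of the bound above: with VDEV_RAIDZ_MAXPARITY == 3 and
 * SPA_DVAS_PER_BP == 3, the factors are (3 + 1) * 3 * 2 == 24, so a 128K
 * logical write reserves at most 24 * 128K == 3M of allocatable space
 * (see spa_get_worst_case_asize() below).
 */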
325 
326 /*
327  * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
328  * the pool to be consumed.  This ensures that we don't run the pool
329  * completely out of space, due to unaccounted changes (e.g. to the MOS).
330  * It also limits the worst-case time to allocate space.  If we have
331  * less than this amount of free space, most ZPL operations (e.g. write,
332  * create) will return ENOSPC.
333  *
334  * Certain operations (e.g. file removal, most administrative actions) can
335  * use half the slop space.  They will only return ENOSPC if less than half
336  * the slop space is free.  Typically, once the pool has less than the slop
337  * space free, the user will use these operations to free up space in the pool.
338  * These are the operations that call dsl_pool_adjustedsize() with the netfree
339  * argument set to TRUE.
340  *
341  * Operations that are almost guaranteed to free up space in the absence of
342  * a pool checkpoint can use up to three quarters of the slop space
343  * (e.g. zfs destroy).
344  *
345  * A very restricted set of operations are always permitted, regardless of
346  * the amount of free space.  These are the operations that call
347  * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
348  * increase in the amount of space used, it is possible to run the pool
349  * completely out of space, causing it to be permanently read-only.
350  *
351  * Note that on very small pools, the slop space will be larger than
352  * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
353  * but we never allow it to be more than half the pool size.
354  *
355  * See also the comments in zfs_space_check_t.
356  */
357 int spa_slop_shift = 5;
358 uint64_t spa_min_slop = 128 * 1024 * 1024;
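/*
 * Worked example of the defaults above (see spa_get_slop_space() below):
 * on a 1T pool the slop is MAX(1T >> 5, MIN(1T >> 1, 128M)) == 32G; on a
 * 2G pool it is MAX(2G >> 5, MIN(2G >> 1, 128M)) == 128M, i.e. the
 * spa_min_slop floor takes over for small pools.
 */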
359 
360 /*PRINTFLIKE2*/
361 void
362 spa_load_failed(spa_t *spa, const char *fmt, ...)
363 {
364 	va_list adx;
365 	char buf[256];
366 
367 	va_start(adx, fmt);
368 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
369 	va_end(adx);
370 
371 	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
372 	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
373 }
374 
375 /*PRINTFLIKE2*/
376 void
377 spa_load_note(spa_t *spa, const char *fmt, ...)
378 {
379 	va_list adx;
380 	char buf[256];
381 
382 	va_start(adx, fmt);
383 	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
384 	va_end(adx);
385 
386 	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
387 	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
388 }
389 
390 /*
391  * ==========================================================================
392  * SPA config locking
393  * ==========================================================================
394  */
395 static void
396 spa_config_lock_init(spa_t *spa)
397 {
398 	for (int i = 0; i < SCL_LOCKS; i++) {
399 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
400 		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
401 		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
402 		refcount_create_untracked(&scl->scl_count);
403 		scl->scl_writer = NULL;
404 		scl->scl_write_wanted = 0;
405 	}
406 }
407 
408 static void
409 spa_config_lock_destroy(spa_t *spa)
410 {
411 	for (int i = 0; i < SCL_LOCKS; i++) {
412 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
413 		mutex_destroy(&scl->scl_lock);
414 		cv_destroy(&scl->scl_cv);
415 		refcount_destroy(&scl->scl_count);
416 		ASSERT(scl->scl_writer == NULL);
417 		ASSERT(scl->scl_write_wanted == 0);
418 	}
419 }
420 
421 int
422 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
423 {
424 	for (int i = 0; i < SCL_LOCKS; i++) {
425 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
426 		if (!(locks & (1 << i)))
427 			continue;
428 		mutex_enter(&scl->scl_lock);
429 		if (rw == RW_READER) {
430 			if (scl->scl_writer || scl->scl_write_wanted) {
431 				mutex_exit(&scl->scl_lock);
432 				spa_config_exit(spa, locks & ((1 << i) - 1),
433 				    tag);
434 				return (0);
435 			}
436 		} else {
437 			ASSERT(scl->scl_writer != curthread);
438 			if (!refcount_is_zero(&scl->scl_count)) {
439 				mutex_exit(&scl->scl_lock);
440 				spa_config_exit(spa, locks & ((1 << i) - 1),
441 				    tag);
442 				return (0);
443 			}
444 			scl->scl_writer = curthread;
445 		}
446 		(void) refcount_add(&scl->scl_count, tag);
447 		mutex_exit(&scl->scl_lock);
448 	}
449 	return (1);
450 }
451 
452 void
453 spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
454 {
455 	int wlocks_held = 0;
456 
457 	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
458 
459 	for (int i = 0; i < SCL_LOCKS; i++) {
460 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
461 		if (scl->scl_writer == curthread)
462 			wlocks_held |= (1 << i);
463 		if (!(locks & (1 << i)))
464 			continue;
465 		mutex_enter(&scl->scl_lock);
466 		if (rw == RW_READER) {
467 			while (scl->scl_writer || scl->scl_write_wanted) {
468 				cv_wait(&scl->scl_cv, &scl->scl_lock);
469 			}
470 		} else {
471 			ASSERT(scl->scl_writer != curthread);
472 			while (!refcount_is_zero(&scl->scl_count)) {
473 				scl->scl_write_wanted++;
474 				cv_wait(&scl->scl_cv, &scl->scl_lock);
475 				scl->scl_write_wanted--;
476 			}
477 			scl->scl_writer = curthread;
478 		}
479 		(void) refcount_add(&scl->scl_count, tag);
480 		mutex_exit(&scl->scl_lock);
481 	}
482 	ASSERT3U(wlocks_held, <=, locks);
483 }
484 
485 void
486 spa_config_exit(spa_t *spa, int locks, void *tag)
487 {
488 	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
489 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
490 		if (!(locks & (1 << i)))
491 			continue;
492 		mutex_enter(&scl->scl_lock);
493 		ASSERT(!refcount_is_zero(&scl->scl_count));
494 		if (refcount_remove(&scl->scl_count, tag) == 0) {
495 			ASSERT(scl->scl_writer == NULL ||
496 			    scl->scl_writer == curthread);
497 			scl->scl_writer = NULL;	/* OK in either case */
498 			cv_broadcast(&scl->scl_cv);
499 		}
500 		mutex_exit(&scl->scl_lock);
501 	}
502 }
503 
504 int
505 spa_config_held(spa_t *spa, int locks, krw_t rw)
506 {
507 	int locks_held = 0;
508 
509 	for (int i = 0; i < SCL_LOCKS; i++) {
510 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
511 		if (!(locks & (1 << i)))
512 			continue;
513 		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
514 		    (rw == RW_WRITER && scl->scl_writer == curthread))
515 			locks_held |= 1 << i;
516 	}
517 
518 	return (locks_held);
519 }
520 
521 /*
522  * ==========================================================================
523  * SPA namespace functions
524  * ==========================================================================
525  */
526 
527 /*
528  * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
529  * Returns NULL if no matching spa_t is found.
530  */
531 spa_t *
532 spa_lookup(const char *name)
533 {
534 	static spa_t search;	/* spa_t is large; don't allocate on stack */
535 	spa_t *spa;
536 	avl_index_t where;
537 	char *cp;
538 
539 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
540 
541 	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
542 
543 	/*
544 	 * If it's a full dataset name, figure out the pool name and
545 	 * just use that.
546 	 */
547 	cp = strpbrk(search.spa_name, "/@#");
548 	if (cp != NULL)
549 		*cp = '\0';
550 
551 	spa = avl_find(&spa_namespace_avl, &search, &where);
552 
553 	return (spa);
554 }
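/*
 * Example of the truncation above: spa_lookup("tank/home@yesterday") and
 * spa_lookup("tank") both return the spa_t for the (hypothetical) pool
 * "tank", since the search name is cut at the first '/', '@' or '#'.
 */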
555 
556 /*
557  * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
558  * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
559  * looking for potentially hung I/Os.
560  */
561 void
562 spa_deadman(void *arg)
563 {
564 	spa_t *spa = arg;
565 
566 	/*
567 	 * Disable the deadman timer if the pool is suspended.
568 	 */
569 	if (spa_suspended(spa)) {
570 		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
571 		return;
572 	}
573 
574 	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
575 	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
576 	    ++spa->spa_deadman_calls);
577 	if (zfs_deadman_enabled)
578 		vdev_deadman(spa->spa_root_vdev);
579 }
580 
581 /*
582  * Create an uninitialized spa_t with the given name.  Requires
583  * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
584  * exist by calling spa_lookup() first.
585  */
586 spa_t *
587 spa_add(const char *name, nvlist_t *config, const char *altroot)
588 {
589 	spa_t *spa;
590 	spa_config_dirent_t *dp;
591 	cyc_handler_t hdlr;
592 	cyc_time_t when;
593 
594 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
595 
596 	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
597 
598 	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
599 	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
600 	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
601 	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
602 	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
603 	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
604 	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
605 	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
606 	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
607 	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
608 	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
609 	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
610 	mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL);
611 
612 	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
613 	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
614 	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
615 	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
616 	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
617 
618 	for (int t = 0; t < TXG_SIZE; t++)
619 		bplist_create(&spa->spa_free_bplist[t]);
620 
621 	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
622 	spa->spa_state = POOL_STATE_UNINITIALIZED;
623 	spa->spa_freeze_txg = UINT64_MAX;
624 	spa->spa_final_txg = UINT64_MAX;
625 	spa->spa_load_max_txg = UINT64_MAX;
626 	spa->spa_proc = &p0;
627 	spa->spa_proc_state = SPA_PROC_NONE;
628 	spa->spa_trust_config = B_TRUE;
629 
630 	hdlr.cyh_func = spa_deadman;
631 	hdlr.cyh_arg = spa;
632 	hdlr.cyh_level = CY_LOW_LEVEL;
633 
634 	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
635 
636 	/*
637 	 * This determines how often we need to check for hung I/Os after
638 	 * the cyclic has already fired. Since checking for hung I/Os is
639 	 * an expensive operation, we don't want to check too frequently.
640 	 * Instead, wait for 5 seconds before checking again.
641 	 */
642 	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
643 	when.cyt_when = CY_INFINITY;
644 	mutex_enter(&cpu_lock);
645 	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
646 	mutex_exit(&cpu_lock);
647 
648 	refcount_create(&spa->spa_refcount);
649 	spa_config_lock_init(spa);
650 
651 	avl_add(&spa_namespace_avl, spa);
652 
653 	/*
654 	 * Set the alternate root, if there is one.
655 	 */
656 	if (altroot) {
657 		spa->spa_root = spa_strdup(altroot);
658 		spa_active_count++;
659 	}
660 
661 	avl_create(&spa->spa_alloc_tree, zio_bookmark_compare,
662 	    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
663 
664 	/*
665 	 * Every pool starts with the default cachefile
666 	 */
667 	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
668 	    offsetof(spa_config_dirent_t, scd_link));
669 
670 	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
671 	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
672 	list_insert_head(&spa->spa_config_list, dp);
673 
674 	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
675 	    KM_SLEEP) == 0);
676 
677 	if (config != NULL) {
678 		nvlist_t *features;
679 
680 		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
681 		    &features) == 0) {
682 			VERIFY(nvlist_dup(features, &spa->spa_label_features,
683 			    0) == 0);
684 		}
685 
686 		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
687 	}
688 
689 	if (spa->spa_label_features == NULL) {
690 		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
691 		    KM_SLEEP) == 0);
692 	}
693 
694 	spa->spa_iokstat = kstat_create("zfs", 0, name,
695 	    "disk", KSTAT_TYPE_IO, 1, 0);
696 	if (spa->spa_iokstat) {
697 		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
698 		kstat_install(spa->spa_iokstat);
699 	}
700 
701 	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);
702 
703 	spa->spa_min_ashift = INT_MAX;
704 	spa->spa_max_ashift = 0;
705 
706 	/*
707 	 * As a pool is being created, treat all features as disabled by
708 	 * setting SPA_FEATURE_DISABLED for all entries in the feature
709 	 * refcount cache.
710 	 */
711 	for (int i = 0; i < SPA_FEATURES; i++) {
712 		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
713 	}
714 
715 	return (spa);
716 }
717 
718 /*
719  * Removes a spa_t from the namespace, freeing up any memory used.  Requires
720  * spa_namespace_lock.  This is called only after the spa_t has been closed and
721  * deactivated.
722  */
723 void
724 spa_remove(spa_t *spa)
725 {
726 	spa_config_dirent_t *dp;
727 
728 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
729 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
730 	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);
731 
732 	nvlist_free(spa->spa_config_splitting);
733 
734 	avl_remove(&spa_namespace_avl, spa);
735 	cv_broadcast(&spa_namespace_cv);
736 
737 	if (spa->spa_root) {
738 		spa_strfree(spa->spa_root);
739 		spa_active_count--;
740 	}
741 
742 	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
743 		list_remove(&spa->spa_config_list, dp);
744 		if (dp->scd_path != NULL)
745 			spa_strfree(dp->scd_path);
746 		kmem_free(dp, sizeof (spa_config_dirent_t));
747 	}
748 
749 	avl_destroy(&spa->spa_alloc_tree);
750 	list_destroy(&spa->spa_config_list);
751 
752 	nvlist_free(spa->spa_label_features);
753 	nvlist_free(spa->spa_load_info);
754 	spa_config_set(spa, NULL);
755 
756 	mutex_enter(&cpu_lock);
757 	if (spa->spa_deadman_cycid != CYCLIC_NONE)
758 		cyclic_remove(spa->spa_deadman_cycid);
759 	mutex_exit(&cpu_lock);
760 	spa->spa_deadman_cycid = CYCLIC_NONE;
761 
762 	refcount_destroy(&spa->spa_refcount);
763 
764 	spa_config_lock_destroy(spa);
765 
766 	kstat_delete(spa->spa_iokstat);
767 	spa->spa_iokstat = NULL;
768 
769 	for (int t = 0; t < TXG_SIZE; t++)
770 		bplist_destroy(&spa->spa_free_bplist[t]);
771 
772 	zio_checksum_templates_free(spa);
773 
774 	cv_destroy(&spa->spa_async_cv);
775 	cv_destroy(&spa->spa_evicting_os_cv);
776 	cv_destroy(&spa->spa_proc_cv);
777 	cv_destroy(&spa->spa_scrub_io_cv);
778 	cv_destroy(&spa->spa_suspend_cv);
779 
780 	mutex_destroy(&spa->spa_alloc_lock);
781 	mutex_destroy(&spa->spa_async_lock);
782 	mutex_destroy(&spa->spa_errlist_lock);
783 	mutex_destroy(&spa->spa_errlog_lock);
784 	mutex_destroy(&spa->spa_evicting_os_lock);
785 	mutex_destroy(&spa->spa_history_lock);
786 	mutex_destroy(&spa->spa_proc_lock);
787 	mutex_destroy(&spa->spa_props_lock);
788 	mutex_destroy(&spa->spa_cksum_tmpls_lock);
789 	mutex_destroy(&spa->spa_scrub_lock);
790 	mutex_destroy(&spa->spa_suspend_lock);
791 	mutex_destroy(&spa->spa_vdev_top_lock);
792 	mutex_destroy(&spa->spa_iokstat_lock);
793 
794 	kmem_free(spa, sizeof (spa_t));
795 }
796 
797 /*
798  * Given a pool, return the next pool in the namespace, or NULL if there is
799  * none.  If 'prev' is NULL, return the first pool.
800  */
801 spa_t *
802 spa_next(spa_t *prev)
803 {
804 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
805 
806 	if (prev)
807 		return (AVL_NEXT(&spa_namespace_avl, prev));
808 	else
809 		return (avl_first(&spa_namespace_avl));
810 }
811 
812 /*
813  * ==========================================================================
814  * SPA refcount functions
815  * ==========================================================================
816  */
817 
818 /*
819  * Add a reference to the given spa_t.  Must have at least one reference, or
820  * have the namespace lock held.
821  */
822 void
823 spa_open_ref(spa_t *spa, void *tag)
824 {
825 	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
826 	    MUTEX_HELD(&spa_namespace_lock));
827 	(void) refcount_add(&spa->spa_refcount, tag);
828 }
829 
830 /*
831  * Remove a reference to the given spa_t.  Must have at least one reference, or
832  * have the namespace lock held.
833  */
834 void
835 spa_close(spa_t *spa, void *tag)
836 {
837 	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
838 	    MUTEX_HELD(&spa_namespace_lock));
839 	(void) refcount_remove(&spa->spa_refcount, tag);
840 }
841 
842 /*
843  * Remove a reference to the given spa_t held by a dsl dir that is
844  * being asynchronously released.  Async releases occur from a taskq
845  * performing eviction of dsl datasets and dirs.  The namespace lock
846  * isn't held and the hold by the object being evicted may contribute to
847  * spa_minref (e.g. dataset or directory released during pool export),
848  * so the asserts in spa_close() do not apply.
849  */
850 void
851 spa_async_close(spa_t *spa, void *tag)
852 {
853 	(void) refcount_remove(&spa->spa_refcount, tag);
854 }
855 
856 /*
857  * Check to see if the spa refcount is zero.  Must be called with
858  * spa_namespace_lock held.  We really compare against spa_minref, which is the
859  * number of references acquired when opening a pool.
860  */
861 boolean_t
862 spa_refcount_zero(spa_t *spa)
863 {
864 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
865 
866 	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
867 }
868 
869 /*
870  * ==========================================================================
871  * SPA spare and l2cache tracking
872  * ==========================================================================
873  */
874 
875 /*
876  * Hot spares and cache devices are tracked using the same code below,
877  * for 'auxiliary' devices.
878  */
879 
880 typedef struct spa_aux {
881 	uint64_t	aux_guid;
882 	uint64_t	aux_pool;
883 	avl_node_t	aux_avl;
884 	int		aux_count;
885 } spa_aux_t;
886 
887 static int
888 spa_aux_compare(const void *a, const void *b)
889 {
890 	const spa_aux_t *sa = a;
891 	const spa_aux_t *sb = b;
892 
893 	if (sa->aux_guid < sb->aux_guid)
894 		return (-1);
895 	else if (sa->aux_guid > sb->aux_guid)
896 		return (1);
897 	else
898 		return (0);
899 }
900 
901 void
902 spa_aux_add(vdev_t *vd, avl_tree_t *avl)
903 {
904 	avl_index_t where;
905 	spa_aux_t search;
906 	spa_aux_t *aux;
907 
908 	search.aux_guid = vd->vdev_guid;
909 	if ((aux = avl_find(avl, &search, &where)) != NULL) {
910 		aux->aux_count++;
911 	} else {
912 		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
913 		aux->aux_guid = vd->vdev_guid;
914 		aux->aux_count = 1;
915 		avl_insert(avl, aux, where);
916 	}
917 }
918 
919 void
920 spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
921 {
922 	spa_aux_t search;
923 	spa_aux_t *aux;
924 	avl_index_t where;
925 
926 	search.aux_guid = vd->vdev_guid;
927 	aux = avl_find(avl, &search, &where);
928 
929 	ASSERT(aux != NULL);
930 
931 	if (--aux->aux_count == 0) {
932 		avl_remove(avl, aux);
933 		kmem_free(aux, sizeof (spa_aux_t));
934 	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
935 		aux->aux_pool = 0ULL;
936 	}
937 }
938 
939 boolean_t
940 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
941 {
942 	spa_aux_t search, *found;
943 
944 	search.aux_guid = guid;
945 	found = avl_find(avl, &search, NULL);
946 
947 	if (pool) {
948 		if (found)
949 			*pool = found->aux_pool;
950 		else
951 			*pool = 0ULL;
952 	}
953 
954 	if (refcnt) {
955 		if (found)
956 			*refcnt = found->aux_count;
957 		else
958 			*refcnt = 0;
959 	}
960 
961 	return (found != NULL);
962 }
963 
964 void
965 spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
966 {
967 	spa_aux_t search, *found;
968 	avl_index_t where;
969 
970 	search.aux_guid = vd->vdev_guid;
971 	found = avl_find(avl, &search, &where);
972 	ASSERT(found != NULL);
973 	ASSERT(found->aux_pool == 0ULL);
974 
975 	found->aux_pool = spa_guid(vd->vdev_spa);
976 }
977 
978 /*
979  * Spares are tracked globally due to the following constraints:
980  *
981  * 	- A spare may be part of multiple pools.
982  * 	- A spare may be added to a pool even if it's actively in use within
983  *	  another pool.
984  * 	- A spare in use in any pool can only be the source of a replacement if
985  *	  the target is a spare in the same pool.
986  *
987  * We keep track of all spares on the system through the use of a reference
988  * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
989  * spare, then we bump the reference count in the AVL tree.  In addition, we set
990  * the 'vdev_isspare' member to indicate that the device is a spare (active or
991  * inactive).  When a spare is made active (used to replace a device in the
992  * pool), we also keep track of which pool it's been made a part of.
993  *
994  * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
995  * called under the spa_namespace lock as part of vdev reconfiguration.  The
996  * separate spare lock exists for the status query path, which does not need to
997  * be completely consistent with respect to other vdev configuration changes.
998  */
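/*
 * Illustrative sketch of how a caller typically consumes this state
 * (everything other than spa_spare_exists() and vd->vdev_guid is
 * paraphrased):
 *
 *	uint64_t pool;
 *	int refcnt;
 *
 *	if (spa_spare_exists(vd->vdev_guid, &pool, &refcnt) && pool != 0ULL)
 *		(the device is an active spare in the pool with guid 'pool')
 *	else if (refcnt > 1)
 *		(the device is an inactive spare known to several pools)
 */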
999 
1000 static int
1001 spa_spare_compare(const void *a, const void *b)
1002 {
1003 	return (spa_aux_compare(a, b));
1004 }
1005 
1006 void
1007 spa_spare_add(vdev_t *vd)
1008 {
1009 	mutex_enter(&spa_spare_lock);
1010 	ASSERT(!vd->vdev_isspare);
1011 	spa_aux_add(vd, &spa_spare_avl);
1012 	vd->vdev_isspare = B_TRUE;
1013 	mutex_exit(&spa_spare_lock);
1014 }
1015 
1016 void
1017 spa_spare_remove(vdev_t *vd)
1018 {
1019 	mutex_enter(&spa_spare_lock);
1020 	ASSERT(vd->vdev_isspare);
1021 	spa_aux_remove(vd, &spa_spare_avl);
1022 	vd->vdev_isspare = B_FALSE;
1023 	mutex_exit(&spa_spare_lock);
1024 }
1025 
1026 boolean_t
1027 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
1028 {
1029 	boolean_t found;
1030 
1031 	mutex_enter(&spa_spare_lock);
1032 	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
1033 	mutex_exit(&spa_spare_lock);
1034 
1035 	return (found);
1036 }
1037 
1038 void
1039 spa_spare_activate(vdev_t *vd)
1040 {
1041 	mutex_enter(&spa_spare_lock);
1042 	ASSERT(vd->vdev_isspare);
1043 	spa_aux_activate(vd, &spa_spare_avl);
1044 	mutex_exit(&spa_spare_lock);
1045 }
1046 
1047 /*
1048  * Level 2 ARC devices are tracked globally for the same reasons as spares.
1049  * Cache devices currently only support one pool per cache device, and so
1050  * for these devices the aux reference count is currently unused beyond 1.
1051  */
1052 
1053 static int
1054 spa_l2cache_compare(const void *a, const void *b)
1055 {
1056 	return (spa_aux_compare(a, b));
1057 }
1058 
1059 void
1060 spa_l2cache_add(vdev_t *vd)
1061 {
1062 	mutex_enter(&spa_l2cache_lock);
1063 	ASSERT(!vd->vdev_isl2cache);
1064 	spa_aux_add(vd, &spa_l2cache_avl);
1065 	vd->vdev_isl2cache = B_TRUE;
1066 	mutex_exit(&spa_l2cache_lock);
1067 }
1068 
1069 void
1070 spa_l2cache_remove(vdev_t *vd)
1071 {
1072 	mutex_enter(&spa_l2cache_lock);
1073 	ASSERT(vd->vdev_isl2cache);
1074 	spa_aux_remove(vd, &spa_l2cache_avl);
1075 	vd->vdev_isl2cache = B_FALSE;
1076 	mutex_exit(&spa_l2cache_lock);
1077 }
1078 
1079 boolean_t
1080 spa_l2cache_exists(uint64_t guid, uint64_t *pool)
1081 {
1082 	boolean_t found;
1083 
1084 	mutex_enter(&spa_l2cache_lock);
1085 	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
1086 	mutex_exit(&spa_l2cache_lock);
1087 
1088 	return (found);
1089 }
1090 
1091 void
1092 spa_l2cache_activate(vdev_t *vd)
1093 {
1094 	mutex_enter(&spa_l2cache_lock);
1095 	ASSERT(vd->vdev_isl2cache);
1096 	spa_aux_activate(vd, &spa_l2cache_avl);
1097 	mutex_exit(&spa_l2cache_lock);
1098 }
1099 
1100 /*
1101  * ==========================================================================
1102  * SPA vdev locking
1103  * ==========================================================================
1104  */
1105 
1106 /*
1107  * Lock the given spa_t for the purpose of adding or removing a vdev.
1108  * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1109  * It returns the next transaction group for the spa_t.
1110  */
1111 uint64_t
1112 spa_vdev_enter(spa_t *spa)
1113 {
1114 	mutex_enter(&spa->spa_vdev_top_lock);
1115 	mutex_enter(&spa_namespace_lock);
1116 	return (spa_vdev_config_enter(spa));
1117 }
1118 
1119 /*
1120  * Internal implementation for spa_vdev_enter().  Used when a vdev
1121  * operation requires multiple syncs (i.e. removing a device) while
1122  * keeping the spa_namespace_lock held.
1123  */
1124 uint64_t
1125 spa_vdev_config_enter(spa_t *spa)
1126 {
1127 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1128 
1129 	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1130 
1131 	return (spa_last_synced_txg(spa) + 1);
1132 }
1133 
1134 /*
1135  * Used in combination with spa_vdev_config_enter() to allow the syncing
1136  * of multiple transactions without releasing the spa_namespace_lock.
1137  */
1138 void
1139 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
1140 {
1141 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1142 
1143 	int config_changed = B_FALSE;
1144 
1145 	ASSERT(txg > spa_last_synced_txg(spa));
1146 
1147 	spa->spa_pending_vdev = NULL;
1148 
1149 	/*
1150 	 * Reassess the DTLs.
1151 	 */
1152 	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
1153 
1154 	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
1155 		config_changed = B_TRUE;
1156 		spa->spa_config_generation++;
1157 	}
1158 
1159 	/*
1160 	 * Verify the metaslab classes.
1161 	 */
1162 	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
1163 	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
1164 
1165 	spa_config_exit(spa, SCL_ALL, spa);
1166 
1167 	/*
1168 	 * Panic the system if the specified tag requires it.  This
1169 	 * is useful for ensuring that configurations are updated
1170 	 * transactionally.
1171 	 */
1172 	if (zio_injection_enabled)
1173 		zio_handle_panic_injection(spa, tag, 0);
1174 
1175 	/*
1176 	 * Note: this txg_wait_synced() is important because it ensures
1177 	 * that there won't be more than one config change per txg.
1178 	 * This allows us to use the txg as the generation number.
1179 	 */
1180 	if (error == 0)
1181 		txg_wait_synced(spa->spa_dsl_pool, txg);
1182 
1183 	if (vd != NULL) {
1184 		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1185 		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1186 		vdev_free(vd);
1187 		spa_config_exit(spa, SCL_ALL, spa);
1188 	}
1189 
1190 	/*
1191 	 * If the config changed, update the config cache.
1192 	 */
1193 	if (config_changed)
1194 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
1195 }
1196 
1197 /*
1198  * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
1199  * locking of spa_vdev_enter(), we also want to make sure the transactions have
1200  * synced to disk, and then update the global configuration cache with the new
1201  * information.
1202  */
1203 int
1204 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1205 {
1206 	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1207 	mutex_exit(&spa_namespace_lock);
1208 	mutex_exit(&spa->spa_vdev_top_lock);
1209 
1210 	return (error);
1211 }
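/*
 * Illustrative sketch of the usual calling pattern for the pair above
 * (the middle step is paraphrased; 'vd_to_free' may simply be NULL):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *
 *	error = (add, attach, detach or otherwise modify the vdev tree);
 *
 *	return (spa_vdev_exit(spa, vd_to_free, txg, error));
 *
 * spa_vdev_exit() releases the locks, waits for 'txg' to sync when
 * error == 0, frees 'vd_to_free' if it is non-NULL, and returns 'error'
 * unchanged.
 */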
1212 
1213 /*
1214  * Lock the given spa_t for the purpose of changing vdev state.
1215  */
1216 void
1217 spa_vdev_state_enter(spa_t *spa, int oplocks)
1218 {
1219 	int locks = SCL_STATE_ALL | oplocks;
1220 
1221 	/*
1222 	 * Root pools may need to read from the underlying devfs filesystem
1223 	 * when opening up a vdev.  Unfortunately if we're holding the
1224 	 * SCL_ZIO lock it will result in a deadlock when we try to issue
1225 	 * the read from the root filesystem.  Instead we "prefetch"
1226 	 * the associated vnodes that we need prior to opening the
1227 	 * underlying devices and cache them so that we can prevent
1228 	 * any I/O when we are doing the actual open.
1229 	 */
1230 	if (spa_is_root(spa)) {
1231 		int low = locks & ~(SCL_ZIO - 1);
1232 		int high = locks & ~low;
1233 
1234 		spa_config_enter(spa, high, spa, RW_WRITER);
1235 		vdev_hold(spa->spa_root_vdev);
1236 		spa_config_enter(spa, low, spa, RW_WRITER);
1237 	} else {
1238 		spa_config_enter(spa, locks, spa, RW_WRITER);
1239 	}
1240 	spa->spa_vdev_locks = locks;
1241 }
1242 
1243 int
1244 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1245 {
1246 	boolean_t config_changed = B_FALSE;
1247 
1248 	if (vd != NULL || error == 0)
1249 		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
1250 		    0, 0, B_FALSE);
1251 
1252 	if (vd != NULL) {
1253 		vdev_state_dirty(vd->vdev_top);
1254 		config_changed = B_TRUE;
1255 		spa->spa_config_generation++;
1256 	}
1257 
1258 	if (spa_is_root(spa))
1259 		vdev_rele(spa->spa_root_vdev);
1260 
1261 	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1262 	spa_config_exit(spa, spa->spa_vdev_locks, spa);
1263 
1264 	/*
1265 	 * If anything changed, wait for it to sync.  This ensures that,
1266 	 * from the system administrator's perspective, zpool(1M) commands
1267 	 * are synchronous.  This is important for things like zpool offline:
1268 	 * when the command completes, you expect no further I/O from ZFS.
1269 	 */
1270 	if (vd != NULL)
1271 		txg_wait_synced(spa->spa_dsl_pool, 0);
1272 
1273 	/*
1274 	 * If the config changed, update the config cache.
1275 	 */
1276 	if (config_changed) {
1277 		mutex_enter(&spa_namespace_lock);
1278 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
1279 		mutex_exit(&spa_namespace_lock);
1280 	}
1281 
1282 	return (error);
1283 }
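/*
 * Illustrative sketch of the state-change pattern used by callers such as
 * vdev online/offline (the lookup step is paraphrased):
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *
 *	vd = (find the leaf vdev with the requested guid);
 *	if (vd == NULL)
 *		return (spa_vdev_state_exit(spa, NULL, ENODEV));
 *
 *	(change vd's state here)
 *
 *	return (spa_vdev_state_exit(spa, vd, 0));
 */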
1284 
1285 /*
1286  * ==========================================================================
1287  * Miscellaneous functions
1288  * ==========================================================================
1289  */
1290 
1291 void
1292 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
1293 {
1294 	if (!nvlist_exists(spa->spa_label_features, feature)) {
1295 		fnvlist_add_boolean(spa->spa_label_features, feature);
1296 		/*
1297 		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1298 		 * dirty the vdev config because the SCL_CONFIG lock is not held.
1299 		 * Thankfully, in this case we don't need to dirty the config
1300 		 * because it will be written out anyway when we finish
1301 		 * creating the pool.
1302 		 */
1303 		if (tx->tx_txg != TXG_INITIAL)
1304 			vdev_config_dirty(spa->spa_root_vdev);
1305 	}
1306 }
1307 
1308 void
1309 spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1310 {
1311 	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
1312 		vdev_config_dirty(spa->spa_root_vdev);
1313 }
1314 
1315 /*
1316  * Rename a spa_t.
1317  */
1318 int
1319 spa_rename(const char *name, const char *newname)
1320 {
1321 	spa_t *spa;
1322 	int err;
1323 
1324 	/*
1325 	 * Lookup the spa_t and grab the config lock for writing.  We need to
1326 	 * actually open the pool so that we can sync out the necessary labels.
1327 	 * It's OK to call spa_open() with the namespace lock held because we
1328 	 * allow recursive calls for other reasons.
1329 	 */
1330 	mutex_enter(&spa_namespace_lock);
1331 	if ((err = spa_open(name, &spa, FTAG)) != 0) {
1332 		mutex_exit(&spa_namespace_lock);
1333 		return (err);
1334 	}
1335 
1336 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1337 
1338 	avl_remove(&spa_namespace_avl, spa);
1339 	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
1340 	avl_add(&spa_namespace_avl, spa);
1341 
1342 	/*
1343 	 * Sync all labels to disk with the new names by marking the root vdev
1344 	 * dirty and waiting for it to sync.  It will pick up the new pool name
1345 	 * during the sync.
1346 	 */
1347 	vdev_config_dirty(spa->spa_root_vdev);
1348 
1349 	spa_config_exit(spa, SCL_ALL, FTAG);
1350 
1351 	txg_wait_synced(spa->spa_dsl_pool, 0);
1352 
1353 	/*
1354 	 * Sync the updated config cache.
1355 	 */
1356 	spa_write_cachefile(spa, B_FALSE, B_TRUE);
1357 
1358 	spa_close(spa, FTAG);
1359 
1360 	mutex_exit(&spa_namespace_lock);
1361 
1362 	return (0);
1363 }
1364 
1365 /*
1366  * Return the spa_t associated with given pool_guid, if it exists.  If
1367  * device_guid is non-zero, determine whether the pool exists *and* contains
1368  * a device with the specified device_guid.
1369  */
1370 spa_t *
1371 spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1372 {
1373 	spa_t *spa;
1374 	avl_tree_t *t = &spa_namespace_avl;
1375 
1376 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1377 
1378 	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1379 		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1380 			continue;
1381 		if (spa->spa_root_vdev == NULL)
1382 			continue;
1383 		if (spa_guid(spa) == pool_guid) {
1384 			if (device_guid == 0)
1385 				break;
1386 
1387 			if (vdev_lookup_by_guid(spa->spa_root_vdev,
1388 			    device_guid) != NULL)
1389 				break;
1390 
1391 			/*
1392 			 * Check any devices we may be in the process of adding.
1393 			 */
1394 			if (spa->spa_pending_vdev) {
1395 				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1396 				    device_guid) != NULL)
1397 					break;
1398 			}
1399 		}
1400 	}
1401 
1402 	return (spa);
1403 }
1404 
1405 /*
1406  * Determine whether a pool with the given pool_guid exists.
1407  */
1408 boolean_t
1409 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1410 {
1411 	return (spa_by_guid(pool_guid, device_guid) != NULL);
1412 }
1413 
1414 char *
1415 spa_strdup(const char *s)
1416 {
1417 	size_t len;
1418 	char *new;
1419 
1420 	len = strlen(s);
1421 	new = kmem_alloc(len + 1, KM_SLEEP);
1422 	bcopy(s, new, len);
1423 	new[len] = '\0';
1424 
1425 	return (new);
1426 }
1427 
1428 void
1429 spa_strfree(char *s)
1430 {
1431 	kmem_free(s, strlen(s) + 1);
1432 }
1433 
1434 uint64_t
1435 spa_get_random(uint64_t range)
1436 {
1437 	uint64_t r;
1438 
1439 	ASSERT(range != 0);
1440 
1441 	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
1442 
1443 	return (r % range);
1444 }
1445 
1446 uint64_t
1447 spa_generate_guid(spa_t *spa)
1448 {
1449 	uint64_t guid = spa_get_random(-1ULL);
1450 
1451 	if (spa != NULL) {
1452 		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
1453 			guid = spa_get_random(-1ULL);
1454 	} else {
1455 		while (guid == 0 || spa_guid_exists(guid, 0))
1456 			guid = spa_get_random(-1ULL);
1457 	}
1458 
1459 	return (guid);
1460 }
1461 
1462 void
1463 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1464 {
1465 	char type[256];
1466 	char *checksum = NULL;
1467 	char *compress = NULL;
1468 
1469 	if (bp != NULL) {
1470 		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1471 			dmu_object_byteswap_t bswap =
1472 			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1473 			(void) snprintf(type, sizeof (type), "bswap %s %s",
1474 			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1475 			    "metadata" : "data",
1476 			    dmu_ot_byteswap[bswap].ob_name);
1477 		} else {
1478 			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1479 			    sizeof (type));
1480 		}
1481 		if (!BP_IS_EMBEDDED(bp)) {
1482 			checksum =
1483 			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1484 		}
1485 		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1486 	}
1487 
1488 	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
1489 	    compress);
1490 }
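/*
 * Typical usage sketch (BP_SPRINTF_LEN, from sys/spa.h, is sized for the
 * longest possible rendering):
 *
 *	char blkbuf[BP_SPRINTF_LEN];
 *
 *	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
 *	zfs_dbgmsg("freeing %s", blkbuf);
 */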
1491 
1492 void
1493 spa_freeze(spa_t *spa)
1494 {
1495 	uint64_t freeze_txg = 0;
1496 
1497 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1498 	if (spa->spa_freeze_txg == UINT64_MAX) {
1499 		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1500 		spa->spa_freeze_txg = freeze_txg;
1501 	}
1502 	spa_config_exit(spa, SCL_ALL, FTAG);
1503 	if (freeze_txg != 0)
1504 		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1505 }
1506 
1507 void
1508 zfs_panic_recover(const char *fmt, ...)
1509 {
1510 	va_list adx;
1511 
1512 	va_start(adx, fmt);
1513 	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
1514 	va_end(adx);
1515 }
1516 
1517 /*
1518  * This is a stripped-down version of strtoull, suitable only for converting
1519  * lowercase hexadecimal numbers that don't overflow.
1520  */
1521 uint64_t
1522 zfs_strtonum(const char *str, char **nptr)
1523 {
1524 	uint64_t val = 0;
1525 	char c;
1526 	int digit;
1527 
1528 	while ((c = *str) != '\0') {
1529 		if (c >= '0' && c <= '9')
1530 			digit = c - '0';
1531 		else if (c >= 'a' && c <= 'f')
1532 			digit = 10 + c - 'a';
1533 		else
1534 			break;
1535 
1536 		val *= 16;
1537 		val += digit;
1538 
1539 		str++;
1540 	}
1541 
1542 	if (nptr)
1543 		*nptr = (char *)str;
1544 
1545 	return (val);
1546 }
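/*
 * Worked example: zfs_strtonum("1a2b", &end) returns 0x1a2b (6699) and
 * leaves 'end' pointing at the terminating NUL; zfs_strtonum("10x", &end)
 * returns 0x10 (16) and leaves 'end' pointing at the 'x'.
 */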
1547 
1548 /*
1549  * ==========================================================================
1550  * Accessor functions
1551  * ==========================================================================
1552  */
1553 
1554 boolean_t
1555 spa_shutting_down(spa_t *spa)
1556 {
1557 	return (spa->spa_async_suspended);
1558 }
1559 
1560 dsl_pool_t *
1561 spa_get_dsl(spa_t *spa)
1562 {
1563 	return (spa->spa_dsl_pool);
1564 }
1565 
1566 boolean_t
1567 spa_is_initializing(spa_t *spa)
1568 {
1569 	return (spa->spa_is_initializing);
1570 }
1571 
1572 boolean_t
1573 spa_indirect_vdevs_loaded(spa_t *spa)
1574 {
1575 	return (spa->spa_indirect_vdevs_loaded);
1576 }
1577 
1578 blkptr_t *
1579 spa_get_rootblkptr(spa_t *spa)
1580 {
1581 	return (&spa->spa_ubsync.ub_rootbp);
1582 }
1583 
1584 void
1585 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1586 {
1587 	spa->spa_uberblock.ub_rootbp = *bp;
1588 }
1589 
1590 void
1591 spa_altroot(spa_t *spa, char *buf, size_t buflen)
1592 {
1593 	if (spa->spa_root == NULL)
1594 		buf[0] = '\0';
1595 	else
1596 		(void) strncpy(buf, spa->spa_root, buflen);
1597 }
1598 
1599 int
1600 spa_sync_pass(spa_t *spa)
1601 {
1602 	return (spa->spa_sync_pass);
1603 }
1604 
1605 char *
1606 spa_name(spa_t *spa)
1607 {
1608 	return (spa->spa_name);
1609 }
1610 
1611 uint64_t
1612 spa_guid(spa_t *spa)
1613 {
1614 	dsl_pool_t *dp = spa_get_dsl(spa);
1615 	uint64_t guid;
1616 
1617 	/*
1618 	 * If we fail to parse the config during spa_load(), we can go through
1619 	 * the error path (which posts an ereport) and end up here with no root
1620 	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
1621 	 * this case.
1622 	 */
1623 	if (spa->spa_root_vdev == NULL)
1624 		return (spa->spa_config_guid);
1625 
1626 	guid = spa->spa_last_synced_guid != 0 ?
1627 	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1628 
1629 	/*
1630 	 * Return the most recently synced out guid unless we're
1631 	 * in syncing context.
1632 	 */
1633 	if (dp && dsl_pool_sync_context(dp))
1634 		return (spa->spa_root_vdev->vdev_guid);
1635 	else
1636 		return (guid);
1637 }
1638 
1639 uint64_t
1640 spa_load_guid(spa_t *spa)
1641 {
1642 	/*
1643 	 * This is a GUID that exists solely as a reference for the
1644 	 * purposes of the arc.  It is generated at load time, and
1645 	 * is never written to persistent storage.
1646 	 */
1647 	return (spa->spa_load_guid);
1648 }
1649 
1650 uint64_t
1651 spa_last_synced_txg(spa_t *spa)
1652 {
1653 	return (spa->spa_ubsync.ub_txg);
1654 }
1655 
1656 uint64_t
1657 spa_first_txg(spa_t *spa)
1658 {
1659 	return (spa->spa_first_txg);
1660 }
1661 
1662 uint64_t
1663 spa_syncing_txg(spa_t *spa)
1664 {
1665 	return (spa->spa_syncing_txg);
1666 }
1667 
1668 /*
1669  * Return the last txg where data can be dirtied. The final txgs
1670  * will be used to just clear out any deferred frees that remain.
1671  */
1672 uint64_t
1673 spa_final_dirty_txg(spa_t *spa)
1674 {
1675 	return (spa->spa_final_txg - TXG_DEFER_SIZE);
1676 }
1677 
1678 pool_state_t
1679 spa_state(spa_t *spa)
1680 {
1681 	return (spa->spa_state);
1682 }
1683 
1684 spa_load_state_t
1685 spa_load_state(spa_t *spa)
1686 {
1687 	return (spa->spa_load_state);
1688 }
1689 
1690 uint64_t
1691 spa_freeze_txg(spa_t *spa)
1692 {
1693 	return (spa->spa_freeze_txg);
1694 }
1695 
1696 /* ARGSUSED */
1697 uint64_t
1698 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
1699 {
1700 	return (lsize * spa_asize_inflation);
1701 }
1702 
1703 /*
1704  * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
1705  * or at least 128MB, unless that would cause it to be more than half the
1706  * pool size.
1707  *
1708  * See the comment above spa_slop_shift for details.
1709  */
1710 uint64_t
1711 spa_get_slop_space(spa_t *spa)
1712 {
1713 	uint64_t space = spa_get_dspace(spa);
1714 	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
1715 }
1716 
1717 uint64_t
1718 spa_get_dspace(spa_t *spa)
1719 {
1720 	return (spa->spa_dspace);
1721 }
1722 
1723 uint64_t
1724 spa_get_checkpoint_space(spa_t *spa)
1725 {
1726 	return (spa->spa_checkpoint_info.sci_dspace);
1727 }
1728 
1729 void
1730 spa_update_dspace(spa_t *spa)
1731 {
1732 	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1733 	    ddt_get_dedup_dspace(spa);
1734 	if (spa->spa_vdev_removal != NULL) {
1735 		/*
1736 		 * We can't allocate from the removing device, so
1737 		 * subtract its size.  This prevents the DMU/DSL from
1738 		 * filling up the (now smaller) pool while we are in the
1739 		 * middle of removing the device.
1740 		 *
1741 		 * Note that the DMU/DSL doesn't actually know or care
1742 		 * how much space is allocated (it does its own tracking
1743 		 * of how much space has been logically used).  So it
1744 		 * doesn't matter that the data we are moving may be
1745 		 * allocated twice (on the old device and the new
1746 		 * device).
1747 		 */
1748 		vdev_t *vd = spa->spa_vdev_removal->svr_vdev;
1749 		spa->spa_dspace -= spa_deflate(spa) ?
1750 		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
1751 	}
1752 }
1753 
1754 /*
1755  * Return the failure mode that has been set for this pool. The default
1756  * behavior will be to block all I/Os when a complete failure occurs.
1757  */
1758 uint8_t
1759 spa_get_failmode(spa_t *spa)
1760 {
1761 	return (spa->spa_failmode);
1762 }
1763 
1764 boolean_t
1765 spa_suspended(spa_t *spa)
1766 {
1767 	return (spa->spa_suspended);
1768 }
1769 
1770 uint64_t
1771 spa_version(spa_t *spa)
1772 {
1773 	return (spa->spa_ubsync.ub_version);
1774 }
1775 
1776 boolean_t
1777 spa_deflate(spa_t *spa)
1778 {
1779 	return (spa->spa_deflate);
1780 }
1781 
1782 metaslab_class_t *
1783 spa_normal_class(spa_t *spa)
1784 {
1785 	return (spa->spa_normal_class);
1786 }
1787 
1788 metaslab_class_t *
1789 spa_log_class(spa_t *spa)
1790 {
1791 	return (spa->spa_log_class);
1792 }
1793 
1794 void
1795 spa_evicting_os_register(spa_t *spa, objset_t *os)
1796 {
1797 	mutex_enter(&spa->spa_evicting_os_lock);
1798 	list_insert_head(&spa->spa_evicting_os_list, os);
1799 	mutex_exit(&spa->spa_evicting_os_lock);
1800 }
1801 
1802 void
1803 spa_evicting_os_deregister(spa_t *spa, objset_t *os)
1804 {
1805 	mutex_enter(&spa->spa_evicting_os_lock);
1806 	list_remove(&spa->spa_evicting_os_list, os);
1807 	cv_broadcast(&spa->spa_evicting_os_cv);
1808 	mutex_exit(&spa->spa_evicting_os_lock);
1809 }
1810 
1811 void
1812 spa_evicting_os_wait(spa_t *spa)
1813 {
1814 	mutex_enter(&spa->spa_evicting_os_lock);
1815 	while (!list_is_empty(&spa->spa_evicting_os_list))
1816 		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
1817 	mutex_exit(&spa->spa_evicting_os_lock);
1818 
1819 	dmu_buf_user_evict_wait();
1820 }
1821 
1822 int
1823 spa_max_replication(spa_t *spa)
1824 {
1825 	/*
1826 	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
1827 	 * handle BPs with more than one DVA allocated.  Set our max
1828 	 * replication level accordingly.
1829 	 */
1830 	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
1831 		return (1);
1832 	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1833 }
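
/*
 * Note: SPA_DVAS_PER_BP is 3 and spa_max_replication_override defaults to
 * the same value, so a ditto-block-capable pool normally reports a
 * maximum replication level of 3 unless the override tunable is lowered.
 */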
1834 
1835 int
1836 spa_prev_software_version(spa_t *spa)
1837 {
1838 	return (spa->spa_prev_software_version);
1839 }
1840 
1841 uint64_t
1842 spa_deadman_synctime(spa_t *spa)
1843 {
1844 	return (spa->spa_deadman_synctime);
1845 }
1846 
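/*
 * Convert a DVA's allocated size (asize) into the "deflated" size used
 * for space accounting.  When spa_deflate is set, asize is scaled by the
 * owning top-level vdev's vdev_deflate_ratio, which discounts redundancy
 * overhead such as RAID-Z parity.  The caller must already hold a config
 * lock (see the ASSERT); bp_get_dsize() below takes SCL_VDEV itself,
 * while the *_sync() variants assume the lock is held.
 */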
1847 uint64_t
1848 dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
1849 {
1850 	uint64_t asize = DVA_GET_ASIZE(dva);
1851 	uint64_t dsize = asize;
1852 
1853 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1854 
1855 	if (asize != 0 && spa->spa_deflate) {
1856 		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
1857 		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
1858 	}
1859 
1860 	return (dsize);
1861 }
1862 
1863 uint64_t
1864 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
1865 {
1866 	uint64_t dsize = 0;
1867 
1868 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1869 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1870 
1871 	return (dsize);
1872 }
1873 
1874 uint64_t
1875 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
1876 {
1877 	uint64_t dsize = 0;
1878 
1879 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1880 
1881 	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1882 		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1883 
1884 	spa_config_exit(spa, SCL_VDEV, FTAG);
1885 
1886 	return (dsize);
1887 }
1888 
1889 /*
1890  * ==========================================================================
1891  * Initialization and Termination
1892  * ==========================================================================
1893  */
1894 
1895 static int
1896 spa_name_compare(const void *a1, const void *a2)
1897 {
1898 	const spa_t *s1 = a1;
1899 	const spa_t *s2 = a2;
1900 	int s;
1901 
1902 	s = strcmp(s1->spa_name, s2->spa_name);
1903 	if (s > 0)
1904 		return (1);
1905 	if (s < 0)
1906 		return (-1);
1907 	return (0);
1908 }
1909 
1910 int
1911 spa_busy(void)
1912 {
1913 	return (spa_active_count);
1914 }
1915 
1916 void
1917 spa_boot_init(void)
1918 {
1919 	spa_config_load();
1920 }
1921 
1922 void
1923 spa_init(int mode)
1924 {
1925 	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
1926 	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
1927 	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
1928 	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
1929 
1930 	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
1931 	    offsetof(spa_t, spa_avl));
1932 
1933 	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
1934 	    offsetof(spa_aux_t, aux_avl));
1935 
1936 	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
1937 	    offsetof(spa_aux_t, aux_avl));
1938 
1939 	spa_mode_global = mode;
1940 
1941 #ifdef _KERNEL
1942 	spa_arch_init();
1943 #else
1944 	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
1945 		arc_procfd = open("/proc/self/ctl", O_WRONLY);
1946 		if (arc_procfd == -1) {
1947 			perror("could not enable watchpoints: "
1948 			    "opening /proc/self/ctl failed");
1949 		} else {
1950 			arc_watch = B_TRUE;
1951 		}
1952 	}
1953 #endif
1954 
1955 	refcount_init();
1956 	unique_init();
1957 	range_tree_init();
1958 	metaslab_alloc_trace_init();
1959 	zio_init();
1960 	dmu_init();
1961 	zil_init();
1962 	vdev_cache_stat_init();
1963 	zfs_prop_init();
1964 	zpool_prop_init();
1965 	zpool_feature_init();
1966 	spa_config_load();
1967 	l2arc_start();
1968 }
1969 
1970 void
1971 spa_fini(void)
1972 {
1973 	l2arc_stop();
1974 
1975 	spa_evict_all();
1976 
1977 	vdev_cache_stat_fini();
1978 	zil_fini();
1979 	dmu_fini();
1980 	zio_fini();
1981 	metaslab_alloc_trace_fini();
1982 	range_tree_fini();
1983 	unique_fini();
1984 	refcount_fini();
1985 
1986 	avl_destroy(&spa_namespace_avl);
1987 	avl_destroy(&spa_spare_avl);
1988 	avl_destroy(&spa_l2cache_avl);
1989 
1990 	cv_destroy(&spa_namespace_cv);
1991 	mutex_destroy(&spa_namespace_lock);
1992 	mutex_destroy(&spa_spare_lock);
1993 	mutex_destroy(&spa_l2cache_lock);
1994 }
1995 
1996 /*
1997  * Return whether this pool has slogs. No locking needed.
1998  * It's not a problem if the wrong answer is returned as it's only for
1999  * performance and not correctness.
2000  */
2001 boolean_t
2002 spa_has_slogs(spa_t *spa)
2003 {
2004 	return (spa->spa_log_class->mc_rotor != NULL);
2005 }
2006 
2007 spa_log_state_t
2008 spa_get_log_state(spa_t *spa)
2009 {
2010 	return (spa->spa_log_state);
2011 }
2012 
2013 void
2014 spa_set_log_state(spa_t *spa, spa_log_state_t state)
2015 {
2016 	spa->spa_log_state = state;
2017 }
2018 
2019 boolean_t
2020 spa_is_root(spa_t *spa)
2021 {
2022 	return (spa->spa_is_root);
2023 }
2024 
2025 boolean_t
2026 spa_writeable(spa_t *spa)
2027 {
2028 	return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
2029 }
2030 
2031 /*
2032  * Returns true if there is a pending sync task in any of the current
2033  * syncing txg, the current quiescing txg, or the current open txg.
2034  */
2035 boolean_t
2036 spa_has_pending_synctask(spa_t *spa)
2037 {
2038 	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
2039 	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
2040 }
2041 
2042 int
2043 spa_mode(spa_t *spa)
2044 {
2045 	return (spa->spa_mode);
2046 }
2047 
2048 uint64_t
2049 spa_bootfs(spa_t *spa)
2050 {
2051 	return (spa->spa_bootfs);
2052 }
2053 
2054 uint64_t
2055 spa_delegation(spa_t *spa)
2056 {
2057 	return (spa->spa_delegation);
2058 }
2059 
2060 objset_t *
2061 spa_meta_objset(spa_t *spa)
2062 {
2063 	return (spa->spa_meta_objset);
2064 }
2065 
2066 enum zio_checksum
2067 spa_dedup_checksum(spa_t *spa)
2068 {
2069 	return (spa->spa_dedup_checksum);
2070 }
2071 
2072 /*
2073  * Reset pool scan stats per scan pass (or reboot).
2074  */
2075 void
2076 spa_scan_stat_init(spa_t *spa)
2077 {
2078 	/* data not stored on disk */
2079 	spa->spa_scan_pass_start = gethrestime_sec();
2080 	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2081 		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2082 	else
2083 		spa->spa_scan_pass_scrub_pause = 0;
2084 	spa->spa_scan_pass_scrub_spent_paused = 0;
2085 	spa->spa_scan_pass_exam = 0;
2086 	vdev_scan_stat_init(spa->spa_root_vdev);
2087 }
2088 
2089 /*
2090  * Get scan stats for zpool status reports.
2091  */
2092 int
2093 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2094 {
2095 	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2096 
2097 	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
2098 		return (SET_ERROR(ENOENT));
2099 	bzero(ps, sizeof (pool_scan_stat_t));
2100 
2101 	/* data stored on disk */
2102 	ps->pss_func = scn->scn_phys.scn_func;
2103 	ps->pss_start_time = scn->scn_phys.scn_start_time;
2104 	ps->pss_end_time = scn->scn_phys.scn_end_time;
2105 	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2106 	ps->pss_examined = scn->scn_phys.scn_examined;
2107 	ps->pss_to_process = scn->scn_phys.scn_to_process;
2108 	ps->pss_processed = scn->scn_phys.scn_processed;
2109 	ps->pss_errors = scn->scn_phys.scn_errors;
2110 	ps->pss_state = scn->scn_phys.scn_state;
2111 
2112 	/* data not stored on disk */
2113 	ps->pss_pass_start = spa->spa_scan_pass_start;
2114 	ps->pss_pass_exam = spa->spa_scan_pass_exam;
2115 	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2116 	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2117 
2118 	return (0);
2119 }
2120 
2121 boolean_t
2122 spa_debug_enabled(spa_t *spa)
2123 {
2124 	return (spa->spa_debug);
2125 }
2126 
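/*
 * Return the largest block size this pool accepts: SPA_MAXBLOCKSIZE
 * (16MB) once the large_blocks feature is enabled, otherwise the legacy
 * SPA_OLD_MAXBLOCKSIZE (128KB) limit.
 */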
2127 int
2128 spa_maxblocksize(spa_t *spa)
2129 {
2130 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
2131 		return (SPA_MAXBLOCKSIZE);
2132 	else
2133 		return (SPA_OLD_MAXBLOCKSIZE);
2134 }
2135 
2136 /*
2137  * Returns the txg in which the last device removal completed. No indirect
2138  * mappings have been added since this txg.
2139  */
2140 uint64_t
2141 spa_get_last_removal_txg(spa_t *spa)
2142 {
2143 	uint64_t vdevid;
2144 	uint64_t ret = -1ULL;
2145 
2146 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2147 	/*
2148 	 * sr_prev_indirect_vdev is only modified while holding all the
2149 	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
2150 	 * examining it.
2151 	 */
2152 	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
2153 
2154 	while (vdevid != -1ULL) {
2155 		vdev_t *vd = vdev_lookup_top(spa, vdevid);
2156 		vdev_indirect_births_t *vib = vd->vdev_indirect_births;
2157 
2158 		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2159 
2160 		/*
2161 		 * If the removal did not remap any data, we don't care.
2162 		 */
2163 		if (vdev_indirect_births_count(vib) != 0) {
2164 			ret = vdev_indirect_births_last_entry_txg(vib);
2165 			break;
2166 		}
2167 
2168 		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
2169 	}
2170 	spa_config_exit(spa, SCL_VDEV, FTAG);
2171 
2172 	IMPLY(ret != -1ULL,
2173 	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
2174 
2175 	return (ret);
2176 }
2177 
2178 boolean_t
2179 spa_trust_config(spa_t *spa)
2180 {
2181 	return (spa->spa_trust_config);
2182 }
2183 
2184 uint64_t
2185 spa_missing_tvds_allowed(spa_t *spa)
2186 {
2187 	return (spa->spa_missing_tvds_allowed);
2188 }
2189 
2190 void
2191 spa_set_missing_tvds(spa_t *spa, uint64_t missing)
2192 {
2193 	spa->spa_missing_tvds = missing;
2194 }
2195 
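/*
 * Returns true only if every top-level vdev can address its full offset
 * range with the current space map encoding (see
 * vdev_is_spacemap_addressable()); this is consulted, for example, when
 * deciding whether a pool checkpoint may safely be taken.
 */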
2196 boolean_t
2197 spa_top_vdevs_spacemap_addressable(spa_t *spa)
2198 {
2199 	vdev_t *rvd = spa->spa_root_vdev;
2200 	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2201 		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2202 			return (B_FALSE);
2203 	}
2204 	return (B_TRUE);
2205 }
2206 
2207 boolean_t
2208 spa_has_checkpoint(spa_t *spa)
2209 {
2210 	return (spa->spa_checkpoint_txg != 0);
2211 }
2212 
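/*
 * Returns true while the pool is being imported read-only and rewound to
 * its checkpointed state (ZFS_IMPORT_CHECKPOINT together with a
 * read-only spa_mode).
 */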
2213 boolean_t
2214 spa_importing_readonly_checkpoint(spa_t *spa)
2215 {
2216 	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2217 	    spa->spa_mode == FREAD);
2218 }
2219 
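/*
 * Return the earliest txg from which blocks may be claimed (e.g. during
 * ZIL claiming at import).  When a checkpoint exists, blocks born at or
 * before the checkpoint txg belong to the checkpointed state and must
 * not be claimed, so claiming starts at checkpoint_txg + 1; otherwise it
 * starts at the first txg of this pool load.
 */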
2220 uint64_t
2221 spa_min_claim_txg(spa_t *spa)
2222 {
2223 	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2224 
2225 	if (checkpoint_txg != 0)
2226 		return (checkpoint_txg + 1);
2227 
2228 	return (spa->spa_first_txg);
2229 }
2230 
2231 /*
2232  * If there is a checkpoint, async destroys may consume more space from
2233  * the pool instead of freeing it. In an attempt to save the pool from
2234  * getting suspended when it is about to run out of space, we stop
2235  * processing async destroys.
2236  */
2237 boolean_t
2238 spa_suspend_async_destroy(spa_t *spa)
2239 {
2240 	dsl_pool_t *dp = spa_get_dsl(spa);
2241 
2242 	uint64_t unreserved = dsl_pool_unreserved_space(dp,
2243 	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
2244 	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
2245 	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
2246 
2247 	if (spa_has_checkpoint(spa) && avail == 0)
2248 		return (B_TRUE);
2249 
2250 	return (B_FALSE);
2251 }
2252