spa_misc.c revision 468c413a79615e77179e8d98f22a7e513a8135bd
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/sunddi.h>
#include <sys/arc.h>
#include "zfs_prop.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dasize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
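
/*
 * For illustration, the common patterns look like this (lock set and
 * tags chosen for the example):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa = spa_lookup(name);
 *	...
 *	mutex_exit(&spa_namespace_lock);
 *
 * and, for a reader of the vdev tree (cf. bp_get_dasize() below):
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... walk or inspect the vdev tree ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * The same tag must be passed to spa_config_enter() and the matching
 * spa_config_exit(), since holds are tracked per tag.
 */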

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;


/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
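
/*
 * spa_config_held() is intended for assertions; a typical (illustrative)
 * use is:
 *
 *	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
 */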

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char c;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	if (config != NULL)
		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_suspend_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 * 	- A spare may be part of multiple pools.
 * 	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 * 	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc)
{
	vdev_space_update(vd, space, alloc, 0, B_FALSE);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		dsl_pool_scrub_restart(spa->spa_dsl_pool);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa->spa_normal_class) == 0);
	ASSERT(metaslab_class_validate(spa->spa_log_class) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	spa_config_enter(spa, locks, spa, RW_WRITER);
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		spa->spa_config_generation++;
	}

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}


/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with the
 * specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

void
sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
{
	int d;

	if (bp == NULL) {
		(void) snprintf(buf, len, "<NULL>");
		return;
	}

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(buf, len, "<hole>");
		return;
	}

	(void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
	    (u_longlong_t)BP_GET_LEVEL(bp),
	    BP_GET_TYPE(bp) < DMU_OT_NUMTYPES ?
	    dmu_ot[BP_GET_TYPE(bp)].ot_name : "UNKNOWN",
	    (u_longlong_t)BP_GET_LSIZE(bp),
	    (u_longlong_t)BP_GET_PSIZE(bp));

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];
		(void) snprintf(buf + strlen(buf), len - strlen(buf),
		    "DVA[%d]=<%llu:%llx:%llx> ", d,
		    (u_longlong_t)DVA_GET_VDEV(dva),
		    (u_longlong_t)DVA_GET_OFFSET(dva),
		    (u_longlong_t)DVA_GET_ASIZE(dva));
	}

	(void) snprintf(buf + strlen(buf), len - strlen(buf),
	    "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
	    BP_GET_CHECKSUM(bp) < ZIO_CHECKSUM_FUNCTIONS ?
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name : "UNKNOWN",
	    BP_GET_COMPRESS(bp) < ZIO_COMPRESS_FUNCTIONS ?
	    zio_compress_table[BP_GET_COMPRESS(bp)].ci_name : "UNKNOWN",
	    BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
	    BP_IS_GANG(bp) ? "gang" : "contiguous",
	    (u_longlong_t)bp->blk_birth,
	    (u_longlong_t)bp->blk_fill,
	    (u_longlong_t)bp->blk_cksum.zc_word[0],
	    (u_longlong_t)bp->blk_cksum.zc_word[1],
	    (u_longlong_t)bp->blk_cksum.zc_word[2],
	    (u_longlong_t)bp->blk_cksum.zc_word[3]);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}
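
/*
 * For illustration (message hypothetical), callers pass printf-style
 * arguments:
 *
 *	zfs_panic_recover("invalid TYPE %llu", (u_longlong_t)type);
 */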

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return how much space is allocated in the pool (i.e. the sum of all asize).
 */
uint64_t
spa_get_alloc(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_alloc);
}

/*
 * Return how much (raid-z inflated) space there is in the pool.
 */
uint64_t
spa_get_space(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/*
 * Return the amount of raid-z-deflated space in the pool.
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/*
 * Return the amount of space deferred from freeing (in in-core maps only)
 */
uint64_t
spa_get_defers(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_defer);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, and
	 * we have to multiply by a total of 6x.
	 */
	return (lsize * 6);
}
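
/*
 * Worked example of the bound above: a 128K logical write may consume up
 * to 2 x 128K = 256K per copy in the RAID-Z worst case, and with up to
 * 3 DVAs per bp that is 3 x 256K = 768K, i.e. 6 x lsize.
 */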

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	int sz = 0, i;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		if (vd)
			sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >>
			    SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
	return (sz);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs. No locking needed.
 * It's not a problem if the wrong answer is returned, as it's only used
 * for performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

/*
 * Return whether this pool is the root pool.
 */
boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}