spa_misc.c revision 01f55e48fb4d524eaf70687728aa51b7762e2e97
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
25 */
26
27#include <sys/zfs_context.h>
28#include <sys/spa_impl.h>
29#include <sys/spa_boot.h>
30#include <sys/zio.h>
31#include <sys/zio_checksum.h>
32#include <sys/zio_compress.h>
33#include <sys/dmu.h>
34#include <sys/dmu_tx.h>
35#include <sys/zap.h>
36#include <sys/zil.h>
37#include <sys/vdev_impl.h>
38#include <sys/metaslab.h>
39#include <sys/uberblock_impl.h>
40#include <sys/txg.h>
41#include <sys/avl.h>
42#include <sys/unique.h>
43#include <sys/dsl_pool.h>
44#include <sys/dsl_dir.h>
45#include <sys/dsl_prop.h>
46#include <sys/dsl_scan.h>
47#include <sys/fs/zfs.h>
48#include <sys/metaslab_impl.h>
49#include <sys/arc.h>
50#include <sys/ddt.h>
51#include "zfs_prop.h"
52#include "zfeature_common.h"
53
54/*
55 * SPA locking
56 *
57 * There are four basic locks for managing spa_t structures:
58 *
59 * spa_namespace_lock (global mutex)
60 *
61 *	This lock must be acquired to do any of the following:
62 *
63 *		- Lookup a spa_t by name
64 *		- Add or remove a spa_t from the namespace
65 *		- Increase spa_refcount from zero
66 *		- Check if spa_refcount is zero
67 *		- Rename a spa_t
68 *		- add/remove/attach/detach devices
69 *		- Held for the duration of create/destroy/import/export
70 *
71 *	It does not need to handle recursion.  A create or destroy may
72 *	reference objects (files or zvols) in other pools, but by
73 *	definition they must have an existing reference, and will never need
74 *	to lookup a spa_t by name.
75 *
76 * spa_refcount (per-spa refcount_t protected by mutex)
77 *
78 *	This reference count keeps track of any active users of the spa_t.  The
79 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
80 *	the refcount is never really 'zero' - opening a pool implicitly keeps
81 *	some references in the DMU.  Internally we check against spa_minref, but
82 *	present the image of a zero/non-zero value to consumers.
83 *
84 * spa_config_lock[] (per-spa array of rwlocks)
85 *
86 *	This protects the spa_t from config changes, and must be held in
87 *	the following circumstances:
88 *
89 *		- RW_READER to perform I/O to the spa
90 *		- RW_WRITER to change the vdev config
91 *
92 * The locking order is fairly straightforward:
93 *
94 *		spa_namespace_lock	->	spa_refcount
95 *
96 *	The namespace lock must be acquired to increase the refcount from 0
97 *	or to check if it is zero.
98 *
99 *		spa_refcount		->	spa_config_lock[]
100 *
101 *	There must be at least one valid reference on the spa_t to acquire
102 *	the config lock.
103 *
104 *		spa_namespace_lock	->	spa_config_lock[]
105 *
106 *	The namespace lock must always be taken before the config lock.
107 *
108 *
109 * The spa_namespace_lock can be acquired directly and is globally visible.
110 *
111 * The namespace is manipulated using the following functions, all of which
112 * require the spa_namespace_lock to be held.
113 *
114 *	spa_lookup()		Lookup a spa_t by name.
115 *
116 *	spa_add()		Create a new spa_t in the namespace.
117 *
118 *	spa_remove()		Remove a spa_t from the namespace.  This also
119 *				frees up any memory associated with the spa_t.
120 *
121 *	spa_next()		Returns the next spa_t in the system, or the
122 *				first if NULL is passed.
123 *
124 *	spa_evict_all()		Shutdown and remove all spa_t structures in
125 *				the system.
126 *
127 *	spa_guid_exists()	Determine whether a pool/device guid exists.
128 *
129 * The spa_refcount is manipulated using the following functions:
130 *
131 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
132 *				called with spa_namespace_lock held if the
133 *				refcount is currently zero.
134 *
135 *	spa_close()		Remove a reference from the spa_t.  This will
136 *				not free the spa_t or remove it from the
137 *				namespace.  No locking is required.
138 *
139 *	spa_refcount_zero()	Returns true if the refcount is currently
140 *				zero.  Must be called with spa_namespace_lock
141 *				held.
142 *
143 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
144 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
145 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
146 *
147 * To read the configuration, it suffices to hold one of these locks as reader.
148 * To modify the configuration, you must hold all locks as writer.  To modify
149 * vdev state without altering the vdev tree's topology (e.g. online/offline),
150 * you must hold SCL_STATE and SCL_ZIO as writer.
151 *
152 * We use these distinct config locks to avoid recursive lock entry.
153 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
154 * block allocations (SCL_ALLOC), which may require reading space maps
155 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
156 *
157 * The spa config locks cannot be normal rwlocks because we need the
158 * ability to hand off ownership.  For example, SCL_ZIO is acquired
159 * by the issuing thread and later released by an interrupt thread.
160 * They do, however, obey the usual write-wanted semantics to prevent
161 * writer (i.e. system administrator) starvation.
162 *
163 * The lock acquisition rules are as follows:
164 *
165 * SCL_CONFIG
166 *	Protects changes to the vdev tree topology, such as vdev
167 *	add/remove/attach/detach.  Protects the dirty config list
168 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
169 *
170 * SCL_STATE
171 *	Protects changes to pool state and vdev state, such as vdev
172 *	online/offline/fault/degrade/clear.  Protects the dirty state list
173 *	(spa_state_dirty_list) and global pool state (spa_state).
174 *
175 * SCL_ALLOC
176 *	Protects changes to metaslab groups and classes.
177 *	Held as reader by metaslab_alloc() and metaslab_claim().
178 *
179 * SCL_ZIO
180 *	Held by bp-level zios (those which have no io_vd upon entry)
181 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
182 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
183 *
184 * SCL_FREE
185 *	Protects changes to metaslab groups and classes.
186 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
187 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
188 *	blocks in zio_done() while another i/o that holds either
189 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
190 *
191 * SCL_VDEV
192 *	Held as reader to prevent changes to the vdev tree during trivial
193 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
194 *	other locks, and lower than all of them, to ensure that it's safe
195 *	to acquire regardless of caller context.
196 *
197 * In addition, the following rules apply:
198 *
199 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
200 *	The lock ordering is SCL_CONFIG > spa_props_lock.
201 *
202 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
203 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
204 *	or zio_write_phys() -- the caller must ensure that the config cannot
205 *	change in the interim, and that the vdev cannot be reopened.
206 *	SCL_STATE as reader suffices for both.
207 *
208 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
209 *
210 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
211 *				for writing.
212 *
213 *	spa_vdev_exit()		Release the config lock, wait for all I/O
214 *				to complete, sync the updated configs to the
215 *				cache, and release the namespace lock.
216 *
217 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
218 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
219 * locking is always based on spa_namespace_lock and spa_config_lock[].
220 *
221 * spa_rename() is also implemented within this file since it requires
222 * manipulation of the namespace.
223 */
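
/*
 * Illustrative sketch of the rules above (not part of the SPA code; the
 * pool name and the error handling are hypothetical): look up a pool under
 * the namespace lock, take a reference, then hold part of the config lock
 * as reader while performing read-only inquiries.
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *
 *	if (spa != NULL) {
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		...read-only config/vdev inquiries...
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *		spa_close(spa, FTAG);
 *	}
 */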
224
225static avl_tree_t spa_namespace_avl;
226kmutex_t spa_namespace_lock;
227static kcondvar_t spa_namespace_cv;
228static int spa_active_count;
229int spa_max_replication_override = SPA_DVAS_PER_BP;
230
231static kmutex_t spa_spare_lock;
232static avl_tree_t spa_spare_avl;
233static kmutex_t spa_l2cache_lock;
234static avl_tree_t spa_l2cache_avl;
235
236kmem_cache_t *spa_buffer_pool;
237int spa_mode_global;
238
239#ifdef ZFS_DEBUG
240/* Everything except dprintf is on by default in debug builds */
241int zfs_flags = ~ZFS_DEBUG_DPRINTF;
242#else
243int zfs_flags = 0;
244#endif
245
246/*
247 * zfs_recover can be set to nonzero to attempt to recover from
248 * otherwise-fatal errors, typically caused by on-disk corruption.  When
249 * set, calls to zfs_panic_recover() will turn into warning messages.
250 */
251int zfs_recover = 0;
252
253extern int zfs_txg_synctime_ms;
254
255/*
256 * Expiration time in units of zfs_txg_synctime_ms. This value has two
257 * meanings. First it is used to determine when the spa_deadman logic
258 * should fire. By default the spa_deadman will fire if spa_sync has
259 * not completed in 1000 * zfs_txg_synctime_ms (i.e. 1000 seconds).
260 * Secondly, the value determines if an I/O is considered "hung".
261 * Any I/O that has not completed in zfs_deadman_synctime is considered
262 * "hung", resulting in a system panic.
264 */
265uint64_t zfs_deadman_synctime = 1000ULL;
266
267/*
268 * Override the zfs deadman behavior via /etc/system. By default the
269 * deadman is enabled except on VMware and sparc deployments.
270 */
271int zfs_deadman_enabled = -1;
272
273
274/*
275 * ==========================================================================
276 * SPA config locking
277 * ==========================================================================
278 */
279static void
280spa_config_lock_init(spa_t *spa)
281{
282	for (int i = 0; i < SCL_LOCKS; i++) {
283		spa_config_lock_t *scl = &spa->spa_config_lock[i];
284		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
285		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
286		refcount_create(&scl->scl_count);
287		scl->scl_writer = NULL;
288		scl->scl_write_wanted = 0;
289	}
290}
291
292static void
293spa_config_lock_destroy(spa_t *spa)
294{
295	for (int i = 0; i < SCL_LOCKS; i++) {
296		spa_config_lock_t *scl = &spa->spa_config_lock[i];
297		mutex_destroy(&scl->scl_lock);
298		cv_destroy(&scl->scl_cv);
299		refcount_destroy(&scl->scl_count);
300		ASSERT(scl->scl_writer == NULL);
301		ASSERT(scl->scl_write_wanted == 0);
302	}
303}
304
305int
306spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
307{
308	for (int i = 0; i < SCL_LOCKS; i++) {
309		spa_config_lock_t *scl = &spa->spa_config_lock[i];
310		if (!(locks & (1 << i)))
311			continue;
312		mutex_enter(&scl->scl_lock);
313		if (rw == RW_READER) {
314			if (scl->scl_writer || scl->scl_write_wanted) {
315				mutex_exit(&scl->scl_lock);
316				spa_config_exit(spa, locks ^ (1 << i), tag);
317				return (0);
318			}
319		} else {
320			ASSERT(scl->scl_writer != curthread);
321			if (!refcount_is_zero(&scl->scl_count)) {
322				mutex_exit(&scl->scl_lock);
323				spa_config_exit(spa, locks ^ (1 << i), tag);
324				return (0);
325			}
326			scl->scl_writer = curthread;
327		}
328		(void) refcount_add(&scl->scl_count, tag);
329		mutex_exit(&scl->scl_lock);
330	}
331	return (1);
332}
333
334void
335spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
336{
337	int wlocks_held = 0;
338
339	for (int i = 0; i < SCL_LOCKS; i++) {
340		spa_config_lock_t *scl = &spa->spa_config_lock[i];
341		if (scl->scl_writer == curthread)
342			wlocks_held |= (1 << i);
343		if (!(locks & (1 << i)))
344			continue;
345		mutex_enter(&scl->scl_lock);
346		if (rw == RW_READER) {
347			while (scl->scl_writer || scl->scl_write_wanted) {
348				cv_wait(&scl->scl_cv, &scl->scl_lock);
349			}
350		} else {
351			ASSERT(scl->scl_writer != curthread);
352			while (!refcount_is_zero(&scl->scl_count)) {
353				scl->scl_write_wanted++;
354				cv_wait(&scl->scl_cv, &scl->scl_lock);
355				scl->scl_write_wanted--;
356			}
357			scl->scl_writer = curthread;
358		}
359		(void) refcount_add(&scl->scl_count, tag);
360		mutex_exit(&scl->scl_lock);
361	}
362	ASSERT(wlocks_held <= locks);
363}
364
365void
366spa_config_exit(spa_t *spa, int locks, void *tag)
367{
368	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
369		spa_config_lock_t *scl = &spa->spa_config_lock[i];
370		if (!(locks & (1 << i)))
371			continue;
372		mutex_enter(&scl->scl_lock);
373		ASSERT(!refcount_is_zero(&scl->scl_count));
374		if (refcount_remove(&scl->scl_count, tag) == 0) {
375			ASSERT(scl->scl_writer == NULL ||
376			    scl->scl_writer == curthread);
377			scl->scl_writer = NULL;	/* OK in either case */
378			cv_broadcast(&scl->scl_cv);
379		}
380		mutex_exit(&scl->scl_lock);
381	}
382}
383
384int
385spa_config_held(spa_t *spa, int locks, krw_t rw)
386{
387	int locks_held = 0;
388
389	for (int i = 0; i < SCL_LOCKS; i++) {
390		spa_config_lock_t *scl = &spa->spa_config_lock[i];
391		if (!(locks & (1 << i)))
392			continue;
393		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
394		    (rw == RW_WRITER && scl->scl_writer == curthread))
395			locks_held |= 1 << i;
396	}
397
398	return (locks_held);
399}
400
401/*
402 * ==========================================================================
403 * SPA namespace functions
404 * ==========================================================================
405 */
406
407/*
408 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
409 * Returns NULL if no matching spa_t is found.
410 */
411spa_t *
412spa_lookup(const char *name)
413{
414	static spa_t search;	/* spa_t is large; don't allocate on stack */
415	spa_t *spa;
416	avl_index_t where;
417	char *cp;
418
419	ASSERT(MUTEX_HELD(&spa_namespace_lock));
420
421	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
422
423	/*
424	 * If it's a full dataset name, figure out the pool name and just
425	 * use that.  Truncate the private copy in 'search' so the caller's
426	 * (const) string is never modified.
427	 */
428	cp = strpbrk(search.spa_name, "/@");
429	if (cp != NULL)
430		*cp = '\0';
431
432	spa = avl_find(&spa_namespace_avl, &search, &where);
437
438	return (spa);
439}
440
441/*
442 * Fires when spa_sync has not completed within the spa_deadman_synctime interval.
443 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
444 * looking for potentially hung I/Os.
445 */
446void
447spa_deadman(void *arg)
448{
449	spa_t *spa = arg;
450
451	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
452	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
453	    ++spa->spa_deadman_calls);
454	if (zfs_deadman_enabled)
455		vdev_deadman(spa->spa_root_vdev);
456}
457
458/*
459 * Create an uninitialized spa_t with the given name.  Requires
460 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
461 * exist by calling spa_lookup() first.
462 */
463spa_t *
464spa_add(const char *name, nvlist_t *config, const char *altroot)
465{
466	spa_t *spa;
467	spa_config_dirent_t *dp;
468	cyc_handler_t hdlr;
469	cyc_time_t when;
470
471	ASSERT(MUTEX_HELD(&spa_namespace_lock));
472
473	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
474
475	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
476	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
477	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
478	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
479	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
480	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
481	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
482	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
483	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
484
485	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
486	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
487	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
488	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
489
490	for (int t = 0; t < TXG_SIZE; t++)
491		bplist_create(&spa->spa_free_bplist[t]);
492
493	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
494	spa->spa_state = POOL_STATE_UNINITIALIZED;
495	spa->spa_freeze_txg = UINT64_MAX;
496	spa->spa_final_txg = UINT64_MAX;
497	spa->spa_load_max_txg = UINT64_MAX;
498	spa->spa_proc = &p0;
499	spa->spa_proc_state = SPA_PROC_NONE;
500
501	hdlr.cyh_func = spa_deadman;
502	hdlr.cyh_arg = spa;
503	hdlr.cyh_level = CY_LOW_LEVEL;
504
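	/*
	 * With the defaults noted above (zfs_deadman_synctime == 1000 and
	 * zfs_txg_synctime_ms == 1000), the product below works out to
	 * 1000 * 1000 ms == 1000 seconds, stored here in nanoseconds.
	 */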
505	spa->spa_deadman_synctime = zfs_deadman_synctime *
506	    zfs_txg_synctime_ms * MICROSEC;
507
508	/*
509	 * This determines how often we need to check for hung I/Os after
510	 * the cyclic has already fired. Since checking for hung I/Os is
511	 * an expensive operation, we don't want to check too frequently.
512	 * Instead, wait for 5 synctimes before checking again.
513	 */
514	when.cyt_interval = 5ULL * zfs_txg_synctime_ms * MICROSEC;
515	when.cyt_when = CY_INFINITY;
516	mutex_enter(&cpu_lock);
517	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
518	mutex_exit(&cpu_lock);
519
520	refcount_create(&spa->spa_refcount);
521	spa_config_lock_init(spa);
522
523	avl_add(&spa_namespace_avl, spa);
524
525	/*
526	 * Set the alternate root, if there is one.
527	 */
528	if (altroot) {
529		spa->spa_root = spa_strdup(altroot);
530		spa_active_count++;
531	}
532
533	/*
534	 * Every pool starts with the default cachefile
535	 */
536	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
537	    offsetof(spa_config_dirent_t, scd_link));
538
539	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
540	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
541	list_insert_head(&spa->spa_config_list, dp);
542
543	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
544	    KM_SLEEP) == 0);
545
546	if (config != NULL) {
547		nvlist_t *features;
548
549		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
550		    &features) == 0) {
551			VERIFY(nvlist_dup(features, &spa->spa_label_features,
552			    0) == 0);
553		}
554
555		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
556	}
557
558	if (spa->spa_label_features == NULL) {
559		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
560		    KM_SLEEP) == 0);
561	}
562
563	return (spa);
564}
565
566/*
567 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
568 * spa_namespace_lock.  This is called only after the spa_t has been closed and
569 * deactivated.
570 */
571void
572spa_remove(spa_t *spa)
573{
574	spa_config_dirent_t *dp;
575
576	ASSERT(MUTEX_HELD(&spa_namespace_lock));
577	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
578
579	nvlist_free(spa->spa_config_splitting);
580
581	avl_remove(&spa_namespace_avl, spa);
582	cv_broadcast(&spa_namespace_cv);
583
584	if (spa->spa_root) {
585		spa_strfree(spa->spa_root);
586		spa_active_count--;
587	}
588
589	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
590		list_remove(&spa->spa_config_list, dp);
591		if (dp->scd_path != NULL)
592			spa_strfree(dp->scd_path);
593		kmem_free(dp, sizeof (spa_config_dirent_t));
594	}
595
596	list_destroy(&spa->spa_config_list);
597
598	nvlist_free(spa->spa_label_features);
599	nvlist_free(spa->spa_load_info);
600	spa_config_set(spa, NULL);
601
602	mutex_enter(&cpu_lock);
603	if (spa->spa_deadman_cycid != CYCLIC_NONE)
604		cyclic_remove(spa->spa_deadman_cycid);
605	mutex_exit(&cpu_lock);
606	spa->spa_deadman_cycid = CYCLIC_NONE;
607
608	refcount_destroy(&spa->spa_refcount);
609
610	spa_config_lock_destroy(spa);
611
612	for (int t = 0; t < TXG_SIZE; t++)
613		bplist_destroy(&spa->spa_free_bplist[t]);
614
615	cv_destroy(&spa->spa_async_cv);
616	cv_destroy(&spa->spa_proc_cv);
617	cv_destroy(&spa->spa_scrub_io_cv);
618	cv_destroy(&spa->spa_suspend_cv);
619
620	mutex_destroy(&spa->spa_async_lock);
621	mutex_destroy(&spa->spa_errlist_lock);
622	mutex_destroy(&spa->spa_errlog_lock);
623	mutex_destroy(&spa->spa_history_lock);
624	mutex_destroy(&spa->spa_proc_lock);
625	mutex_destroy(&spa->spa_props_lock);
626	mutex_destroy(&spa->spa_scrub_lock);
627	mutex_destroy(&spa->spa_suspend_lock);
628	mutex_destroy(&spa->spa_vdev_top_lock);
629
630	kmem_free(spa, sizeof (spa_t));
631}
632
633/*
634 * Given a pool, return the next pool in the namespace, or NULL if there is
635 * none.  If 'prev' is NULL, return the first pool.
636 */
637spa_t *
638spa_next(spa_t *prev)
639{
640	ASSERT(MUTEX_HELD(&spa_namespace_lock));
641
642	if (prev)
643		return (AVL_NEXT(&spa_namespace_avl, prev));
644	else
645		return (avl_first(&spa_namespace_avl));
646}
647
648/*
649 * ==========================================================================
650 * SPA refcount functions
651 * ==========================================================================
652 */
653
654/*
655 * Add a reference to the given spa_t.  Must have at least one reference, or
656 * have the namespace lock held.
657 */
658void
659spa_open_ref(spa_t *spa, void *tag)
660{
661	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
662	    MUTEX_HELD(&spa_namespace_lock));
663	(void) refcount_add(&spa->spa_refcount, tag);
664}
665
666/*
667 * Remove a reference to the given spa_t.  Must have at least one reference, or
668 * have the namespace lock held.
669 */
670void
671spa_close(spa_t *spa, void *tag)
672{
673	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
674	    MUTEX_HELD(&spa_namespace_lock));
675	(void) refcount_remove(&spa->spa_refcount, tag);
676}
677
678/*
679 * Check to see if the spa refcount is zero.  Must be called with
680 * spa_namespace_lock held.  We really compare against spa_minref, which is the
681 * number of references acquired when opening a pool.
682 */
683boolean_t
684spa_refcount_zero(spa_t *spa)
685{
686	ASSERT(MUTEX_HELD(&spa_namespace_lock));
687
688	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
689}
690
691/*
692 * ==========================================================================
693 * SPA spare and l2cache tracking
694 * ==========================================================================
695 */
696
697/*
698 * Hot spares and cache devices are tracked using the same code below,
699 * for 'auxiliary' devices.
700 */
701
702typedef struct spa_aux {
703	uint64_t	aux_guid;
704	uint64_t	aux_pool;
705	avl_node_t	aux_avl;
706	int		aux_count;
707} spa_aux_t;
708
709static int
710spa_aux_compare(const void *a, const void *b)
711{
712	const spa_aux_t *sa = a;
713	const spa_aux_t *sb = b;
714
715	if (sa->aux_guid < sb->aux_guid)
716		return (-1);
717	else if (sa->aux_guid > sb->aux_guid)
718		return (1);
719	else
720		return (0);
721}
722
723void
724spa_aux_add(vdev_t *vd, avl_tree_t *avl)
725{
726	avl_index_t where;
727	spa_aux_t search;
728	spa_aux_t *aux;
729
730	search.aux_guid = vd->vdev_guid;
731	if ((aux = avl_find(avl, &search, &where)) != NULL) {
732		aux->aux_count++;
733	} else {
734		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
735		aux->aux_guid = vd->vdev_guid;
736		aux->aux_count = 1;
737		avl_insert(avl, aux, where);
738	}
739}
740
741void
742spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
743{
744	spa_aux_t search;
745	spa_aux_t *aux;
746	avl_index_t where;
747
748	search.aux_guid = vd->vdev_guid;
749	aux = avl_find(avl, &search, &where);
750
751	ASSERT(aux != NULL);
752
753	if (--aux->aux_count == 0) {
754		avl_remove(avl, aux);
755		kmem_free(aux, sizeof (spa_aux_t));
756	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
757		aux->aux_pool = 0ULL;
758	}
759}
760
761boolean_t
762spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
763{
764	spa_aux_t search, *found;
765
766	search.aux_guid = guid;
767	found = avl_find(avl, &search, NULL);
768
769	if (pool) {
770		if (found)
771			*pool = found->aux_pool;
772		else
773			*pool = 0ULL;
774	}
775
776	if (refcnt) {
777		if (found)
778			*refcnt = found->aux_count;
779		else
780			*refcnt = 0;
781	}
782
783	return (found != NULL);
784}
785
786void
787spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
788{
789	spa_aux_t search, *found;
790	avl_index_t where;
791
792	search.aux_guid = vd->vdev_guid;
793	found = avl_find(avl, &search, &where);
794	ASSERT(found != NULL);
795	ASSERT(found->aux_pool == 0ULL);
796
797	found->aux_pool = spa_guid(vd->vdev_spa);
798}
799
800/*
801 * Spares are tracked globally due to the following constraints:
802 *
803 * 	- A spare may be part of multiple pools.
804 * 	- A spare may be added to a pool even if it's actively in use within
805 *	  another pool.
806 * 	- A spare in use in any pool can only be the source of a replacement if
807 *	  the target is a spare in the same pool.
808 *
809 * We keep track of all spares on the system through the use of a reference
810 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
811 * spare, then we bump the reference count in the AVL tree.  In addition, we set
812 * the 'vdev_isspare' member to indicate that the device is a spare (active or
813 * inactive).  When a spare is made active (used to replace a device in the
814 * pool), we also keep track of which pool it's been made a part of.
815 *
816 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
817 * called under the spa_namespace lock as part of vdev reconfiguration.  The
818 * separate spare lock exists for the status query path, which does not need to
819 * be completely consistent with respect to other vdev configuration changes.
820 */
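
/*
 * Illustrative sketch of the status-query path described above (the 'guid'
 * variable is hypothetical): ask whether a device is a known spare and, if
 * so, which pool currently has it active.
 *
 *	uint64_t pool;
 *	int refs;
 *	if (spa_spare_exists(guid, &pool, &refs))
 *		...'pool' is 0 if the spare is not active anywhere...
 */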
821
822static int
823spa_spare_compare(const void *a, const void *b)
824{
825	return (spa_aux_compare(a, b));
826}
827
828void
829spa_spare_add(vdev_t *vd)
830{
831	mutex_enter(&spa_spare_lock);
832	ASSERT(!vd->vdev_isspare);
833	spa_aux_add(vd, &spa_spare_avl);
834	vd->vdev_isspare = B_TRUE;
835	mutex_exit(&spa_spare_lock);
836}
837
838void
839spa_spare_remove(vdev_t *vd)
840{
841	mutex_enter(&spa_spare_lock);
842	ASSERT(vd->vdev_isspare);
843	spa_aux_remove(vd, &spa_spare_avl);
844	vd->vdev_isspare = B_FALSE;
845	mutex_exit(&spa_spare_lock);
846}
847
848boolean_t
849spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
850{
851	boolean_t found;
852
853	mutex_enter(&spa_spare_lock);
854	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
855	mutex_exit(&spa_spare_lock);
856
857	return (found);
858}
859
860void
861spa_spare_activate(vdev_t *vd)
862{
863	mutex_enter(&spa_spare_lock);
864	ASSERT(vd->vdev_isspare);
865	spa_aux_activate(vd, &spa_spare_avl);
866	mutex_exit(&spa_spare_lock);
867}
868
869/*
870 * Level 2 ARC devices are tracked globally for the same reasons as spares.
871 * Cache devices currently only support one pool per cache device, and so
872 * for these devices the aux reference count is currently unused beyond 1.
873 */
874
875static int
876spa_l2cache_compare(const void *a, const void *b)
877{
878	return (spa_aux_compare(a, b));
879}
880
881void
882spa_l2cache_add(vdev_t *vd)
883{
884	mutex_enter(&spa_l2cache_lock);
885	ASSERT(!vd->vdev_isl2cache);
886	spa_aux_add(vd, &spa_l2cache_avl);
887	vd->vdev_isl2cache = B_TRUE;
888	mutex_exit(&spa_l2cache_lock);
889}
890
891void
892spa_l2cache_remove(vdev_t *vd)
893{
894	mutex_enter(&spa_l2cache_lock);
895	ASSERT(vd->vdev_isl2cache);
896	spa_aux_remove(vd, &spa_l2cache_avl);
897	vd->vdev_isl2cache = B_FALSE;
898	mutex_exit(&spa_l2cache_lock);
899}
900
901boolean_t
902spa_l2cache_exists(uint64_t guid, uint64_t *pool)
903{
904	boolean_t found;
905
906	mutex_enter(&spa_l2cache_lock);
907	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
908	mutex_exit(&spa_l2cache_lock);
909
910	return (found);
911}
912
913void
914spa_l2cache_activate(vdev_t *vd)
915{
916	mutex_enter(&spa_l2cache_lock);
917	ASSERT(vd->vdev_isl2cache);
918	spa_aux_activate(vd, &spa_l2cache_avl);
919	mutex_exit(&spa_l2cache_lock);
920}
921
922/*
923 * ==========================================================================
924 * SPA vdev locking
925 * ==========================================================================
926 */
927
928/*
929 * Lock the given spa_t for the purpose of adding or removing a vdev.
930 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
931 * It returns the next transaction group for the spa_t.
932 */
933uint64_t
934spa_vdev_enter(spa_t *spa)
935{
936	mutex_enter(&spa->spa_vdev_top_lock);
937	mutex_enter(&spa_namespace_lock);
938	return (spa_vdev_config_enter(spa));
939}
940
941/*
942 * Internal implementation for spa_vdev_enter().  Used when a vdev
943 * operation requires multiple syncs (e.g. removing a device) while
944 * keeping the spa_namespace_lock held.
945 */
946uint64_t
947spa_vdev_config_enter(spa_t *spa)
948{
949	ASSERT(MUTEX_HELD(&spa_namespace_lock));
950
951	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
952
953	return (spa_last_synced_txg(spa) + 1);
954}
955
956/*
957 * Used in combination with spa_vdev_config_enter() to allow the syncing
958 * of multiple transactions without releasing the spa_namespace_lock.
959 */
960void
961spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
962{
963	boolean_t config_changed = B_FALSE;
964
965	ASSERT(MUTEX_HELD(&spa_namespace_lock));
966	ASSERT(txg > spa_last_synced_txg(spa));
968
969	spa->spa_pending_vdev = NULL;
970
971	/*
972	 * Reassess the DTLs.
973	 */
974	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
975
976	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
977		config_changed = B_TRUE;
978		spa->spa_config_generation++;
979	}
980
981	/*
982	 * Verify the metaslab classes.
983	 */
984	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
985	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
986
987	spa_config_exit(spa, SCL_ALL, spa);
988
989	/*
990	 * Panic the system if the specified tag requires it.  This
991	 * is useful for ensuring that configurations are updated
992	 * transactionally.
993	 */
994	if (zio_injection_enabled)
995		zio_handle_panic_injection(spa, tag, 0);
996
997	/*
998	 * Note: this txg_wait_synced() is important because it ensures
999	 * that there won't be more than one config change per txg.
1000	 * This allows us to use the txg as the generation number.
1001	 */
1002	if (error == 0)
1003		txg_wait_synced(spa->spa_dsl_pool, txg);
1004
1005	if (vd != NULL) {
1006		ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
1007		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1008		vdev_free(vd);
1009		spa_config_exit(spa, SCL_ALL, spa);
1010	}
1011
1012	/*
1013	 * If the config changed, update the config cache.
1014	 */
1015	if (config_changed)
1016		spa_config_sync(spa, B_FALSE, B_TRUE);
1017}
1018
1019/*
1020 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
1021 * locking of spa_vdev_enter(), we also want to make sure the transactions have
1022 * synced to disk, and then update the global configuration cache with the new
1023 * information.
1024 */
1025int
1026spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1027{
1028	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1029	mutex_exit(&spa_namespace_lock);
1030	mutex_exit(&spa->spa_vdev_top_lock);
1031
1032	return (error);
1033}
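
/*
 * Illustrative sketch of a caller of spa_vdev_enter()/spa_vdev_exit() above
 * (the vdev manipulation itself is elided and hypothetical):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	...add, remove, attach or detach vdevs; set 'error' on failure...
 *	return (spa_vdev_exit(spa, vd_to_free_or_NULL, txg, error));
 */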
1034
1035/*
1036 * Lock the given spa_t for the purpose of changing vdev state.
1037 */
1038void
1039spa_vdev_state_enter(spa_t *spa, int oplocks)
1040{
1041	int locks = SCL_STATE_ALL | oplocks;
1042
1043	/*
1044	 * Root pools may need to read from the underlying devfs filesystem
1045	 * when opening up a vdev.  Unfortunately if we're holding the
1046	 * SCL_ZIO lock it will result in a deadlock when we try to issue
1047	 * the read from the root filesystem.  Instead we "prefetch"
1048	 * the associated vnodes that we need prior to opening the
1049	 * underlying devices and cache them so that we can prevent
1050	 * any I/O when we are doing the actual open.
1051	 */
1052	if (spa_is_root(spa)) {
1053		int low = locks & ~(SCL_ZIO - 1);
1054		int high = locks & ~low;
1055
1056		spa_config_enter(spa, high, spa, RW_WRITER);
1057		vdev_hold(spa->spa_root_vdev);
1058		spa_config_enter(spa, low, spa, RW_WRITER);
1059	} else {
1060		spa_config_enter(spa, locks, spa, RW_WRITER);
1061	}
1062	spa->spa_vdev_locks = locks;
1063}
1064
1065int
1066spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1067{
1068	boolean_t config_changed = B_FALSE;
1069
1070	if (vd != NULL || error == 0)
1071		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
1072		    0, 0, B_FALSE);
1073
1074	if (vd != NULL) {
1075		vdev_state_dirty(vd->vdev_top);
1076		config_changed = B_TRUE;
1077		spa->spa_config_generation++;
1078	}
1079
1080	if (spa_is_root(spa))
1081		vdev_rele(spa->spa_root_vdev);
1082
1083	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1084	spa_config_exit(spa, spa->spa_vdev_locks, spa);
1085
1086	/*
1087	 * If anything changed, wait for it to sync.  This ensures that,
1088	 * from the system administrator's perspective, zpool(1M) commands
1089	 * are synchronous.  This is important for things like zpool offline:
1090	 * when the command completes, you expect no further I/O from ZFS.
1091	 */
1092	if (vd != NULL)
1093		txg_wait_synced(spa->spa_dsl_pool, 0);
1094
1095	/*
1096	 * If the config changed, update the config cache.
1097	 */
1098	if (config_changed) {
1099		mutex_enter(&spa_namespace_lock);
1100		spa_config_sync(spa, B_FALSE, B_TRUE);
1101		mutex_exit(&spa_namespace_lock);
1102	}
1103
1104	return (error);
1105}
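
/*
 * Illustrative sketch of the state-change analogue (hypothetical caller,
 * e.g. an online/offline style operation):
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	...look up the target vdev 'vd' and change its state...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */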
1106
1107/*
1108 * ==========================================================================
1109 * Miscellaneous functions
1110 * ==========================================================================
1111 */
1112
1113void
1114spa_activate_mos_feature(spa_t *spa, const char *feature)
1115{
1116	(void) nvlist_add_boolean(spa->spa_label_features, feature);
1117	vdev_config_dirty(spa->spa_root_vdev);
1118}
1119
1120void
1121spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1122{
1123	(void) nvlist_remove_all(spa->spa_label_features, feature);
1124	vdev_config_dirty(spa->spa_root_vdev);
1125}
1126
1127/*
1128 * Rename a spa_t.
1129 */
1130int
1131spa_rename(const char *name, const char *newname)
1132{
1133	spa_t *spa;
1134	int err;
1135
1136	/*
1137	 * Lookup the spa_t and grab the config lock for writing.  We need to
1138	 * actually open the pool so that we can sync out the necessary labels.
1139	 * It's OK to call spa_open() with the namespace lock held because we
1140	 * allow recursive calls for other reasons.
1141	 */
1142	mutex_enter(&spa_namespace_lock);
1143	if ((err = spa_open(name, &spa, FTAG)) != 0) {
1144		mutex_exit(&spa_namespace_lock);
1145		return (err);
1146	}
1147
1148	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1149
1150	avl_remove(&spa_namespace_avl, spa);
1151	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
1152	avl_add(&spa_namespace_avl, spa);
1153
1154	/*
1155	 * Sync all labels to disk with the new names by marking the root vdev
1156	 * dirty and waiting for it to sync.  It will pick up the new pool name
1157	 * during the sync.
1158	 */
1159	vdev_config_dirty(spa->spa_root_vdev);
1160
1161	spa_config_exit(spa, SCL_ALL, FTAG);
1162
1163	txg_wait_synced(spa->spa_dsl_pool, 0);
1164
1165	/*
1166	 * Sync the updated config cache.
1167	 */
1168	spa_config_sync(spa, B_FALSE, B_TRUE);
1169
1170	spa_close(spa, FTAG);
1171
1172	mutex_exit(&spa_namespace_lock);
1173
1174	return (0);
1175}
1176
1177/*
1178 * Return the spa_t associated with given pool_guid, if it exists.  If
1179 * device_guid is non-zero, determine whether the pool exists *and* contains
1180 * a device with the specified device_guid.
1181 */
1182spa_t *
1183spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1184{
1185	spa_t *spa;
1186	avl_tree_t *t = &spa_namespace_avl;
1187
1188	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1189
1190	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1191		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1192			continue;
1193		if (spa->spa_root_vdev == NULL)
1194			continue;
1195		if (spa_guid(spa) == pool_guid) {
1196			if (device_guid == 0)
1197				break;
1198
1199			if (vdev_lookup_by_guid(spa->spa_root_vdev,
1200			    device_guid) != NULL)
1201				break;
1202
1203			/*
1204			 * Check any devices we may be in the process of adding.
1205			 */
1206			if (spa->spa_pending_vdev) {
1207				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1208				    device_guid) != NULL)
1209					break;
1210			}
1211		}
1212	}
1213
1214	return (spa);
1215}
1216
1217/*
1218 * Determine whether a pool with the given pool_guid exists.
1219 */
1220boolean_t
1221spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1222{
1223	return (spa_by_guid(pool_guid, device_guid) != NULL);
1224}
1225
1226char *
1227spa_strdup(const char *s)
1228{
1229	size_t len;
1230	char *new;
1231
1232	len = strlen(s);
1233	new = kmem_alloc(len + 1, KM_SLEEP);
1234	bcopy(s, new, len);
1235	new[len] = '\0';
1236
1237	return (new);
1238}
1239
1240void
1241spa_strfree(char *s)
1242{
1243	kmem_free(s, strlen(s) + 1);
1244}
1245
1246uint64_t
1247spa_get_random(uint64_t range)
1248{
1249	uint64_t r;
1250
1251	ASSERT(range != 0);
1252
1253	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
1254
1255	return (r % range);
1256}
1257
1258uint64_t
1259spa_generate_guid(spa_t *spa)
1260{
1261	uint64_t guid = spa_get_random(-1ULL);
1262
1263	if (spa != NULL) {
1264		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
1265			guid = spa_get_random(-1ULL);
1266	} else {
1267		while (guid == 0 || spa_guid_exists(guid, 0))
1268			guid = spa_get_random(-1ULL);
1269	}
1270
1271	return (guid);
1272}
1273
1274void
1275sprintf_blkptr(char *buf, const blkptr_t *bp)
1276{
1277	char type[256];
1278	char *checksum = NULL;
1279	char *compress = NULL;
1280
1281	if (bp != NULL) {
1282		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1283			dmu_object_byteswap_t bswap =
1284			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1285			(void) snprintf(type, sizeof (type), "bswap %s %s",
1286			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1287			    "metadata" : "data",
1288			    dmu_ot_byteswap[bswap].ob_name);
1289		} else {
1290			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1291			    sizeof (type));
1292		}
1293		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1294		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1295	}
1296
1297	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
1298}
1299
1300void
1301spa_freeze(spa_t *spa)
1302{
1303	uint64_t freeze_txg = 0;
1304
1305	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1306	if (spa->spa_freeze_txg == UINT64_MAX) {
1307		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1308		spa->spa_freeze_txg = freeze_txg;
1309	}
1310	spa_config_exit(spa, SCL_ALL, FTAG);
1311	if (freeze_txg != 0)
1312		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1313}
1314
1315void
1316zfs_panic_recover(const char *fmt, ...)
1317{
1318	va_list adx;
1319
1320	va_start(adx, fmt);
1321	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
1322	va_end(adx);
1323}
1324
1325/*
1326 * This is a stripped-down version of strtoull, suitable only for converting
1327 * lowercase hexadecimal numbers that don't overflow.
1328 */
1329uint64_t
1330strtonum(const char *str, char **nptr)
1331{
1332	uint64_t val = 0;
1333	char c;
1334	int digit;
1335
1336	while ((c = *str) != '\0') {
1337		if (c >= '0' && c <= '9')
1338			digit = c - '0';
1339		else if (c >= 'a' && c <= 'f')
1340			digit = 10 + c - 'a';
1341		else
1342			break;
1343
1344		val *= 16;
1345		val += digit;
1346
1347		str++;
1348	}
1349
1350	if (nptr)
1351		*nptr = (char *)str;
1352
1353	return (val);
1354}
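
/*
 * Example of the strtonum() behavior above (illustrative): given the string
 * "1a2b", it returns 0x1a2b (6699) and, if 'nptr' is non-NULL, leaves it
 * pointing at the terminating NUL.
 */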
1355
1356/*
1357 * ==========================================================================
1358 * Accessor functions
1359 * ==========================================================================
1360 */
1361
1362boolean_t
1363spa_shutting_down(spa_t *spa)
1364{
1365	return (spa->spa_async_suspended);
1366}
1367
1368dsl_pool_t *
1369spa_get_dsl(spa_t *spa)
1370{
1371	return (spa->spa_dsl_pool);
1372}
1373
1374boolean_t
1375spa_is_initializing(spa_t *spa)
1376{
1377	return (spa->spa_is_initializing);
1378}
1379
1380blkptr_t *
1381spa_get_rootblkptr(spa_t *spa)
1382{
1383	return (&spa->spa_ubsync.ub_rootbp);
1384}
1385
1386void
1387spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1388{
1389	spa->spa_uberblock.ub_rootbp = *bp;
1390}
1391
1392void
1393spa_altroot(spa_t *spa, char *buf, size_t buflen)
1394{
1395	if (spa->spa_root == NULL)
1396		buf[0] = '\0';
1397	else
1398		(void) strncpy(buf, spa->spa_root, buflen);
1399}
1400
1401int
1402spa_sync_pass(spa_t *spa)
1403{
1404	return (spa->spa_sync_pass);
1405}
1406
1407char *
1408spa_name(spa_t *spa)
1409{
1410	return (spa->spa_name);
1411}
1412
1413uint64_t
1414spa_guid(spa_t *spa)
1415{
1416	dsl_pool_t *dp = spa_get_dsl(spa);
1417	uint64_t guid;
1418
1419	/*
1420	 * If we fail to parse the config during spa_load(), we can go through
1421	 * the error path (which posts an ereport) and end up here with no root
1422	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
1423	 * this case.
1424	 */
1425	if (spa->spa_root_vdev == NULL)
1426		return (spa->spa_config_guid);
1427
1428	guid = spa->spa_last_synced_guid != 0 ?
1429	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1430
1431	/*
1432	 * Return the most recently synced out guid unless we're
1433	 * in syncing context.
1434	 */
1435	if (dp && dsl_pool_sync_context(dp))
1436		return (spa->spa_root_vdev->vdev_guid);
1437	else
1438		return (guid);
1439}
1440
1441uint64_t
1442spa_load_guid(spa_t *spa)
1443{
1444	/*
1445	 * This is a GUID that exists solely as a reference for the
1446	 * purposes of the arc.  It is generated at load time, and
1447	 * is never written to persistent storage.
1448	 */
1449	return (spa->spa_load_guid);
1450}
1451
1452uint64_t
1453spa_last_synced_txg(spa_t *spa)
1454{
1455	return (spa->spa_ubsync.ub_txg);
1456}
1457
1458uint64_t
1459spa_first_txg(spa_t *spa)
1460{
1461	return (spa->spa_first_txg);
1462}
1463
1464uint64_t
1465spa_syncing_txg(spa_t *spa)
1466{
1467	return (spa->spa_syncing_txg);
1468}
1469
1470pool_state_t
1471spa_state(spa_t *spa)
1472{
1473	return (spa->spa_state);
1474}
1475
1476spa_load_state_t
1477spa_load_state(spa_t *spa)
1478{
1479	return (spa->spa_load_state);
1480}
1481
1482uint64_t
1483spa_freeze_txg(spa_t *spa)
1484{
1485	return (spa->spa_freeze_txg);
1486}
1487
1488/* ARGSUSED */
1489uint64_t
1490spa_get_asize(spa_t *spa, uint64_t lsize)
1491{
1492	/*
1493	 * The worst case is single-sector max-parity RAID-Z blocks, in which
1494	 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
1495	 * times the size; so just assume that.  Add to this the fact that
1496	 * we can have up to 3 DVAs per bp, and one more factor of 2 because
1497	 * the block may be dittoed with up to 3 DVAs by ddt_sync().
1498	 */
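	/*
	 * With the current values (VDEV_RAIDZ_MAXPARITY == 3 and
	 * SPA_DVAS_PER_BP == 3), this is a worst-case inflation of
	 * 4 * 3 * 2 == 24x lsize.
	 */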
1499	return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
1500}
1501
1502uint64_t
1503spa_get_dspace(spa_t *spa)
1504{
1505	return (spa->spa_dspace);
1506}
1507
1508void
1509spa_update_dspace(spa_t *spa)
1510{
1511	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1512	    ddt_get_dedup_dspace(spa);
1513}
1514
1515/*
1516 * Return the failure mode that has been set for this pool. The default
1517 * behavior will be to block all I/Os when a complete failure occurs.
1518 */
1519uint8_t
1520spa_get_failmode(spa_t *spa)
1521{
1522	return (spa->spa_failmode);
1523}
1524
1525boolean_t
1526spa_suspended(spa_t *spa)
1527{
1528	return (spa->spa_suspended);
1529}
1530
1531uint64_t
1532spa_version(spa_t *spa)
1533{
1534	return (spa->spa_ubsync.ub_version);
1535}
1536
1537boolean_t
1538spa_deflate(spa_t *spa)
1539{
1540	return (spa->spa_deflate);
1541}
1542
1543metaslab_class_t *
1544spa_normal_class(spa_t *spa)
1545{
1546	return (spa->spa_normal_class);
1547}
1548
1549metaslab_class_t *
1550spa_log_class(spa_t *spa)
1551{
1552	return (spa->spa_log_class);
1553}
1554
1555int
1556spa_max_replication(spa_t *spa)
1557{
1558	/*
1559	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
1560	 * handle BPs with more than one DVA allocated.  Set our max
1561	 * replication level accordingly.
1562	 */
1563	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
1564		return (1);
1565	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1566}
1567
1568int
1569spa_prev_software_version(spa_t *spa)
1570{
1571	return (spa->spa_prev_software_version);
1572}
1573
1574uint64_t
1575spa_deadman_synctime(spa_t *spa)
1576{
1577	return (spa->spa_deadman_synctime);
1578}
1579
1580uint64_t
1581dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
1582{
1583	uint64_t asize = DVA_GET_ASIZE(dva);
1584	uint64_t dsize = asize;
1585
1586	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1587
1588	if (asize != 0 && spa->spa_deflate) {
1589		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
1590		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
1591	}
1592
1593	return (dsize);
1594}
1595
1596uint64_t
1597bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
1598{
1599	uint64_t dsize = 0;
1600
1601	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
1602		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1603
1604	return (dsize);
1605}
1606
1607uint64_t
1608bp_get_dsize(spa_t *spa, const blkptr_t *bp)
1609{
1610	uint64_t dsize = 0;
1611
1612	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1613
1614	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
1615		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1616
1617	spa_config_exit(spa, SCL_VDEV, FTAG);
1618
1619	return (dsize);
1620}
1621
1622/*
1623 * ==========================================================================
1624 * Initialization and Termination
1625 * ==========================================================================
1626 */
1627
1628static int
1629spa_name_compare(const void *a1, const void *a2)
1630{
1631	const spa_t *s1 = a1;
1632	const spa_t *s2 = a2;
1633	int s;
1634
1635	s = strcmp(s1->spa_name, s2->spa_name);
1636	if (s > 0)
1637		return (1);
1638	if (s < 0)
1639		return (-1);
1640	return (0);
1641}
1642
1643int
1644spa_busy(void)
1645{
1646	return (spa_active_count);
1647}
1648
1649void
1650spa_boot_init()
1651{
1652	spa_config_load();
1653}
1654
1655void
1656spa_init(int mode)
1657{
1658	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
1659	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
1660	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
1661	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
1662
1663	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
1664	    offsetof(spa_t, spa_avl));
1665
1666	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
1667	    offsetof(spa_aux_t, aux_avl));
1668
1669	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
1670	    offsetof(spa_aux_t, aux_avl));
1671
1672	spa_mode_global = mode;
1673
1674#ifdef _KERNEL
1675	spa_arch_init();
1676#else
1677	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
1678		arc_procfd = open("/proc/self/ctl", O_WRONLY);
1679		if (arc_procfd == -1) {
1680			perror("could not enable watchpoints: "
1681			    "opening /proc/self/ctl failed: ");
1682		} else {
1683			arc_watch = B_TRUE;
1684		}
1685	}
1686#endif
1687
1688	refcount_init();
1689	unique_init();
1690	space_map_init();
1691	zio_init();
1692	dmu_init();
1693	zil_init();
1694	vdev_cache_stat_init();
1695	zfs_prop_init();
1696	zpool_prop_init();
1697	zpool_feature_init();
1698	spa_config_load();
1699	l2arc_start();
1700}
1701
1702void
1703spa_fini(void)
1704{
1705	l2arc_stop();
1706
1707	spa_evict_all();
1708
1709	vdev_cache_stat_fini();
1710	zil_fini();
1711	dmu_fini();
1712	zio_fini();
1713	space_map_fini();
1714	unique_fini();
1715	refcount_fini();
1716
1717	avl_destroy(&spa_namespace_avl);
1718	avl_destroy(&spa_spare_avl);
1719	avl_destroy(&spa_l2cache_avl);
1720
1721	cv_destroy(&spa_namespace_cv);
1722	mutex_destroy(&spa_namespace_lock);
1723	mutex_destroy(&spa_spare_lock);
1724	mutex_destroy(&spa_l2cache_lock);
1725}
1726
1727/*
1728 * Return whether this pool has slogs. No locking needed.
1729 * It's not a problem if the wrong answer is returned as it's only for
1730 * performance and not correctness.
1731 */
1732boolean_t
1733spa_has_slogs(spa_t *spa)
1734{
1735	return (spa->spa_log_class->mc_rotor != NULL);
1736}
1737
1738spa_log_state_t
1739spa_get_log_state(spa_t *spa)
1740{
1741	return (spa->spa_log_state);
1742}
1743
1744void
1745spa_set_log_state(spa_t *spa, spa_log_state_t state)
1746{
1747	spa->spa_log_state = state;
1748}
1749
1750boolean_t
1751spa_is_root(spa_t *spa)
1752{
1753	return (spa->spa_is_root);
1754}
1755
1756boolean_t
1757spa_writeable(spa_t *spa)
1758{
1759	return (!!(spa->spa_mode & FWRITE));
1760}
1761
1762int
1763spa_mode(spa_t *spa)
1764{
1765	return (spa->spa_mode);
1766}
1767
1768uint64_t
1769spa_bootfs(spa_t *spa)
1770{
1771	return (spa->spa_bootfs);
1772}
1773
1774uint64_t
1775spa_delegation(spa_t *spa)
1776{
1777	return (spa->spa_delegation);
1778}
1779
1780objset_t *
1781spa_meta_objset(spa_t *spa)
1782{
1783	return (spa->spa_meta_objset);
1784}
1785
1786enum zio_checksum
1787spa_dedup_checksum(spa_t *spa)
1788{
1789	return (spa->spa_dedup_checksum);
1790}
1791
1792/*
1793 * Reset pool scan stat per scan pass (or reboot).
1794 */
1795void
1796spa_scan_stat_init(spa_t *spa)
1797{
1798	/* data not stored on disk */
1799	spa->spa_scan_pass_start = gethrestime_sec();
1800	spa->spa_scan_pass_exam = 0;
1801	vdev_scan_stat_init(spa->spa_root_vdev);
1802}
1803
1804/*
1805 * Get scan stats for zpool status reports
1806 */
1807int
1808spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
1809{
1810	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
1811
1812	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
1813		return (ENOENT);
1814	bzero(ps, sizeof (pool_scan_stat_t));
1815
1816	/* data stored on disk */
1817	ps->pss_func = scn->scn_phys.scn_func;
1818	ps->pss_start_time = scn->scn_phys.scn_start_time;
1819	ps->pss_end_time = scn->scn_phys.scn_end_time;
1820	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
1821	ps->pss_examined = scn->scn_phys.scn_examined;
1822	ps->pss_to_process = scn->scn_phys.scn_to_process;
1823	ps->pss_processed = scn->scn_phys.scn_processed;
1824	ps->pss_errors = scn->scn_phys.scn_errors;
1825	ps->pss_state = scn->scn_phys.scn_state;
1826
1827	/* data not stored on disk */
1828	ps->pss_pass_start = spa->spa_scan_pass_start;
1829	ps->pss_pass_exam = spa->spa_scan_pass_exam;
1830
1831	return (0);
1832}
1833
1834boolean_t
1835spa_debug_enabled(spa_t *spa)
1836{
1837	return (spa->spa_debug);
1838}
1839