spa_misc.c revision 4b964ada391d44b89d97e7e930e6a9a136e0a2f4
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
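
/*
 * Illustrative sketch of the ordering rules above (an editor's addition,
 * not part of the original source): a consumer first obtains a reference
 * with spa_open(), then takes the config lock as reader.  FTAG is the
 * usual tag macro; "tank" is a hypothetical pool name.
 *
 *	spa_t *spa;
 *
 *	if (spa_open("tank", &spa, FTAG) == 0) {
 *		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
 *		(inspect the vdev tree or pool config here)
 *		spa_config_exit(spa, SCL_CONFIG, FTAG);
 *		spa_close(spa, FTAG);
 *	}
 */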

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;


/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
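
/*
 * Editor's sketch (assumed usage, not from the original source): the
 * non-blocking spa_config_tryenter() backs out of every lock it took
 * when any requested lock is contended, so a caller either holds all
 * of the requested locks or none of them.
 *
 *	if (spa_config_tryenter(spa, SCL_ZIO, FTAG, RW_READER)) {
 *		(do the read-side work here)
 *		spa_config_exit(spa, SCL_ZIO, FTAG);
 *	} else {
 *		(nothing is held; retry or fall back to spa_config_enter())
 *	}
 */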

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char c;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL)
		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
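
/*
 * Editor's sketch (assumed usage): walking every pool in the namespace
 * with spa_next(), which must be done under the namespace lock.
 *
 *	spa_t *spa = NULL;
 *
 *	mutex_enter(&spa_namespace_lock);
 *	while ((spa = spa_next(spa)) != NULL) {
 *		(examine one pool here)
 *	}
 *	mutex_exit(&spa_namespace_lock);
 */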

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
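
/*
 * Editor's sketch (assumed usage): the usual pairing of spa_open_ref()
 * and spa_close().  The tag is typically FTAG or the holder itself, so
 * that a leaked reference can be attributed to its owner.
 *
 *	spa_open_ref(spa, FTAG);
 *	(use the spa_t; it cannot be freed while the reference is held)
 *	spa_close(spa, FTAG);
 */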

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 * 	- A spare may be part of multiple pools.
 * 	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 * 	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}
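
/*
 * Editor's sketch (assumed usage): querying the global spare tree, for
 * example to decide whether a device guid names a shared hot spare.
 *
 *	uint64_t pool;
 *	int refs;
 *
 *	if (spa_spare_exists(guid, &pool, &refs) && pool != 0ULL) {
 *		(the spare is active in the pool with guid 'pool')
 *	}
 */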

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
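
/*
 * Editor's sketch (assumed usage): the canonical bracket used by vdev
 * add/remove paths.  Any vdev to be freed, and any error, are threaded
 * through spa_vdev_exit().
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *
 *	(modify the vdev tree here, dirtying the config as needed)
 *
 *	return (spa_vdev_exit(spa, NULL, txg, error));
 */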

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
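
/*
 * Editor's sketch (assumed usage): state changes such as online/offline
 * follow the same bracket pattern; SCL_NONE asks for no locks beyond
 * SCL_STATE_ALL.
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	(change the state of one or more vdevs here)
 *	return (spa_vdev_state_exit(spa, vd, error));
 */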

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with the
 * specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
	char *type = NULL;
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		type = dmu_ot[BP_GET_TYPE(bp)].ot_name;
		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
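
/*
 * Editor's example: strtonum() consumes leading lowercase hex digits and
 * reports where it stopped, so strtonum("1a2f/10", &end) returns 0x1a2f
 * (6703) and leaves 'end' pointing at the '/'.
 */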

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * The worst case is single-sector max-parity RAID-Z blocks, in which
	 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
	 * times the size; so just assume that.  Add to this the fact that
	 * we can have up to 3 DVAs per bp, and one more factor of 2 because
	 * the block may be dittoed with up to 3 DVAs by ddt_sync().
	 */
	return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
}
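
/*
 * Editor's note (worked example, assuming VDEV_RAIDZ_MAXPARITY == 3 and
 * SPA_DVAS_PER_BP == 3): the multiplier above is (3 + 1) * 3 * 2 = 24,
 * so a 128K logical block is charged 3M of worst-case allocated space.
 */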

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (ENOENT);
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}
1664