spa_misc.c revision bf82a41b568b2bd31bf9814587eb25ee2e7b05ff
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include "zfs_prop.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against SPA_MINREF, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock (per-spa read-priority rwlock)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * spa_config_cache_lock (per-spa mutex)
 *
 *	This mutex prevents the spa_config nvlist from being updated.  No
 *	other locks are required to obtain this lock, although implicitly you
 *	must have the namespace lock or non-zero refcount to have any kind
 *	of spa_t pointer at all.
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock and spa_config_cache_lock can be acquired directly and
 * are globally visible.
 *
 * The namespace is manipulated using the following functions, all of which require
 * the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock is a form of rwlock.  It must be held as RW_READER
 * to perform I/O to the pool, and as RW_WRITER to change the vdev config.
 * The spa_config_lock is manipulated with spa_config_{enter,exit,held}().
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * The spa_name() function also requires either the spa_namespace_lock
 * or the spa_config_lock, as both are needed to do a rename.  spa_rename() is
 * also implemented within this file since it requires manipulation of the
 * namespace.
 */
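
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the typical pattern a consumer would follow to perform I/O against a
 * pool while honoring the lock ordering above.  The pool name and use of
 * FTAG below are hypothetical.
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa = spa_lookup("tank");		(namespace lock required)
 *	spa_open_ref(spa, FTAG);		(refcount may be zero here)
 *	mutex_exit(&spa_namespace_lock);
 *
 *	spa_config_enter(spa, RW_READER, FTAG);	(reader side for I/O)
 *	... issue I/O against the pool ...
 *	spa_config_exit(spa, FTAG);
 *
 *	spa_close(spa, FTAG);			(no locking required)
 */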

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

#define	SPA_MINREF	5	/* spa_refcnt for an open-but-idle pool */

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_config_lock_t *scl)
{
	mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
	scl->scl_writer = NULL;
	cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
	refcount_create(&scl->scl_count);
}

static void
spa_config_lock_destroy(spa_config_lock_t *scl)
{
	mutex_destroy(&scl->scl_lock);
	ASSERT(scl->scl_writer == NULL);
	cv_destroy(&scl->scl_cv);
	refcount_destroy(&scl->scl_count);
}

void
spa_config_enter(spa_t *spa, krw_t rw, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	if (rw == RW_READER) {
		while (scl->scl_writer != NULL && scl->scl_writer != curthread)
			cv_wait(&scl->scl_cv, &scl->scl_lock);
	} else {
		while (!refcount_is_zero(&scl->scl_count) &&
		    scl->scl_writer != curthread)
			cv_wait(&scl->scl_cv, &scl->scl_lock);
		scl->scl_writer = curthread;
	}

	(void) refcount_add(&scl->scl_count, tag);

	mutex_exit(&scl->scl_lock);
}

void
spa_config_exit(spa_t *spa, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	ASSERT(!refcount_is_zero(&scl->scl_count));

	if (refcount_remove(&scl->scl_count, tag) == 0) {
		cv_broadcast(&scl->scl_cv);
		ASSERT(scl->scl_writer == NULL || scl->scl_writer == curthread);
		scl->scl_writer = NULL;  /* OK in either case */
	}

	mutex_exit(&scl->scl_lock);
}

boolean_t
spa_config_held(spa_t *spa, krw_t rw)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	if (rw == RW_READER)
		return (!refcount_is_zero(&scl->scl_count));
	else
		return (scl->scl_writer == curthread);
}
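
/*
 * Illustrative note (editorial addition): spa_config_held() typically
 * appears in assertions, as spa_name() does later in this file, e.g.:
 *
 *	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
 *	    spa_config_held(spa, RW_READER));
 */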

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	spa_t search, *spa;
	avl_index_t where;
	char c;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	search.spa_name = (char *)name;
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&spa->spa_uberblock_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);

	spa->spa_name = spa_strdup(name);
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(&spa->spa_config_lock);

	avl_add(&spa_namespace_avl, spa);

	mutex_init(&spa->spa_zio_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT(spa->spa_scrub_thread == NULL);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	if (spa->spa_name)
		spa_strfree(spa->spa_name);

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(&spa->spa_config_lock);

	rw_destroy(&spa->spa_traverse_lock);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_scrub_cv);
	cv_destroy(&spa->spa_scrub_io_cv);

	mutex_destroy(&spa->spa_uberblock_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_config_cache_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_zio_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against SPA_MINREF, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == SPA_MINREF);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = guid;
	found = avl_find(avl, &search, &where);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
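
/*
 * Illustrative sketch (editorial addition): how a caller might consult the
 * shared spare state through the wrappers below.  The 'guid' and 'spa'
 * variables are hypothetical.
 *
 *	uint64_t pool;
 *
 *	if (spa_spare_exists(guid, &pool) &&
 *	    pool != 0ULL && pool != spa_guid(spa))
 *		... the device is an active spare in another pool ...
 */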

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc)
{
	vdev_space_update(vd, space, alloc, B_FALSE);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);

	/*
	 * Suspend scrub activity while we mess with the config.  We must do
	 * this after acquiring the namespace lock to avoid a 3-way deadlock
	 * with spa_scrub_stop() and the scrub thread.
	 */
	spa_scrub_suspend(spa);

	spa_config_enter(spa, RW_WRITER, spa);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_dirty_list)) {
		config_changed = B_TRUE;
		spa_scrub_restart(spa, txg);
	}

	spa_config_exit(spa, spa);

	/*
	 * Allow scrubbing to resume.
	 */
	spa_scrub_resume(spa);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl.smo_object == 0);
		vdev_free(vd);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);

	mutex_exit(&spa_namespace_lock);

	return (error);
}
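
/*
 * Illustrative sketch (editorial addition): the canonical shape of a vdev
 * reconfiguration operation built on the pair above.  The body and the
 * 'error' value are hypothetical placeholders.
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *
 *	... modify the vdev tree, setting 'error' on failure ...
 *
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */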

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, RW_WRITER, FTAG);

	avl_remove(&spa_namespace_avl, spa);
	spa_strfree(spa->spa_name);
	spa->spa_name = spa_strdup(newname);
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with the
 * specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

void
sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
{
	int d;

	if (bp == NULL) {
		(void) snprintf(buf, len, "<NULL>");
		return;
	}

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(buf, len, "<hole>");
		return;
	}

	(void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
	    (u_longlong_t)BP_GET_LEVEL(bp),
	    dmu_ot[BP_GET_TYPE(bp)].ot_name,
	    (u_longlong_t)BP_GET_LSIZE(bp),
	    (u_longlong_t)BP_GET_PSIZE(bp));

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];
		(void) snprintf(buf + strlen(buf), len - strlen(buf),
		    "DVA[%d]=<%llu:%llx:%llx> ", d,
		    (u_longlong_t)DVA_GET_VDEV(dva),
		    (u_longlong_t)DVA_GET_OFFSET(dva),
		    (u_longlong_t)DVA_GET_ASIZE(dva));
	}

	(void) snprintf(buf + strlen(buf), len - strlen(buf),
	    "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name,
	    zio_compress_table[BP_GET_COMPRESS(bp)].ci_name,
	    BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
	    BP_IS_GANG(bp) ? "gang" : "contiguous",
	    (u_longlong_t)bp->blk_birth,
	    (u_longlong_t)bp->blk_fill,
	    (u_longlong_t)bp->blk_cksum.zc_word[0],
	    (u_longlong_t)bp->blk_cksum.zc_word[1],
	    (u_longlong_t)bp->blk_cksum.zc_word[2],
	    (u_longlong_t)bp->blk_cksum.zc_word[3]);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

krwlock_t *
spa_traverse_rwlock(spa_t *spa)
{
	return (&spa->spa_traverse_lock);
}

int
spa_traverse_wanted(spa_t *spa)
{
	return (spa->spa_traverse_wanted);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	/*
	 * Accessing the name requires holding either the namespace lock or the
	 * config lock, both of which are required to do a rename.
	 */
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa_config_held(spa, RW_READER));

	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

int
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return how much space is allocated in the pool (i.e. the sum of all asize).
 */
uint64_t
spa_get_alloc(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_alloc);
}

/*
 * Return how much (raid-z inflated) space there is in the pool.
 */
uint64_t
spa_get_space(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/*
 * Return the amount of raid-z-deflated space in the pool.
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, and
	 * we have to multiply by a total of 6x.
	 */
	return (lsize * 6);
}
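
/*
 * Worked example (editorial addition): under the worst-case rule above,
 * a 128K logical write must reserve 128K * 6 = 768K of allocatable space.
 */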

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	int sz = 0, i;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	spa_config_enter(spa, RW_READER, FTAG);
	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		if (vd)
			sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >>
			    SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}
	spa_config_exit(spa, FTAG);
	return (sz);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init(void)
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	spa_config_load();
}

void
spa_fini(void)
{
	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs. No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

/*
 * Return whether this pool is the root pool.
 */
boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}
