spa_misc.c revision 8654d0253136055bd4cc2423d87378e8a37f2eb5
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to look up a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against SPA_MINREF, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock (per-spa crazy rwlock)
 *
 *	This SPA special is a recursive rwlock, capable of being acquired from
 *	asynchronous threads.  It protects the spa_t from config changes,
 *	and must be held in the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * spa_config_cache_lock (per-spa mutex)
 *
 *	This mutex prevents the spa_config nvlist from being updated.  No
 *	other locks are required to obtain this lock, although implicitly you
 *	must have the namespace lock or a non-zero refcount to have any kind
 *	of spa_t pointer at all.
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock and spa_config_cache_lock can be acquired directly
 * and are globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock is manipulated using the following functions:
 *
 *	spa_config_enter()	Acquire the config lock as RW_READER or
 *				RW_WRITER.  At least one reference on the spa_t
 *				must exist.
 *
 *	spa_config_exit()	Release the config lock.
 *
 *	spa_config_held()	Returns true if the config lock is currently
 *				held in the given state.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * The spa_name() function also requires either the spa_namespace_lock
 * or the spa_config_lock, as both are needed to do a rename.  spa_rename() is
 * also implemented within this file since it requires manipulation of the
 * namespace.
 */
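
/*
 * A sketch of the locking protocol above, for a hypothetical consumer (the
 * pool name "tank" is an assumption; FTAG is the usual tag convention):
 *
 *	spa_t *spa;
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *
 * With a reference held, the config lock may then be taken for I/O:
 *
 *	spa_config_enter(spa, RW_READER, FTAG);
 *	... perform I/O against a stable vdev config ...
 *	spa_config_exit(spa, FTAG);
 *	spa_close(spa, FTAG);
 */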

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode;

#ifdef ZFS_DEBUG
int zfs_flags = ~0;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

#define	SPA_MINREF	5	/* spa_refcnt for an open-but-idle pool */

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	spa_t search, *spa;
	avl_index_t where;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	search.spa_name = (char *)name;
	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, const char *altroot)
{
	spa_t *spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	spa->spa_name = spa_strdup(name);
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;

	refcount_create(&spa->spa_refcount);
	refcount_create(&spa->spa_config_lock.scl_count);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT(spa->spa_scrub_thread == NULL);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	if (spa->spa_name)
		spa_strfree(spa->spa_name);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);
	refcount_destroy(&spa->spa_config_lock.scl_count);

	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
	mutex_destroy(&spa->spa_config_lock.scl_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_config_cache_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_props_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against SPA_MINREF, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == SPA_MINREF);
}
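
/*
 * A worked instance of the zero/non-zero illusion described at the top of
 * this file: a freshly opened pool already holds SPA_MINREF (5) internal
 * references, so refcount_count() == SPA_MINREF is what spa_refcount_zero()
 * reports as "zero", and adding a reference from that state requires the
 * namespace lock, as the ASSERTs above enforce.
 */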

/*
 * ==========================================================================
 * SPA spare tracking
 * ==========================================================================
 */

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace_lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
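
/*
 * A sketch of the spare lifecycle described above (hypothetical sequence;
 * 'vd' stands in for a spare vdev already constructed by the caller):
 *
 *	spa_spare_add(vd);		vdev added to a pool's spare list
 *	spa_spare_activate(vd);		spare used to replace a failed device
 *	spa_spare_remove(vd);		spare detached from the pool again
 *
 * spa_spare_exists() can then answer, for any guid, whether the device is
 * known as a spare and, if active, which pool guid it is serving.
 */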

typedef struct spa_spare {
	uint64_t	spare_guid;
	uint64_t	spare_pool;
	avl_node_t	spare_avl;
	int		spare_count;
} spa_spare_t;

static int
spa_spare_compare(const void *a, const void *b)
{
	const spa_spare_t *sa = a;
	const spa_spare_t *sb = b;

	if (sa->spare_guid < sb->spare_guid)
		return (-1);
	else if (sa->spare_guid > sb->spare_guid)
		return (1);
	else
		return (0);
}

void
spa_spare_add(vdev_t *vd)
{
	avl_index_t where;
	spa_spare_t search;
	spa_spare_t *spare;

	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);

	search.spare_guid = vd->vdev_guid;
	if ((spare = avl_find(&spa_spare_avl, &search, &where)) != NULL) {
		spare->spare_count++;
	} else {
		spare = kmem_zalloc(sizeof (spa_spare_t), KM_SLEEP);
		spare->spare_guid = vd->vdev_guid;
		spare->spare_count = 1;
		avl_insert(&spa_spare_avl, spare, where);
	}
	vd->vdev_isspare = B_TRUE;

	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	spa_spare_t search;
	spa_spare_t *spare;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = vd->vdev_guid;
	spare = avl_find(&spa_spare_avl, &search, &where);

	ASSERT(vd->vdev_isspare);
	ASSERT(spare != NULL);

	if (--spare->spare_count == 0) {
		avl_remove(&spa_spare_avl, spare);
		kmem_free(spare, sizeof (spa_spare_t));
	} else if (spare->spare_pool == spa_guid(vd->vdev_spa)) {
		spare->spare_pool = 0ULL;
	}

	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool)
{
	spa_spare_t search, *found;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = guid;
	found = avl_find(&spa_spare_avl, &search, &where);

	if (pool) {
		if (found)
			*pool = found->spare_pool;
		else
			*pool = 0ULL;
	}

	mutex_exit(&spa_spare_lock);

	return (found != NULL);
}

void
spa_spare_activate(vdev_t *vd)
{
	spa_spare_t search, *found;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);

	search.spare_guid = vd->vdev_guid;
	found = avl_find(&spa_spare_avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->spare_pool == 0ULL);

	found->spare_pool = spa_guid(vd->vdev_spa);
	mutex_exit(&spa_spare_lock);
}

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */

/*
 * Acquire the config lock.  The config lock is a special rwlock that allows for
 * recursive enters.  Because these enters come from the same thread as well as
 * asynchronous threads working on behalf of the owner, we must unilaterally
 * allow all read access as long as at least one reader holds the lock (even if
 * a write is requested).  This has the side effect of write starvation, but
 * write locks are extremely rare, and a solution to this problem would be
 * significantly more complex (if even possible).
 *
 * We would like to assert that the namespace lock isn't held, but this is a
 * valid use during create.
 */
void
spa_config_enter(spa_t *spa, krw_t rw, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	if (scl->scl_writer != curthread) {
		if (rw == RW_READER) {
			while (scl->scl_writer != NULL)
				cv_wait(&scl->scl_cv, &scl->scl_lock);
		} else {
			while (scl->scl_writer != NULL ||
			    !refcount_is_zero(&scl->scl_count))
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			scl->scl_writer = curthread;
		}
	}

	(void) refcount_add(&scl->scl_count, tag);

	mutex_exit(&scl->scl_lock);
}

/*
 * Release the spa config lock, notifying any waiters in the process.
 */
void
spa_config_exit(spa_t *spa, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	ASSERT(!refcount_is_zero(&scl->scl_count));
	if (refcount_remove(&scl->scl_count, tag) == 0) {
		cv_broadcast(&scl->scl_cv);
		scl->scl_writer = NULL;  /* OK in either case */
	}

	mutex_exit(&scl->scl_lock);
}

/*
 * Returns true if the config lock is held in the given manner.
 */
boolean_t
spa_config_held(spa_t *spa, krw_t rw)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;
	boolean_t held;

	mutex_enter(&scl->scl_lock);
	if (rw == RW_WRITER)
		held = (scl->scl_writer == curthread);
	else
		held = !refcount_is_zero(&scl->scl_count);
	mutex_exit(&scl->scl_lock);

	return (held);
}
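
/*
 * A sketch of the recursion the comment above allows (hypothetical call
 * chain): a thread already holding the lock, even as writer, may enter
 * again without deadlocking, because the scl_writer == curthread check
 * short-circuits the wait loops:
 *
 *	spa_config_enter(spa, RW_WRITER, FTAG);
 *	...
 *	spa_config_enter(spa, RW_READER, FTAG);		recursive enter
 *	...
 *	spa_config_exit(spa, FTAG);
 *	spa_config_exit(spa, FTAG);
 *
 * Each enter must be balanced by an exit with the same tag; the tag-based
 * refcount reaching zero is what triggers the final cv_broadcast().
 */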

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);

	/*
	 * Suspend scrub activity while we mess with the config.  We must do
	 * this after acquiring the namespace lock to avoid a 3-way deadlock
	 * with spa_scrub_stop() and the scrub thread.
	 */
	spa_scrub_suspend(spa);

	spa_config_enter(spa, RW_WRITER, spa);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_dirty_list)) {
		config_changed = B_TRUE;
		spa_scrub_restart(spa, txg);
	}

	spa_config_exit(spa, spa);

	/*
	 * Allow scrubbing to resume.
	 */
	spa_scrub_resume(spa);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl.smo_object == 0);
		vdev_free(vd);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (error);
}
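
/*
 * The canonical usage pattern for the pair above (a sketch; the vdev
 * manipulation in the middle is whatever the caller needs to do):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *
 *	... add, remove, attach, or detach vdevs; on failure set 'error' ...
 *
 *	return (spa_vdev_exit(spa, NULL, txg, error));
 *
 * spa_vdev_exit() returns the error it is handed, so callers can funnel
 * both success and failure paths through it.  The 'vd' argument, when
 * non-NULL, is a vdev the caller wants freed once all I/O has quiesced.
 */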

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, RW_WRITER, FTAG);

	avl_remove(&spa_namespace_avl, spa);
	spa_strfree(spa->spa_name);
	spa->spa_name = spa_strdup(newname);
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync();

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with the
 * specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

void
sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
{
	int d;

	if (bp == NULL) {
		(void) snprintf(buf, len, "<NULL>");
		return;
	}

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(buf, len, "<hole>");
		return;
	}

	(void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
	    (u_longlong_t)BP_GET_LEVEL(bp),
	    dmu_ot[BP_GET_TYPE(bp)].ot_name,
	    (u_longlong_t)BP_GET_LSIZE(bp),
	    (u_longlong_t)BP_GET_PSIZE(bp));

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];
		(void) snprintf(buf + strlen(buf), len - strlen(buf),
		    "DVA[%d]=<%llu:%llx:%llx> ", d,
		    (u_longlong_t)DVA_GET_VDEV(dva),
		    (u_longlong_t)DVA_GET_OFFSET(dva),
		    (u_longlong_t)DVA_GET_ASIZE(dva));
	}

	(void) snprintf(buf + strlen(buf), len - strlen(buf),
	    "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name,
	    zio_compress_table[BP_GET_COMPRESS(bp)].ci_name,
	    BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
	    BP_IS_GANG(bp) ? "gang" : "contiguous",
	    (u_longlong_t)bp->blk_birth,
	    (u_longlong_t)bp->blk_fill,
	    (u_longlong_t)bp->blk_cksum.zc_word[0],
	    (u_longlong_t)bp->blk_cksum.zc_word[1],
	    (u_longlong_t)bp->blk_cksum.zc_word[2],
	    (u_longlong_t)bp->blk_cksum.zc_word[3]);
}
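
/*
 * Example of formatting a block pointer (hypothetical caller; the buffer
 * size shown is an assumption, any buffer large enough for the text works):
 *
 *	char buf[512];
 *
 *	sprintf_blkptr(buf, sizeof (buf), bp);
 *	dprintf("block: %s\n", buf);
 *
 * Given the format strings above, a formatted bp reads roughly like:
 *
 *	[L0 ZFS plain file] 20000L/20000P DVA[0]=<0:1234:20000>
 *	fletcher4 uncompressed BE contiguous birth=100 fill=1 cksum=...
 */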

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

krwlock_t *
spa_traverse_rwlock(spa_t *spa)
{
	return (&spa->spa_traverse_lock);
}

int
spa_traverse_wanted(spa_t *spa)
{
	return (spa->spa_traverse_wanted);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	/*
	 * Accessing the name requires holding either the namespace lock or the
	 * config lock, both of which are required to do a rename.
	 */
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa_config_held(spa, RW_READER) || spa_config_held(spa, RW_WRITER));

	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

int
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return how much space is allocated in the pool (i.e. the sum of all asize).
 */
uint64_t
spa_get_alloc(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_alloc);
}

/*
 * Return how much (raid-z inflated) space there is in the pool.
 */
uint64_t
spa_get_space(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/*
 * Return the amount of raid-z-deflated space in the pool.
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, so
	 * the total multiplier is 6x.
	 */
	return (lsize * 6);
}
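
/*
 * Worked example of the 6x worst case above (illustrative numbers): a
 * 512-byte block on RAID-Z needs a 512-byte parity sector, i.e. 1024 bytes
 * or 2x the logical size.  With up to SPA_DVAS_PER_BP (3) copies of the
 * block, the allocation budget becomes 2 * 3 = 6x, so spa_get_asize(spa,
 * 4096) returns 24576.
 */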

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of ZFS_VERSION == ZFS_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < ZFS_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	int sz = 0, i;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >> SPA_MINBLOCKSHIFT) *
		    vd->vdev_deflate_ratio;
	}
	return (sz);
}
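
/*
 * A sketch of the deflated-size arithmetic above (illustrative numbers,
 * assuming a 3-disk RAID-Z with 1.5x parity inflation): a 128K block is
 * allocated as asize 0x30000 (192K); with a vdev_deflate_ratio of about
 * 341 (512 scaled down by the 1.5x inflation), the contribution is
 * (0x30000 >> SPA_MINBLOCKSHIFT) * 341 = 384 * 341, roughly 128K, thereby
 * recovering the deflated size.  Summing over all DVAs yields the bp's
 * total deflated allocated size.
 */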

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_spare_t),
	    offsetof(spa_spare_t, spare_avl));

	spa_mode = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	spa_config_load();
}

void
spa_fini(void)
{
	spa_evict_all();

	zil_fini();
	dmu_fini();
	zio_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
}