/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to look up a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against SPA_MINREF, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock (per-spa crazy rwlock)
 *
 *	This is a special recursive rwlock, capable of being acquired from
 *	asynchronous threads.  It protects the spa_t from config changes,
 *	and must be held in the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * spa_config_cache_lock (per-spa mutex)
 *
 *	This mutex prevents the spa_config nvlist from being updated.  No
 *	other locks are required to obtain this lock, although implicitly you
 *	must have the namespace lock or non-zero refcount to have any kind
 *	of spa_t pointer at all.
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock
 *
 *	The namespace lock must always be taken before the config lock.
 *
 * The spa_namespace_lock and spa_config_cache_lock can be acquired directly
 * and are globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock is manipulated using the following functions:
 *
 *	spa_config_enter()	Acquire the config lock as RW_READER or
 *				RW_WRITER.  At least one reference on the spa_t
 *				must exist.
 *
 *	spa_config_exit()	Release the config lock.
 *
 *	spa_config_held()	Returns true if the config lock is currently
 *				held in the given state.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * The spa_name() function also requires either the spa_namespace_lock
 * or the spa_config_lock, as both are needed to do a rename.  spa_rename() is
 * also implemented within this file since it requires manipulation of the
 * namespace.
 */
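
/*
 * Illustrative sketch (not compiled): the canonical consumer pattern implied
 * by the lock ordering described above.  The pool name, tag, and the I/O
 * step are hypothetical placeholders; only the locking sequence is taken
 * from the rules documented in the comment above.
 */
#if 0
static void
spa_locking_example(const char *poolname, void *tag)
{
	spa_t *spa;

	/* spa_namespace_lock: required to look up and take the first ref */
	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(poolname)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return;
	}
	spa_open_ref(spa, tag);		/* spa_refcount: pin the spa_t */
	mutex_exit(&spa_namespace_lock);

	/* spa_config_lock as RW_READER: safe to perform I/O to the spa */
	spa_config_enter(spa, RW_READER, tag);
	/* ... issue I/O against the pool here ... */
	spa_config_exit(spa, tag);

	spa_close(spa, tag);	/* drop the reference; no locking required */
}
#endif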

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

#define	SPA_MINREF	5	/* spa_refcnt for an open-but-idle pool */

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	spa_t search, *spa;
	avl_index_t where;
	char c;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	search.spa_name = (char *)name;
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, const char *altroot)
{
	spa_t *spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	spa->spa_name = spa_strdup(name);
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;

	refcount_create(&spa->spa_refcount);
	refcount_create(&spa->spa_config_lock.scl_count);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT(spa->spa_scrub_thread == NULL);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	if (spa->spa_name)
		spa_strfree(spa->spa_name);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);
	refcount_destroy(&spa->spa_config_lock.scl_count);

	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
	mutex_destroy(&spa->spa_config_lock.scl_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_config_cache_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_props_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_remove(&spa->spa_refcount, tag);
}

363  * Check to see if the spa refcount is zero.  Must be called with
364  * spa_namespace_lock held.  We really compare against SPA_MINREF, which is the
365  * number of references acquired when opening a pool
366  */
367 boolean_t
368 spa_refcount_zero(spa_t *spa)
369 {
370 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
371 
372 	return (refcount_count(&spa->spa_refcount) == SPA_MINREF);
373 }
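
/*
 * For example: with SPA_MINREF == 5, an open-but-idle pool holds five
 * implicit references (kept by the DMU), so refcount_count() returns 5 and
 * spa_refcount_zero() above reports B_TRUE.  One spa_open_ref() consumer
 * reference raises the count to 6, and the pool no longer appears idle.
 */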

/*
 * ==========================================================================
 * SPA spare tracking
 * ==========================================================================
 */

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it has been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

typedef struct spa_spare {
	uint64_t	spare_guid;
	uint64_t	spare_pool;
	avl_node_t	spare_avl;
	int		spare_count;
} spa_spare_t;

static int
spa_spare_compare(const void *a, const void *b)
{
	const spa_spare_t *sa = a;
	const spa_spare_t *sb = b;

	if (sa->spare_guid < sb->spare_guid)
		return (-1);
	else if (sa->spare_guid > sb->spare_guid)
		return (1);
	else
		return (0);
}

void
spa_spare_add(vdev_t *vd)
{
	avl_index_t where;
	spa_spare_t search;
	spa_spare_t *spare;

	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);

	search.spare_guid = vd->vdev_guid;
	if ((spare = avl_find(&spa_spare_avl, &search, &where)) != NULL) {
		spare->spare_count++;
	} else {
		spare = kmem_zalloc(sizeof (spa_spare_t), KM_SLEEP);
		spare->spare_guid = vd->vdev_guid;
		spare->spare_count = 1;
		avl_insert(&spa_spare_avl, spare, where);
	}
	vd->vdev_isspare = B_TRUE;

	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	spa_spare_t search;
	spa_spare_t *spare;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = vd->vdev_guid;
	spare = avl_find(&spa_spare_avl, &search, &where);

	ASSERT(vd->vdev_isspare);
	ASSERT(spare != NULL);

	if (--spare->spare_count == 0) {
		avl_remove(&spa_spare_avl, spare);
		kmem_free(spare, sizeof (spa_spare_t));
	} else if (spare->spare_pool == spa_guid(vd->vdev_spa)) {
		spare->spare_pool = 0ULL;
	}

	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool)
{
	spa_spare_t search, *found;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = guid;
	found = avl_find(&spa_spare_avl, &search, &where);

	if (pool) {
		if (found)
			*pool = found->spare_pool;
		else
			*pool = 0ULL;
	}

	mutex_exit(&spa_spare_lock);

	return (found != NULL);
}

void
spa_spare_activate(vdev_t *vd)
{
	spa_spare_t search, *found;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);

	search.spare_guid = vd->vdev_guid;
	found = avl_find(&spa_spare_avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->spare_pool == 0ULL);

	found->spare_pool = spa_guid(vd->vdev_spa);
	mutex_exit(&spa_spare_lock);
}
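
/*
 * Illustrative sketch (not compiled): the spare lifecycle implied by the
 * functions above.  The vdev pointer is a hypothetical placeholder; the
 * call sequence itself follows the tracking scheme described earlier.
 */
#if 0
static void
spa_spare_lifecycle_example(vdev_t *vd)
{
	uint64_t pool;

	spa_spare_add(vd);		/* vdev becomes an (inactive) spare */
	ASSERT(spa_spare_exists(vd->vdev_guid, &pool) && pool == 0ULL);

	spa_spare_activate(vd);		/* spare now replaces a device */
	ASSERT(spa_spare_exists(vd->vdev_guid, &pool) &&
	    pool == spa_guid(vd->vdev_spa));

	spa_spare_remove(vd);		/* drop the last reference */
}
#endif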

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */

/*
 * Acquire the config lock.  The config lock is a special rwlock that allows for
 * recursive enters.  Because these enters come from the same thread as well as
 * asynchronous threads working on behalf of the owner, we must unilaterally
 * allow all read access as long as at least one reader holds the lock (even
 * if a write is requested).  This has the side effect of write starvation,
 * but write locks are extremely rare, and a solution to this problem would be
 * significantly more complex (if even possible).
 *
 * We would like to assert that the namespace lock isn't held, but this is a
 * valid use during create.
 */
void
spa_config_enter(spa_t *spa, krw_t rw, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	if (scl->scl_writer != curthread) {
		if (rw == RW_READER) {
			while (scl->scl_writer != NULL)
				cv_wait(&scl->scl_cv, &scl->scl_lock);
		} else {
			while (scl->scl_writer != NULL ||
			    !refcount_is_zero(&scl->scl_count))
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			scl->scl_writer = curthread;
		}
	}

	(void) refcount_add(&scl->scl_count, tag);

	mutex_exit(&scl->scl_lock);
}

/*
 * Release the spa config lock, notifying any waiters in the process.
 */
void
spa_config_exit(spa_t *spa, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	ASSERT(!refcount_is_zero(&scl->scl_count));
	if (refcount_remove(&scl->scl_count, tag) == 0) {
		cv_broadcast(&scl->scl_cv);
		scl->scl_writer = NULL;  /* OK in either case */
	}

	mutex_exit(&scl->scl_lock);
}

/*
 * Returns true if the config lock is held in the given manner.
 */
boolean_t
spa_config_held(spa_t *spa, krw_t rw)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;
	boolean_t held;

	mutex_enter(&scl->scl_lock);
	if (rw == RW_WRITER)
		held = (scl->scl_writer == curthread);
	else
		held = !refcount_is_zero(&scl->scl_count);
	mutex_exit(&scl->scl_lock);

	return (held);
}
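
/*
 * Illustrative sketch (not compiled): the recursion the comment above
 * permits.  A thread may re-enter the lock it already holds as reader;
 * each enter must be balanced by its own exit.  The tags are FTAG for
 * illustration only.
 */
#if 0
static void
spa_config_lock_recursion_example(spa_t *spa)
{
	spa_config_enter(spa, RW_READER, FTAG);
	ASSERT(spa_config_held(spa, RW_READER));

	/* Recursive enter from the same thread is unilaterally allowed. */
	spa_config_enter(spa, RW_READER, FTAG);

	spa_config_exit(spa, FTAG);
	spa_config_exit(spa, FTAG);
}
#endif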

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);

	/*
	 * Suspend scrub activity while we mess with the config.  We must do
	 * this after acquiring the namespace lock to avoid a 3-way deadlock
	 * with spa_scrub_stop() and the scrub thread.
	 */
	spa_scrub_suspend(spa);

	spa_config_enter(spa, RW_WRITER, spa);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_dirty_list)) {
		config_changed = B_TRUE;
		spa_scrub_restart(spa, txg);
	}

	spa_config_exit(spa, spa);

	/*
	 * Allow scrubbing to resume.
	 */
	spa_scrub_resume(spa);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl.smo_object == 0);
		vdev_free(vd);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (error);
}
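
/*
 * Illustrative sketch (not compiled): the canonical caller pattern for the
 * two functions above.  The vdev-manipulation step is a hypothetical
 * placeholder; the enter/exit bracketing and error plumbing follow the
 * contract documented above.
 */
#if 0
static int
spa_vdev_change_example(spa_t *spa)
{
	uint64_t txg;
	int error = 0;

	txg = spa_vdev_enter(spa);	/* namespace + config lock, writer */

	/* ... add, remove, attach, or detach vdevs here, setting error ... */

	/* Always unwinds the locks; hands 'error' back to the caller. */
	return (spa_vdev_exit(spa, NULL, txg, error));
}
#endif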

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, RW_WRITER, FTAG);

	avl_remove(&spa_namespace_avl, spa);
	spa_strfree(spa->spa_name);
	spa->spa_name = spa_strdup(newname);
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync();

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with the
 * specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

void
sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
{
	int d;

	if (bp == NULL) {
		(void) snprintf(buf, len, "<NULL>");
		return;
	}

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(buf, len, "<hole>");
		return;
	}

	(void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
	    (u_longlong_t)BP_GET_LEVEL(bp),
	    dmu_ot[BP_GET_TYPE(bp)].ot_name,
	    (u_longlong_t)BP_GET_LSIZE(bp),
	    (u_longlong_t)BP_GET_PSIZE(bp));

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];
		(void) snprintf(buf + strlen(buf), len - strlen(buf),
		    "DVA[%d]=<%llu:%llx:%llx> ", d,
		    (u_longlong_t)DVA_GET_VDEV(dva),
		    (u_longlong_t)DVA_GET_OFFSET(dva),
		    (u_longlong_t)DVA_GET_ASIZE(dva));
	}

	(void) snprintf(buf + strlen(buf), len - strlen(buf),
	    "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name,
	    zio_compress_table[BP_GET_COMPRESS(bp)].ci_name,
	    BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
	    BP_IS_GANG(bp) ? "gang" : "contiguous",
	    (u_longlong_t)bp->blk_birth,
	    (u_longlong_t)bp->blk_fill,
	    (u_longlong_t)bp->blk_cksum.zc_word[0],
	    (u_longlong_t)bp->blk_cksum.zc_word[1],
	    (u_longlong_t)bp->blk_cksum.zc_word[2],
	    (u_longlong_t)bp->blk_cksum.zc_word[3]);
}
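
/*
 * For illustration, a hypothetical single-DVA block might format as
 * (one line in the actual buffer; all values here are made up):
 *
 *   [L0 DMU objset] 20000L/8000P DVA[0]=<0:12000:8000> fletcher4 lzjb LE
 *   contiguous birth=10 fill=1 cksum=1a2b:3c4d:5e6f:7a8b
 *
 * The type, checksum, and compression names come from dmu_ot[],
 * zio_checksum_table[], and zio_compress_table[] respectively; sizes and
 * offsets print in hex, the vdev id in decimal.
 */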

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

krwlock_t *
spa_traverse_rwlock(spa_t *spa)
{
	return (&spa->spa_traverse_lock);
}

int
spa_traverse_wanted(spa_t *spa)
{
	return (spa->spa_traverse_wanted);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	/*
	 * Accessing the name requires holding either the namespace lock or the
	 * config lock, both of which are required to do a rename.
	 */
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa_config_held(spa, RW_READER) || spa_config_held(spa, RW_WRITER));

	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

int
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return how much space is allocated in the pool (i.e. sum of all asize).
 */
uint64_t
spa_get_alloc(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_alloc);
}

/*
 * Return how much (raid-z inflated) space there is in the pool.
 */
uint64_t
spa_get_space(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/*
 * Return the amount of raid-z-deflated space in the pool.
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, and
	 * we have to multiply by a total of 6x.
	 */
	return (lsize * 6);
}
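
/*
 * Worked example of the 6x bound above: a 128K logical write may be stored
 * as worst-case 512-byte RAID-Z blocks (2 * 128K = 256K per copy) in up to
 * SPA_DVAS_PER_BP == 3 places, so spa_get_asize(spa, 131072) returns
 * 6 * 131072 = 786432 bytes.
 */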

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	int sz = 0, i;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >> SPA_MINBLOCKSHIFT) *
		    vd->vdev_deflate_ratio;
	}
	return (sz);
}
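
/*
 * For illustration: on a deflated pool, a DVA with a 0x6000-byte asize on a
 * top-level vdev whose vdev_deflate_ratio happens to be 512 contributes
 * (0x6000 >> SPA_MINBLOCKSHIFT) * 512 = 48 * 512 = 24576 bytes to the total.
 * The numbers are hypothetical; the arithmetic mirrors the loop above.
 */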

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_spare_t),
	    offsetof(spa_spare_t, spare_avl));

	spa_mode = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	spa_config_load();
}

void
spa_fini(void)
{
	spa_evict_all();

	zil_fini();
	dmu_fini();
	zio_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
}