spa.c revision 92241e0b80813d0b83c08e730a29b9d1831794fc
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>

#ifdef	_KERNEL
#include <sys/zone.h>
#include <sys/bootprops.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

enum zti_modes {
	zti_mode_fixed,			/* value is # of threads (min 1) */
	zti_mode_online_percent,	/* value is % of online CPUs */
	zti_mode_tune,			/* fill from zio_taskq_tune_* */
	zti_nmodes
};

#define	ZTI_THREAD_FIX(n)	{ zti_mode_fixed, (n) }
#define	ZTI_THREAD_PCT(n)	{ zti_mode_online_percent, (n) }
#define	ZTI_THREAD_TUNE		{ zti_mode_tune, 0 }

#define	ZTI_THREAD_ONE		ZTI_THREAD_FIX(1)

typedef struct zio_taskq_info {
	const char *zti_name;
	struct {
		enum zti_modes zti_mode;
		uint_t zti_value;
	} zti_nthreads[ZIO_TASKQ_TYPES];
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
				"issue",		"intr"
};

const zio_taskq_info_t zio_taskqs[ZIO_TYPES] = {
	/*			ISSUE			INTR		*/
	{ "spa_zio_null",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_read",	{ ZTI_THREAD_FIX(8),	ZTI_THREAD_TUNE } },
	{ "spa_zio_write",	{ ZTI_THREAD_TUNE,	ZTI_THREAD_FIX(8) } },
	{ "spa_zio_free",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_claim",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_ioctl",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
};
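
/*
 * Editorial note (not part of the original source): spa_activate() consumes
 * this table, creating one taskq per (zio type, taskq type) pair and naming
 * it "<zti_name>_<taskq type>".  For example, the read row above should
 * yield a "spa_zio_read_issue" taskq with 8 fixed threads and a
 * "spa_zio_read_intr" taskq sized from the zio_taskq_tune_* tunables below.
 */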

enum zti_modes zio_taskq_tune_mode = zti_mode_online_percent;
uint_t zio_taskq_tune_value = 80;	/* #threads = 80% of # online CPUs */

static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}
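
/*
 * Editorial sketch (not part of the original source): a call such as
 *
 *	spa_prop_add_list(nvl, ZPOOL_PROP_CAPACITY, NULL, 42, ZPROP_SRC_NONE);
 *
 * adds an entry to 'nvl' of roughly this shape:
 *
 *	capacity = {
 *		source = ZPROP_SRC_NONE		(uint64)
 *		value = 42			(uint64)
 *	}
 */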

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size;
	uint64_t alloc;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (spa->spa_root_vdev != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    spa->spa_root_vdev->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If there is no pool property object, there are no more properties. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
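
/*
 * Editorial usage sketch (not part of the original source): spa_prop_get()
 * allocates the nvlist on behalf of the caller, so a typical consumer does
 * roughly:
 *
 *	nvlist_t *props;
 *	if (spa_prop_get(spa, &props) == 0) {
 *		... inspect props ...
 *		nvlist_free(props);
 *	}
 *
 * On any error other than ENOENT from the ZAP walk, the list is freed and
 * *nvp is reset to NULL before the error is returned.
 */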

/*
 * Validate the given pool properties nvlist and modify it in place so that
 * the values are in the form in which they will be set (e.g. the bootfs
 * name is replaced by its dataset object number).
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_hold(strval, FTAG, &os))
					break;

				/* Must be ZPL and not gzip compressed. */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = ENOTSUP;
				} else if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = ENOTSUP;
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
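
/*
 * Editorial example (not part of the original source) of the bootfs rewrite
 * above: a caller-supplied pair such as
 *
 *	bootfs = "tank/ROOT/be"		(DATA_TYPE_STRING)
 *
 * is validated and then replaced in 'props' by the dataset's object number,
 * e.g.
 *
 *	bootfs = 21			(DATA_TYPE_UINT64)
 *
 * which is the form in which the property is ultimately synced out.
 */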

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
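
/*
 * Editorial summary (not part of the original source) of the mapping above:
 *
 *	cachefile value		resulting scd_path
 *	---------------		------------------
 *	""			spa_config_path (the default cache file)
 *	"none"			NULL (no cache file is written)
 *	"/some/path"		a private copy of that path
 */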

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem;
	boolean_t need_sync = B_FALSE;
	zpool_prop_t prop;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		if ((prop = zpool_name_to_prop(
		    nvpair_name(elem))) == ZPROP_INVAL)
			return (EINVAL);

		if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
			continue;

		need_sync = B_TRUE;
		break;
	}

	if (need_sync)
		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
		    spa, nvp, 3));
	else
		return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	for (int t = 0; t < ZIO_TYPES; t++) {
		const zio_taskq_info_t *ztip = &zio_taskqs[t];
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			enum zti_modes mode = ztip->zti_nthreads[q].zti_mode;
			uint_t value = ztip->zti_nthreads[q].zti_value;
			char name[32];

			(void) snprintf(name, sizeof (name),
			    "%s_%s", ztip->zti_name, zio_taskq_types[q]);

			if (mode == zti_mode_tune) {
				mode = zio_taskq_tune_mode;
				value = zio_taskq_tune_value;
				if (mode == zti_mode_tune)
					mode = zti_mode_online_percent;
			}

			switch (mode) {
			case zti_mode_fixed:
				ASSERT3U(value, >=, 1);
				value = MAX(value, 1);

				spa->spa_zio_taskq[t][q] = taskq_create(name,
				    value, maxclsyspri, 50, INT_MAX,
				    TASKQ_PREPOPULATE);
				break;

			case zti_mode_online_percent:
				spa->spa_zio_taskq[t][q] = taskq_create(name,
				    value, maxclsyspri, 50, INT_MAX,
				    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
				break;

			case zti_mode_tune:
			default:
				panic("unrecognized mode for "
				    "zio_taskqs[%u]->zti_nthreads[%u] (%u:%u) "
				    "in spa_activate()",
				    t, q, mode, value);
				break;
			}
		}
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
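
/*
 * Editorial sketch (not part of the original source) of the recursion above:
 * a config nvlist describing, say,
 *
 *	root
 *	  mirror
 *	    disk c0t0d0
 *	    disk c0t1d0
 *
 * is turned into an identically shaped tree of vdev_t's, each allocated by
 * vdev_alloc() in the CLOSED state; leaf vdevs terminate the recursion.
 */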

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	ddt_unload(spa);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
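
/*
 * Editorial sketch (not part of the original source) of the on-disk layout
 * assumed by load_nvlist(): the object's bonus buffer holds the packed size
 * as a single uint64_t, and the object body holds the packed nvlist itself:
 *
 *	bonus:	[ nvsize ]
 *	data:	[ nvsize bytes of packed nvlist ]
 */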

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Load the slog device state from the config object since it's possible
 * that the label does not contain the most up-to-date information.
 */
void
spa_load_log_state(spa_t *spa, nvlist_t *nv)
{
	vdev_t *ovd, *rvd = spa->spa_root_vdev;

	/*
	 * Load the original root vdev tree from the passed config.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	VERIFY(spa_config_parse(spa, &ovd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *cvd = rvd->vdev_child[c];
		if (cvd->vdev_islog)
			vdev_load_log_state(cvd, ovd->vdev_child[c]);
	}
	vdev_free(ovd);
	spa_config_exit(spa, SCL_ALL, FTAG);
}
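
/*
 * Editorial note (not part of the original source): the loop above pairs
 * rvd->vdev_child[c] with ovd->vdev_child[c] by index, i.e. it relies on
 * the passed-in config describing the same top-level vdevs in the same
 * order as the current root vdev tree.
 */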

/*
 * Check for missing log devices
 */
int
spa_check_logs(spa_t *spa)
{
	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
		    DS_FIND_CHILDREN)) {
			spa->spa_log_state = SPA_LOG_MISSING;
			return (1);
		}
		break;
	}
	return (0);
}

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
	for (int i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	if (zio->io_error)
		return;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
}

typedef struct spa_load_error {
	uint64_t	sle_metadata_count;
	uint64_t	sle_data_count;
} spa_load_error_t;

static void
spa_load_verify_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;

	if (error) {
		if ((BP_GET_LEVEL(bp) != 0 || dmu_ot[type].ot_metadata) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_add_64(&sle->sle_metadata_count, 1);
		else
			atomic_add_64(&sle->sle_data_count, 1);
	}
	zio_data_buf_free(zio->io_data, zio->io_size);
}

/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	if (bp != NULL) {
		zio_t *rio = arg;
		size_t size = BP_GET_PSIZE(bp);
		void *data = zio_data_buf_alloc(size);

		zio_nowait(zio_read(rio, spa, bp, data, size,
		    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
		    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
		    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
	}
	return (0);
}

static int
spa_load_verify(spa_t *spa)
{
	zio_t *rio;
	spa_load_error_t sle = { 0 };
	zpool_rewind_policy_t policy;
	boolean_t verify_ok = B_FALSE;
	int error;

	rio = zio_root(spa, NULL, &sle,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	error = traverse_pool(spa, spa_load_verify_cb, rio,
	    spa->spa_verify_min_txg);

	(void) zio_wait(rio);

	zpool_get_rewind_policy(spa->spa_config, &policy);

	spa->spa_load_meta_errors = sle.sle_metadata_count;
	spa->spa_load_data_errors = sle.sle_data_count;

	if (!error && sle.sle_metadata_count <= policy.zrp_maxmeta &&
	    sle.sle_data_count <= policy.zrp_maxdata) {
		verify_ok = B_TRUE;
		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
	}

	if (error) {
		if (error != ENXIO && error != EIO)
			error = EIO;
		return (error);
	}

	return (verify_ok ? 0 : EIO);
}
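
/*
 * Editorial example (not part of the original source) of the decision above:
 * with a rewind policy of, say, zrp_maxmeta == 0 and zrp_maxdata == 100, a
 * clean traversal with no metadata errors and 3 data-block errors verifies
 * OK, while a single metadata error, or any traversal error at all, fails
 * the load (normalized to EIO unless it was already ENXIO or EIO).
 */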

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvconfig, *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	uint64_t autoreplace = 0;
	int orig_mode = spa->spa_mode;
	char *ereport = FM_EREPORT_ZFS_POOL;
	nvlist_t *config = spa->spa_config;

	/*
	 * If this is an untrusted config, access the pool in read-only mode.
	 * This prevents things like resilvering recently removed devices.
	 */
	if (!mosconfig)
		spa->spa_mode = FREAD;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Create "The Godfather" zio to hold all async IOs
	 */
	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	/*
	 * We need to validate the vdev labels against the configuration that
	 * we have in hand, which is dependent on the setting of mosconfig. If
	 * mosconfig is true then we're validating the vdev labels based on
	 * that config. Otherwise, we're validating against the cached config
	 * (zpool.cache) that was read when we loaded the zfs module, and then
	 * later we will recursively call spa_load() and validate against
	 * the vdev config.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_validate(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(NULL, rvd, ub);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
	    TXG_INITIAL : spa_last_synced_txg(spa) - TXG_DEFER_SIZE;
	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
	spa->spa_claim_max_txg = spa->spa_first_txg;

	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		uint64_t hostid;

		if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(nvconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

#ifdef	_KERNEL
			myhostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
			/*
			 * We're emulating the system's hostid in userland, so
			 * we can't use zone_get_hostid().
			 */
			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif	/* _KERNEL */
			if (hostid != 0 && myhostid != 0 &&
			    hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, nvconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa, orig_mode);

		return (spa_load(spa, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_deferred_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		spa->spa_autoreplace = (autoreplace != 0);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOEXPAND),
		    sizeof (uint64_t), 1, &spa->spa_autoexpand);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO),
		    sizeof (uint64_t), 1, &spa->spa_dedup_ditto);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
		spa_check_removed(spa->spa_root_vdev);
		/*
		 * For the import case, this is done in spa_import(), because
		 * at this point we're using the spare definitions from
		 * the MOS config, not necessarily from the userland config.
		 */
		if (state != SPA_LOAD_IMPORT) {
			spa_aux_check_removed(&spa->spa_spares);
			spa_aux_check_removed(&spa->spa_l2cache);
		}
	}

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Load the DDTs (dedup tables).
	 */
	error = ddt_load(spa);
	if (error != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	spa_update_dspace(spa);

	if (state != SPA_LOAD_TRYIMPORT) {
		error = spa_load_verify(spa);
		if (error) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			goto out;
		}
	}

	/*
	 * Load the intent log state and check log integrity.
	 */
	VERIFY(nvlist_lookup_nvlist(nvconfig, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	spa_load_log_state(spa, nvroot);
	nvlist_free(nvconfig);

	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}

	if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
	    spa->spa_load_max_txg == UINT64_MAX)) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;

		ASSERT(state != SPA_LOAD_TRYIMPORT);

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
		 * invoked from zil_claim_log_block()'s i/o done callback.
		 * Price of rollback is that we abandon the log.
		 */
		spa->spa_claiming = B_TRUE;

		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_claiming = B_FALSE;

		spa->spa_log_state = SPA_LOG_GOOD;
		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.  We sync up to the highest
		 * claimed log block birth time so that claimed log blocks
		 * don't appear to be from the future.  spa_claim_max_txg
		 * will have been set for us by either zil_check_log_chain()
		 * (invoked from spa_check_logs()) or zil_claim() above.
		 */
		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 *
		 * If spa_load_verbatim is true, trust the current
		 * in-core spa_config and update the disk labels.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT || spa->spa_load_verbatim ||
		    state == SPA_LOAD_RECOVER)
			need_update = B_TRUE;

		for (int c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		/*
		 * Check all DTLs to see if anything needs resilvering.
		 */
		if (vdev_resilver_needed(rvd, NULL, NULL))
			spa_async_request(spa, SPA_ASYNC_RESILVER);

		/*
		 * Delete any inconsistent datasets.
		 */
		(void) dmu_objset_find(spa_name(spa),
		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);

		/*
		 * Clean up any stale temporary dataset userrefs.
		 */
		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
	}

	error = 0;
out:

	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}

static int
spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
{
	spa_unload(spa);
	spa_deactivate(spa);

	spa->spa_load_max_txg--;

	spa_activate(spa, spa_mode_global);
	spa_async_suspend(spa);

	return (spa_load(spa, state, mosconfig));
}

static int
spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
    uint64_t max_request, boolean_t extreme)
{
	nvlist_t *config = NULL;
	int load_error, rewind_error;
	uint64_t safe_rollback_txg;
	uint64_t min_txg;

	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER)
		spa->spa_load_max_txg = spa->spa_load_txg;
	else
		spa->spa_load_max_txg = max_request;

	load_error = rewind_error = spa_load(spa, state, mosconfig);
	if (load_error == 0)
		return (0);

	if (spa->spa_root_vdev != NULL)
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;

	/* specific txg requested */
	if (spa->spa_load_max_txg != UINT64_MAX && !extreme) {
		nvlist_free(config);
		return (load_error);
	}

	/* Price of rolling back is discarding txgs, including log */
	if (state == SPA_LOAD_RECOVER)
		spa->spa_log_state = SPA_LOG_CLEAR;

	spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
	safe_rollback_txg = spa->spa_uberblock.ub_txg - TXG_DEFER_SIZE;

	min_txg = extreme ? TXG_INITIAL : safe_rollback_txg;
	while (rewind_error && (spa->spa_uberblock.ub_txg >= min_txg)) {
		if (spa->spa_load_max_txg < safe_rollback_txg)
			spa->spa_extreme_rewind = B_TRUE;
		rewind_error = spa_load_retry(spa, state, mosconfig);
	}

	if (config)
		spa_rewind_data_to_nvlist(spa, config);

	spa->spa_extreme_rewind = B_FALSE;
	spa->spa_load_max_txg = UINT64_MAX;

	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
		spa_config_set(spa, config);

	return (state == SPA_LOAD_RECOVER ? rewind_error : load_error);
}
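
/*
 * Editorial sketch (not part of the original source) of the rewind walk
 * above: if the newest uberblock is txg 100 and TXG_DEFER_SIZE is 2, a
 * failed load is retried at txg 99, then 98 (the "safe" window ending at
 * safe_rollback_txg); only with extreme rewind does the loop keep stepping
 * back toward TXG_INITIAL, setting spa_extreme_rewind once
 * spa_load_max_txg drops below the safe rollback txg.
 */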
1871
1872/*
1873 * Pool Open/Import
1874 *
1875 * The import case is identical to an open except that the configuration is sent
1876 * down from userland, instead of grabbed from the configuration cache.  For the
1877 * case of an open, the pool configuration will exist in the
1878 * POOL_STATE_UNINITIALIZED state.
1879 *
1880 * The stats information (gen/count/ustats) is used to gather vdev statistics at
1881 * the same time open the pool, without having to keep around the spa_t in some
1882 * ambiguous state.
1883 */
1884static int
1885spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
1886    nvlist_t **config)
1887{
1888	spa_t *spa;
1889	boolean_t norewind;
1890	boolean_t extreme;
1891	zpool_rewind_policy_t policy;
1892	spa_load_state_t state = SPA_LOAD_OPEN;
1893	int error;
1894	int locked = B_FALSE;
1895
1896	*spapp = NULL;
1897
1898	zpool_get_rewind_policy(nvpolicy, &policy);
1899	if (policy.zrp_request & ZPOOL_DO_REWIND)
1900		state = SPA_LOAD_RECOVER;
1901	norewind = (policy.zrp_request == ZPOOL_NO_REWIND);
1902	extreme = ((policy.zrp_request & ZPOOL_EXTREME_REWIND) != 0);
1903
1904	/*
1905	 * As disgusting as this is, we need to support recursive calls to this
1906	 * function because dsl_dir_open() is called during spa_load(), and ends
1907	 * up calling spa_open() again.  The real fix is to figure out how to
1908	 * avoid dsl_dir_open() calling this in the first place.
1909	 */
1910	if (mutex_owner(&spa_namespace_lock) != curthread) {
1911		mutex_enter(&spa_namespace_lock);
1912		locked = B_TRUE;
1913	}
1914
1915	if ((spa = spa_lookup(pool)) == NULL) {
1916		if (locked)
1917			mutex_exit(&spa_namespace_lock);
1918		return (ENOENT);
1919	}
1920
1921	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
1922
1923		spa_activate(spa, spa_mode_global);
1924
1925		if (spa->spa_last_open_failed && norewind) {
1926			if (config != NULL && spa->spa_config)
1927				VERIFY(nvlist_dup(spa->spa_config,
1928				    config, KM_SLEEP) == 0);
1929			spa_deactivate(spa);
1930			if (locked)
1931				mutex_exit(&spa_namespace_lock);
1932			return (spa->spa_last_open_failed);
1933		}
1934
1935		if (state != SPA_LOAD_RECOVER)
1936			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
1937
1938		error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
1939		    extreme);
1940
1941		if (error == EBADF) {
1942			/*
1943			 * If vdev_validate() returns failure (indicated by
1944			 * EBADF), then one of the vdev labels indicates that
1945			 * the pool has been exported or destroyed.  If this
1946			 * is the case, the config cache is out of sync and we
1947			 * should remove the pool from the namespace.
1948			 */
1949			spa_unload(spa);
1950			spa_deactivate(spa);
1951			spa_config_sync(spa, B_TRUE, B_TRUE);
1952			spa_remove(spa);
1953			if (locked)
1954				mutex_exit(&spa_namespace_lock);
1955			return (ENOENT);
1956		}
1957
1958		if (error) {
1959			/*
1960			 * We can't open the pool, but we still have useful
1961			 * information: the state of each vdev after the
1962			 * attempted vdev_open().  Return this to the user.
1963			 */
1964			if (config != NULL && spa->spa_config)
1965				VERIFY(nvlist_dup(spa->spa_config, config,
1966				    KM_SLEEP) == 0);
1967			spa_unload(spa);
1968			spa_deactivate(spa);
1969			spa->spa_last_open_failed = error;
1970			if (locked)
1971				mutex_exit(&spa_namespace_lock);
1972			*spapp = NULL;
1973			return (error);
1974		}
1975
1976	}
1977
1978	spa_open_ref(spa, tag);
1979
1980	spa->spa_last_open_failed = 0;
1981
1982	if (config != NULL)
1983		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
1984
1985	spa->spa_last_ubsync_txg = 0;
1986	spa->spa_load_txg = 0;
1987
1988	if (locked)
1989		mutex_exit(&spa_namespace_lock);
1990
1991	*spapp = spa;
1992
1993	return (0);
1994}
1995
1996int
1997spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
1998    nvlist_t **config)
1999{
2000	return (spa_open_common(name, spapp, tag, policy, config));
2001}
2002
2003int
2004spa_open(const char *name, spa_t **spapp, void *tag)
2005{
2006	return (spa_open_common(name, spapp, tag, NULL, NULL));
2007}
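
/*
 * Example usage (illustrative; "tank" is a hypothetical pool name).  Every
 * successful spa_open() must be balanced by a spa_close() on the same tag:
 *
 *	spa_t *spa;
 *	int error;
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) != 0)
 *		return (error);
 *	... operate on the open pool ...
 *	spa_close(spa, FTAG);
 */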
2008
2009/*
2010 * Lookup the given spa_t, incrementing the inject count in the process,
2011 * preventing it from being exported or destroyed.
2012 */
2013spa_t *
2014spa_inject_addref(char *name)
2015{
2016	spa_t *spa;
2017
2018	mutex_enter(&spa_namespace_lock);
2019	if ((spa = spa_lookup(name)) == NULL) {
2020		mutex_exit(&spa_namespace_lock);
2021		return (NULL);
2022	}
2023	spa->spa_inject_ref++;
2024	mutex_exit(&spa_namespace_lock);
2025
2026	return (spa);
2027}
2028
2029void
2030spa_inject_delref(spa_t *spa)
2031{
2032	mutex_enter(&spa_namespace_lock);
2033	spa->spa_inject_ref--;
2034	mutex_exit(&spa_namespace_lock);
2035}
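
/*
 * Example (illustrative; poolname is hypothetical): a fault injection
 * handler brackets its work with the reference so the pool cannot be
 * exported or destroyed underneath it:
 *
 *	spa_t *spa;
 *
 *	if ((spa = spa_inject_addref(poolname)) != NULL) {
 *		... arm or fire the injection ...
 *		spa_inject_delref(spa);
 *	}
 */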
2036
2037/*
2038 * Add hot spare device information to the nvlist.
2039 */
2040static void
2041spa_add_spares(spa_t *spa, nvlist_t *config)
2042{
2043	nvlist_t **spares;
2044	uint_t i, nspares;
2045	nvlist_t *nvroot;
2046	uint64_t guid;
2047	vdev_stat_t *vs;
2048	uint_t vsc;
2049	uint64_t pool;
2050
2051	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2052
2053	if (spa->spa_spares.sav_count == 0)
2054		return;
2055
2056	VERIFY(nvlist_lookup_nvlist(config,
2057	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2058	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
2059	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
2060	if (nspares != 0) {
2061		VERIFY(nvlist_add_nvlist_array(nvroot,
2062		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2063		VERIFY(nvlist_lookup_nvlist_array(nvroot,
2064		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
2065
2066		/*
2067		 * Go through and find any spares which have since been
2068		 * repurposed as active spares.  If this is the case, update
2069		 * their status appropriately.
2070		 */
2071		for (i = 0; i < nspares; i++) {
2072			VERIFY(nvlist_lookup_uint64(spares[i],
2073			    ZPOOL_CONFIG_GUID, &guid) == 0);
2074			if (spa_spare_exists(guid, &pool, NULL) &&
2075			    pool != 0ULL) {
2076				VERIFY(nvlist_lookup_uint64_array(
2077				    spares[i], ZPOOL_CONFIG_STATS,
2078				    (uint64_t **)&vs, &vsc) == 0);
2079				vs->vs_state = VDEV_STATE_CANT_OPEN;
2080				vs->vs_aux = VDEV_AUX_SPARED;
2081			}
2082		}
2083	}
2084}
2085
2086/*
2087 * Add l2cache device information to the nvlist, including vdev stats.
2088 */
2089static void
2090spa_add_l2cache(spa_t *spa, nvlist_t *config)
2091{
2092	nvlist_t **l2cache;
2093	uint_t i, j, nl2cache;
2094	nvlist_t *nvroot;
2095	uint64_t guid;
2096	vdev_t *vd;
2097	vdev_stat_t *vs;
2098	uint_t vsc;
2099
2100	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2101
2102	if (spa->spa_l2cache.sav_count == 0)
2103		return;
2104
2105	VERIFY(nvlist_lookup_nvlist(config,
2106	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2107	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
2108	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
2109	if (nl2cache != 0) {
2110		VERIFY(nvlist_add_nvlist_array(nvroot,
2111		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2112		VERIFY(nvlist_lookup_nvlist_array(nvroot,
2113		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
2114
2115		/*
2116		 * Update level 2 cache device stats.
2117		 */
2118
2119		for (i = 0; i < nl2cache; i++) {
2120			VERIFY(nvlist_lookup_uint64(l2cache[i],
2121			    ZPOOL_CONFIG_GUID, &guid) == 0);
2122
2123			vd = NULL;
2124			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
2125				if (guid ==
2126				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
2127					vd = spa->spa_l2cache.sav_vdevs[j];
2128					break;
2129				}
2130			}
2131			ASSERT(vd != NULL);
2132
2133			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
2134			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
2135			vdev_get_stats(vd, vs);
2136		}
2137	}
2138}
2139
2140int
2141spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
2142{
2143	int error;
2144	spa_t *spa;
2145
2146	*config = NULL;
2147	error = spa_open_common(name, &spa, FTAG, NULL, config);
2148
2149	if (spa != NULL) {
2150		/*
2151		 * This still leaves a window of inconsistency where the spares
2152		 * or l2cache devices could change and the config would be
2153		 * self-inconsistent.
2154		 */
2155		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
2156
2157		if (*config != NULL) {
2158			VERIFY(nvlist_add_uint64(*config,
2159			    ZPOOL_CONFIG_ERRCOUNT,
2160			    spa_get_errlog_size(spa)) == 0);
2161
2162			if (spa_suspended(spa))
2163				VERIFY(nvlist_add_uint64(*config,
2164				    ZPOOL_CONFIG_SUSPENDED,
2165				    spa->spa_failmode) == 0);
2166
2167			spa_add_spares(spa, *config);
2168			spa_add_l2cache(spa, *config);
2169		}
2170	}
2171
2172	/*
2173	 * We want to get the alternate root even for faulted pools, so we cheat
2174	 * and call spa_lookup() directly.
2175	 */
2176	if (altroot) {
2177		if (spa == NULL) {
2178			mutex_enter(&spa_namespace_lock);
2179			spa = spa_lookup(name);
2180			if (spa)
2181				spa_altroot(spa, altroot, buflen);
2182			else
2183				altroot[0] = '\0';
2184			spa = NULL;
2185			mutex_exit(&spa_namespace_lock);
2186		} else {
2187			spa_altroot(spa, altroot, buflen);
2188		}
2189	}
2190
2191	if (spa != NULL) {
2192		spa_config_exit(spa, SCL_CONFIG, FTAG);
2193		spa_close(spa, FTAG);
2194	}
2195
2196	return (error);
2197}
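
/*
 * Example (illustrative; "tank" is a hypothetical pool name): a caller
 * retrieving pool stats can read the error count added above, and must
 * free the returned config when done:
 *
 *	nvlist_t *config;
 *	uint64_t errcount;
 *
 *	if (spa_get_stats("tank", &config, NULL, 0) == 0 && config != NULL) {
 *		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
 *		    &errcount) == 0)
 *			... report errcount ...
 *		nvlist_free(config);
 *	}
 */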
2198
2199/*
2200 * Validate that the auxiliary device array is well formed.  We must have an
2201 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
2202 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
2203 * specified, as long as they are well formed.
2204 */
2205static int
2206spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
2207    spa_aux_vdev_t *sav, const char *config, uint64_t version,
2208    vdev_labeltype_t label)
2209{
2210	nvlist_t **dev;
2211	uint_t i, ndev;
2212	vdev_t *vd;
2213	int error;
2214
2215	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2216
2217	/*
2218	 * It's acceptable to have no devs specified.
2219	 */
2220	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
2221		return (0);
2222
2223	if (ndev == 0)
2224		return (EINVAL);
2225
2226	/*
2227	 * Make sure the pool is formatted with a version that supports this
2228	 * device type.
2229	 */
2230	if (spa_version(spa) < version)
2231		return (ENOTSUP);
2232
2233	/*
2234	 * Set the pending device list so we correctly handle device in-use
2235	 * checking.
2236	 */
2237	sav->sav_pending = dev;
2238	sav->sav_npending = ndev;
2239
2240	for (i = 0; i < ndev; i++) {
2241		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
2242		    mode)) != 0)
2243			goto out;
2244
2245		if (!vd->vdev_ops->vdev_op_leaf) {
2246			vdev_free(vd);
2247			error = EINVAL;
2248			goto out;
2249		}
2250
2251		/*
2252		 * The L2ARC currently only supports disk devices in
2253		 * kernel context; for user-level testing we allow any type.
2254		 */
2255#ifdef _KERNEL
2256		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
2257		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
2258			error = ENOTBLK;
2259			goto out;
2260		}
2261#endif
2262		vd->vdev_top = vd;
2263
2264		if ((error = vdev_open(vd)) == 0 &&
2265		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
2266			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
2267			    vd->vdev_guid) == 0);
2268		}
2269
2270		vdev_free(vd);
2271
2272		if (error &&
2273		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
2274			goto out;
2275		else
2276			error = 0;
2277	}
2278
2279out:
2280	sav->sav_pending = NULL;
2281	sav->sav_npending = 0;
2282	return (error);
2283}
2284
2285static int
2286spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
2287{
2288	int error;
2289
2290	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
2291
2292	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
2293	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
2294	    VDEV_LABEL_SPARE)) != 0) {
2295		return (error);
2296	}
2297
2298	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
2299	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
2300	    VDEV_LABEL_L2CACHE));
2301}
2302
2303static void
2304spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
2305    const char *config)
2306{
2307	int i;
2308
2309	if (sav->sav_config != NULL) {
2310		nvlist_t **olddevs;
2311		uint_t oldndevs;
2312		nvlist_t **newdevs;
2313
2314		/*
2315		 * Generate a new dev list by concatenating the new devs
2316		 * with the current dev list.
2317		 */
2318		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
2319		    &olddevs, &oldndevs) == 0);
2320
2321		newdevs = kmem_alloc(sizeof (void *) *
2322		    (ndevs + oldndevs), KM_SLEEP);
2323		for (i = 0; i < oldndevs; i++)
2324			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
2325			    KM_SLEEP) == 0);
2326		for (i = 0; i < ndevs; i++)
2327			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
2328			    KM_SLEEP) == 0);
2329
2330		VERIFY(nvlist_remove(sav->sav_config, config,
2331		    DATA_TYPE_NVLIST_ARRAY) == 0);
2332
2333		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
2334		    config, newdevs, ndevs + oldndevs) == 0);
2335		for (i = 0; i < oldndevs + ndevs; i++)
2336			nvlist_free(newdevs[i]);
2337		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
2338	} else {
2339		/*
2340		 * Generate a new dev list.
2341		 */
2342		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
2343		    KM_SLEEP) == 0);
2344		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
2345		    devs, ndevs) == 0);
2346	}
2347}
2348
2349/*
2350 * Stop and drop level 2 ARC devices
2351 */
2352void
2353spa_l2cache_drop(spa_t *spa)
2354{
2355	vdev_t *vd;
2356	int i;
2357	spa_aux_vdev_t *sav = &spa->spa_l2cache;
2358
2359	for (i = 0; i < sav->sav_count; i++) {
2360		uint64_t pool;
2361
2362		vd = sav->sav_vdevs[i];
2363		ASSERT(vd != NULL);
2364
2365		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
2366		    pool != 0ULL && l2arc_vdev_present(vd))
2367			l2arc_remove_vdev(vd);
2368		if (vd->vdev_isl2cache)
2369			spa_l2cache_remove(vd);
2370		vdev_clear_stats(vd);
2371		(void) vdev_close(vd);
2372	}
2373}
2374
2375/*
2376 * Pool Creation
2377 */
2378int
2379spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
2380    const char *history_str, nvlist_t *zplprops)
2381{
2382	spa_t *spa;
2383	char *altroot = NULL;
2384	vdev_t *rvd;
2385	dsl_pool_t *dp;
2386	dmu_tx_t *tx;
2387	int error = 0;
2388	uint64_t txg = TXG_INITIAL;
2389	nvlist_t **spares, **l2cache;
2390	uint_t nspares, nl2cache;
2391	uint64_t version;
2392
2393	/*
2394	 * If this pool already exists, return failure.
2395	 */
2396	mutex_enter(&spa_namespace_lock);
2397	if (spa_lookup(pool) != NULL) {
2398		mutex_exit(&spa_namespace_lock);
2399		return (EEXIST);
2400	}
2401
2402	/*
2403	 * Allocate a new spa_t structure.
2404	 */
2405	(void) nvlist_lookup_string(props,
2406	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2407	spa = spa_add(pool, NULL, altroot);
2408	spa_activate(spa, spa_mode_global);
2409
2410	if (props && (error = spa_prop_validate(spa, props))) {
2411		spa_deactivate(spa);
2412		spa_remove(spa);
2413		mutex_exit(&spa_namespace_lock);
2414		return (error);
2415	}
2416
2417	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
2418	    &version) != 0)
2419		version = SPA_VERSION;
2420	ASSERT(version <= SPA_VERSION);
2421
2422	spa->spa_first_txg = txg;
2423	spa->spa_uberblock.ub_txg = txg - 1;
2424	spa->spa_uberblock.ub_version = version;
2425	spa->spa_ubsync = spa->spa_uberblock;
2426
2427	/*
2428	 * Create "The Godfather" zio to hold all async IOs
2429	 */
2430	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
2431	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
2432
2433	/*
2434	 * Create the root vdev.
2435	 */
2436	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2437
2438	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
2439
2440	ASSERT(error != 0 || rvd != NULL);
2441	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
2442
2443	if (error == 0 && !zfs_allocatable_devs(nvroot))
2444		error = EINVAL;
2445
2446	if (error == 0 &&
2447	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
2448	    (error = spa_validate_aux(spa, nvroot, txg,
2449	    VDEV_ALLOC_ADD)) == 0) {
2450		for (int c = 0; c < rvd->vdev_children; c++) {
2451			vdev_metaslab_set_size(rvd->vdev_child[c]);
2452			vdev_expand(rvd->vdev_child[c], txg);
2453		}
2454	}
2455
2456	spa_config_exit(spa, SCL_ALL, FTAG);
2457
2458	if (error != 0) {
2459		spa_unload(spa);
2460		spa_deactivate(spa);
2461		spa_remove(spa);
2462		mutex_exit(&spa_namespace_lock);
2463		return (error);
2464	}
2465
2466	/*
2467	 * Get the list of spares, if specified.
2468	 */
2469	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2470	    &spares, &nspares) == 0) {
2471		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
2472		    KM_SLEEP) == 0);
2473		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2474		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2475		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2476		spa_load_spares(spa);
2477		spa_config_exit(spa, SCL_ALL, FTAG);
2478		spa->spa_spares.sav_sync = B_TRUE;
2479	}
2480
2481	/*
2482	 * Get the list of level 2 cache devices, if specified.
2483	 */
2484	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2485	    &l2cache, &nl2cache) == 0) {
2486		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2487		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2488		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2489		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2490		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2491		spa_load_l2cache(spa);
2492		spa_config_exit(spa, SCL_ALL, FTAG);
2493		spa->spa_l2cache.sav_sync = B_TRUE;
2494	}
2495
2496	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
2497	spa->spa_meta_objset = dp->dp_meta_objset;
2498
2499	/*
2500	 * Create DDTs (dedup tables).
2501	 */
2502	ddt_create(spa);
2503
2504	spa_update_dspace(spa);
2505
2506	tx = dmu_tx_create_assigned(dp, txg);
2507
2508	/*
2509	 * Create the pool config object.
2510	 */
2511	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
2512	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
2513	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
2514
2515	if (zap_add(spa->spa_meta_objset,
2516	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
2517	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
2518		cmn_err(CE_PANIC, "failed to add pool config");
2519	}
2520
2521	/* Newly created pools with the right version are always deflated. */
2522	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
2523		spa->spa_deflate = TRUE;
2524		if (zap_add(spa->spa_meta_objset,
2525		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
2526		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
2527			cmn_err(CE_PANIC, "failed to add deflate");
2528		}
2529	}
2530
2531	/*
2532	 * Create the deferred-free bplist object.  Turn off compression
2533	 * because sync-to-convergence takes longer if the blocksize
2534	 * keeps changing.
2535	 */
2536	spa->spa_deferred_bplist_obj = bplist_create(spa->spa_meta_objset,
2537	    1 << 14, tx);
2538	dmu_object_set_compress(spa->spa_meta_objset,
2539	    spa->spa_deferred_bplist_obj, ZIO_COMPRESS_OFF, tx);
2540
2541	if (zap_add(spa->spa_meta_objset,
2542	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
2543	    sizeof (uint64_t), 1, &spa->spa_deferred_bplist_obj, tx) != 0) {
2544		cmn_err(CE_PANIC, "failed to add bplist");
2545	}
2546
2547	/*
2548	 * Create the pool's history object.
2549	 */
2550	if (version >= SPA_VERSION_ZPOOL_HISTORY)
2551		spa_history_create_obj(spa, tx);
2552
2553	/*
2554	 * Set pool properties.
2555	 */
2556	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
2557	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2558	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
2559	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
2560
2561	if (props != NULL) {
2562		spa_configfile_set(spa, props, B_FALSE);
2563		spa_sync_props(spa, props, CRED(), tx);
2564	}
2565
2566	dmu_tx_commit(tx);
2567
2568	spa->spa_sync_on = B_TRUE;
2569	txg_sync_start(spa->spa_dsl_pool);
2570
2571	/*
2572	 * We explicitly wait for the first transaction to complete so that our
2573	 * bean counters are appropriately updated.
2574	 */
2575	txg_wait_synced(spa->spa_dsl_pool, txg);
2576
2577	spa_config_sync(spa, B_FALSE, B_TRUE);
2578
2579	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
2580		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
2581	spa_history_log_version(spa, LOG_POOL_CREATE);
2582
2583	spa->spa_minref = refcount_count(&spa->spa_refcount);
2584
2585	mutex_exit(&spa_namespace_lock);
2586
2587	return (0);
2588}
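
/*
 * Example (illustrative only; the pool name and device path are
 * hypothetical, and error handling is omitted): a minimal nvroot for
 * spa_create() describing a single-disk pool.  In practice the nvroot is
 * assembled in userland and handed down through the ioctl path.
 *
 *	nvlist_t *disk, *nvroot;
 *
 *	VERIFY(nvlist_alloc(&disk, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t0d0s0") == 0);
 *
 *	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	error = spa_create("tank", nvroot, NULL, NULL, NULL);
 */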
2589
2590#ifdef _KERNEL
2591/*
2592 * Get the root pool information from the root disk, then import the root pool
2593 * at system boot time.
2594 */
2595extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
2596
2597static nvlist_t *
2598spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
2599{
2600	nvlist_t *config;
2601	nvlist_t *nvtop, *nvroot;
2602	uint64_t pgid;
2603
2604	if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
2605		return (NULL);
2606
2607	/*
2608	 * Add this top-level vdev to the child array.
2609	 */
2610	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2611	    &nvtop) == 0);
2612	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2613	    &pgid) == 0);
2614	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
2615
2616	/*
2617	 * Put this pool's top-level vdevs into a root vdev.
2618	 */
2619	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2620	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
2621	    VDEV_TYPE_ROOT) == 0);
2622	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
2623	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
2624	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2625	    &nvtop, 1) == 0);
2626
2627	/*
2628	 * Replace the existing vdev_tree with the new root vdev in
2629	 * this pool's configuration (remove the old, add the new).
2630	 */
2631	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2632	nvlist_free(nvroot);
2633	return (config);
2634}
2635
2636/*
2637 * Walk the vdev tree and see if we can find a device with "better"
2638 * configuration. A configuration is "better" if the label on that
2639 * device has a more recent txg.
2640 */
2641static void
2642spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
2643{
2644	for (int c = 0; c < vd->vdev_children; c++)
2645		spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
2646
2647	if (vd->vdev_ops->vdev_op_leaf) {
2648		nvlist_t *label;
2649		uint64_t label_txg;
2650
2651		if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
2652		    &label) != 0)
2653			return;
2654
2655		VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
2656		    &label_txg) == 0);
2657
2658		/*
2659		 * Do we have a better boot device?
2660		 */
2661		if (label_txg > *txg) {
2662			*txg = label_txg;
2663			*avd = vd;
2664		}
2665		nvlist_free(label);
2666	}
2667}
2668
2669/*
2670 * Import a root pool.
2671 *
2672 * For x86, devpath_list will consist of the devid and/or physpath name of
2673 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
2674 * The GRUB "findroot" command will return the vdev we should boot from.
2675 *
2676 * For SPARC, devpath_list consists of the physpath name of the booting
2677 * device, whether the root pool is a single-device pool or a mirrored pool.
2678 * e.g.
2679 *	"/pci@1f,0/ide@d/disk@0,0:a"
2680 */
2681int
2682spa_import_rootpool(char *devpath, char *devid)
2683{
2684	spa_t *spa;
2685	vdev_t *rvd, *bvd, *avd = NULL;
2686	nvlist_t *config, *nvtop;
2687	uint64_t guid, txg;
2688	char *pname;
2689	int error;
2690
2691	/*
2692	 * Read the label from the boot device and generate a configuration.
2693	 */
2694	config = spa_generate_rootconf(devpath, devid, &guid);
2695#if defined(_OBP) && defined(_KERNEL)
2696	if (config == NULL) {
2697		if (strstr(devpath, "/iscsi/ssd") != NULL) {
2698			/* iscsi boot */
2699			get_iscsi_bootpath_phy(devpath);
2700			config = spa_generate_rootconf(devpath, devid, &guid);
2701		}
2702	}
2703#endif
2704	if (config == NULL) {
2705		cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
2706		    devpath);
2707		return (EIO);
2708	}
2709
2710	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2711	    &pname) == 0);
2712	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2713
2714	mutex_enter(&spa_namespace_lock);
2715	if ((spa = spa_lookup(pname)) != NULL) {
2716		/*
2717		 * Remove the existing root pool from the namespace so that we
2718		 * can replace it with the correct config we just read in.
2719		 */
2720		spa_remove(spa);
2721	}
2722
2723	spa = spa_add(pname, config, NULL);
2724	spa->spa_is_root = B_TRUE;
2725	spa->spa_load_verbatim = B_TRUE;
2726
2727	/*
2728	 * Build up a vdev tree based on the boot device's label config.
2729	 */
2730	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2731	    &nvtop) == 0);
2732	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2733	error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
2734	    VDEV_ALLOC_ROOTPOOL);
2735	spa_config_exit(spa, SCL_ALL, FTAG);
2736	if (error) {
2737		mutex_exit(&spa_namespace_lock);
2738		nvlist_free(config);
2739		cmn_err(CE_NOTE, "Cannot parse the config for pool '%s'",
2740		    pname);
2741		return (error);
2742	}
2743
2744	/*
2745	 * Get the boot vdev.
2746	 */
2747	if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
2748		cmn_err(CE_NOTE, "Cannot find the boot vdev for guid %llu",
2749		    (u_longlong_t)guid);
2750		error = ENOENT;
2751		goto out;
2752	}
2753
2754	/*
2755	 * Determine if there is a better boot device.
2756	 */
2757	avd = bvd;
2758	spa_alt_rootvdev(rvd, &avd, &txg);
2759	if (avd != bvd) {
2760		cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
2761		    "try booting from '%s'", avd->vdev_path);
2762		error = EINVAL;
2763		goto out;
2764	}
2765
2766	/*
2767	 * If the boot device is part of a spare vdev then ensure that
2768	 * we're booting off the active spare.
2769	 */
2770	if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2771	    !bvd->vdev_isspare) {
2772		cmn_err(CE_NOTE, "The boot device is currently spared. Please "
2773		    "try booting from '%s'",
2774		    bvd->vdev_parent->vdev_child[1]->vdev_path);
2775		error = EINVAL;
2776		goto out;
2777	}
2778
2779	error = 0;
2780	spa_history_log_version(spa, LOG_POOL_IMPORT);
2781out:
2782	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2783	vdev_free(rvd);
2784	spa_config_exit(spa, SCL_ALL, FTAG);
2785	mutex_exit(&spa_namespace_lock);
2786
2787	nvlist_free(config);
2788	return (error);
2789}
2790
2791#endif
2792
2793/*
2794 * Take a pool and insert it into the namespace as if it had been loaded at
2795 * boot.
2796 */
2797int
2798spa_import_verbatim(const char *pool, nvlist_t *config, nvlist_t *props)
2799{
2800	spa_t *spa;
2801	zpool_rewind_policy_t policy;
2802	char *altroot = NULL;
2803
2804	mutex_enter(&spa_namespace_lock);
2805	if (spa_lookup(pool) != NULL) {
2806		mutex_exit(&spa_namespace_lock);
2807		return (EEXIST);
2808	}
2809
2810	(void) nvlist_lookup_string(props,
2811	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2812	spa = spa_add(pool, config, altroot);
2813
2814	zpool_get_rewind_policy(config, &policy);
2815	spa->spa_load_max_txg = policy.zrp_txg;
2816
2817	spa->spa_load_verbatim = B_TRUE;
2818
2819	if (props != NULL)
2820		spa_configfile_set(spa, props, B_FALSE);
2821
2822	spa_config_sync(spa, B_FALSE, B_TRUE);
2823
2824	mutex_exit(&spa_namespace_lock);
2825	spa_history_log_version(spa, LOG_POOL_IMPORT);
2826
2827	return (0);
2828}
2829
2830/*
2831 * Import a non-root pool into the system.
2832 */
2833int
2834spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2835{
2836	spa_t *spa;
2837	char *altroot = NULL;
2838	spa_load_state_t state = SPA_LOAD_IMPORT;
2839	zpool_rewind_policy_t policy;
2840	int error;
2841	nvlist_t *nvroot;
2842	nvlist_t **spares, **l2cache;
2843	uint_t nspares, nl2cache;
2844
2845	/*
2846	 * If a pool with this name exists, return failure.
2847	 */
2848	mutex_enter(&spa_namespace_lock);
2849	if ((spa = spa_lookup(pool)) != NULL) {
2850		mutex_exit(&spa_namespace_lock);
2851		return (EEXIST);
2852	}
2853
2854	zpool_get_rewind_policy(config, &policy);
2855	if (policy.zrp_request & ZPOOL_DO_REWIND)
2856		state = SPA_LOAD_RECOVER;
2857
2858	/*
2859	 * Create and initialize the spa structure.
2860	 */
2861	(void) nvlist_lookup_string(props,
2862	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2863	spa = spa_add(pool, config, altroot);
2864	spa_activate(spa, spa_mode_global);
2865
2866	/*
2867	 * Don't start async tasks until we know everything is healthy.
2868	 */
2869	spa_async_suspend(spa);
2870
2871	/*
2872	 * Pass off the heavy lifting to spa_load().  Pass TRUE for mosconfig
2873	 * because the user-supplied config is actually the one to trust when
2874	 * doing an import.
2875	 */
2876	if (state != SPA_LOAD_RECOVER)
2877		spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
2878	error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
2879	    ((policy.zrp_request & ZPOOL_EXTREME_REWIND) != 0));
2880
2881	/*
2882	 * Propagate anything learned about failing or best txgs
2883	 * back to the caller.
2884	 */
2885	spa_rewind_data_to_nvlist(spa, config);
2886
2887	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2888	/*
2889	 * Toss any existing spare list, as it no longer has any
2890	 * validity and conflicts with spa_has_spare().
2891	 */
2892	if (spa->spa_spares.sav_config) {
2893		nvlist_free(spa->spa_spares.sav_config);
2894		spa->spa_spares.sav_config = NULL;
2895		spa_load_spares(spa);
2896	}
2897	if (spa->spa_l2cache.sav_config) {
2898		nvlist_free(spa->spa_l2cache.sav_config);
2899		spa->spa_l2cache.sav_config = NULL;
2900		spa_load_l2cache(spa);
2901	}
2902
2903	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2904	    &nvroot) == 0);
2905	if (error == 0)
2906		error = spa_validate_aux(spa, nvroot, -1ULL,
2907		    VDEV_ALLOC_SPARE);
2908	if (error == 0)
2909		error = spa_validate_aux(spa, nvroot, -1ULL,
2910		    VDEV_ALLOC_L2CACHE);
2911	spa_config_exit(spa, SCL_ALL, FTAG);
2912
2913	if (props != NULL)
2914		spa_configfile_set(spa, props, B_FALSE);
2915
2916	if (error != 0 || (props && spa_writeable(spa) &&
2917	    (error = spa_prop_set(spa, props)))) {
2918		spa_unload(spa);
2919		spa_deactivate(spa);
2920		spa_remove(spa);
2921		mutex_exit(&spa_namespace_lock);
2922		return (error);
2923	}
2924
2925	spa_async_resume(spa);
2926
2927	/*
2928	 * Override any spares and level 2 cache devices as specified by
2929	 * the user, as these may have correct device names/devids, etc.
2930	 */
2931	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2932	    &spares, &nspares) == 0) {
2933		if (spa->spa_spares.sav_config)
2934			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
2935			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
2936		else
2937			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
2938			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2939		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2940		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2941		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2942		spa_load_spares(spa);
2943		spa_config_exit(spa, SCL_ALL, FTAG);
2944		spa->spa_spares.sav_sync = B_TRUE;
2945	}
2946	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2947	    &l2cache, &nl2cache) == 0) {
2948		if (spa->spa_l2cache.sav_config)
2949			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
2950			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
2951		else
2952			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2953			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2954		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2955		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2956		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2957		spa_load_l2cache(spa);
2958		spa_config_exit(spa, SCL_ALL, FTAG);
2959		spa->spa_l2cache.sav_sync = B_TRUE;
2960	}
2961
2962	/*
2963	 * Check for any removed devices.
2964	 */
2965	if (spa->spa_autoreplace) {
2966		spa_aux_check_removed(&spa->spa_spares);
2967		spa_aux_check_removed(&spa->spa_l2cache);
2968	}
2969
2970	if (spa_writeable(spa)) {
2971		/*
2972		 * Update the config cache to include the newly-imported pool.
2973		 */
2974		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2975	}
2976
2977	/*
2978	 * It's possible that the pool was expanded while it was exported.
2979	 * We kick off an async task to handle this for us.
2980	 */
2981	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
2982
2983	mutex_exit(&spa_namespace_lock);
2984	spa_history_log_version(spa, LOG_POOL_IMPORT);
2985
2986	return (0);
2987}
2988
2989
2990/*
2991 * This (illegal) pool name is used when temporarily importing a spa_t in order
2992 * to get the vdev stats associated with the imported devices.
2993 */
2994#define	TRYIMPORT_NAME	"$import"
2995
2996nvlist_t *
2997spa_tryimport(nvlist_t *tryconfig)
2998{
2999	nvlist_t *config = NULL;
3000	char *poolname;
3001	spa_t *spa;
3002	uint64_t state;
3003	int error;
3004
3005	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
3006		return (NULL);
3007
3008	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
3009		return (NULL);
3010
3011	/*
3012	 * Create and initialize the spa structure.
3013	 */
3014	mutex_enter(&spa_namespace_lock);
3015	spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
3016	spa_activate(spa, FREAD);
3017
3018	/*
3019	 * Pass off the heavy lifting to spa_load().
3020	 * Pass TRUE for mosconfig because the user-supplied config
3021	 * is actually the one to trust when doing an import.
3022	 */
3023	error = spa_load(spa, SPA_LOAD_TRYIMPORT, B_TRUE);
3024
3025	/*
3026	 * If 'tryconfig' was at least parsable, return the current config.
3027	 */
3028	if (spa->spa_root_vdev != NULL) {
3029		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3030		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
3031		    poolname) == 0);
3032		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3033		    state) == 0);
3034		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
3035		    spa->spa_uberblock.ub_timestamp) == 0);
3036
3037		/*
3038		 * If the bootfs property exists on this pool then we
3039		 * copy it out so that external consumers can tell which
3040		 * pools are bootable.
3041		 */
3042		if ((!error || error == EEXIST) && spa->spa_bootfs) {
3043			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3044
3045			/*
3046			 * We have to play games with the name since the
3047			 * pool was opened as TRYIMPORT_NAME.
3048			 */
3049			if (dsl_dsobj_to_dsname(spa_name(spa),
3050			    spa->spa_bootfs, tmpname) == 0) {
3051				char *cp;
3052				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3053
3054				cp = strchr(tmpname, '/');
3055				if (cp == NULL) {
3056					(void) strlcpy(dsname, tmpname,
3057					    MAXPATHLEN);
3058				} else {
3059					(void) snprintf(dsname, MAXPATHLEN,
3060					    "%s/%s", poolname, ++cp);
3061				}
3062				VERIFY(nvlist_add_string(config,
3063				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
3064				kmem_free(dsname, MAXPATHLEN);
3065			}
3066			kmem_free(tmpname, MAXPATHLEN);
3067		}
3068
3069		/*
3070		 * Add the list of hot spares and level 2 cache devices.
3071		 */
3072		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3073		spa_add_spares(spa, config);
3074		spa_add_l2cache(spa, config);
3075		spa_config_exit(spa, SCL_CONFIG, FTAG);
3076	}
3077
3078	spa_unload(spa);
3079	spa_deactivate(spa);
3080	spa_remove(spa);
3081	mutex_exit(&spa_namespace_lock);
3082
3083	return (config);
3084}
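
/*
 * Example (illustrative): a typical import probes the label-derived config
 * first, then imports with the config spa_tryimport() generated ("tank" is
 * hypothetical; tryconfig normally comes from scanning device labels in
 * userland):
 *
 *	nvlist_t *config;
 *
 *	if ((config = spa_tryimport(tryconfig)) != NULL) {
 *		(void) spa_import("tank", config, NULL);
 *		nvlist_free(config);
 *	}
 */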
3085
3086/*
3087 * Pool export/destroy
3088 *
3089 * The act of destroying or exporting a pool is very simple.  We make sure there
3090 * is no more pending I/O and that any references to the pool are gone.  Then, we
3091 * update the pool state and sync all the labels to disk, removing the
3092 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
3093 * we don't sync the labels or remove the configuration cache.
3094 */
3095static int
3096spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
3097    boolean_t force, boolean_t hardforce)
3098{
3099	spa_t *spa;
3100
3101	if (oldconfig)
3102		*oldconfig = NULL;
3103
3104	if (!(spa_mode_global & FWRITE))
3105		return (EROFS);
3106
3107	mutex_enter(&spa_namespace_lock);
3108	if ((spa = spa_lookup(pool)) == NULL) {
3109		mutex_exit(&spa_namespace_lock);
3110		return (ENOENT);
3111	}
3112
3113	/*
3114	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
3115	 * reacquire the namespace lock, and see if we can export.
3116	 */
3117	spa_open_ref(spa, FTAG);
3118	mutex_exit(&spa_namespace_lock);
3119	spa_async_suspend(spa);
3120	mutex_enter(&spa_namespace_lock);
3121	spa_close(spa, FTAG);
3122
3123	/*
3124	 * The pool will be in core if it's openable,
3125	 * in which case we can modify its state.
3126	 */
3127	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
3128		/*
3129		 * Objsets may be open only because they're dirty, so we
3130		 * have to force the pool to sync before checking spa_refcount.
3131		 */
3132		txg_wait_synced(spa->spa_dsl_pool, 0);
3133
3134		/*
3135		 * A pool cannot be exported or destroyed if there are active
3136		 * references.  If we are resetting a pool, allow references by
3137		 * fault injection handlers.
3138		 */
3139		if (!spa_refcount_zero(spa) ||
3140		    (spa->spa_inject_ref != 0 &&
3141		    new_state != POOL_STATE_UNINITIALIZED)) {
3142			spa_async_resume(spa);
3143			mutex_exit(&spa_namespace_lock);
3144			return (EBUSY);
3145		}
3146
3147		/*
3148		 * A pool cannot be exported if it has an active shared spare.
3149		 * This is to prevent other pools from stealing the active
3150		 * spare from an exported pool.  The user may still force the
3151		 * export, accepting that risk.
3152		 */
3153		if (!force && new_state == POOL_STATE_EXPORTED &&
3154		    spa_has_active_shared_spare(spa)) {
3155			spa_async_resume(spa);
3156			mutex_exit(&spa_namespace_lock);
3157			return (EXDEV);
3158		}
3159
3160		/*
3161		 * We want this to be reflected on every label,
3162		 * so mark them all dirty.  spa_unload() will do the
3163		 * final sync that pushes these changes out.
3164		 */
3165		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
3166			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3167			spa->spa_state = new_state;
3168			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
3169			vdev_config_dirty(spa->spa_root_vdev);
3170			spa_config_exit(spa, SCL_ALL, FTAG);
3171		}
3172	}
3173
3174	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
3175
3176	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
3177		spa_unload(spa);
3178		spa_deactivate(spa);
3179	}
3180
3181	if (oldconfig && spa->spa_config)
3182		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
3183
3184	if (new_state != POOL_STATE_UNINITIALIZED) {
3185		if (!hardforce)
3186			spa_config_sync(spa, B_TRUE, B_TRUE);
3187		spa_remove(spa);
3188	}
3189	mutex_exit(&spa_namespace_lock);
3190
3191	return (0);
3192}
3193
3194/*
3195 * Destroy a storage pool.
3196 */
3197int
3198spa_destroy(char *pool)
3199{
3200	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
3201	    B_FALSE, B_FALSE));
3202}
3203
3204/*
3205 * Export a storage pool.
3206 */
3207int
3208spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
3209    boolean_t hardforce)
3210{
3211	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
3212	    force, hardforce));
3213}
3214
3215/*
3216 * Similar to spa_export(), this unloads the spa_t without actually removing it
3217 * from the namespace in any way.
3218 */
3219int
3220spa_reset(char *pool)
3221{
3222	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
3223	    B_FALSE, B_FALSE));
3224}
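
/*
 * Example (illustrative; "tank" is hypothetical): exporting a pool while
 * capturing its final configuration.  A plain forced export still syncs
 * the labels; only a hard-forced export skips that step.
 *
 *	nvlist_t *oldconfig = NULL;
 *
 *	if (spa_export("tank", &oldconfig, B_FALSE, B_FALSE) == 0) {
 *		... stash oldconfig for a later import ...
 *		nvlist_free(oldconfig);
 *	}
 */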
3225
3226/*
3227 * ==========================================================================
3228 * Device manipulation
3229 * ==========================================================================
3230 */
3231
3232/*
3233 * Add a device to a storage pool.
3234 */
3235int
3236spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
3237{
3238	uint64_t txg, id;
3239	int error;
3240	vdev_t *rvd = spa->spa_root_vdev;
3241	vdev_t *vd, *tvd;
3242	nvlist_t **spares, **l2cache;
3243	uint_t nspares, nl2cache;
3244
3245	txg = spa_vdev_enter(spa);
3246
3247	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
3248	    VDEV_ALLOC_ADD)) != 0)
3249		return (spa_vdev_exit(spa, NULL, txg, error));
3250
3251	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
3252
3253	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
3254	    &nspares) != 0)
3255		nspares = 0;
3256
3257	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
3258	    &nl2cache) != 0)
3259		nl2cache = 0;
3260
3261	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
3262		return (spa_vdev_exit(spa, vd, txg, EINVAL));
3263
3264	if (vd->vdev_children != 0 &&
3265	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
3266		return (spa_vdev_exit(spa, vd, txg, error));
3267
3268	/*
3269	 * We must validate the spares and l2cache devices after checking the
3270	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
3271	 */
3272	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
3273		return (spa_vdev_exit(spa, vd, txg, error));
3274
3275	/*
3276	 * Transfer each new top-level vdev from vd to rvd.
3277	 */
3278	for (int c = 0; c < vd->vdev_children; c++) {
3279
3280		/*
3281		 * Set the vdev id to the first hole, if one exists.
3282		 */
3283		for (id = 0; id < rvd->vdev_children; id++) {
3284			if (rvd->vdev_child[id]->vdev_ishole) {
3285				vdev_free(rvd->vdev_child[id]);
3286				break;
3287			}
3288		}
3289		tvd = vd->vdev_child[c];
3290		vdev_remove_child(vd, tvd);
3291		tvd->vdev_id = id;
3292		vdev_add_child(rvd, tvd);
3293		vdev_config_dirty(tvd);
3294	}
3295
3296	if (nspares != 0) {
3297		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
3298		    ZPOOL_CONFIG_SPARES);
3299		spa_load_spares(spa);
3300		spa->spa_spares.sav_sync = B_TRUE;
3301	}
3302
3303	if (nl2cache != 0) {
3304		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
3305		    ZPOOL_CONFIG_L2CACHE);
3306		spa_load_l2cache(spa);
3307		spa->spa_l2cache.sav_sync = B_TRUE;
3308	}
3309
3310	/*
3311	 * We have to be careful when adding new vdevs to an existing pool.
3312	 * If other threads start allocating from these vdevs before we
3313	 * sync the config cache, and we lose power, then upon reboot we may
3314	 * fail to open the pool because there are DVAs that the config cache
3315	 * can't translate.  Therefore, we first add the vdevs without
3316	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
3317	 * and then let spa_config_update() initialize the new metaslabs.
3318	 *
3319	 * spa_load() checks for added-but-not-initialized vdevs, so that
3320	 * if we lose power at any point in this sequence, the remaining
3321	 * steps will be completed the next time we load the pool.
3322	 */
3323	(void) spa_vdev_exit(spa, vd, txg, 0);
3324
3325	mutex_enter(&spa_namespace_lock);
3326	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3327	mutex_exit(&spa_namespace_lock);
3328
3329	return (0);
3330}
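
/*
 * Example (illustrative): the nvroot passed to spa_vdev_add() has the same
 * shape as the one sketched after spa_create() above.  To add a hot spare,
 * for instance, the new device is listed under ZPOOL_CONFIG_SPARES rather
 * than ZPOOL_CONFIG_CHILDREN:
 *
 *	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
 *	    &spare, 1) == 0);
 *	error = spa_vdev_add(spa, nvroot);
 */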
3331
3332/*
3333 * Attach a device to a mirror.  The arguments are the guid of any device
3334 * in the mirror, and the nvroot for the new device.  If the guid specifies
3335 * a device that is not mirrored, we automatically insert the mirror vdev.
3336 *
3337 * If 'replacing' is specified, the new device is intended to replace the
3338 * existing device; in this case the two devices are made into their own
3339 * mirror using the 'replacing' vdev, which is functionally identical to
3340 * the mirror vdev (it actually reuses all the same ops) but has a few
3341 * extra rules: you can't attach to it after it's been created, and upon
3342 * completion of resilvering, the first disk (the one being replaced)
3343 * is automatically detached.
3344 */
3345int
3346spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
3347{
3348	uint64_t txg, open_txg;
3349	vdev_t *rvd = spa->spa_root_vdev;
3350	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
3351	vdev_ops_t *pvops;
3352	char *oldvdpath, *newvdpath;
3353	int newvd_isspare;
3354	int error;
3355
3356	txg = spa_vdev_enter(spa);
3357
3358	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
3359
3360	if (oldvd == NULL)
3361		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3362
3363	if (!oldvd->vdev_ops->vdev_op_leaf)
3364		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3365
3366	pvd = oldvd->vdev_parent;
3367
3368	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
3369	    VDEV_ALLOC_ADD)) != 0)
3370		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
3371
3372	if (newrootvd->vdev_children != 1)
3373		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3374
3375	newvd = newrootvd->vdev_child[0];
3376
3377	if (!newvd->vdev_ops->vdev_op_leaf)
3378		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3379
3380	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
3381		return (spa_vdev_exit(spa, newrootvd, txg, error));
3382
3383	/*
3384	 * Spares can't replace logs
3385	 */
3386	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
3387		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3388
3389	if (!replacing) {
3390		/*
3391		 * For attach, the only allowable parent is a mirror or the root
3392		 * vdev.
3393		 */
3394		if (pvd->vdev_ops != &vdev_mirror_ops &&
3395		    pvd->vdev_ops != &vdev_root_ops)
3396			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3397
3398		pvops = &vdev_mirror_ops;
3399	} else {
3400		/*
3401		 * Active hot spares can only be replaced by inactive hot
3402		 * spares.
3403		 */
3404		if (pvd->vdev_ops == &vdev_spare_ops &&
3405		    pvd->vdev_child[1] == oldvd &&
3406		    !spa_has_spare(spa, newvd->vdev_guid))
3407			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3408
3409		/*
3410		 * If the source is a hot spare, and the parent isn't already a
3411		 * spare, then we want to create a new hot spare.  Otherwise, we
3412		 * want to create a replacing vdev.  The user is not allowed to
3413		 * attach to a spared vdev child unless the 'isspare' state is
3414		 * the same (spare replaces spare, non-spare replaces
3415		 * non-spare).
3416		 */
3417		if (pvd->vdev_ops == &vdev_replacing_ops)
3418			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3419		else if (pvd->vdev_ops == &vdev_spare_ops &&
3420		    newvd->vdev_isspare != oldvd->vdev_isspare)
3421			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3422		else if (pvd->vdev_ops != &vdev_spare_ops &&
3423		    newvd->vdev_isspare)
3424			pvops = &vdev_spare_ops;
3425		else
3426			pvops = &vdev_replacing_ops;
3427	}
3428
3429	/*
3430	 * Make sure the new device is big enough.
3431	 */
3432	if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
3433		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
3434
3435	/*
3436	 * The new device cannot have a higher alignment requirement
3437	 * than the top-level vdev.
3438	 */
3439	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
3440		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
3441
3442	/*
3443	 * If this is an in-place replacement, update oldvd's path and devid
3444	 * to make it distinguishable from newvd, and unopenable from now on.
3445	 */
3446	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
3447		spa_strfree(oldvd->vdev_path);
3448		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
3449		    KM_SLEEP);
3450		(void) sprintf(oldvd->vdev_path, "%s/%s",
3451		    newvd->vdev_path, "old");
3452		if (oldvd->vdev_devid != NULL) {
3453			spa_strfree(oldvd->vdev_devid);
3454			oldvd->vdev_devid = NULL;
3455		}
3456	}
3457
3458	/*
3459	 * If the parent is not a mirror, or if we're replacing, insert the new
3460	 * mirror/replacing/spare vdev above oldvd.
3461	 */
3462	if (pvd->vdev_ops != pvops)
3463		pvd = vdev_add_parent(oldvd, pvops);
3464
3465	ASSERT(pvd->vdev_top->vdev_parent == rvd);
3466	ASSERT(pvd->vdev_ops == pvops);
3467	ASSERT(oldvd->vdev_parent == pvd);
3468
3469	/*
3470	 * Extract the new device from its root and add it to pvd.
3471	 */
3472	vdev_remove_child(newrootvd, newvd);
3473	newvd->vdev_id = pvd->vdev_children;
3474	newvd->vdev_crtxg = oldvd->vdev_crtxg;
3475	vdev_add_child(pvd, newvd);
3476
3477	tvd = newvd->vdev_top;
3478	ASSERT(pvd->vdev_top == tvd);
3479	ASSERT(tvd->vdev_parent == rvd);
3480
3481	vdev_config_dirty(tvd);
3482
3483	/*
3484	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
3485	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
3486	 */
3487	open_txg = txg + TXG_CONCURRENT_STATES - 1;
3488
3489	vdev_dtl_dirty(newvd, DTL_MISSING,
3490	    TXG_INITIAL, open_txg - TXG_INITIAL + 1);
3491
3492	if (newvd->vdev_isspare) {
3493		spa_spare_activate(newvd);
3494		spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
3495	}
3496
3497	oldvdpath = spa_strdup(oldvd->vdev_path);
3498	newvdpath = spa_strdup(newvd->vdev_path);
3499	newvd_isspare = newvd->vdev_isspare;
3500
3501	/*
3502	 * Mark newvd's DTL dirty in this txg.
3503	 */
3504	vdev_dirty(tvd, VDD_DTL, newvd, txg);
3505
3506	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
3507
3508	spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, NULL,
3509	    CRED(),  "%s vdev=%s %s vdev=%s",
3510	    replacing && newvd_isspare ? "spare in" :
3511	    replacing ? "replace" : "attach", newvdpath,
3512	    replacing ? "for" : "to", oldvdpath);
3513
3514	spa_strfree(oldvdpath);
3515	spa_strfree(newvdpath);
3516
3517	/*
3518	 * Kick off a resilver to update newvd.
3519	 */
3520	VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);
3521
3522	return (0);
3523}
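
/*
 * Example (illustrative; oldvd_guid and nvroot are hypothetical): replacing
 * a failed disk is an attach with 'replacing' set.  Once resilvering
 * completes, the replaced disk is detached automatically:
 *
 *	error = spa_vdev_attach(spa, oldvd_guid, nvroot, B_TRUE);
 */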
3524
3525/*
3526 * Detach a device from a mirror or replacing vdev.
3527 * If 'replace_done' is specified, only detach if the parent
3528 * is a replacing or a spare vdev.
3529 */
3530int
3531spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
3532{
3533	uint64_t txg;
3534	int error;
3535	vdev_t *rvd = spa->spa_root_vdev;
3536	vdev_t *vd, *pvd, *cvd, *tvd;
3537	boolean_t unspare = B_FALSE;
3538	uint64_t unspare_guid;
3539	size_t len;
3540
3541	txg = spa_vdev_enter(spa);
3542
3543	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3544
3545	if (vd == NULL)
3546		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3547
3548	if (!vd->vdev_ops->vdev_op_leaf)
3549		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3550
3551	pvd = vd->vdev_parent;
3552
3553	/*
3554	 * If the parent/child relationship is not as expected, don't do it.
3555	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
3556	 * vdev that's replacing B with C.  The user's intent in replacing
3557	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
3558	 * the replace by detaching C, the expected behavior is to end up
3559	 * M(A,B).  But suppose that right after deciding to detach C,
3560	 * the replacement of B completes.  We would have M(A,C), and then
3561	 * ask to detach C, which would leave us with just A -- not what
3562	 * the user wanted.  To prevent this, we make sure that the
3563	 * parent/child relationship hasn't changed -- in this example,
3564	 * that C's parent is still the replacing vdev R.
3565	 */
3566	if (pvd->vdev_guid != pguid && pguid != 0)
3567		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3568
3569	/*
3570	 * If replace_done is specified, only remove this device if it's
3571	 * the first child of a replacing vdev.  For the 'spare' vdev, either
3572	 * disk can be removed.
3573	 */
3574	if (replace_done) {
3575		if (pvd->vdev_ops == &vdev_replacing_ops) {
3576			if (vd->vdev_id != 0)
3577				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3578		} else if (pvd->vdev_ops != &vdev_spare_ops) {
3579			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3580		}
3581	}
3582
3583	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
3584	    spa_version(spa) >= SPA_VERSION_SPARES);
3585
3586	/*
3587	 * Only mirror, replacing, and spare vdevs support detach.
3588	 */
3589	if (pvd->vdev_ops != &vdev_replacing_ops &&
3590	    pvd->vdev_ops != &vdev_mirror_ops &&
3591	    pvd->vdev_ops != &vdev_spare_ops)
3592		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3593
3594	/*
3595	 * If this device has the only valid copy of some data,
3596	 * we cannot safely detach it.
3597	 */
3598	if (vdev_dtl_required(vd))
3599		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3600
3601	ASSERT(pvd->vdev_children >= 2);
3602
3603	/*
3604	 * If we are detaching the second disk from a replacing vdev, then
3605	 * check to see if we changed the original vdev's path to have "/old"
3606	 * at the end in spa_vdev_attach().  If so, undo that change now.
3607	 */
3608	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
3609	    pvd->vdev_child[0]->vdev_path != NULL &&
3610	    pvd->vdev_child[1]->vdev_path != NULL) {
3611		ASSERT(pvd->vdev_child[1] == vd);
3612		cvd = pvd->vdev_child[0];
3613		len = strlen(vd->vdev_path);
3614		if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
3615		    strcmp(cvd->vdev_path + len, "/old") == 0) {
3616			spa_strfree(cvd->vdev_path);
3617			cvd->vdev_path = spa_strdup(vd->vdev_path);
3618		}
3619	}
3620
3621	/*
3622	 * If we are detaching the original disk from a spare, then it implies
3623	 * that the spare should become a real disk, and be removed from the
3624	 * active spare list for the pool.
3625	 */
3626	if (pvd->vdev_ops == &vdev_spare_ops &&
3627	    vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare)
3628		unspare = B_TRUE;
3629
3630	/*
3631	 * Erase the disk labels so the disk can be used for other things.
3632	 * This must be done after all other error cases are handled,
3633	 * but before we disembowel vd (so we can still do I/O to it).
3634	 * But if we can't do it, don't treat the error as fatal --
3635	 * it may be that the unwritability of the disk is the reason
3636	 * it's being detached!
3637	 */
3638	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
3639
3640	/*
3641	 * Remove vd from its parent and compact the parent's children.
3642	 */
3643	vdev_remove_child(pvd, vd);
3644	vdev_compact_children(pvd);
3645
3646	/*
3647	 * Remember one of the remaining children so we can get tvd below.
3648	 */
3649	cvd = pvd->vdev_child[0];
3650
3651	/*
3652	 * If we need to remove the remaining child from the list of hot spares,
3653	 * do it now, marking the vdev as no longer a spare in the process.
3654	 * We must do this before vdev_remove_parent(), because that can
3655	 * change the GUID if it creates a new toplevel GUID.  For a similar
3656	 * reason, we must remove the spare now, in the same txg as the detach;
3657	 * otherwise someone could attach a new sibling, change the GUID, and
3658	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
3659	 */
3660	if (unspare) {
3661		ASSERT(cvd->vdev_isspare);
3662		spa_spare_remove(cvd);
3663		unspare_guid = cvd->vdev_guid;
3664		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3665	}
3666
3667	/*
3668	 * If the parent mirror/replacing vdev only has one child,
3669	 * the parent is no longer needed.  Remove it from the tree.
3670	 */
3671	if (pvd->vdev_children == 1)
3672		vdev_remove_parent(cvd);
3673
3674	/*
3675	 * We don't set tvd until now because the parent we just removed
3676	 * may have been the previous top-level vdev.
3677	 */
3678	tvd = cvd->vdev_top;
3679	ASSERT(tvd->vdev_parent == rvd);
3680
3681	/*
3682	 * Reevaluate the parent vdev state.
3683	 */
3684	vdev_propagate_state(cvd);
3685
3686	/*
3687	 * If the 'autoexpand' property is set on the pool then automatically
3688	 * try to expand the size of the pool. For example, if the device we
3689	 * just detached was smaller than the others, it may be possible to
3690	 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
3691	 * first so that we can obtain the updated sizes of the leaf vdevs.
3692	 */
3693	if (spa->spa_autoexpand) {
3694		vdev_reopen(tvd);
3695		vdev_expand(tvd, txg);
3696	}
3697
3698	vdev_config_dirty(tvd);
3699
3700	/*
3701	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
3702	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
3703	 * But first make sure we're not on any *other* txg's DTL list, to
3704	 * prevent vd from being accessed after it's freed.
3705	 */
3706	for (int t = 0; t < TXG_SIZE; t++)
3707		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
3708	vd->vdev_detached = B_TRUE;
3709	vdev_dirty(tvd, VDD_DTL, vd, txg);
3710
3711	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
3712
3713	error = spa_vdev_exit(spa, vd, txg, 0);
3714
3715	/*
3716	 * If this was the removal of the original device in a hot spare vdev,
3717	 * then we want to go through and remove the device from the hot spare
3718	 * list of every other pool.
3719	 */
3720	if (unspare) {
3721		spa_t *myspa = spa;
3722		spa = NULL;
3723		mutex_enter(&spa_namespace_lock);
3724		while ((spa = spa_next(spa)) != NULL) {
3725			if (spa->spa_state != POOL_STATE_ACTIVE)
3726				continue;
3727			if (spa == myspa)
3728				continue;
3729			spa_open_ref(spa, FTAG);
3730			mutex_exit(&spa_namespace_lock);
3731			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3732			mutex_enter(&spa_namespace_lock);
3733			spa_close(spa, FTAG);
3734		}
3735		mutex_exit(&spa_namespace_lock);
3736	}
3737
3738	return (error);
3739}
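
/*
 * Example (illustrative; the guids are hypothetical): cancelling an
 * in-progress replacement detaches the new disk.  Passing the expected
 * parent guid guards against the completion race described above:
 *
 *	error = spa_vdev_detach(spa, newvd_guid, pvd_guid, B_FALSE);
 */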
3740
3741static nvlist_t *
3742spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
3743{
3744	for (int i = 0; i < count; i++) {
3745		uint64_t guid;
3746
3747		VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3748		    &guid) == 0);
3749
3750		if (guid == target_guid)
3751			return (nvpp[i]);
3752	}
3753
3754	return (NULL);
3755}
3756
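/*
 * Replace the nvlist array 'name' in 'config' with a copy that omits
 * dev_to_remove.  Used to drop a spare or cache device from the pool config.
 */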
3757static void
3758spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3759    nvlist_t *dev_to_remove)
3760{
3761	nvlist_t **newdev = NULL;
3762
3763	if (count > 1)
3764		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3765
3766	for (int i = 0, j = 0; i < count; i++) {
3767		if (dev[i] == dev_to_remove)
3768			continue;
3769		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3770	}
3771
3772	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3773	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3774
3775	for (int i = 0; i < count - 1; i++)
3776		nvlist_free(newdev[i]);
3777
3778	if (count > 1)
3779		kmem_free(newdev, (count - 1) * sizeof (void *));
3780}
3781
3782/*
3783 * Removing a device from the vdev namespace requires several steps
3784 * and can take a significant amount of time.  As a result we use
3785 * the spa_vdev_config_[enter/exit] functions which allow us to
3786 * grab and release the spa_config_lock while still holding the namespace
3787 * lock.  During each step the configuration is synced out.
3788 */
3789
3790/*
3791 * Evacuate the device.
3792 */
3793int
3794spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
3795{
3796	int error = 0;
3797	uint64_t txg;
3798
3799	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3800	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3801	ASSERT(vd == vd->vdev_top);
3802
3803	/*
3804	 * Evacuate the device.  We don't hold the config lock as writer
3805	 * since we need to do I/O, but we do keep the spa_namespace_lock
3806	 * held.  Once this completes, the device should no longer have
3807	 * any blocks allocated on it.
3808	 */
3809	if (vd->vdev_islog) {
3810		error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
3811		    NULL, DS_FIND_CHILDREN);
3812	} else {
3813		error = ENOTSUP;	/* until we have bp rewrite */
3814	}
3815
3816	txg_wait_synced(spa_get_dsl(spa), 0);
3817
3818	if (error)
3819		return (error);
3820
3821	/*
3822	 * The evacuation succeeded.  Remove any remaining MOS metadata
3823	 * associated with this vdev, and wait for these changes to sync.
3824	 */
3825	txg = spa_vdev_config_enter(spa);
3826	vd->vdev_removing = B_TRUE;
3827	vdev_dirty(vd, 0, NULL, txg);
3828	vdev_config_dirty(vd);
3829	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
3830
3831	return (0);
3832}
3833
3834/*
3835 * Complete the removal by cleaning up the namespace.
3836 */
3837void
3838spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
3839{
3840	vdev_t *rvd = spa->spa_root_vdev;
3841	uint64_t id = vd->vdev_id;
3842	boolean_t last_vdev = (id == (rvd->vdev_children - 1));
3843
3844	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3845	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3846	ASSERT(vd == vd->vdev_top);
3847
3848	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
3849
3850	if (list_link_active(&vd->vdev_state_dirty_node))
3851		vdev_state_clean(vd);
3852	if (list_link_active(&vd->vdev_config_dirty_node))
3853		vdev_config_clean(vd);
3854
3855	vdev_free(vd);
3856
3857	if (last_vdev) {
3858		vdev_compact_children(rvd);
3859	} else {
3860		vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
3861		vdev_add_child(rvd, vd);
3862	}
3863	vdev_config_dirty(rvd);
3864
3865	/*
3866	 * Reassess the health of our root vdev.
3867	 */
3868	vdev_reopen(rvd);
3869}
3870
3871/*
3872 * Remove a device from the pool.  Currently, this supports removing only hot
3873 * spares, slogs, and level 2 ARC devices.
3874 */
3875int
3876spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3877{
3878	vdev_t *vd;
3879	metaslab_group_t *mg;
3880	nvlist_t **spares, **l2cache, *nv;
3881	uint64_t txg = 0;
3882	uint_t nspares, nl2cache;
3883	int error = 0;
3884	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
3885
3886	if (!locked)
3887		txg = spa_vdev_enter(spa);
3888
3889	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3890
3891	if (spa->spa_spares.sav_vdevs != NULL &&
3892	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3893	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
3894	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
3895		/*
3896		 * Only remove the hot spare if it's not currently in use
3897		 * in this pool.
3898		 */
3899		if (vd == NULL || unspare) {
3900			spa_vdev_remove_aux(spa->spa_spares.sav_config,
3901			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
3902			spa_load_spares(spa);
3903			spa->spa_spares.sav_sync = B_TRUE;
3904		} else {
3905			error = EBUSY;
3906		}
3907	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
3908	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3909	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
3910	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
3911		/*
3912		 * Cache devices can always be removed.
3913		 */
3914		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
3915		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
3916		spa_load_l2cache(spa);
3917		spa->spa_l2cache.sav_sync = B_TRUE;
3918	} else if (vd != NULL && vd->vdev_islog) {
3919		ASSERT(!locked);
3920		ASSERT(vd == vd->vdev_top);
3921
3922		/*
3923		 * XXX - Once we have bp-rewrite this should
3924		 * become the common case.
3925		 */
3926
3927		mg = vd->vdev_mg;
3928
3929		/*
3930		 * Stop allocating from this vdev.
3931		 */
3932		metaslab_group_passivate(mg);
3933
3934		/*
3935		 * Wait for the youngest allocations and frees to sync,
3936		 * and then wait for the deferral of those frees to finish.
3937		 */
3938		spa_vdev_config_exit(spa, NULL,
3939		    txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
3940
3941		/*
3942		 * Attempt to evacuate the vdev.
3943		 */
3944		error = spa_vdev_remove_evacuate(spa, vd);
3945
3946		txg = spa_vdev_config_enter(spa);
3947
3948		/*
3949		 * If we couldn't evacuate the vdev, unwind.
3950		 */
3951		if (error) {
3952			metaslab_group_activate(mg);
3953			return (spa_vdev_exit(spa, NULL, txg, error));
3954		}
3955
3956		/*
3957		 * Clean up the vdev namespace.
3958		 */
3959		spa_vdev_remove_from_namespace(spa, vd);
3960
3961	} else if (vd != NULL) {
3962		/*
3963		 * Normal vdevs cannot be removed (yet).
3964		 */
3965		error = ENOTSUP;
3966	} else {
3967		/*
3968		 * There is no vdev of any kind with the specified guid.
3969		 */
3970		error = ENOENT;
3971	}
3972
3973	if (!locked)
3974		return (spa_vdev_exit(spa, NULL, txg, error));
3975
3976	return (error);
3977}
3978
3979/*
3980 * Find any device that's done replacing, or a vdev marked 'unspare' that's
3981 * currently spared, so we can detach it.
3982 */
3983static vdev_t *
3984spa_vdev_resilver_done_hunt(vdev_t *vd)
3985{
3986	vdev_t *newvd, *oldvd;
3987
3988	for (int c = 0; c < vd->vdev_children; c++) {
3989		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3990		if (oldvd != NULL)
3991			return (oldvd);
3992	}
3993
3994	/*
3995	 * Check for a completed replacement.
3996	 */
3997	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3998		oldvd = vd->vdev_child[0];
3999		newvd = vd->vdev_child[1];
4000
4001		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
4002		    !vdev_dtl_required(oldvd))
4003			return (oldvd);
4004	}
4005
4006	/*
4007	 * Check for a completed resilver with the 'unspare' flag set.
4008	 */
4009	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
4010		newvd = vd->vdev_child[0];
4011		oldvd = vd->vdev_child[1];
4012
4013		if (newvd->vdev_unspare &&
4014		    vdev_dtl_empty(newvd, DTL_MISSING) &&
4015		    !vdev_dtl_required(oldvd)) {
4016			newvd->vdev_unspare = 0;
4017			return (oldvd);
4018		}
4019	}
4020
4021	return (NULL);
4022}
4023
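/*
 * Detach each device that spa_vdev_resilver_done_hunt() reports as done.
 * If the completed replacement was of a hot-spared device, also detach
 * the spare itself (the spare vdev's other child).
 */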
4024static void
4025spa_vdev_resilver_done(spa_t *spa)
4026{
4027	vdev_t *vd, *pvd, *ppvd;
4028	uint64_t guid, sguid, pguid, ppguid;
4029
4030	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4031
4032	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
4033		pvd = vd->vdev_parent;
4034		ppvd = pvd->vdev_parent;
4035		guid = vd->vdev_guid;
4036		pguid = pvd->vdev_guid;
4037		ppguid = ppvd->vdev_guid;
4038		sguid = 0;
4039		/*
4040		 * If we have just finished replacing a hot spared device, then
4041		 * we need to detach the parent's other child (the original hot
4042		 * spare) as well.
4043		 */
4044		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
4045			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
4046			ASSERT(ppvd->vdev_children == 2);
4047			sguid = ppvd->vdev_child[1]->vdev_guid;
4048		}
4049		spa_config_exit(spa, SCL_ALL, FTAG);
4050		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
4051			return;
4052		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
4053			return;
4054		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4055	}
4056
4057	spa_config_exit(spa, SCL_ALL, FTAG);
4058}
4059
4060/*
4061 * Update the stored path or FRU for this vdev.  Dirty the vdev configuration,
4062 * relying on spa_vdev_enter/exit() to synchronize the labels and cache.
4063 */
4064int
4065spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
4066    boolean_t ispath)
4067{
4068	vdev_t *vd;
4069	uint64_t txg;
4070
4071	txg = spa_vdev_enter(spa);
4072
4073	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4074		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
4075
4076	if (!vd->vdev_ops->vdev_op_leaf)
4077		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4078
4079	if (ispath) {
4080		spa_strfree(vd->vdev_path);
4081		vd->vdev_path = spa_strdup(value);
4082	} else {
4083		if (vd->vdev_fru != NULL)
4084			spa_strfree(vd->vdev_fru);
4085		vd->vdev_fru = spa_strdup(value);
4086	}
4087
4088	vdev_config_dirty(vd->vdev_top);
4089
4090	return (spa_vdev_exit(spa, NULL, txg, 0));
4091}
4092
4093int
4094spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
4095{
4096	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
4097}
4098
4099int
4100spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
4101{
4102	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
4103}
4104
4105/*
4106 * ==========================================================================
4107 * SPA Scrubbing
4108 * ==========================================================================
4109 */
4110
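/*
 * Kick off the requested scrub or resilver, or cancel the scrub in
 * progress when type is POOL_SCRUB_NONE.
 */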
4111int
4112spa_scrub(spa_t *spa, pool_scrub_type_t type)
4113{
4114	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
4115
4116	if ((uint_t)type >= POOL_SCRUB_TYPES)
4117		return (ENOTSUP);
4118
4119	/*
4120	 * If a resilver was requested, but there is no DTL on a
4121	 * writeable leaf device, we have nothing to do.
4122	 */
4123	if (type == POOL_SCRUB_RESILVER &&
4124	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
4125		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
4126		return (0);
4127	}
4128
4129	if (type == POOL_SCRUB_EVERYTHING &&
4130	    spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
4131	    spa->spa_dsl_pool->dp_scrub_isresilver)
4132		return (EBUSY);
4133
4134	if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
4135		return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
4136	} else if (type == POOL_SCRUB_NONE) {
4137		return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
4138	} else {
4139		return (EINVAL);
4140	}
4141}
4142
4143/*
4144 * ==========================================================================
4145 * SPA async task processing
4146 * ==========================================================================
4147 */
4148
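/*
 * Walk the vdev tree and mark each device with vdev_remove_wanted set as
 * REMOVED, clearing its error counters without a full vdev_clear().
 */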
4149static void
4150spa_async_remove(spa_t *spa, vdev_t *vd)
4151{
4152	if (vd->vdev_remove_wanted) {
4153		vd->vdev_remove_wanted = 0;
4154		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
4155
4156		/*
4157		 * We want to clear the stats, but we don't want to do a full
4158		 * vdev_clear() as that will cause us to throw away
4159		 * degraded/faulted state as well as attempt to reopen the
4160		 * device, all of which is a waste.
4161		 */
4162		vd->vdev_stat.vs_read_errors = 0;
4163		vd->vdev_stat.vs_write_errors = 0;
4164		vd->vdev_stat.vs_checksum_errors = 0;
4165
4166		vdev_state_dirty(vd->vdev_top);
4167	}
4168
4169	for (int c = 0; c < vd->vdev_children; c++)
4170		spa_async_remove(spa, vd->vdev_child[c]);
4171}
4172
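/*
 * Walk the vdev tree and reopen (and thereby reprobe) each device that
 * has vdev_probe_wanted set.
 */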
4173static void
4174spa_async_probe(spa_t *spa, vdev_t *vd)
4175{
4176	if (vd->vdev_probe_wanted) {
4177		vd->vdev_probe_wanted = 0;
4178		vdev_reopen(vd);	/* vdev_open() does the actual probe */
4179	}
4180
4181	for (int c = 0; c < vd->vdev_children; c++)
4182		spa_async_probe(spa, vd->vdev_child[c]);
4183}
4184
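/*
 * If the pool's autoexpand property is set, walk the vdev tree and post
 * an ESC_DEV_DLE sysevent, carrying the /devices path, for every leaf
 * vdev that has a known physical path.
 */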
4185static void
4186spa_async_autoexpand(spa_t *spa, vdev_t *vd)
4187{
4188	sysevent_id_t eid;
4189	nvlist_t *attr;
4190	char *physpath;
4191
4192	if (!spa->spa_autoexpand)
4193		return;
4194
4195	for (int c = 0; c < vd->vdev_children; c++) {
4196		vdev_t *cvd = vd->vdev_child[c];
4197		spa_async_autoexpand(spa, cvd);
4198	}
4199
4200	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
4201		return;
4202
4203	physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
4204	(void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
4205
4206	VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4207	VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
4208
4209	(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
4210	    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
4211
4212	nvlist_free(attr);
4213	kmem_free(physpath, MAXPATHLEN);
4214}
4215
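/*
 * Process and clear the set of pending async tasks: config updates,
 * device removal, autoexpansion, device probes, completed resilvers,
 * and resilver requests.  Signal any waiters on the way out.
 */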
4216static void
4217spa_async_thread(spa_t *spa)
4218{
4219	int tasks;
4220
4221	ASSERT(spa->spa_sync_on);
4222
4223	mutex_enter(&spa->spa_async_lock);
4224	tasks = spa->spa_async_tasks;
4225	spa->spa_async_tasks = 0;
4226	mutex_exit(&spa->spa_async_lock);
4227
4228	/*
4229	 * See if the config needs to be updated.
4230	 */
4231	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
4232		uint64_t old_space, new_space;
4233
4234		mutex_enter(&spa_namespace_lock);
4235		old_space = metaslab_class_get_space(spa_normal_class(spa));
4236		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4237		new_space = metaslab_class_get_space(spa_normal_class(spa));
4238		mutex_exit(&spa_namespace_lock);
4239
4240		/*
4241		 * If the pool grew as a result of the config update,
4242		 * then log an internal history event.
4243		 */
4244		if (new_space != old_space) {
4245			spa_history_internal_log(LOG_POOL_VDEV_ONLINE,
4246			    spa, NULL, CRED(),
4247			    "pool '%s' size: %llu(+%llu)",
4248			    spa_name(spa), new_space, new_space - old_space);
4249		}
4250	}
4251
4252	/*
4253	 * See if any devices need to be marked REMOVED.
4254	 */
4255	if (tasks & SPA_ASYNC_REMOVE) {
4256		spa_vdev_state_enter(spa, SCL_NONE);
4257		spa_async_remove(spa, spa->spa_root_vdev);
4258		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
4259			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
4260		for (int i = 0; i < spa->spa_spares.sav_count; i++)
4261			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
4262		(void) spa_vdev_state_exit(spa, NULL, 0);
4263	}
4264
4265	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
4266		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4267		spa_async_autoexpand(spa, spa->spa_root_vdev);
4268		spa_config_exit(spa, SCL_CONFIG, FTAG);
4269	}
4270
4271	/*
4272	 * See if any devices need to be probed.
4273	 */
4274	if (tasks & SPA_ASYNC_PROBE) {
4275		spa_vdev_state_enter(spa, SCL_NONE);
4276		spa_async_probe(spa, spa->spa_root_vdev);
4277		(void) spa_vdev_state_exit(spa, NULL, 0);
4278	}
4279
4280	/*
4281	 * If any devices are done replacing, detach them.
4282	 */
4283	if (tasks & SPA_ASYNC_RESILVER_DONE)
4284		spa_vdev_resilver_done(spa);
4285
4286	/*
4287	 * Kick off a resilver.
4288	 */
4289	if (tasks & SPA_ASYNC_RESILVER)
4290		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);
4291
4292	/*
4293	 * Let the world know that we're done.
4294	 */
4295	mutex_enter(&spa->spa_async_lock);
4296	spa->spa_async_thread = NULL;
4297	cv_broadcast(&spa->spa_async_cv);
4298	mutex_exit(&spa->spa_async_lock);
4299	thread_exit();
4300}
4301
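/*
 * Block further async task dispatch and wait for any in-flight async
 * thread to finish.  Balanced by spa_async_resume().
 */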
4302void
4303spa_async_suspend(spa_t *spa)
4304{
4305	mutex_enter(&spa->spa_async_lock);
4306	spa->spa_async_suspended++;
4307	while (spa->spa_async_thread != NULL)
4308		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
4309	mutex_exit(&spa->spa_async_lock);
4310}
4311
4312void
4313spa_async_resume(spa_t *spa)
4314{
4315	mutex_enter(&spa->spa_async_lock);
4316	ASSERT(spa->spa_async_suspended != 0);
4317	spa->spa_async_suspended--;
4318	mutex_exit(&spa->spa_async_lock);
4319}
4320
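/*
 * Spawn the async thread if there are pending tasks, async processing
 * is not suspended, no async thread is already running, and the root
 * filesystem is writable.
 */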
4321static void
4322spa_async_dispatch(spa_t *spa)
4323{
4324	mutex_enter(&spa->spa_async_lock);
4325	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
4326	    spa->spa_async_thread == NULL &&
4327	    rootdir != NULL && !vn_is_readonly(rootdir))
4328		spa->spa_async_thread = thread_create(NULL, 0,
4329		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
4330	mutex_exit(&spa->spa_async_lock);
4331}
4332
4333void
4334spa_async_request(spa_t *spa, int task)
4335{
4336	mutex_enter(&spa->spa_async_lock);
4337	spa->spa_async_tasks |= task;
4338	mutex_exit(&spa->spa_async_lock);
4339}
4340
4341/*
4342 * ==========================================================================
4343 * SPA syncing routines
4344 * ==========================================================================
4345 */
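/*
 * Free every block on the deferred-free bplist (each must have been born
 * in an earlier txg), then vacate the list.
 */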
4346static void
4347spa_sync_deferred_bplist(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx, uint64_t txg)
4348{
4349	blkptr_t blk;
4350	uint64_t itor = 0;
4351	uint8_t c = 1;
4352
4353	while (bplist_iterate(bpl, &itor, &blk) == 0) {
4354		ASSERT(blk.blk_birth < txg);
4355		zio_free(spa, txg, &blk);
4356	}
4357
4358	bplist_vacate(bpl, tx);
4359
4360	/*
4361	 * Pre-dirty the first block so we sync to convergence faster.
4362	 * (Usually only the first block is needed.)
4363	 */
4364	dmu_write(bpl->bpl_mos, spa->spa_deferred_bplist_obj, 0, 1, &c, tx);
4365}
4366
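/*
 * bplist_sync() callback: issue an asynchronous free of the given block
 * pointer beneath the parent zio.
 */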
4367static void
4368spa_sync_free(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
4369{
4370	zio_t *zio = arg;
4371
4372	zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
4373	    zio->io_flags));
4374}
4375
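/*
 * Pack an nvlist into XDR form and write it to the given MOS object,
 * recording the packed size in the object's bonus buffer.
 */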
4376static void
4377spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
4378{
4379	char *packed = NULL;
4380	size_t bufsize;
4381	size_t nvsize = 0;
4382	dmu_buf_t *db;
4383
4384	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
4385
4386	/*
4387	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
4388	 * information.  This avoids the dbuf_will_dirty() path and
4389	 * saves us a pre-read to get data we don't actually care about.
4390	 */
4391	bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
4392	packed = kmem_alloc(bufsize, KM_SLEEP);
4393
4394	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
4395	    KM_SLEEP) == 0);
4396	bzero(packed + nvsize, bufsize - nvsize);
4397
4398	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
4399
4400	kmem_free(packed, bufsize);
4401
4402	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
4403	dmu_buf_will_dirty(db, tx);
4404	*(uint64_t *)db->db_data = nvsize;
4405	dmu_buf_rele(db, FTAG);
4406}
4407
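/*
 * Sync an aux vdev array (spares or l2cache) out to its MOS object,
 * creating that object and its pool directory entry on first use.
 */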
4408static void
4409spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
4410    const char *config, const char *entry)
4411{
4412	nvlist_t *nvroot;
4413	nvlist_t **list;
4414	int i;
4415
4416	if (!sav->sav_sync)
4417		return;
4418
4419	/*
4420	 * Update the MOS nvlist describing the list of available devices.
4421	 * spa_validate_aux() will have already made sure this nvlist is
4422	 * valid and the vdevs are labeled appropriately.
4423	 */
4424	if (sav->sav_object == 0) {
4425		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
4426		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
4427		    sizeof (uint64_t), tx);
4428		VERIFY(zap_update(spa->spa_meta_objset,
4429		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
4430		    &sav->sav_object, tx) == 0);
4431	}
4432
4433	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4434	if (sav->sav_count == 0) {
4435		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
4436	} else {
4437		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
4438		for (i = 0; i < sav->sav_count; i++)
4439			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
4440			    B_FALSE, B_FALSE, B_TRUE);
4441		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
4442		    sav->sav_count) == 0);
4443		for (i = 0; i < sav->sav_count; i++)
4444			nvlist_free(list[i]);
4445		kmem_free(list, sav->sav_count * sizeof (void *));
4446	}
4447
4448	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
4449	nvlist_free(nvroot);
4450
4451	sav->sav_sync = B_FALSE;
4452}
4453
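/*
 * If any vdev configs are dirty, generate the pool config for this txg,
 * stash it in spa_config_syncing, and write it to the MOS config object.
 */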
4454static void
4455spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
4456{
4457	nvlist_t *config;
4458
4459	if (list_is_empty(&spa->spa_config_dirty_list))
4460		return;
4461
4462	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4463
4464	config = spa_config_generate(spa, spa->spa_root_vdev,
4465	    dmu_tx_get_txg(tx), B_FALSE);
4466
4467	spa_config_exit(spa, SCL_STATE, FTAG);
4468
4469	if (spa->spa_config_syncing)
4470		nvlist_free(spa->spa_config_syncing);
4471	spa->spa_config_syncing = config;
4472
4473	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
4474}
4475
4476/*
4477 * Set zpool properties.
4478 */
4479static void
4480spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
4481{
4482	spa_t *spa = arg1;
4483	objset_t *mos = spa->spa_meta_objset;
4484	nvlist_t *nvp = arg2;
4485	nvpair_t *elem;
4486	uint64_t intval = 0;	/* may be logged below before being set */
4487	char *strval;
4488	zpool_prop_t prop;
4489	const char *propname;
4490	zprop_type_t proptype;
4491
4492	mutex_enter(&spa->spa_props_lock);
4493
4494	elem = NULL;
4495	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
4496		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
4497		case ZPOOL_PROP_VERSION:
4498			/*
4499			 * Only set version for non-zpool-creation cases
4500			 * (set/import). spa_create() needs special care
4501			 * for version setting.
4502			 */
4503			if (tx->tx_txg != TXG_INITIAL) {
4504				VERIFY(nvpair_value_uint64(elem,
4505				    &intval) == 0);
4506				ASSERT(intval <= SPA_VERSION);
4507				ASSERT(intval >= spa_version(spa));
4508				spa->spa_uberblock.ub_version = intval;
4509				vdev_config_dirty(spa->spa_root_vdev);
4510			}
4511			break;
4512
4513		case ZPOOL_PROP_ALTROOT:
4514			/*
4515			 * 'altroot' is a non-persistent property. It should
4516			 * have been set temporarily at creation or import time.
4517			 */
4518			ASSERT(spa->spa_root != NULL);
4519			break;
4520
4521		case ZPOOL_PROP_CACHEFILE:
4522			/*
4523			 * 'cachefile' is also a non-persistent property.
4524			 */
4525			break;
4526		default:
4527			/*
4528			 * Set pool property values in the poolprops mos object.
4529			 */
4530			if (spa->spa_pool_props_object == 0) {
4531				VERIFY((spa->spa_pool_props_object =
4532				    zap_create(mos, DMU_OT_POOL_PROPS,
4533				    DMU_OT_NONE, 0, tx)) > 0);
4534
4535				VERIFY(zap_update(mos,
4536				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
4537				    8, 1, &spa->spa_pool_props_object, tx)
4538				    == 0);
4539			}
4540
4541			/* normalize the property name */
4542			propname = zpool_prop_to_name(prop);
4543			proptype = zpool_prop_get_type(prop);
4544
4545			if (nvpair_type(elem) == DATA_TYPE_STRING) {
4546				ASSERT(proptype == PROP_TYPE_STRING);
4547				VERIFY(nvpair_value_string(elem, &strval) == 0);
4548				VERIFY(zap_update(mos,
4549				    spa->spa_pool_props_object, propname,
4550				    1, strlen(strval) + 1, strval, tx) == 0);
4551
4552			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
4553				VERIFY(nvpair_value_uint64(elem, &intval) == 0);
4554
4555				if (proptype == PROP_TYPE_INDEX) {
4556					const char *unused;
4557					VERIFY(zpool_prop_index_to_string(
4558					    prop, intval, &unused) == 0);
4559				}
4560				VERIFY(zap_update(mos,
4561				    spa->spa_pool_props_object, propname,
4562				    8, 1, &intval, tx) == 0);
4563			} else {
4564				ASSERT(0); /* not allowed */
4565			}
4566
4567			switch (prop) {
4568			case ZPOOL_PROP_DELEGATION:
4569				spa->spa_delegation = intval;
4570				break;
4571			case ZPOOL_PROP_BOOTFS:
4572				spa->spa_bootfs = intval;
4573				break;
4574			case ZPOOL_PROP_FAILUREMODE:
4575				spa->spa_failmode = intval;
4576				break;
4577			case ZPOOL_PROP_AUTOEXPAND:
4578				spa->spa_autoexpand = intval;
4579				spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
4580				break;
4581			case ZPOOL_PROP_DEDUPDITTO:
4582				spa->spa_dedup_ditto = intval;
4583				break;
4584			default:
4585				break;
4586			}
4587		}
4588
4589		/* log internal history if this is not a zpool create */
4590		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
4591		    tx->tx_txg != TXG_INITIAL) {
4592			spa_history_internal_log(LOG_POOL_PROPSET,
4593			    spa, tx, cr, "%s %lld %s",
4594			    nvpair_name(elem), intval, spa_name(spa));
4595		}
4596	}
4597
4598	mutex_exit(&spa->spa_props_lock);
4599}
4600
4601/*
4602 * Sync the specified transaction group.  New blocks may be dirtied as
4603 * part of the process, so we iterate until it converges.
4604 */
4605void
4606spa_sync(spa_t *spa, uint64_t txg)
4607{
4608	dsl_pool_t *dp = spa->spa_dsl_pool;
4609	objset_t *mos = spa->spa_meta_objset;
4610	bplist_t *defer_bpl = &spa->spa_deferred_bplist;
4611	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
4612	vdev_t *rvd = spa->spa_root_vdev;
4613	vdev_t *vd;
4614	dmu_tx_t *tx;
4615	int error;
4616
4617	/*
4618	 * Lock out configuration changes.
4619	 */
4620	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4621
4622	spa->spa_syncing_txg = txg;
4623	spa->spa_sync_pass = 0;
4624
4625	/*
4626	 * If there are any pending vdev state changes, convert them
4627	 * into config changes that go out with this transaction group.
4628	 */
4629	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4630	while (list_head(&spa->spa_state_dirty_list) != NULL) {
4631		/*
4632		 * We need the write lock here because, for aux vdevs,
4633		 * calling vdev_config_dirty() modifies sav_config.
4634		 * This is ugly and will become unnecessary when we
4635		 * eliminate the aux vdev wart by integrating all vdevs
4636		 * into the root vdev tree.
4637		 */
4638		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
4639		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
4640		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
4641			vdev_state_clean(vd);
4642			vdev_config_dirty(vd);
4643		}
4644		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
4645		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
4646	}
4647	spa_config_exit(spa, SCL_STATE, FTAG);
4648
4649	VERIFY(0 == bplist_open(defer_bpl, mos, spa->spa_deferred_bplist_obj));
4650
4651	tx = dmu_tx_create_assigned(dp, txg);
4652
4653	/*
4654	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
4655	 * set spa_deflate if we have no raid-z vdevs.
4656	 */
4657	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
4658	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
4659		int i;
4660
4661		for (i = 0; i < rvd->vdev_children; i++) {
4662			vd = rvd->vdev_child[i];
4663			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
4664				break;
4665		}
4666		if (i == rvd->vdev_children) {
4667			spa->spa_deflate = TRUE;
4668			VERIFY(0 == zap_add(spa->spa_meta_objset,
4669			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
4670			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
4671		}
4672	}
4673
4674	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
4675	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
4676		dsl_pool_create_origin(dp, tx);
4677
4678		/* Keeping the origin open increases spa_minref */
4679		spa->spa_minref += 3;
4680	}
4681
4682	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
4683	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
4684		dsl_pool_upgrade_clones(dp, tx);
4685	}
4686
4687	/*
4688	 * If anything has changed in this txg, push the deferred frees
4689	 * from the previous txg.  If not, leave them alone so that we
4690	 * don't generate work on an otherwise idle system.
4691	 */
4692	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
4693	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
4694	    !txg_list_empty(&dp->dp_sync_tasks, txg))
4695		spa_sync_deferred_bplist(spa, defer_bpl, tx, txg);
4696
4697	/*
4698	 * Iterate to convergence.
4699	 */
4700	do {
4701		int pass = ++spa->spa_sync_pass;
4702
4703		spa_sync_config_object(spa, tx);
4704		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
4705		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
4706		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
4707		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
4708		spa_errlog_sync(spa, txg);
4709		dsl_pool_sync(dp, txg);
4710
4711		if (pass <= SYNC_PASS_DEFERRED_FREE) {
4712			zio_t *zio = zio_root(spa, NULL, NULL, 0);
4713			bplist_sync(free_bpl, spa_sync_free, zio, tx);
4714			VERIFY(zio_wait(zio) == 0);
4715		} else {
4716			bplist_sync(free_bpl, bplist_enqueue_cb, defer_bpl, tx);
4717		}
4718
4719		ddt_sync(spa, txg);
4720
4721		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) != NULL)
4722			vdev_sync(vd, txg);
4723
4724	} while (dmu_objset_is_dirty(mos, txg));
4725
4726	ASSERT(free_bpl->bpl_queue == NULL);
4727
4728	bplist_close(defer_bpl);
4729
4730	/*
4731	 * Rewrite the vdev configuration (which includes the uberblock)
4732	 * to commit the transaction group.
4733	 *
4734	 * If there are no dirty vdevs, we sync the uberblock to a few
4735	 * random top-level vdevs that are known to be visible in the
4736	 * config cache (see spa_vdev_add() for a complete description).
4737	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
4738	 */
4739	for (;;) {
4740		/*
4741		 * We hold SCL_STATE to prevent vdev open/close/etc.
4742		 * while we're attempting to write the vdev labels.
4743		 */
4744		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4745
4746		if (list_is_empty(&spa->spa_config_dirty_list)) {
4747			vdev_t *svd[SPA_DVAS_PER_BP];
4748			int svdcount = 0;
4749			int children = rvd->vdev_children;
4750			int c0 = spa_get_random(children);
4751
4752			for (int c = 0; c < children; c++) {
4753				vd = rvd->vdev_child[(c0 + c) % children];
4754				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
4755					continue;
4756				svd[svdcount++] = vd;
4757				if (svdcount == SPA_DVAS_PER_BP)
4758					break;
4759			}
4760			error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
4761			if (error != 0)
4762				error = vdev_config_sync(svd, svdcount, txg,
4763				    B_TRUE);
4764		} else {
4765			error = vdev_config_sync(rvd->vdev_child,
4766			    rvd->vdev_children, txg, B_FALSE);
4767			if (error != 0)
4768				error = vdev_config_sync(rvd->vdev_child,
4769				    rvd->vdev_children, txg, B_TRUE);
4770		}
4771
4772		spa_config_exit(spa, SCL_STATE, FTAG);
4773
4774		if (error == 0)
4775			break;
4776		zio_suspend(spa, NULL);
4777		zio_resume_wait(spa);
4778	}
4779	dmu_tx_commit(tx);
4780
4781	/*
4782	 * Clear the dirty config list.
4783	 */
4784	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
4785		vdev_config_clean(vd);
4786
4787	/*
4788	 * Now that the new config has synced transactionally,
4789	 * let it become visible to the config cache.
4790	 */
4791	if (spa->spa_config_syncing != NULL) {
4792		spa_config_set(spa, spa->spa_config_syncing);
4793		spa->spa_config_txg = txg;
4794		spa->spa_config_syncing = NULL;
4795	}
4796
4797	spa->spa_ubsync = spa->spa_uberblock;
4798
4799	dsl_pool_sync_done(dp, txg);
4800
4801	/*
4802	 * Update usable space statistics.
4803	 */
4804	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) != NULL)
4805		vdev_sync_done(vd, txg);
4806
4807	spa_update_dspace(spa);
4808
4809	/*
4810	 * It had better be the case that we didn't dirty anything
4811	 * since vdev_config_sync().
4812	 */
4813	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4814	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4815	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4816	ASSERT(defer_bpl->bpl_queue == NULL);
4817	ASSERT(free_bpl->bpl_queue == NULL);
4818
4819	spa->spa_sync_pass = 0;
4820
4821	spa_config_exit(spa, SCL_CONFIG, FTAG);
4822
4823	spa_handle_ignored_writes(spa);
4824
4825	/*
4826	 * If any async tasks have been requested, kick them off.
4827	 */
4828	spa_async_dispatch(spa);
4829}
4830
4831/*
4832 * Sync all pools.  We don't want to hold the namespace lock across these
4833 * operations, so we take a reference on the spa_t and drop the lock during the
4834 * sync.
4835 */
4836void
4837spa_sync_allpools(void)
4838{
4839	spa_t *spa = NULL;
4840	mutex_enter(&spa_namespace_lock);
4841	while ((spa = spa_next(spa)) != NULL) {
4842		if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
4843			continue;
4844		spa_open_ref(spa, FTAG);
4845		mutex_exit(&spa_namespace_lock);
4846		txg_wait_synced(spa_get_dsl(spa), 0);
4847		mutex_enter(&spa_namespace_lock);
4848		spa_close(spa, FTAG);
4849	}
4850	mutex_exit(&spa_namespace_lock);
4851}
4852
4853/*
4854 * ==========================================================================
4855 * Miscellaneous routines
4856 * ==========================================================================
4857 */
4858
4859/*
4860 * Remove all pools in the system.
4861 */
4862void
4863spa_evict_all(void)
4864{
4865	spa_t *spa;
4866
4867	/*
4868	 * Remove all cached state.  All pools should be closed now,
4869	 * so every spa in the AVL tree should be unreferenced.
4870	 */
4871	mutex_enter(&spa_namespace_lock);
4872	while ((spa = spa_next(NULL)) != NULL) {
4873		/*
4874		 * Stop async tasks.  The async thread may need to detach
4875		 * a device that's been replaced, which requires grabbing
4876		 * spa_namespace_lock, so we must drop it here.
4877		 */
4878		spa_open_ref(spa, FTAG);
4879		mutex_exit(&spa_namespace_lock);
4880		spa_async_suspend(spa);
4881		mutex_enter(&spa_namespace_lock);
4882		spa_close(spa, FTAG);
4883
4884		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4885			spa_unload(spa);
4886			spa_deactivate(spa);
4887		}
4888		spa_remove(spa);
4889	}
4890	mutex_exit(&spa_namespace_lock);
4891}
4892
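/*
 * Look up a vdev by GUID in the root vdev tree; if 'aux' is set, also
 * search the pool's l2cache and spare devices.
 */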
4893vdev_t *
4894spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
4895{
4896	vdev_t *vd;
4897	int i;
4898
4899	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4900		return (vd);
4901
4902	if (aux) {
4903		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4904			vd = spa->spa_l2cache.sav_vdevs[i];
4905			if (vd->vdev_guid == guid)
4906				return (vd);
4907		}
4908
4909		for (i = 0; i < spa->spa_spares.sav_count; i++) {
4910			vd = spa->spa_spares.sav_vdevs[i];
4911			if (vd->vdev_guid == guid)
4912				return (vd);
4913		}
4914	}
4915
4916	return (NULL);
4917}
4918
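/*
 * Raise the pool's on-disk version, dirty the config so the new version
 * is written out, and wait for the change to sync.
 */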
4919void
4920spa_upgrade(spa_t *spa, uint64_t version)
4921{
4922	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4923
4924	/*
4925	 * This should only be called for a non-faulted pool, and since a
4926	 * future version would result in an unopenable pool, this shouldn't be
4927	 * possible.
4928	 */
4929	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4930	ASSERT(version >= spa->spa_uberblock.ub_version);
4931
4932	spa->spa_uberblock.ub_version = version;
4933	vdev_config_dirty(spa->spa_root_vdev);
4934
4935	spa_config_exit(spa, SCL_ALL, FTAG);
4936
4937	txg_wait_synced(spa_get_dsl(spa), 0);
4938}
4939
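/*
 * Determine whether the given guid matches one of this pool's current
 * or pending spares.
 */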
4940boolean_t
4941spa_has_spare(spa_t *spa, uint64_t guid)
4942{
4943	int i;
4944	uint64_t spareguid;
4945	spa_aux_vdev_t *sav = &spa->spa_spares;
4946
4947	for (i = 0; i < sav->sav_count; i++)
4948		if (sav->sav_vdevs[i]->vdev_guid == guid)
4949			return (B_TRUE);
4950
4951	for (i = 0; i < sav->sav_npending; i++) {
4952		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4953		    &spareguid) == 0 && spareguid == guid)
4954			return (B_TRUE);
4955	}
4956
4957	return (B_FALSE);
4958}
4959
4960/*
4961 * Check if a pool has an active shared spare device.
4962 * Note: an active spare's refcount is 2: as a spare and as a replacement.
4963 */
4964static boolean_t
4965spa_has_active_shared_spare(spa_t *spa)
4966{
4967	int i, refcnt;
4968	uint64_t pool;
4969	spa_aux_vdev_t *sav = &spa->spa_spares;
4970
4971	for (i = 0; i < sav->sav_count; i++) {
4972		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
4973		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
4974		    refcnt > 2)
4975			return (B_TRUE);
4976	}
4977
4978	return (B_FALSE);
4979}
4980
4981/*
4982 * Post a sysevent corresponding to the given event.  The 'name' must be one of
4983 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
4984 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
4985 * in the userland libzpool, as we don't want consumers to misinterpret ztest
4986 * or zdb as real changes.
4987 */
4988void
4989spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
4990{
4991#ifdef _KERNEL
4992	sysevent_t		*ev;
4993	sysevent_attr_list_t	*attr = NULL;
4994	sysevent_value_t	value;
4995	sysevent_id_t		eid;
4996
4997	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
4998	    SE_SLEEP);
4999
5000	value.value_type = SE_DATA_TYPE_STRING;
5001	value.value.sv_string = spa_name(spa);
5002	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
5003		goto done;
5004
5005	value.value_type = SE_DATA_TYPE_UINT64;
5006	value.value.sv_uint64 = spa_guid(spa);
5007	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
5008		goto done;
5009
5010	if (vd) {
5011		value.value_type = SE_DATA_TYPE_UINT64;
5012		value.value.sv_uint64 = vd->vdev_guid;
5013		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
5014		    SE_SLEEP) != 0)
5015			goto done;
5016
5017		if (vd->vdev_path) {
5018			value.value_type = SE_DATA_TYPE_STRING;
5019			value.value.sv_string = vd->vdev_path;
5020			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
5021			    &value, SE_SLEEP) != 0)
5022				goto done;
5023		}
5024	}
5025
5026	if (sysevent_attach_attributes(ev, attr) != 0)
5027		goto done;
5028	attr = NULL;
5029
5030	(void) log_sysevent(ev, SE_SLEEP, &eid);
5031
5032done:
5033	if (attr)
5034		sysevent_free_attr(attr);
5035	sysevent_free(ev);
5036#endif
5037}
5038