xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa.c (revision 2e0c549e)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
2199653d4eSeschrock 
22fa9e4066Sahrens /*
23379c004dSEric Schrock  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24fa9e4066Sahrens  * Use is subject to license terms.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens /*
28fa9e4066Sahrens  * This file contains all the routines used when modifying on-disk SPA state.
29fa9e4066Sahrens  * This includes opening, importing, destroying, exporting a pool, and syncing a
30fa9e4066Sahrens  * pool.
31fa9e4066Sahrens  */
32fa9e4066Sahrens 
33fa9e4066Sahrens #include <sys/zfs_context.h>
34ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h>
35fa9e4066Sahrens #include <sys/spa_impl.h>
36fa9e4066Sahrens #include <sys/zio.h>
37fa9e4066Sahrens #include <sys/zio_checksum.h>
38fa9e4066Sahrens #include <sys/zio_compress.h>
39fa9e4066Sahrens #include <sys/dmu.h>
40fa9e4066Sahrens #include <sys/dmu_tx.h>
41fa9e4066Sahrens #include <sys/zap.h>
42fa9e4066Sahrens #include <sys/zil.h>
43fa9e4066Sahrens #include <sys/vdev_impl.h>
44fa9e4066Sahrens #include <sys/metaslab.h>
45fa9e4066Sahrens #include <sys/uberblock_impl.h>
46fa9e4066Sahrens #include <sys/txg.h>
47fa9e4066Sahrens #include <sys/avl.h>
48fa9e4066Sahrens #include <sys/dmu_traverse.h>
49b1b8ab34Slling #include <sys/dmu_objset.h>
50fa9e4066Sahrens #include <sys/unique.h>
51fa9e4066Sahrens #include <sys/dsl_pool.h>
52b1b8ab34Slling #include <sys/dsl_dataset.h>
53fa9e4066Sahrens #include <sys/dsl_dir.h>
54fa9e4066Sahrens #include <sys/dsl_prop.h>
55b1b8ab34Slling #include <sys/dsl_synctask.h>
56fa9e4066Sahrens #include <sys/fs/zfs.h>
57fa94a07fSbrendan #include <sys/arc.h>
58fa9e4066Sahrens #include <sys/callb.h>
5995173954Sek #include <sys/systeminfo.h>
6095173954Sek #include <sys/sunddi.h>
61e7cbe64fSgw #include <sys/spa_boot.h>
62fa9e4066Sahrens 
635679c89fSjv #ifdef	_KERNEL
645679c89fSjv #include <sys/zone.h>
655679c89fSjv #endif	/* _KERNEL */
665679c89fSjv 
67990b4856Slling #include "zfs_prop.h"
68b7b97454Sperrin #include "zfs_comutil.h"
69990b4856Slling 
/*
 * Thread-count interpretation modes for the per-(zio type, taskq type)
 * entries of zio_taskqs[] below.
 */
enum zti_modes {
	zti_mode_fixed,			/* value is # of threads (min 1) */
	zti_mode_online_percent,	/* value is % of online CPUs */
	zti_mode_tune,			/* fill from zio_taskq_tune_* */
	zti_nmodes			/* marker: number of valid modes */
};
76416e0cd8Sek 
/* Convenience initializers for the zti_nthreads[] entries below. */
#define	ZTI_THREAD_FIX(n)	{ zti_mode_fixed, (n) }
#define	ZTI_THREAD_PCT(n)	{ zti_mode_online_percent, (n) }
#define	ZTI_THREAD_TUNE		{ zti_mode_tune, 0 }

/* A single dedicated thread. */
#define	ZTI_THREAD_ONE		ZTI_THREAD_FIX(1)
/*
 * Per-zio-type taskq configuration: a name prefix plus, for each taskq
 * type (issue/intr), the mode and value used to size that taskq.
 */
typedef struct zio_taskq_info {
	const char *zti_name;			/* taskq name prefix */
	struct {
		enum zti_modes zti_mode;	/* how to interpret zti_value */
		uint_t zti_value;		/* thread count or CPU percent */
	} zti_nthreads[ZIO_TASKQ_TYPES];
} zio_taskq_info_t;
90*2e0c549eSJonathan Adams 
/* Name suffixes for the taskq types, indexed like zti_nthreads[]. */
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
				"issue",		"intr"
};
94*2e0c549eSJonathan Adams 
/*
 * Taskq sizing table, indexed by zio type.  ZTI_THREAD_TUNE entries are
 * resolved at spa_activate() time from zio_taskq_tune_{mode,value}.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES] = {
	/*			ISSUE			INTR		*/
	{ "spa_zio_null",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_read",	{ ZTI_THREAD_FIX(8),	ZTI_THREAD_TUNE } },
	{ "spa_zio_write",	{ ZTI_THREAD_TUNE,	ZTI_THREAD_FIX(8) } },
	{ "spa_zio_free",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_claim",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
	{ "spa_zio_ioctl",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
};

/* Tunables backing the ZTI_THREAD_TUNE entries above. */
enum zti_modes zio_taskq_tune_mode = zti_mode_online_percent;
uint_t zio_taskq_tune_value = 80;	/* #threads = 80% of # online CPUs */
107*2e0c549eSJonathan Adams 
108990b4856Slling static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
10989a89ebfSlling static boolean_t spa_has_active_shared_spare(spa_t *spa);
110990b4856Slling 
111990b4856Slling /*
112990b4856Slling  * ==========================================================================
113990b4856Slling  * SPA properties routines
114990b4856Slling  * ==========================================================================
115990b4856Slling  */
116990b4856Slling 
117990b4856Slling /*
118990b4856Slling  * Add a (source=src, propname=propval) list to an nvlist.
119990b4856Slling  */
1209d82f4f6Slling static void
121990b4856Slling spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
122990b4856Slling     uint64_t intval, zprop_source_t src)
123990b4856Slling {
124990b4856Slling 	const char *propname = zpool_prop_to_name(prop);
125990b4856Slling 	nvlist_t *propval;
126990b4856Slling 
1279d82f4f6Slling 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1289d82f4f6Slling 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
129990b4856Slling 
1309d82f4f6Slling 	if (strval != NULL)
1319d82f4f6Slling 		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
1329d82f4f6Slling 	else
1339d82f4f6Slling 		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
134990b4856Slling 
1359d82f4f6Slling 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
136990b4856Slling 	nvlist_free(propval);
137990b4856Slling }
138990b4856Slling 
/*
 * Get property values from the spa configuration.
 *
 * Adds the config-derived properties (name, size, used, available,
 * capacity, health, version, guid, altroot, cachefile) to *nvp.
 * Caller must hold spa_props_lock.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size;
	uint64_t used;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	/*
	 * Space and health properties only exist once the vdev tree has
	 * been assembled.
	 */
	if (spa->spa_root_vdev != NULL) {
		size = spa_get_space(spa);
		used = spa_get_alloc(spa);
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
		    size - used, src);

		/* Capacity as an integer percentage; guard division by 0. */
		cap = (size == 0) ? 0 : (used * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    spa->spa_root_vdev->vdev_state, src);

		/* Version source is "default" only if it matches the default. */
		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	/*
	 * Report a non-default cachefile setting ("none" if the pool is
	 * not cached at all) from the head of the config dirent list.
	 */
	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}
192990b4856Slling 
/*
 * Get zpool property values.
 *
 * Allocates *nvp and fills it with properties from both the in-core
 * spa config and the MOS pool property object.  Returns 0 on success;
 * on failure frees *nvp, sets it to NULL, and returns the error.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		/* Skip ZAP entries that don't map to known pool properties. */
		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				/*
				 * bootfs is stored as a dataset object
				 * number; translate it back to a name.
				 */
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					/*
					 * NOTE(review): this break only exits
					 * the switch; err is then overwritten
					 * by the next zap_cursor_retrieve().
					 * Confirm that dropping this error is
					 * intentional.
					 */
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	/* A completed cursor walk ends in ENOENT; anything else is real. */
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
299990b4856Slling 
/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 *
 * On success the bootfs entry (if any) is rewritten from a dataset name
 * string to the dataset's object number.  Returns 0 or an errno; may
 * return EIO as a special case for failmode on a suspended pool (see
 * the ZPOOL_PROP_FAILUREMODE case below).
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			/* Version may only move forward, up to SPA_VERSION. */
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			/* Boolean properties: only 0 or 1 are valid. */
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			/*
			 * The string value is replaced with the dataset's
			 * object number at the bottom of this function.
			 */
			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				/* An empty value resets bootfs to default. */
				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			/* Empty string and "none" are always acceptable. */
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			/* Otherwise the cachefile must be an absolute path. */
			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			/* Reject paths ending in "/", "/." or "/..". */
			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	/*
	 * Replace the bootfs dataset name with its object number so the
	 * sync task stores a stable identifier.
	 */
	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
446990b4856Slling 
447379c004dSEric Schrock void
448379c004dSEric Schrock spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
449379c004dSEric Schrock {
450379c004dSEric Schrock 	char *cachefile;
451379c004dSEric Schrock 	spa_config_dirent_t *dp;
452379c004dSEric Schrock 
453379c004dSEric Schrock 	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
454379c004dSEric Schrock 	    &cachefile) != 0)
455379c004dSEric Schrock 		return;
456379c004dSEric Schrock 
457379c004dSEric Schrock 	dp = kmem_alloc(sizeof (spa_config_dirent_t),
458379c004dSEric Schrock 	    KM_SLEEP);
459379c004dSEric Schrock 
460379c004dSEric Schrock 	if (cachefile[0] == '\0')
461379c004dSEric Schrock 		dp->scd_path = spa_strdup(spa_config_path);
462379c004dSEric Schrock 	else if (strcmp(cachefile, "none") == 0)
463379c004dSEric Schrock 		dp->scd_path = NULL;
464379c004dSEric Schrock 	else
465379c004dSEric Schrock 		dp->scd_path = spa_strdup(cachefile);
466379c004dSEric Schrock 
467379c004dSEric Schrock 	list_insert_head(&spa->spa_config_list, dp);
468379c004dSEric Schrock 	if (need_sync)
469379c004dSEric Schrock 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
470379c004dSEric Schrock }
471379c004dSEric Schrock 
472990b4856Slling int
473990b4856Slling spa_prop_set(spa_t *spa, nvlist_t *nvp)
474990b4856Slling {
475990b4856Slling 	int error;
476379c004dSEric Schrock 	nvpair_t *elem;
477379c004dSEric Schrock 	boolean_t need_sync = B_FALSE;
478379c004dSEric Schrock 	zpool_prop_t prop;
479990b4856Slling 
480990b4856Slling 	if ((error = spa_prop_validate(spa, nvp)) != 0)
481990b4856Slling 		return (error);
482990b4856Slling 
483379c004dSEric Schrock 	elem = NULL;
484379c004dSEric Schrock 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
485379c004dSEric Schrock 		if ((prop = zpool_name_to_prop(
486379c004dSEric Schrock 		    nvpair_name(elem))) == ZPROP_INVAL)
487379c004dSEric Schrock 			return (EINVAL);
488379c004dSEric Schrock 
489379c004dSEric Schrock 		if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
490379c004dSEric Schrock 			continue;
491379c004dSEric Schrock 
492379c004dSEric Schrock 		need_sync = B_TRUE;
493379c004dSEric Schrock 		break;
494379c004dSEric Schrock 	}
495379c004dSEric Schrock 
496379c004dSEric Schrock 	if (need_sync)
497379c004dSEric Schrock 		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
498379c004dSEric Schrock 		    spa, nvp, 3));
499379c004dSEric Schrock 	else
500379c004dSEric Schrock 		return (0);
501990b4856Slling }
502990b4856Slling 
503990b4856Slling /*
504990b4856Slling  * If the bootfs property value is dsobj, clear it.
505990b4856Slling  */
506990b4856Slling void
507990b4856Slling spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
508990b4856Slling {
509990b4856Slling 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
510990b4856Slling 		VERIFY(zap_remove(spa->spa_meta_objset,
511990b4856Slling 		    spa->spa_pool_props_object,
512990b4856Slling 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
513990b4856Slling 		spa->spa_bootfs = 0;
514990b4856Slling 	}
515990b4856Slling }
516990b4856Slling 
517fa9e4066Sahrens /*
518fa9e4066Sahrens  * ==========================================================================
519fa9e4066Sahrens  * SPA state manipulation (open/create/destroy/import/export)
520fa9e4066Sahrens  * ==========================================================================
521fa9e4066Sahrens  */
522fa9e4066Sahrens 
523ea8dc4b6Seschrock static int
524ea8dc4b6Seschrock spa_error_entry_compare(const void *a, const void *b)
525ea8dc4b6Seschrock {
526ea8dc4b6Seschrock 	spa_error_entry_t *sa = (spa_error_entry_t *)a;
527ea8dc4b6Seschrock 	spa_error_entry_t *sb = (spa_error_entry_t *)b;
528ea8dc4b6Seschrock 	int ret;
529ea8dc4b6Seschrock 
530ea8dc4b6Seschrock 	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
531ea8dc4b6Seschrock 	    sizeof (zbookmark_t));
532ea8dc4b6Seschrock 
533ea8dc4b6Seschrock 	if (ret < 0)
534ea8dc4b6Seschrock 		return (-1);
535ea8dc4b6Seschrock 	else if (ret > 0)
536ea8dc4b6Seschrock 		return (1);
537ea8dc4b6Seschrock 	else
538ea8dc4b6Seschrock 		return (0);
539ea8dc4b6Seschrock }
540ea8dc4b6Seschrock 
541ea8dc4b6Seschrock /*
542ea8dc4b6Seschrock  * Utility function which retrieves copies of the current logs and
543ea8dc4b6Seschrock  * re-initializes them in the process.
544ea8dc4b6Seschrock  */
545ea8dc4b6Seschrock void
546ea8dc4b6Seschrock spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
547ea8dc4b6Seschrock {
548ea8dc4b6Seschrock 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
549ea8dc4b6Seschrock 
550ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
551ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
552ea8dc4b6Seschrock 
553ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_scrub,
554ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
555ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
556ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_last,
557ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
558ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
559ea8dc4b6Seschrock }
560ea8dc4b6Seschrock 
561fa9e4066Sahrens /*
562fa9e4066Sahrens  * Activate an uninitialized pool.
563fa9e4066Sahrens  */
564fa9e4066Sahrens static void
5658ad4d6ddSJeff Bonwick spa_activate(spa_t *spa, int mode)
566fa9e4066Sahrens {
567fa9e4066Sahrens 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
568fa9e4066Sahrens 
569fa9e4066Sahrens 	spa->spa_state = POOL_STATE_ACTIVE;
5708ad4d6ddSJeff Bonwick 	spa->spa_mode = mode;
571fa9e4066Sahrens 
572d6e555bdSGeorge Wilson 	spa->spa_normal_class = metaslab_class_create(zfs_metaslab_ops);
573d6e555bdSGeorge Wilson 	spa->spa_log_class = metaslab_class_create(zfs_metaslab_ops);
574fa9e4066Sahrens 
575e14bb325SJeff Bonwick 	for (int t = 0; t < ZIO_TYPES; t++) {
576*2e0c549eSJonathan Adams 		const zio_taskq_info_t *ztip = &zio_taskqs[t];
577e14bb325SJeff Bonwick 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
578*2e0c549eSJonathan Adams 			enum zti_modes mode = ztip->zti_nthreads[q].zti_mode;
579*2e0c549eSJonathan Adams 			uint_t value = ztip->zti_nthreads[q].zti_value;
580*2e0c549eSJonathan Adams 			char name[32];
581*2e0c549eSJonathan Adams 
582*2e0c549eSJonathan Adams 			(void) snprintf(name, sizeof (name),
583*2e0c549eSJonathan Adams 			    "%s_%s", ztip->zti_name, zio_taskq_types[q]);
584*2e0c549eSJonathan Adams 
585*2e0c549eSJonathan Adams 			if (mode == zti_mode_tune) {
586*2e0c549eSJonathan Adams 				mode = zio_taskq_tune_mode;
587*2e0c549eSJonathan Adams 				value = zio_taskq_tune_value;
588*2e0c549eSJonathan Adams 				if (mode == zti_mode_tune)
589*2e0c549eSJonathan Adams 					mode = zti_mode_online_percent;
590*2e0c549eSJonathan Adams 			}
591*2e0c549eSJonathan Adams 
592*2e0c549eSJonathan Adams 			switch (mode) {
593*2e0c549eSJonathan Adams 			case zti_mode_fixed:
594*2e0c549eSJonathan Adams 				ASSERT3U(value, >=, 1);
595*2e0c549eSJonathan Adams 				value = MAX(value, 1);
596*2e0c549eSJonathan Adams 
597*2e0c549eSJonathan Adams 				spa->spa_zio_taskq[t][q] = taskq_create(name,
598*2e0c549eSJonathan Adams 				    value, maxclsyspri, 50, INT_MAX,
599*2e0c549eSJonathan Adams 				    TASKQ_PREPOPULATE);
600*2e0c549eSJonathan Adams 				break;
601*2e0c549eSJonathan Adams 
602*2e0c549eSJonathan Adams 			case zti_mode_online_percent:
603*2e0c549eSJonathan Adams 				spa->spa_zio_taskq[t][q] = taskq_create(name,
604*2e0c549eSJonathan Adams 				    value, maxclsyspri, 50, INT_MAX,
605*2e0c549eSJonathan Adams 				    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
606*2e0c549eSJonathan Adams 				break;
607*2e0c549eSJonathan Adams 
608*2e0c549eSJonathan Adams 			case zti_mode_tune:
609*2e0c549eSJonathan Adams 			default:
610*2e0c549eSJonathan Adams 				panic("unrecognized mode for "
611*2e0c549eSJonathan Adams 				    "zio_taskqs[%u]->zti_nthreads[%u] (%u:%u) "
612*2e0c549eSJonathan Adams 				    "in spa_activate()",
613*2e0c549eSJonathan Adams 				    t, q, mode, value);
614*2e0c549eSJonathan Adams 				break;
615*2e0c549eSJonathan Adams 			}
616e14bb325SJeff Bonwick 		}
617fa9e4066Sahrens 	}
618fa9e4066Sahrens 
619e14bb325SJeff Bonwick 	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
620e14bb325SJeff Bonwick 	    offsetof(vdev_t, vdev_config_dirty_node));
621e14bb325SJeff Bonwick 	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
622e14bb325SJeff Bonwick 	    offsetof(vdev_t, vdev_state_dirty_node));
623fa9e4066Sahrens 
624fa9e4066Sahrens 	txg_list_create(&spa->spa_vdev_txg_list,
625fa9e4066Sahrens 	    offsetof(struct vdev, vdev_txg_node));
626ea8dc4b6Seschrock 
627ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_scrub,
628ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
629ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
630ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_last,
631ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
632ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
633fa9e4066Sahrens }
634fa9e4066Sahrens 
/*
 * Opposite of spa_activate().
 *
 * Caller must already have stopped syncing and torn down the DSL pool
 * and vdev tree (asserted below).
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	/* Tear down the per-(zio type, taskq type) taskqs. */
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}
676fa9e4066Sahrens 
/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 *
 * On success *vdp points at the newly built subtree; on failure *vdp is
 * NULL, the partially built subtree is freed, and an errno is returned.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	/* Leaf vdevs have no children; nothing more to parse. */
	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	/* An interior vdev with no children array is acceptable. */
	if (error == ENOENT)
		return (0);

	/* Any other lookup failure invalidates the whole subtree. */
	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	/* Recursively parse each child; unwind everything on failure. */
	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
723fa9e4066Sahrens 
/*
 * Opposite of spa_load(): tear down a loaded pool's in-core state.
 *
 * The teardown proceeds in a fixed order: suspend async tasks, stop the
 * txg sync thread, wait for outstanding async I/O, close the DSL pool,
 * and then — with all config locks held as writer — purge the L2ARC and
 * free the vdev trees (root, spares, and l2cache).
 *
 * Caller must hold spa_namespace_lock (asserted below).
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/* Take all config locks as writer for the remaining teardown. */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Free the hot-spare vdevs, their pointer array, and the stashed
	 * spare config, leaving the sav structure empty.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	/* Same teardown for the l2cache aux vdevs. */
	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	/* Allow async tasks to run again on a subsequent load. */
	spa->spa_async_suspended = 0;

	spa_config_exit(spa, SCL_ALL, FTAG);
}
807fa9e4066Sahrens 
/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 *
 * Caller must hold all config locks as writer (asserted below).
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_spare_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	/* With no spares configured there is nothing to rebuild. */
	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.   For each spare, there is potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		/*
		 * If this spare's guid also appears in the active vdev
		 * tree, register and (if healthy) activate that copy.
		 */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pool would think the spare
			 * is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		/* A spare that fails to open is kept but not registered. */
		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
922fa94a07fSbrendan 
/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 *
 * Caller must hold all config locks as writer (asserted below).
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * NOTE: when sav_config is NULL, 'newvdevs' stays unassigned; that
	 * is safe because the code below only reaches it via the i < nl2cache
	 * loop (nl2cache is 0) or after the 'goto out' taken in that case.
	 */
	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	/* Detach the old device list so we can rebuild it. */
	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			/*
			 * Hand the usable region (past the vdev labels)
			 * over to the L2ARC.
			 */
			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	/* No stashed config to regenerate; sav_count is 0, so 'out' is a nop. */
	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
105499653d4eSeschrock 
105599653d4eSeschrock static int
105699653d4eSeschrock load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
105799653d4eSeschrock {
105899653d4eSeschrock 	dmu_buf_t *db;
105999653d4eSeschrock 	char *packed = NULL;
106099653d4eSeschrock 	size_t nvsize = 0;
106199653d4eSeschrock 	int error;
106299653d4eSeschrock 	*value = NULL;
106399653d4eSeschrock 
106499653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
106599653d4eSeschrock 	nvsize = *(uint64_t *)db->db_data;
106699653d4eSeschrock 	dmu_buf_rele(db, FTAG);
106799653d4eSeschrock 
106899653d4eSeschrock 	packed = kmem_alloc(nvsize, KM_SLEEP);
10697bfdf011SNeil Perrin 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
10707bfdf011SNeil Perrin 	    DMU_READ_PREFETCH);
107199653d4eSeschrock 	if (error == 0)
107299653d4eSeschrock 		error = nvlist_unpack(packed, nvsize, value, 0);
107399653d4eSeschrock 	kmem_free(packed, nvsize);
107499653d4eSeschrock 
107599653d4eSeschrock 	return (error);
107699653d4eSeschrock }
107799653d4eSeschrock 
10783d7072f8Seschrock /*
10793d7072f8Seschrock  * Checks to see if the given vdev could not be opened, in which case we post a
10803d7072f8Seschrock  * sysevent to notify the autoreplace code that the device has been removed.
10813d7072f8Seschrock  */
10823d7072f8Seschrock static void
10833d7072f8Seschrock spa_check_removed(vdev_t *vd)
10843d7072f8Seschrock {
10853d7072f8Seschrock 	int c;
10863d7072f8Seschrock 
10873d7072f8Seschrock 	for (c = 0; c < vd->vdev_children; c++)
10883d7072f8Seschrock 		spa_check_removed(vd->vdev_child[c]);
10893d7072f8Seschrock 
10903d7072f8Seschrock 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
10913d7072f8Seschrock 		zfs_post_autoreplace(vd->vdev_spa, vd);
10923d7072f8Seschrock 		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
10933d7072f8Seschrock 	}
10943d7072f8Seschrock }
10953d7072f8Seschrock 
1096b87f3af3Sperrin /*
1097b87f3af3Sperrin  * Check for missing log devices
1098b87f3af3Sperrin  */
1099b87f3af3Sperrin int
1100b87f3af3Sperrin spa_check_logs(spa_t *spa)
1101b87f3af3Sperrin {
1102b87f3af3Sperrin 	switch (spa->spa_log_state) {
1103b87f3af3Sperrin 	case SPA_LOG_MISSING:
1104b87f3af3Sperrin 		/* need to recheck in case slog has been restored */
1105b87f3af3Sperrin 	case SPA_LOG_UNKNOWN:
1106b87f3af3Sperrin 		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
1107b87f3af3Sperrin 		    DS_FIND_CHILDREN)) {
1108b87f3af3Sperrin 			spa->spa_log_state = SPA_LOG_MISSING;
1109b87f3af3Sperrin 			return (1);
1110b87f3af3Sperrin 		}
1111b87f3af3Sperrin 		break;
1112b87f3af3Sperrin 
1113b87f3af3Sperrin 	case SPA_LOG_CLEAR:
1114b87f3af3Sperrin 		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
1115b87f3af3Sperrin 		    DS_FIND_CHILDREN);
1116b87f3af3Sperrin 		break;
1117b87f3af3Sperrin 	}
1118b87f3af3Sperrin 	spa->spa_log_state = SPA_LOG_GOOD;
1119b87f3af3Sperrin 	return (0);
1120b87f3af3Sperrin }
1121b87f3af3Sperrin 
1122fa9e4066Sahrens /*
1123fa9e4066Sahrens  * Load an existing storage pool, using the pool's builtin spa_config as a
1124ea8dc4b6Seschrock  * source of configuration information.
1125fa9e4066Sahrens  */
1126fa9e4066Sahrens static int
1127ea8dc4b6Seschrock spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
1128fa9e4066Sahrens {
1129fa9e4066Sahrens 	int error = 0;
1130fa9e4066Sahrens 	nvlist_t *nvroot = NULL;
1131fa9e4066Sahrens 	vdev_t *rvd;
1132fa9e4066Sahrens 	uberblock_t *ub = &spa->spa_uberblock;
11330373e76bSbonwick 	uint64_t config_cache_txg = spa->spa_config_txg;
1134fa9e4066Sahrens 	uint64_t pool_guid;
113599653d4eSeschrock 	uint64_t version;
11363d7072f8Seschrock 	uint64_t autoreplace = 0;
11378ad4d6ddSJeff Bonwick 	int orig_mode = spa->spa_mode;
1138b87f3af3Sperrin 	char *ereport = FM_EREPORT_ZFS_POOL;
1139fa9e4066Sahrens 
11408ad4d6ddSJeff Bonwick 	/*
11418ad4d6ddSJeff Bonwick 	 * If this is an untrusted config, access the pool in read-only mode.
11428ad4d6ddSJeff Bonwick 	 * This prevents things like resilvering recently removed devices.
11438ad4d6ddSJeff Bonwick 	 */
11448ad4d6ddSJeff Bonwick 	if (!mosconfig)
11458ad4d6ddSJeff Bonwick 		spa->spa_mode = FREAD;
11468ad4d6ddSJeff Bonwick 
1147e14bb325SJeff Bonwick 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1148e14bb325SJeff Bonwick 
1149ea8dc4b6Seschrock 	spa->spa_load_state = state;
11500373e76bSbonwick 
1151fa9e4066Sahrens 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
1152a9926bf0Sbonwick 	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
1153ea8dc4b6Seschrock 		error = EINVAL;
1154ea8dc4b6Seschrock 		goto out;
1155ea8dc4b6Seschrock 	}
1156fa9e4066Sahrens 
115799653d4eSeschrock 	/*
115899653d4eSeschrock 	 * Versioning wasn't explicitly added to the label until later, so if
115999653d4eSeschrock 	 * it's not present treat it as the initial version.
116099653d4eSeschrock 	 */
116199653d4eSeschrock 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
1162e7437265Sahrens 		version = SPA_VERSION_INITIAL;
116399653d4eSeschrock 
1164a9926bf0Sbonwick 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
1165a9926bf0Sbonwick 	    &spa->spa_config_txg);
1166a9926bf0Sbonwick 
11670373e76bSbonwick 	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
1168ea8dc4b6Seschrock 	    spa_guid_exists(pool_guid, 0)) {
1169ea8dc4b6Seschrock 		error = EEXIST;
1170ea8dc4b6Seschrock 		goto out;
1171ea8dc4b6Seschrock 	}
1172fa9e4066Sahrens 
1173b5989ec7Seschrock 	spa->spa_load_guid = pool_guid;
1174b5989ec7Seschrock 
117554d692b7SGeorge Wilson 	/*
117654d692b7SGeorge Wilson 	 * Create "The Godfather" zio to hold all async IOs
117754d692b7SGeorge Wilson 	 */
117854d692b7SGeorge Wilson 	if (spa->spa_async_zio_root == NULL)
117954d692b7SGeorge Wilson 		spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
118054d692b7SGeorge Wilson 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
118154d692b7SGeorge Wilson 		    ZIO_FLAG_GODFATHER);
118254d692b7SGeorge Wilson 
1183fa9e4066Sahrens 	/*
118499653d4eSeschrock 	 * Parse the configuration into a vdev tree.  We explicitly set the
118599653d4eSeschrock 	 * value that will be returned by spa_version() since parsing the
118699653d4eSeschrock 	 * configuration requires knowing the version number.
1187fa9e4066Sahrens 	 */
1188e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
118999653d4eSeschrock 	spa->spa_ubsync.ub_version = version;
119099653d4eSeschrock 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
1191e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
1192fa9e4066Sahrens 
119399653d4eSeschrock 	if (error != 0)
1194ea8dc4b6Seschrock 		goto out;
1195fa9e4066Sahrens 
11960e34b6a7Sbonwick 	ASSERT(spa->spa_root_vdev == rvd);
1197fa9e4066Sahrens 	ASSERT(spa_guid(spa) == pool_guid);
1198fa9e4066Sahrens 
1199fa9e4066Sahrens 	/*
1200fa9e4066Sahrens 	 * Try to open all vdevs, loading each label in the process.
1201fa9e4066Sahrens 	 */
1202e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
12030bf246f5Smc 	error = vdev_open(rvd);
1204e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
12050bf246f5Smc 	if (error != 0)
1206ea8dc4b6Seschrock 		goto out;
1207fa9e4066Sahrens 
1208560e6e96Seschrock 	/*
120977e3a39cSMark J Musante 	 * We need to validate the vdev labels against the configuration that
121077e3a39cSMark J Musante 	 * we have in hand, which is dependent on the setting of mosconfig. If
121177e3a39cSMark J Musante 	 * mosconfig is true then we're validating the vdev labels based on
121277e3a39cSMark J Musante 	 * that config. Otherwise, we're validating against the cached config
121377e3a39cSMark J Musante 	 * (zpool.cache) that was read when we loaded the zfs module, and then
121477e3a39cSMark J Musante 	 * later we will recursively call spa_load() and validate against
121577e3a39cSMark J Musante 	 * the vdev config.
1216560e6e96Seschrock 	 */
121777e3a39cSMark J Musante 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
121877e3a39cSMark J Musante 	error = vdev_validate(rvd);
121977e3a39cSMark J Musante 	spa_config_exit(spa, SCL_ALL, FTAG);
122077e3a39cSMark J Musante 	if (error != 0)
122177e3a39cSMark J Musante 		goto out;
1222560e6e96Seschrock 
1223560e6e96Seschrock 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1224560e6e96Seschrock 		error = ENXIO;
1225560e6e96Seschrock 		goto out;
1226560e6e96Seschrock 	}
1227560e6e96Seschrock 
1228fa9e4066Sahrens 	/*
1229fa9e4066Sahrens 	 * Find the best uberblock.
1230fa9e4066Sahrens 	 */
1231e14bb325SJeff Bonwick 	vdev_uberblock_load(NULL, rvd, ub);
1232fa9e4066Sahrens 
1233fa9e4066Sahrens 	/*
1234fa9e4066Sahrens 	 * If we weren't able to find a single valid uberblock, return failure.
1235fa9e4066Sahrens 	 */
1236fa9e4066Sahrens 	if (ub->ub_txg == 0) {
1237eaca9bbdSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1238eaca9bbdSeschrock 		    VDEV_AUX_CORRUPT_DATA);
1239ea8dc4b6Seschrock 		error = ENXIO;
1240ea8dc4b6Seschrock 		goto out;
1241ea8dc4b6Seschrock 	}
1242ea8dc4b6Seschrock 
1243ea8dc4b6Seschrock 	/*
1244ea8dc4b6Seschrock 	 * If the pool is newer than the code, we can't open it.
1245ea8dc4b6Seschrock 	 */
1246e7437265Sahrens 	if (ub->ub_version > SPA_VERSION) {
1247eaca9bbdSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1248eaca9bbdSeschrock 		    VDEV_AUX_VERSION_NEWER);
1249ea8dc4b6Seschrock 		error = ENOTSUP;
1250ea8dc4b6Seschrock 		goto out;
1251fa9e4066Sahrens 	}
1252fa9e4066Sahrens 
1253fa9e4066Sahrens 	/*
1254fa9e4066Sahrens 	 * If the vdev guid sum doesn't match the uberblock, we have an
1255fa9e4066Sahrens 	 * incomplete configuration.
1256fa9e4066Sahrens 	 */
1257ecc2d604Sbonwick 	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
1258ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1259ea8dc4b6Seschrock 		    VDEV_AUX_BAD_GUID_SUM);
1260ea8dc4b6Seschrock 		error = ENXIO;
1261ea8dc4b6Seschrock 		goto out;
1262fa9e4066Sahrens 	}
1263fa9e4066Sahrens 
1264fa9e4066Sahrens 	/*
1265fa9e4066Sahrens 	 * Initialize internal SPA structures.
1266fa9e4066Sahrens 	 */
1267fa9e4066Sahrens 	spa->spa_state = POOL_STATE_ACTIVE;
1268fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
1269fa9e4066Sahrens 	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
1270ea8dc4b6Seschrock 	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
1271ea8dc4b6Seschrock 	if (error) {
1272ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1273ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1274ea8dc4b6Seschrock 		goto out;
1275ea8dc4b6Seschrock 	}
1276fa9e4066Sahrens 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
1277fa9e4066Sahrens 
1278ea8dc4b6Seschrock 	if (zap_lookup(spa->spa_meta_objset,
1279fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
1280ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
1281ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1282ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1283ea8dc4b6Seschrock 		error = EIO;
1284ea8dc4b6Seschrock 		goto out;
1285ea8dc4b6Seschrock 	}
1286fa9e4066Sahrens 
1287fa9e4066Sahrens 	if (!mosconfig) {
128899653d4eSeschrock 		nvlist_t *newconfig;
128995173954Sek 		uint64_t hostid;
1290fa9e4066Sahrens 
129199653d4eSeschrock 		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
1292ea8dc4b6Seschrock 			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1293ea8dc4b6Seschrock 			    VDEV_AUX_CORRUPT_DATA);
1294ea8dc4b6Seschrock 			error = EIO;
1295ea8dc4b6Seschrock 			goto out;
1296ea8dc4b6Seschrock 		}
1297fa9e4066Sahrens 
129877650510SLin Ling 		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
129977650510SLin Ling 		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
130095173954Sek 			char *hostname;
130195173954Sek 			unsigned long myhostid = 0;
130295173954Sek 
130395173954Sek 			VERIFY(nvlist_lookup_string(newconfig,
130495173954Sek 			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
130595173954Sek 
13065679c89fSjv #ifdef	_KERNEL
13075679c89fSjv 			myhostid = zone_get_hostid(NULL);
13085679c89fSjv #else	/* _KERNEL */
13095679c89fSjv 			/*
13105679c89fSjv 			 * We're emulating the system's hostid in userland, so
13115679c89fSjv 			 * we can't use zone_get_hostid().
13125679c89fSjv 			 */
131395173954Sek 			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
13145679c89fSjv #endif	/* _KERNEL */
131517194a52Slling 			if (hostid != 0 && myhostid != 0 &&
13165679c89fSjv 			    hostid != myhostid) {
131795173954Sek 				cmn_err(CE_WARN, "pool '%s' could not be "
131895173954Sek 				    "loaded as it was last accessed by "
131977650510SLin Ling 				    "another system (host: %s hostid: 0x%lx). "
132095173954Sek 				    "See: http://www.sun.com/msg/ZFS-8000-EY",
1321e14bb325SJeff Bonwick 				    spa_name(spa), hostname,
132295173954Sek 				    (unsigned long)hostid);
132395173954Sek 				error = EBADF;
132495173954Sek 				goto out;
132595173954Sek 			}
132695173954Sek 		}
132795173954Sek 
1328fa9e4066Sahrens 		spa_config_set(spa, newconfig);
1329fa9e4066Sahrens 		spa_unload(spa);
1330fa9e4066Sahrens 		spa_deactivate(spa);
13318ad4d6ddSJeff Bonwick 		spa_activate(spa, orig_mode);
1332fa9e4066Sahrens 
1333ea8dc4b6Seschrock 		return (spa_load(spa, newconfig, state, B_TRUE));
1334fa9e4066Sahrens 	}
1335fa9e4066Sahrens 
1336ea8dc4b6Seschrock 	if (zap_lookup(spa->spa_meta_objset,
1337fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
1338ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
1339ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1340ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1341ea8dc4b6Seschrock 		error = EIO;
1342ea8dc4b6Seschrock 		goto out;
1343ea8dc4b6Seschrock 	}
1344fa9e4066Sahrens 
134599653d4eSeschrock 	/*
134699653d4eSeschrock 	 * Load the bit that tells us to use the new accounting function
134799653d4eSeschrock 	 * (raid-z deflation).  If we have an older pool, this will not
134899653d4eSeschrock 	 * be present.
134999653d4eSeschrock 	 */
135099653d4eSeschrock 	error = zap_lookup(spa->spa_meta_objset,
135199653d4eSeschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
135299653d4eSeschrock 	    sizeof (uint64_t), 1, &spa->spa_deflate);
135399653d4eSeschrock 	if (error != 0 && error != ENOENT) {
135499653d4eSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
135599653d4eSeschrock 		    VDEV_AUX_CORRUPT_DATA);
135699653d4eSeschrock 		error = EIO;
135799653d4eSeschrock 		goto out;
135899653d4eSeschrock 	}
135999653d4eSeschrock 
1360fa9e4066Sahrens 	/*
1361ea8dc4b6Seschrock 	 * Load the persistent error log.  If we have an older pool, this will
1362ea8dc4b6Seschrock 	 * not be present.
1363fa9e4066Sahrens 	 */
1364ea8dc4b6Seschrock 	error = zap_lookup(spa->spa_meta_objset,
1365ea8dc4b6Seschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
1366ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
1367d80c45e0Sbonwick 	if (error != 0 && error != ENOENT) {
1368ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1369ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1370ea8dc4b6Seschrock 		error = EIO;
1371ea8dc4b6Seschrock 		goto out;
1372ea8dc4b6Seschrock 	}
1373ea8dc4b6Seschrock 
1374ea8dc4b6Seschrock 	error = zap_lookup(spa->spa_meta_objset,
1375ea8dc4b6Seschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
1376ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
1377ea8dc4b6Seschrock 	if (error != 0 && error != ENOENT) {
1378ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1379ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1380ea8dc4b6Seschrock 		error = EIO;
1381ea8dc4b6Seschrock 		goto out;
1382ea8dc4b6Seschrock 	}
1383ea8dc4b6Seschrock 
138406eeb2adSek 	/*
138506eeb2adSek 	 * Load the history object.  If we have an older pool, this
138606eeb2adSek 	 * will not be present.
138706eeb2adSek 	 */
138806eeb2adSek 	error = zap_lookup(spa->spa_meta_objset,
138906eeb2adSek 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
139006eeb2adSek 	    sizeof (uint64_t), 1, &spa->spa_history);
139106eeb2adSek 	if (error != 0 && error != ENOENT) {
139206eeb2adSek 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
139306eeb2adSek 		    VDEV_AUX_CORRUPT_DATA);
139406eeb2adSek 		error = EIO;
139506eeb2adSek 		goto out;
139606eeb2adSek 	}
139706eeb2adSek 
139899653d4eSeschrock 	/*
139999653d4eSeschrock 	 * Load any hot spares for this pool.
140099653d4eSeschrock 	 */
140199653d4eSeschrock 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1402fa94a07fSbrendan 	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
140399653d4eSeschrock 	if (error != 0 && error != ENOENT) {
140499653d4eSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
140599653d4eSeschrock 		    VDEV_AUX_CORRUPT_DATA);
140699653d4eSeschrock 		error = EIO;
140799653d4eSeschrock 		goto out;
140899653d4eSeschrock 	}
140999653d4eSeschrock 	if (error == 0) {
1410e7437265Sahrens 		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
1411fa94a07fSbrendan 		if (load_nvlist(spa, spa->spa_spares.sav_object,
1412fa94a07fSbrendan 		    &spa->spa_spares.sav_config) != 0) {
141399653d4eSeschrock 			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
141499653d4eSeschrock 			    VDEV_AUX_CORRUPT_DATA);
141599653d4eSeschrock 			error = EIO;
141699653d4eSeschrock 			goto out;
141799653d4eSeschrock 		}
141899653d4eSeschrock 
1419e14bb325SJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
142099653d4eSeschrock 		spa_load_spares(spa);
1421e14bb325SJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
142299653d4eSeschrock 	}
142399653d4eSeschrock 
1424fa94a07fSbrendan 	/*
1425fa94a07fSbrendan 	 * Load any level 2 ARC devices for this pool.
1426fa94a07fSbrendan 	 */
1427fa94a07fSbrendan 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1428fa94a07fSbrendan 	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
1429fa94a07fSbrendan 	    &spa->spa_l2cache.sav_object);
1430fa94a07fSbrendan 	if (error != 0 && error != ENOENT) {
1431fa94a07fSbrendan 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1432fa94a07fSbrendan 		    VDEV_AUX_CORRUPT_DATA);
1433fa94a07fSbrendan 		error = EIO;
1434fa94a07fSbrendan 		goto out;
1435fa94a07fSbrendan 	}
1436fa94a07fSbrendan 	if (error == 0) {
1437fa94a07fSbrendan 		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
1438fa94a07fSbrendan 		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
1439fa94a07fSbrendan 		    &spa->spa_l2cache.sav_config) != 0) {
1440fa94a07fSbrendan 			vdev_set_state(rvd, B_TRUE,
1441fa94a07fSbrendan 			    VDEV_STATE_CANT_OPEN,
1442fa94a07fSbrendan 			    VDEV_AUX_CORRUPT_DATA);
1443fa94a07fSbrendan 			error = EIO;
1444fa94a07fSbrendan 			goto out;
1445fa94a07fSbrendan 		}
1446fa94a07fSbrendan 
1447e14bb325SJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1448fa94a07fSbrendan 		spa_load_l2cache(spa);
1449e14bb325SJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
1450fa94a07fSbrendan 	}
1451fa94a07fSbrendan 
1452b87f3af3Sperrin 	if (spa_check_logs(spa)) {
1453b87f3af3Sperrin 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1454b87f3af3Sperrin 		    VDEV_AUX_BAD_LOG);
1455b87f3af3Sperrin 		error = ENXIO;
1456b87f3af3Sperrin 		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
1457b87f3af3Sperrin 		goto out;
1458b87f3af3Sperrin 	}
1459b87f3af3Sperrin 
1460b87f3af3Sperrin 
1461990b4856Slling 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
1462ecd6cf80Smarks 
1463b1b8ab34Slling 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1464b1b8ab34Slling 	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);
1465b1b8ab34Slling 
1466b1b8ab34Slling 	if (error && error != ENOENT) {
1467b1b8ab34Slling 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1468b1b8ab34Slling 		    VDEV_AUX_CORRUPT_DATA);
1469b1b8ab34Slling 		error = EIO;
1470b1b8ab34Slling 		goto out;
1471b1b8ab34Slling 	}
1472b1b8ab34Slling 
1473b1b8ab34Slling 	if (error == 0) {
1474b1b8ab34Slling 		(void) zap_lookup(spa->spa_meta_objset,
1475b1b8ab34Slling 		    spa->spa_pool_props_object,
14763d7072f8Seschrock 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
1477b1b8ab34Slling 		    sizeof (uint64_t), 1, &spa->spa_bootfs);
14783d7072f8Seschrock 		(void) zap_lookup(spa->spa_meta_objset,
14793d7072f8Seschrock 		    spa->spa_pool_props_object,
14803d7072f8Seschrock 		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
14813d7072f8Seschrock 		    sizeof (uint64_t), 1, &autoreplace);
1482ecd6cf80Smarks 		(void) zap_lookup(spa->spa_meta_objset,
1483ecd6cf80Smarks 		    spa->spa_pool_props_object,
1484ecd6cf80Smarks 		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
1485ecd6cf80Smarks 		    sizeof (uint64_t), 1, &spa->spa_delegation);
14860a4e9518Sgw 		(void) zap_lookup(spa->spa_meta_objset,
14870a4e9518Sgw 		    spa->spa_pool_props_object,
14880a4e9518Sgw 		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
14890a4e9518Sgw 		    sizeof (uint64_t), 1, &spa->spa_failmode);
1490b1b8ab34Slling 	}
1491b1b8ab34Slling 
14923d7072f8Seschrock 	/*
14933d7072f8Seschrock 	 * If the 'autoreplace' property is set, then post a resource notifying
14943d7072f8Seschrock 	 * the ZFS DE that it should not issue any faults for unopenable
14953d7072f8Seschrock 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
14963d7072f8Seschrock 	 * unopenable vdevs so that the normal autoreplace handler can take
14973d7072f8Seschrock 	 * over.
14983d7072f8Seschrock 	 */
1499b01c3b58Seschrock 	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
15003d7072f8Seschrock 		spa_check_removed(spa->spa_root_vdev);
15013d7072f8Seschrock 
1502ea8dc4b6Seschrock 	/*
1503560e6e96Seschrock 	 * Load the vdev state for all toplevel vdevs.
1504ea8dc4b6Seschrock 	 */
1505560e6e96Seschrock 	vdev_load(rvd);
15060373e76bSbonwick 
1507fa9e4066Sahrens 	/*
1508fa9e4066Sahrens 	 * Propagate the leaf DTLs we just loaded all the way up the tree.
1509fa9e4066Sahrens 	 */
1510e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1511fa9e4066Sahrens 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
1512e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
1513fa9e4066Sahrens 
1514fa9e4066Sahrens 	/*
1515fa9e4066Sahrens 	 * Check the state of the root vdev.  If it can't be opened, it
1516fa9e4066Sahrens 	 * indicates one or more toplevel vdevs are faulted.
1517fa9e4066Sahrens 	 */
1518ea8dc4b6Seschrock 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1519ea8dc4b6Seschrock 		error = ENXIO;
1520ea8dc4b6Seschrock 		goto out;
1521ea8dc4b6Seschrock 	}
1522fa9e4066Sahrens 
15238ad4d6ddSJeff Bonwick 	if (spa_writeable(spa)) {
15245dabedeeSbonwick 		dmu_tx_t *tx;
15250373e76bSbonwick 		int need_update = B_FALSE;
15268ad4d6ddSJeff Bonwick 
15278ad4d6ddSJeff Bonwick 		ASSERT(state != SPA_LOAD_TRYIMPORT);
15285dabedeeSbonwick 
15290373e76bSbonwick 		/*
15300373e76bSbonwick 		 * Claim log blocks that haven't been committed yet.
15310373e76bSbonwick 		 * This must all happen in a single txg.
15320373e76bSbonwick 		 */
15335dabedeeSbonwick 		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
1534fa9e4066Sahrens 		    spa_first_txg(spa));
1535e14bb325SJeff Bonwick 		(void) dmu_objset_find(spa_name(spa),
15360b69c2f0Sahrens 		    zil_claim, tx, DS_FIND_CHILDREN);
1537fa9e4066Sahrens 		dmu_tx_commit(tx);
1538fa9e4066Sahrens 
1539fa9e4066Sahrens 		spa->spa_sync_on = B_TRUE;
1540fa9e4066Sahrens 		txg_sync_start(spa->spa_dsl_pool);
1541fa9e4066Sahrens 
1542fa9e4066Sahrens 		/*
1543fa9e4066Sahrens 		 * Wait for all claims to sync.
1544fa9e4066Sahrens 		 */
1545fa9e4066Sahrens 		txg_wait_synced(spa->spa_dsl_pool, 0);
15460e34b6a7Sbonwick 
15470e34b6a7Sbonwick 		/*
15480373e76bSbonwick 		 * If the config cache is stale, or we have uninitialized
15490373e76bSbonwick 		 * metaslabs (see spa_vdev_add()), then update the config.
15500e34b6a7Sbonwick 		 */
15510373e76bSbonwick 		if (config_cache_txg != spa->spa_config_txg ||
15520373e76bSbonwick 		    state == SPA_LOAD_IMPORT)
15530373e76bSbonwick 			need_update = B_TRUE;
15540373e76bSbonwick 
15558ad4d6ddSJeff Bonwick 		for (int c = 0; c < rvd->vdev_children; c++)
15560373e76bSbonwick 			if (rvd->vdev_child[c]->vdev_ms_array == 0)
15570373e76bSbonwick 				need_update = B_TRUE;
15580e34b6a7Sbonwick 
15590e34b6a7Sbonwick 		/*
15600373e76bSbonwick 		 * Update the config cache asychronously in case we're the
15610373e76bSbonwick 		 * root pool, in which case the config cache isn't writable yet.
15620e34b6a7Sbonwick 		 */
15630373e76bSbonwick 		if (need_update)
15640373e76bSbonwick 			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
15658ad4d6ddSJeff Bonwick 
15668ad4d6ddSJeff Bonwick 		/*
15678ad4d6ddSJeff Bonwick 		 * Check all DTLs to see if anything needs resilvering.
15688ad4d6ddSJeff Bonwick 		 */
15698ad4d6ddSJeff Bonwick 		if (vdev_resilver_needed(rvd, NULL, NULL))
15708ad4d6ddSJeff Bonwick 			spa_async_request(spa, SPA_ASYNC_RESILVER);
1571fa9e4066Sahrens 	}
1572fa9e4066Sahrens 
1573ea8dc4b6Seschrock 	error = 0;
1574ea8dc4b6Seschrock out:
1575088f3894Sahrens 	spa->spa_minref = refcount_count(&spa->spa_refcount);
157699653d4eSeschrock 	if (error && error != EBADF)
1577b87f3af3Sperrin 		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
1578ea8dc4b6Seschrock 	spa->spa_load_state = SPA_LOAD_NONE;
1579ea8dc4b6Seschrock 	spa->spa_ena = 0;
1580ea8dc4b6Seschrock 
1581ea8dc4b6Seschrock 	return (error);
1582fa9e4066Sahrens }
1583fa9e4066Sahrens 
1584fa9e4066Sahrens /*
1585fa9e4066Sahrens  * Pool Open/Import
1586fa9e4066Sahrens  *
1587fa9e4066Sahrens  * The import case is identical to an open except that the configuration is sent
1588fa9e4066Sahrens  * down from userland, instead of grabbed from the configuration cache.  For the
1589fa9e4066Sahrens  * case of an open, the pool configuration will exist in the
15903d7072f8Seschrock  * POOL_STATE_UNINITIALIZED state.
1591fa9e4066Sahrens  *
1592fa9e4066Sahrens  * The stats information (gen/count/ustats) is used to gather vdev statistics at
1593fa9e4066Sahrens  * the same time open the pool, without having to keep around the spa_t in some
1594fa9e4066Sahrens  * ambiguous state.
1595fa9e4066Sahrens  */
1596fa9e4066Sahrens static int
1597fa9e4066Sahrens spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
1598fa9e4066Sahrens {
1599fa9e4066Sahrens 	spa_t *spa;
1600fa9e4066Sahrens 	int error;
1601fa9e4066Sahrens 	int locked = B_FALSE;
1602fa9e4066Sahrens 
1603fa9e4066Sahrens 	*spapp = NULL;
1604fa9e4066Sahrens 
1605fa9e4066Sahrens 	/*
1606fa9e4066Sahrens 	 * As disgusting as this is, we need to support recursive calls to this
1607fa9e4066Sahrens 	 * function because dsl_dir_open() is called during spa_load(), and ends
1608fa9e4066Sahrens 	 * up calling spa_open() again.  The real fix is to figure out how to
1609fa9e4066Sahrens 	 * avoid dsl_dir_open() calling this in the first place.
1610fa9e4066Sahrens 	 */
1611fa9e4066Sahrens 	if (mutex_owner(&spa_namespace_lock) != curthread) {
1612fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
1613fa9e4066Sahrens 		locked = B_TRUE;
1614fa9e4066Sahrens 	}
1615fa9e4066Sahrens 
1616fa9e4066Sahrens 	if ((spa = spa_lookup(pool)) == NULL) {
1617fa9e4066Sahrens 		if (locked)
1618fa9e4066Sahrens 			mutex_exit(&spa_namespace_lock);
1619fa9e4066Sahrens 		return (ENOENT);
1620fa9e4066Sahrens 	}
1621fa9e4066Sahrens 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
1622fa9e4066Sahrens 
16238ad4d6ddSJeff Bonwick 		spa_activate(spa, spa_mode_global);
1624fa9e4066Sahrens 
16250373e76bSbonwick 		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);
1626fa9e4066Sahrens 
1627fa9e4066Sahrens 		if (error == EBADF) {
1628fa9e4066Sahrens 			/*
1629560e6e96Seschrock 			 * If vdev_validate() returns failure (indicated by
1630560e6e96Seschrock 			 * EBADF), it indicates that one of the vdevs indicates
1631560e6e96Seschrock 			 * that the pool has been exported or destroyed.  If
1632560e6e96Seschrock 			 * this is the case, the config cache is out of sync and
1633560e6e96Seschrock 			 * we should remove the pool from the namespace.
1634fa9e4066Sahrens 			 */
1635fa9e4066Sahrens 			spa_unload(spa);
1636fa9e4066Sahrens 			spa_deactivate(spa);
1637c5904d13Seschrock 			spa_config_sync(spa, B_TRUE, B_TRUE);
1638fa9e4066Sahrens 			spa_remove(spa);
1639fa9e4066Sahrens 			if (locked)
1640fa9e4066Sahrens 				mutex_exit(&spa_namespace_lock);
1641fa9e4066Sahrens 			return (ENOENT);
1642ea8dc4b6Seschrock 		}
1643ea8dc4b6Seschrock 
1644ea8dc4b6Seschrock 		if (error) {
1645fa9e4066Sahrens 			/*
1646fa9e4066Sahrens 			 * We can't open the pool, but we still have useful
1647fa9e4066Sahrens 			 * information: the state of each vdev after the
1648fa9e4066Sahrens 			 * attempted vdev_open().  Return this to the user.
1649fa9e4066Sahrens 			 */
1650e14bb325SJeff Bonwick 			if (config != NULL && spa->spa_root_vdev != NULL)
1651fa9e4066Sahrens 				*config = spa_config_generate(spa, NULL, -1ULL,
1652fa9e4066Sahrens 				    B_TRUE);
1653fa9e4066Sahrens 			spa_unload(spa);
1654fa9e4066Sahrens 			spa_deactivate(spa);
1655ea8dc4b6Seschrock 			spa->spa_last_open_failed = B_TRUE;
1656fa9e4066Sahrens 			if (locked)
1657fa9e4066Sahrens 				mutex_exit(&spa_namespace_lock);
1658fa9e4066Sahrens 			*spapp = NULL;
1659fa9e4066Sahrens 			return (error);
1660ea8dc4b6Seschrock 		} else {
1661ea8dc4b6Seschrock 			spa->spa_last_open_failed = B_FALSE;
1662fa9e4066Sahrens 		}
1663fa9e4066Sahrens 	}
1664fa9e4066Sahrens 
1665fa9e4066Sahrens 	spa_open_ref(spa, tag);
16663d7072f8Seschrock 
1667fa9e4066Sahrens 	if (locked)
1668fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1669fa9e4066Sahrens 
1670fa9e4066Sahrens 	*spapp = spa;
1671fa9e4066Sahrens 
1672e14bb325SJeff Bonwick 	if (config != NULL)
1673fa9e4066Sahrens 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
1674fa9e4066Sahrens 
1675fa9e4066Sahrens 	return (0);
1676fa9e4066Sahrens }
1677fa9e4066Sahrens 
1678fa9e4066Sahrens int
1679fa9e4066Sahrens spa_open(const char *name, spa_t **spapp, void *tag)
1680fa9e4066Sahrens {
1681fa9e4066Sahrens 	return (spa_open_common(name, spapp, tag, NULL));
1682fa9e4066Sahrens }
1683fa9e4066Sahrens 
1684ea8dc4b6Seschrock /*
1685ea8dc4b6Seschrock  * Lookup the given spa_t, incrementing the inject count in the process,
1686ea8dc4b6Seschrock  * preventing it from being exported or destroyed.
1687ea8dc4b6Seschrock  */
1688ea8dc4b6Seschrock spa_t *
1689ea8dc4b6Seschrock spa_inject_addref(char *name)
1690ea8dc4b6Seschrock {
1691ea8dc4b6Seschrock 	spa_t *spa;
1692ea8dc4b6Seschrock 
1693ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1694ea8dc4b6Seschrock 	if ((spa = spa_lookup(name)) == NULL) {
1695ea8dc4b6Seschrock 		mutex_exit(&spa_namespace_lock);
1696ea8dc4b6Seschrock 		return (NULL);
1697ea8dc4b6Seschrock 	}
1698ea8dc4b6Seschrock 	spa->spa_inject_ref++;
1699ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1700ea8dc4b6Seschrock 
1701ea8dc4b6Seschrock 	return (spa);
1702ea8dc4b6Seschrock }
1703ea8dc4b6Seschrock 
1704ea8dc4b6Seschrock void
1705ea8dc4b6Seschrock spa_inject_delref(spa_t *spa)
1706ea8dc4b6Seschrock {
1707ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1708ea8dc4b6Seschrock 	spa->spa_inject_ref--;
1709ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1710ea8dc4b6Seschrock }
1711ea8dc4b6Seschrock 
1712fa94a07fSbrendan /*
1713fa94a07fSbrendan  * Add spares device information to the nvlist.
1714fa94a07fSbrendan  */
171599653d4eSeschrock static void
171699653d4eSeschrock spa_add_spares(spa_t *spa, nvlist_t *config)
171799653d4eSeschrock {
171899653d4eSeschrock 	nvlist_t **spares;
171999653d4eSeschrock 	uint_t i, nspares;
172099653d4eSeschrock 	nvlist_t *nvroot;
172199653d4eSeschrock 	uint64_t guid;
172299653d4eSeschrock 	vdev_stat_t *vs;
172399653d4eSeschrock 	uint_t vsc;
172439c23413Seschrock 	uint64_t pool;
172599653d4eSeschrock 
17266809eb4eSEric Schrock 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
17276809eb4eSEric Schrock 
1728fa94a07fSbrendan 	if (spa->spa_spares.sav_count == 0)
172999653d4eSeschrock 		return;
173099653d4eSeschrock 
173199653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist(config,
173299653d4eSeschrock 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1733fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
173499653d4eSeschrock 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
173599653d4eSeschrock 	if (nspares != 0) {
173699653d4eSeschrock 		VERIFY(nvlist_add_nvlist_array(nvroot,
173799653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
173899653d4eSeschrock 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
173999653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
174099653d4eSeschrock 
174199653d4eSeschrock 		/*
174299653d4eSeschrock 		 * Go through and find any spares which have since been
174399653d4eSeschrock 		 * repurposed as an active spare.  If this is the case, update
174499653d4eSeschrock 		 * their status appropriately.
174599653d4eSeschrock 		 */
174699653d4eSeschrock 		for (i = 0; i < nspares; i++) {
174799653d4eSeschrock 			VERIFY(nvlist_lookup_uint64(spares[i],
174899653d4eSeschrock 			    ZPOOL_CONFIG_GUID, &guid) == 0);
174989a89ebfSlling 			if (spa_spare_exists(guid, &pool, NULL) &&
175089a89ebfSlling 			    pool != 0ULL) {
175199653d4eSeschrock 				VERIFY(nvlist_lookup_uint64_array(
175299653d4eSeschrock 				    spares[i], ZPOOL_CONFIG_STATS,
175399653d4eSeschrock 				    (uint64_t **)&vs, &vsc) == 0);
175499653d4eSeschrock 				vs->vs_state = VDEV_STATE_CANT_OPEN;
175599653d4eSeschrock 				vs->vs_aux = VDEV_AUX_SPARED;
175699653d4eSeschrock 			}
175799653d4eSeschrock 		}
175899653d4eSeschrock 	}
175999653d4eSeschrock }
176099653d4eSeschrock 
1761fa94a07fSbrendan /*
1762fa94a07fSbrendan  * Add l2cache device information to the nvlist, including vdev stats.
1763fa94a07fSbrendan  */
1764fa94a07fSbrendan static void
1765fa94a07fSbrendan spa_add_l2cache(spa_t *spa, nvlist_t *config)
1766fa94a07fSbrendan {
1767fa94a07fSbrendan 	nvlist_t **l2cache;
1768fa94a07fSbrendan 	uint_t i, j, nl2cache;
1769fa94a07fSbrendan 	nvlist_t *nvroot;
1770fa94a07fSbrendan 	uint64_t guid;
1771fa94a07fSbrendan 	vdev_t *vd;
1772fa94a07fSbrendan 	vdev_stat_t *vs;
1773fa94a07fSbrendan 	uint_t vsc;
1774fa94a07fSbrendan 
17756809eb4eSEric Schrock 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
17766809eb4eSEric Schrock 
1777fa94a07fSbrendan 	if (spa->spa_l2cache.sav_count == 0)
1778fa94a07fSbrendan 		return;
1779fa94a07fSbrendan 
1780fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist(config,
1781fa94a07fSbrendan 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1782fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
1783fa94a07fSbrendan 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1784fa94a07fSbrendan 	if (nl2cache != 0) {
1785fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot,
1786fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1787fa94a07fSbrendan 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
1788fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1789fa94a07fSbrendan 
1790fa94a07fSbrendan 		/*
1791fa94a07fSbrendan 		 * Update level 2 cache device stats.
1792fa94a07fSbrendan 		 */
1793fa94a07fSbrendan 
1794fa94a07fSbrendan 		for (i = 0; i < nl2cache; i++) {
1795fa94a07fSbrendan 			VERIFY(nvlist_lookup_uint64(l2cache[i],
1796fa94a07fSbrendan 			    ZPOOL_CONFIG_GUID, &guid) == 0);
1797fa94a07fSbrendan 
1798fa94a07fSbrendan 			vd = NULL;
1799fa94a07fSbrendan 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
1800fa94a07fSbrendan 				if (guid ==
1801fa94a07fSbrendan 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
1802fa94a07fSbrendan 					vd = spa->spa_l2cache.sav_vdevs[j];
1803fa94a07fSbrendan 					break;
1804fa94a07fSbrendan 				}
1805fa94a07fSbrendan 			}
1806fa94a07fSbrendan 			ASSERT(vd != NULL);
1807fa94a07fSbrendan 
1808fa94a07fSbrendan 			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
1809fa94a07fSbrendan 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
1810fa94a07fSbrendan 			vdev_get_stats(vd, vs);
1811fa94a07fSbrendan 		}
1812fa94a07fSbrendan 	}
1813fa94a07fSbrendan }
1814fa94a07fSbrendan 
1815fa9e4066Sahrens int
1816ea8dc4b6Seschrock spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
1817fa9e4066Sahrens {
1818fa9e4066Sahrens 	int error;
1819fa9e4066Sahrens 	spa_t *spa;
1820fa9e4066Sahrens 
1821fa9e4066Sahrens 	*config = NULL;
1822fa9e4066Sahrens 	error = spa_open_common(name, &spa, FTAG, config);
1823fa9e4066Sahrens 
18246809eb4eSEric Schrock 	if (spa != NULL) {
18256809eb4eSEric Schrock 		/*
18266809eb4eSEric Schrock 		 * This still leaves a window of inconsistency where the spares
18276809eb4eSEric Schrock 		 * or l2cache devices could change and the config would be
18286809eb4eSEric Schrock 		 * self-inconsistent.
18296809eb4eSEric Schrock 		 */
18306809eb4eSEric Schrock 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1831ea8dc4b6Seschrock 
18326809eb4eSEric Schrock 		if (*config != NULL) {
1833e14bb325SJeff Bonwick 			VERIFY(nvlist_add_uint64(*config,
18346809eb4eSEric Schrock 			    ZPOOL_CONFIG_ERRCOUNT,
18356809eb4eSEric Schrock 			    spa_get_errlog_size(spa)) == 0);
1836e14bb325SJeff Bonwick 
18376809eb4eSEric Schrock 			if (spa_suspended(spa))
18386809eb4eSEric Schrock 				VERIFY(nvlist_add_uint64(*config,
18396809eb4eSEric Schrock 				    ZPOOL_CONFIG_SUSPENDED,
18406809eb4eSEric Schrock 				    spa->spa_failmode) == 0);
18416809eb4eSEric Schrock 
18426809eb4eSEric Schrock 			spa_add_spares(spa, *config);
18436809eb4eSEric Schrock 			spa_add_l2cache(spa, *config);
18446809eb4eSEric Schrock 		}
184599653d4eSeschrock 	}
184699653d4eSeschrock 
1847ea8dc4b6Seschrock 	/*
1848ea8dc4b6Seschrock 	 * We want to get the alternate root even for faulted pools, so we cheat
1849ea8dc4b6Seschrock 	 * and call spa_lookup() directly.
1850ea8dc4b6Seschrock 	 */
1851ea8dc4b6Seschrock 	if (altroot) {
1852ea8dc4b6Seschrock 		if (spa == NULL) {
1853ea8dc4b6Seschrock 			mutex_enter(&spa_namespace_lock);
1854ea8dc4b6Seschrock 			spa = spa_lookup(name);
1855ea8dc4b6Seschrock 			if (spa)
1856ea8dc4b6Seschrock 				spa_altroot(spa, altroot, buflen);
1857ea8dc4b6Seschrock 			else
1858ea8dc4b6Seschrock 				altroot[0] = '\0';
1859ea8dc4b6Seschrock 			spa = NULL;
1860ea8dc4b6Seschrock 			mutex_exit(&spa_namespace_lock);
1861ea8dc4b6Seschrock 		} else {
1862ea8dc4b6Seschrock 			spa_altroot(spa, altroot, buflen);
1863ea8dc4b6Seschrock 		}
1864ea8dc4b6Seschrock 	}
1865ea8dc4b6Seschrock 
18666809eb4eSEric Schrock 	if (spa != NULL) {
18676809eb4eSEric Schrock 		spa_config_exit(spa, SCL_CONFIG, FTAG);
1868fa9e4066Sahrens 		spa_close(spa, FTAG);
18696809eb4eSEric Schrock 	}
1870fa9e4066Sahrens 
1871fa9e4066Sahrens 	return (error);
1872fa9e4066Sahrens }
1873fa9e4066Sahrens 
187499653d4eSeschrock /*
1875fa94a07fSbrendan  * Validate that the auxiliary device array is well formed.  We must have an
1876fa94a07fSbrendan  * array of nvlists, each which describes a valid leaf vdev.  If this is an
1877fa94a07fSbrendan  * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
1878fa94a07fSbrendan  * specified, as long as they are well-formed.
187999653d4eSeschrock  */
188099653d4eSeschrock static int
1881fa94a07fSbrendan spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
1882fa94a07fSbrendan     spa_aux_vdev_t *sav, const char *config, uint64_t version,
1883fa94a07fSbrendan     vdev_labeltype_t label)
188499653d4eSeschrock {
1885fa94a07fSbrendan 	nvlist_t **dev;
1886fa94a07fSbrendan 	uint_t i, ndev;
188799653d4eSeschrock 	vdev_t *vd;
188899653d4eSeschrock 	int error;
188999653d4eSeschrock 
1890e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1891e14bb325SJeff Bonwick 
189299653d4eSeschrock 	/*
1893fa94a07fSbrendan 	 * It's acceptable to have no devs specified.
189499653d4eSeschrock 	 */
1895fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
189699653d4eSeschrock 		return (0);
189799653d4eSeschrock 
1898fa94a07fSbrendan 	if (ndev == 0)
189999653d4eSeschrock 		return (EINVAL);
190099653d4eSeschrock 
190199653d4eSeschrock 	/*
1902fa94a07fSbrendan 	 * Make sure the pool is formatted with a version that supports this
1903fa94a07fSbrendan 	 * device type.
190499653d4eSeschrock 	 */
1905fa94a07fSbrendan 	if (spa_version(spa) < version)
190699653d4eSeschrock 		return (ENOTSUP);
190799653d4eSeschrock 
190839c23413Seschrock 	/*
1909fa94a07fSbrendan 	 * Set the pending device list so we correctly handle device in-use
191039c23413Seschrock 	 * checking.
191139c23413Seschrock 	 */
1912fa94a07fSbrendan 	sav->sav_pending = dev;
1913fa94a07fSbrendan 	sav->sav_npending = ndev;
191439c23413Seschrock 
1915fa94a07fSbrendan 	for (i = 0; i < ndev; i++) {
1916fa94a07fSbrendan 		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
191799653d4eSeschrock 		    mode)) != 0)
191839c23413Seschrock 			goto out;
191999653d4eSeschrock 
192099653d4eSeschrock 		if (!vd->vdev_ops->vdev_op_leaf) {
192199653d4eSeschrock 			vdev_free(vd);
192239c23413Seschrock 			error = EINVAL;
192339c23413Seschrock 			goto out;
192499653d4eSeschrock 		}
192599653d4eSeschrock 
1926fa94a07fSbrendan 		/*
1927e14bb325SJeff Bonwick 		 * The L2ARC currently only supports disk devices in
1928e14bb325SJeff Bonwick 		 * kernel context.  For user-level testing, we allow it.
1929fa94a07fSbrendan 		 */
1930e14bb325SJeff Bonwick #ifdef _KERNEL
1931fa94a07fSbrendan 		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
1932fa94a07fSbrendan 		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
1933fa94a07fSbrendan 			error = ENOTBLK;
1934fa94a07fSbrendan 			goto out;
1935fa94a07fSbrendan 		}
1936e14bb325SJeff Bonwick #endif
193799653d4eSeschrock 		vd->vdev_top = vd;
193899653d4eSeschrock 
193939c23413Seschrock 		if ((error = vdev_open(vd)) == 0 &&
1940fa94a07fSbrendan 		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
1941fa94a07fSbrendan 			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
194239c23413Seschrock 			    vd->vdev_guid) == 0);
194339c23413Seschrock 		}
194499653d4eSeschrock 
194599653d4eSeschrock 		vdev_free(vd);
194639c23413Seschrock 
1947fa94a07fSbrendan 		if (error &&
1948fa94a07fSbrendan 		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
194939c23413Seschrock 			goto out;
195039c23413Seschrock 		else
195139c23413Seschrock 			error = 0;
195299653d4eSeschrock 	}
195399653d4eSeschrock 
195439c23413Seschrock out:
1955fa94a07fSbrendan 	sav->sav_pending = NULL;
1956fa94a07fSbrendan 	sav->sav_npending = 0;
195739c23413Seschrock 	return (error);
195899653d4eSeschrock }
195999653d4eSeschrock 
1960fa94a07fSbrendan static int
1961fa94a07fSbrendan spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1962fa94a07fSbrendan {
1963fa94a07fSbrendan 	int error;
1964fa94a07fSbrendan 
1965e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1966e14bb325SJeff Bonwick 
1967fa94a07fSbrendan 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1968fa94a07fSbrendan 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
1969fa94a07fSbrendan 	    VDEV_LABEL_SPARE)) != 0) {
1970fa94a07fSbrendan 		return (error);
1971fa94a07fSbrendan 	}
1972fa94a07fSbrendan 
1973fa94a07fSbrendan 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1974fa94a07fSbrendan 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
1975fa94a07fSbrendan 	    VDEV_LABEL_L2CACHE));
1976fa94a07fSbrendan }
1977fa94a07fSbrendan 
1978fa94a07fSbrendan static void
1979fa94a07fSbrendan spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
1980fa94a07fSbrendan     const char *config)
1981fa94a07fSbrendan {
1982fa94a07fSbrendan 	int i;
1983fa94a07fSbrendan 
1984fa94a07fSbrendan 	if (sav->sav_config != NULL) {
1985fa94a07fSbrendan 		nvlist_t **olddevs;
1986fa94a07fSbrendan 		uint_t oldndevs;
1987fa94a07fSbrendan 		nvlist_t **newdevs;
1988fa94a07fSbrendan 
1989fa94a07fSbrendan 		/*
1990fa94a07fSbrendan 		 * Generate new dev list by concatentating with the
1991fa94a07fSbrendan 		 * current dev list.
1992fa94a07fSbrendan 		 */
1993fa94a07fSbrendan 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
1994fa94a07fSbrendan 		    &olddevs, &oldndevs) == 0);
1995fa94a07fSbrendan 
1996fa94a07fSbrendan 		newdevs = kmem_alloc(sizeof (void *) *
1997fa94a07fSbrendan 		    (ndevs + oldndevs), KM_SLEEP);
1998fa94a07fSbrendan 		for (i = 0; i < oldndevs; i++)
1999fa94a07fSbrendan 			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
2000fa94a07fSbrendan 			    KM_SLEEP) == 0);
2001fa94a07fSbrendan 		for (i = 0; i < ndevs; i++)
2002fa94a07fSbrendan 			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
2003fa94a07fSbrendan 			    KM_SLEEP) == 0);
2004fa94a07fSbrendan 
2005fa94a07fSbrendan 		VERIFY(nvlist_remove(sav->sav_config, config,
2006fa94a07fSbrendan 		    DATA_TYPE_NVLIST_ARRAY) == 0);
2007fa94a07fSbrendan 
2008fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
2009fa94a07fSbrendan 		    config, newdevs, ndevs + oldndevs) == 0);
2010fa94a07fSbrendan 		for (i = 0; i < oldndevs + ndevs; i++)
2011fa94a07fSbrendan 			nvlist_free(newdevs[i]);
2012fa94a07fSbrendan 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
2013fa94a07fSbrendan 	} else {
2014fa94a07fSbrendan 		/*
2015fa94a07fSbrendan 		 * Generate a new dev list.
2016fa94a07fSbrendan 		 */
2017fa94a07fSbrendan 		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
2018fa94a07fSbrendan 		    KM_SLEEP) == 0);
2019fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
2020fa94a07fSbrendan 		    devs, ndevs) == 0);
2021fa94a07fSbrendan 	}
2022fa94a07fSbrendan }
2023fa94a07fSbrendan 
2024fa94a07fSbrendan /*
2025fa94a07fSbrendan  * Stop and drop level 2 ARC devices
2026fa94a07fSbrendan  */
2027fa94a07fSbrendan void
2028fa94a07fSbrendan spa_l2cache_drop(spa_t *spa)
2029fa94a07fSbrendan {
2030fa94a07fSbrendan 	vdev_t *vd;
2031fa94a07fSbrendan 	int i;
2032fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
2033fa94a07fSbrendan 
2034fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++) {
2035fa94a07fSbrendan 		uint64_t pool;
2036fa94a07fSbrendan 
2037fa94a07fSbrendan 		vd = sav->sav_vdevs[i];
2038fa94a07fSbrendan 		ASSERT(vd != NULL);
2039fa94a07fSbrendan 
20408ad4d6ddSJeff Bonwick 		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
20418ad4d6ddSJeff Bonwick 		    pool != 0ULL && l2arc_vdev_present(vd))
2042fa94a07fSbrendan 			l2arc_remove_vdev(vd);
2043fa94a07fSbrendan 		if (vd->vdev_isl2cache)
2044fa94a07fSbrendan 			spa_l2cache_remove(vd);
2045fa94a07fSbrendan 		vdev_clear_stats(vd);
2046fa94a07fSbrendan 		(void) vdev_close(vd);
2047fa94a07fSbrendan 	}
2048fa94a07fSbrendan }
2049fa94a07fSbrendan 
/*
 * Pool Creation
 */

/*
 * Create a brand-new pool named 'pool' from the vdev tree in 'nvroot'.
 *
 * 'props' (may be NULL) supplies initial pool properties; 'history_str'
 * (may be NULL) is the command line recorded in the pool history;
 * 'zplprops' carries ZPL properties for the pool's root dataset.
 *
 * Returns 0 on success, EEXIST if the name is already in use, or an
 * errno from property validation / vdev creation.  On failure the
 * partially-constructed spa_t is unloaded, deactivated, and removed
 * from the namespace.  Takes and drops spa_namespace_lock internally.
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str, nvlist_t *zplprops)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa, spa_mode_global);

	/* The first sync will happen at TXG_INITIAL; ub_txg is one less. */
	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Default to the newest on-disk version unless the caller asked
	 * for a specific (older) one via the "version" property.
	 */
	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create "The Godfather" zio to hold all async IOs
	 */
	if (spa->spa_async_zio_root == NULL)
		spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	/* A pool with no allocatable (data) vdevs cannot be created. */
	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* All the MOS setup below is done in a single assigned transaction. */
	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props != NULL) {
		spa_configfile_set(spa, props, B_FALSE);
		spa_sync_props(spa, props, CRED(), tx);
	}

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	/* Record the new pool in the config cachefile. */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);

	/*
	 * Remember the baseline reference count; anything above this
	 * later means the pool is actively in use.
	 */
	spa->spa_minref = refcount_count(&spa->spa_refcount);

	mutex_exit(&spa_namespace_lock);

	return (0);
}
2255fa9e4066Sahrens 
2256e7cbe64fSgw #ifdef _KERNEL
2257e7cbe64fSgw /*
2258e7cbe64fSgw  * Build a "root" vdev for a top level vdev read in from a rootpool
2259e7cbe64fSgw  * device label.
2260e7cbe64fSgw  */
2261e7cbe64fSgw static void
2262e7cbe64fSgw spa_build_rootpool_config(nvlist_t *config)
2263e7cbe64fSgw {
2264e7cbe64fSgw 	nvlist_t *nvtop, *nvroot;
2265e7cbe64fSgw 	uint64_t pgid;
2266e7cbe64fSgw 
2267e7cbe64fSgw 	/*
2268e7cbe64fSgw 	 * Add this top-level vdev to the child array.
2269e7cbe64fSgw 	 */
2270e7cbe64fSgw 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
2271e7cbe64fSgw 	    == 0);
2272e7cbe64fSgw 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
2273e7cbe64fSgw 	    == 0);
2274e7cbe64fSgw 
2275e7cbe64fSgw 	/*
2276e7cbe64fSgw 	 * Put this pool's top-level vdevs into a root vdev.
2277e7cbe64fSgw 	 */
2278e7cbe64fSgw 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2279e7cbe64fSgw 	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
2280e7cbe64fSgw 	    == 0);
2281e7cbe64fSgw 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
2282e7cbe64fSgw 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
2283e7cbe64fSgw 	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2284e7cbe64fSgw 	    &nvtop, 1) == 0);
2285e7cbe64fSgw 
2286e7cbe64fSgw 	/*
2287e7cbe64fSgw 	 * Replace the existing vdev_tree with the new root vdev in
2288e7cbe64fSgw 	 * this pool's configuration (remove the old, add the new).
2289e7cbe64fSgw 	 */
2290e7cbe64fSgw 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2291e7cbe64fSgw 	nvlist_free(nvroot);
2292e7cbe64fSgw }
2293e7cbe64fSgw 
2294e7cbe64fSgw /*
2295e7cbe64fSgw  * Get the root pool information from the root disk, then import the root pool
2296e7cbe64fSgw  * during the system boot up time.
2297e7cbe64fSgw  */
2298f940fbb1SLin Ling extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
2299e7cbe64fSgw 
2300051aabe6Staylor int
2301051aabe6Staylor spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
2302e7cbe64fSgw     uint64_t *besttxg)
2303e7cbe64fSgw {
2304e7cbe64fSgw 	nvlist_t *config;
2305e7cbe64fSgw 	uint64_t txg;
2306f940fbb1SLin Ling 	int error;
2307e7cbe64fSgw 
2308f940fbb1SLin Ling 	if (error = vdev_disk_read_rootlabel(devpath, devid, &config))
2309f940fbb1SLin Ling 		return (error);
2310e7cbe64fSgw 
2311e7cbe64fSgw 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2312e7cbe64fSgw 
2313051aabe6Staylor 	if (bestconf != NULL)
2314e7cbe64fSgw 		*bestconf = config;
2315f940fbb1SLin Ling 	else
2316f940fbb1SLin Ling 		nvlist_free(config);
2317051aabe6Staylor 	*besttxg = txg;
2318051aabe6Staylor 	return (0);
2319e7cbe64fSgw }
2320e7cbe64fSgw 
2321e7cbe64fSgw boolean_t
2322e7cbe64fSgw spa_rootdev_validate(nvlist_t *nv)
2323e7cbe64fSgw {
2324e7cbe64fSgw 	uint64_t ival;
2325e7cbe64fSgw 
2326e7cbe64fSgw 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2327e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2328e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2329e7cbe64fSgw 		return (B_FALSE);
2330e7cbe64fSgw 
2331e7cbe64fSgw 	return (B_TRUE);
2332e7cbe64fSgw }
2333e7cbe64fSgw 
2334051aabe6Staylor 
2335051aabe6Staylor /*
2336051aabe6Staylor  * Given the boot device's physical path or devid, check if the device
2337051aabe6Staylor  * is in a valid state.  If so, return the configuration from the vdev
2338051aabe6Staylor  * label.
2339051aabe6Staylor  */
2340051aabe6Staylor int
2341051aabe6Staylor spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
2342051aabe6Staylor {
2343051aabe6Staylor 	nvlist_t *conf = NULL;
2344051aabe6Staylor 	uint64_t txg = 0;
2345051aabe6Staylor 	nvlist_t *nvtop, **child;
2346051aabe6Staylor 	char *type;
2347051aabe6Staylor 	char *bootpath = NULL;
2348051aabe6Staylor 	uint_t children, c;
2349051aabe6Staylor 	char *tmp;
2350f940fbb1SLin Ling 	int error;
2351051aabe6Staylor 
2352051aabe6Staylor 	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
2353051aabe6Staylor 		*tmp = '\0';
2354f940fbb1SLin Ling 	if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) {
2355051aabe6Staylor 		cmn_err(CE_NOTE, "error reading device label");
2356f940fbb1SLin Ling 		return (error);
2357051aabe6Staylor 	}
2358051aabe6Staylor 	if (txg == 0) {
2359051aabe6Staylor 		cmn_err(CE_NOTE, "this device is detached");
2360051aabe6Staylor 		nvlist_free(conf);
2361051aabe6Staylor 		return (EINVAL);
2362051aabe6Staylor 	}
2363051aabe6Staylor 
2364051aabe6Staylor 	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
2365051aabe6Staylor 	    &nvtop) == 0);
2366051aabe6Staylor 	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);
2367051aabe6Staylor 
2368051aabe6Staylor 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2369051aabe6Staylor 		if (spa_rootdev_validate(nvtop)) {
2370051aabe6Staylor 			goto out;
2371051aabe6Staylor 		} else {
2372051aabe6Staylor 			nvlist_free(conf);
2373051aabe6Staylor 			return (EINVAL);
2374051aabe6Staylor 		}
2375051aabe6Staylor 	}
2376051aabe6Staylor 
2377051aabe6Staylor 	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);
2378051aabe6Staylor 
2379051aabe6Staylor 	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
2380051aabe6Staylor 	    &child, &children) == 0);
2381051aabe6Staylor 
2382051aabe6Staylor 	/*
2383051aabe6Staylor 	 * Go thru vdevs in the mirror to see if the given device
2384051aabe6Staylor 	 * has the most recent txg. Only the device with the most
2385051aabe6Staylor 	 * recent txg has valid information and should be booted.
2386051aabe6Staylor 	 */
2387051aabe6Staylor 	for (c = 0; c < children; c++) {
2388051aabe6Staylor 		char *cdevid, *cpath;
2389051aabe6Staylor 		uint64_t tmptxg;
2390051aabe6Staylor 
2391ffb5616eSLin Ling 		cpath = NULL;
2392ffb5616eSLin Ling 		cdevid = NULL;
2393051aabe6Staylor 		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
2394ffb5616eSLin Ling 		    &cpath) != 0 && nvlist_lookup_string(child[c],
2395ffb5616eSLin Ling 		    ZPOOL_CONFIG_DEVID, &cdevid) != 0)
2396051aabe6Staylor 			return (EINVAL);
2397f4565e39SLin Ling 		if ((spa_check_rootconf(cpath, cdevid, NULL,
2398f4565e39SLin Ling 		    &tmptxg) == 0) && (tmptxg > txg)) {
2399051aabe6Staylor 			txg = tmptxg;
2400051aabe6Staylor 			VERIFY(nvlist_lookup_string(child[c],
2401051aabe6Staylor 			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
2402051aabe6Staylor 		}
2403051aabe6Staylor 	}
2404051aabe6Staylor 
2405051aabe6Staylor 	/* Does the best device match the one we've booted from? */
2406051aabe6Staylor 	if (bootpath) {
2407051aabe6Staylor 		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
2408051aabe6Staylor 		return (EINVAL);
2409051aabe6Staylor 	}
2410051aabe6Staylor out:
2411051aabe6Staylor 	*bestconf = conf;
2412051aabe6Staylor 	return (0);
2413051aabe6Staylor }
2414051aabe6Staylor 
/*
 * Import a root pool.
 *
 * For x86. devpath_list will consist of devid and/or physpath name of
 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
 * The GRUB "findroot" command will return the vdev we should boot.
 *
 * For Sparc, devpath_list consists the physpath name of the booting device
 * no matter the rootpool is a single device pool or a mirrored pool.
 * e.g.
 *	"/pci@1f,0/ide@d/disk@0,0:a"
 *
 * Returns 0 on success, or the error from reading/validating the label.
 * Takes spa_namespace_lock internally.
 */
int
spa_import_rootpool(char *devpath, char *devid)
{
	nvlist_t *conf = NULL;
	char *pname;
	int error;
	spa_t *spa;

	/*
	 * Get the vdev pathname and configuation from the most
	 * recently updated vdev (highest txg).
	 */
	if (error = spa_get_rootconf(devpath, devid, &conf))
		goto msg_out;

	/*
	 * Add type "root" vdev to the config.
	 */
	spa_build_rootpool_config(conf);

	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pname)) != NULL) {
		/*
		 * Remove the existing root pool from the namespace so that we
		 * can replace it with the correct config we just read in.
		 */
		spa_remove(spa);
	}

	spa = spa_add(pname, NULL);

	/* Mark as the root pool; dup the config rather than consuming it. */
	spa->spa_is_root = B_TRUE;
	VERIFY(nvlist_dup(conf, &spa->spa_config, 0) == 0);
	mutex_exit(&spa_namespace_lock);

	nvlist_free(conf);
	return (0);

msg_out:
	cmn_err(CE_NOTE, "\n"
	    "  ***************************************************  \n"
	    "  *  This device is not bootable!                   *  \n"
	    "  *  It is either offlined or detached or faulted.  *  \n"
	    "  *  Please try to boot from a different device.    *  \n"
	    "  ***************************************************  ");

	return (error);
}
2477e7cbe64fSgw #endif
2478e7cbe64fSgw 
2479e7cbe64fSgw /*
24806809eb4eSEric Schrock  * Take a pool and insert it into the namespace as if it had been loaded at
24816809eb4eSEric Schrock  * boot.
2482e7cbe64fSgw  */
2483e7cbe64fSgw int
24846809eb4eSEric Schrock spa_import_verbatim(const char *pool, nvlist_t *config, nvlist_t *props)
2485e7cbe64fSgw {
24866809eb4eSEric Schrock 	spa_t *spa;
24876809eb4eSEric Schrock 	char *altroot = NULL;
24886809eb4eSEric Schrock 
24896809eb4eSEric Schrock 	mutex_enter(&spa_namespace_lock);
24906809eb4eSEric Schrock 	if (spa_lookup(pool) != NULL) {
24916809eb4eSEric Schrock 		mutex_exit(&spa_namespace_lock);
24926809eb4eSEric Schrock 		return (EEXIST);
24936809eb4eSEric Schrock 	}
24946809eb4eSEric Schrock 
24956809eb4eSEric Schrock 	(void) nvlist_lookup_string(props,
24966809eb4eSEric Schrock 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
24976809eb4eSEric Schrock 	spa = spa_add(pool, altroot);
24986809eb4eSEric Schrock 
24996809eb4eSEric Schrock 	VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
25006809eb4eSEric Schrock 
25016809eb4eSEric Schrock 	if (props != NULL)
25026809eb4eSEric Schrock 		spa_configfile_set(spa, props, B_FALSE);
25036809eb4eSEric Schrock 
25046809eb4eSEric Schrock 	spa_config_sync(spa, B_FALSE, B_TRUE);
25056809eb4eSEric Schrock 
25066809eb4eSEric Schrock 	mutex_exit(&spa_namespace_lock);
25076809eb4eSEric Schrock 
25086809eb4eSEric Schrock 	return (0);
2509e7cbe64fSgw }
2510e7cbe64fSgw 
/*
 * Import a non-root pool into the system.
 *
 * 'config' is the pool configuration to import (typically produced by
 * spa_tryimport()); 'props' optionally carries pool properties to set.
 * Returns 0 on success, EEXIST if the name is already in use, or the
 * error from spa_load()/spa_prop_set().  On failure the half-imported
 * pool is torn back down.  Takes spa_namespace_lock internally.
 */
int
spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
{
	spa_t *spa;
	char *altroot = NULL;
	int error;
	nvlist_t *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Create and initialize the spa structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa, spa_mode_global);

	/*
	 * Pass off the heavy lifting to spa_load().  Pass TRUE for mosconfig
	 * because the user-supplied config is actually the one to trust when
	 * doing an import.
	 */
	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity
	 * anymore, and conflicts with spa_has_spare().
	 */
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
		spa_load_spares(spa);
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
		spa_load_l2cache(spa);
	}

	/*
	 * Validate any spare and l2cache devices named in the supplied
	 * config, even if spa_load() itself failed.
	 */
	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_SPARE);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_L2CACHE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (props != NULL)
		spa_configfile_set(spa, props, B_FALSE);

	/* On any failure so far, unwind everything and drop the pool. */
	if (error != 0 || (props && spa_writeable(spa) &&
	    (error = spa_prop_set(spa, props)))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Override any spares and level 2 cache devices as specified by
	 * the user, as these may have correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_spares.sav_config)
			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		if (spa->spa_l2cache.sav_config)
			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	if (spa_writeable(spa)) {
		/*
		 * Update the config cache to include the newly-imported pool.
		 */
		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, B_FALSE);
	}

	mutex_exit(&spa_namespace_lock);

	return (0);
}
2632c5904d13Seschrock 
2633c5904d13Seschrock 
2634fa9e4066Sahrens /*
2635fa9e4066Sahrens  * This (illegal) pool name is used when temporarily importing a spa_t in order
2636fa9e4066Sahrens  * to get the vdev stats associated with the imported devices.
2637fa9e4066Sahrens  */
2638fa9e4066Sahrens #define	TRYIMPORT_NAME	"$import"
2639fa9e4066Sahrens 
/*
 * Probe the pool described by 'tryconfig' without actually importing it.
 *
 * The pool is temporarily loaded read-only under the reserved name
 * TRYIMPORT_NAME, a fresh config describing its current state is
 * generated, and the pool is completely unloaded again before return.
 * Returns the new config (caller frees), or NULL if 'tryconfig' lacks
 * a pool name or pool state.  Takes spa_namespace_lock internally.
 */
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = NULL;
	char *poolname;
	spa_t *spa;
	uint64_t state;
	int error;

	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (NULL);

	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (NULL);

	/*
	 * Create and initialize the spa structure.
	 */
	mutex_enter(&spa_namespace_lock);
	spa = spa_add(TRYIMPORT_NAME, NULL);
	spa_activate(spa, FREAD);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	error = spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

	/*
	 * If 'tryconfig' was at least parsable, return the current config.
	 */
	if (spa->spa_root_vdev != NULL) {
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		/* Report the real pool name, not TRYIMPORT_NAME. */
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
		    spa->spa_uberblock.ub_timestamp) == 0);

		/*
		 * If the bootfs property exists on this pool then we
		 * copy it out so that external consumers can tell which
		 * pools are bootable.
		 */
		if ((!error || error == EEXIST) && spa->spa_bootfs) {
			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

			/*
			 * We have to play games with the name since the
			 * pool was opened as TRYIMPORT_NAME.
			 */
			if (dsl_dsobj_to_dsname(spa_name(spa),
			    spa->spa_bootfs, tmpname) == 0) {
				char *cp;
				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

				/* Re-root the dataset name under 'poolname'. */
				cp = strchr(tmpname, '/');
				if (cp == NULL) {
					(void) strlcpy(dsname, tmpname,
					    MAXPATHLEN);
				} else {
					(void) snprintf(dsname, MAXPATHLEN,
					    "%s/%s", poolname, ++cp);
				}
				VERIFY(nvlist_add_string(config,
				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
				kmem_free(dsname, MAXPATHLEN);
			}
			kmem_free(tmpname, MAXPATHLEN);
		}

		/*
		 * Add the list of hot spares and level 2 cache devices.
		 */
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		spa_add_spares(spa, config);
		spa_add_l2cache(spa, config);
		spa_config_exit(spa, SCL_CONFIG, FTAG);
	}

	/* Tear the temporary pool back down before returning. */
	spa_unload(spa);
	spa_deactivate(spa);
	spa_remove(spa);
	mutex_exit(&spa_namespace_lock);

	return (config);
}
2729fa9e4066Sahrens 
/*
 * Pool export/destroy
 *
 * The act of destroying or exporting a pool is very simple.  We make sure there
 * is no more pending I/O and any references to the pool are gone.  Then, we
 * update the pool state and sync all the labels to disk, removing the
 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
 * we don't sync the labels or remove the configuration cache.
 *
 * 'pool' names the pool; 'new_state' is the pool state to leave behind
 * (DESTROYED, EXPORTED, or UNINITIALIZED for a reset).  If 'oldconfig' is
 * non-NULL, a duplicate of the pool's last config is returned through it.
 * Returns 0 on success; EROFS, ENOENT, EBUSY, or EXDEV on failure.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
    boolean_t force, boolean_t hardforce)
{
	spa_t *spa;

	if (oldconfig)
		*oldconfig = NULL;

	/* Export/destroy requires that ZFS was loaded writable. */
	if (!(spa_mode_global & FWRITE))
		return (EROFS);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}

	/*
	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
	 * reacquire the namespace lock, and see if we can export.
	 */
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
	spa_async_suspend(spa);
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);

	/*
	 * The pool will be in core if it's openable,
	 * in which case we can modify its state.
	 */
	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
		/*
		 * Objsets may be open only because they're dirty, so we
		 * have to force it to sync before checking spa_refcnt.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * A pool cannot be exported or destroyed if there are active
		 * references.  If we are resetting a pool, allow references by
		 * fault injection handlers.
		 */
		if (!spa_refcount_zero(spa) ||
		    (spa->spa_inject_ref != 0 &&
		    new_state != POOL_STATE_UNINITIALIZED)) {
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EBUSY);
		}

		/*
		 * A pool cannot be exported if it has an active shared spare.
		 * This is to prevent other pools stealing the active spare
		 * from an exported pool.  The user can still override this
		 * check by forcing the export.
		 */
		if (!force && new_state == POOL_STATE_EXPORTED &&
		    spa_has_active_shared_spare(spa)) {
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EXDEV);
		}

		/*
		 * We want this to be reflected on every label,
		 * so mark them all dirty.  spa_unload() will do the
		 * final sync that pushes these changes out.
		 */
		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
			spa->spa_state = new_state;
			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
			vdev_config_dirty(spa->spa_root_vdev);
			spa_config_exit(spa, SCL_ALL, FTAG);
		}
	}

	/*
	 * NOTE(review): this fires ESC_ZFS_POOL_DESTROY on the export and
	 * reset paths as well as destroy — confirm that is intended.
	 */
	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);

	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
		spa_unload(spa);
		spa_deactivate(spa);
	}

	/* Hand the caller a copy of the final config, if requested. */
	if (oldconfig && spa->spa_config)
		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

	if (new_state != POOL_STATE_UNINITIALIZED) {
		/* 'hardforce' skips updating the on-disk config cache. */
		if (!hardforce)
			spa_config_sync(spa, B_TRUE, B_TRUE);
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);

	return (0);
}
2837fa9e4066Sahrens 
2838fa9e4066Sahrens /*
2839fa9e4066Sahrens  * Destroy a storage pool.
2840fa9e4066Sahrens  */
2841fa9e4066Sahrens int
2842fa9e4066Sahrens spa_destroy(char *pool)
2843fa9e4066Sahrens {
2844394ab0cbSGeorge Wilson 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
2845394ab0cbSGeorge Wilson 	    B_FALSE, B_FALSE));
2846fa9e4066Sahrens }
2847fa9e4066Sahrens 
2848fa9e4066Sahrens /*
2849fa9e4066Sahrens  * Export a storage pool.
2850fa9e4066Sahrens  */
2851fa9e4066Sahrens int
2852394ab0cbSGeorge Wilson spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
2853394ab0cbSGeorge Wilson     boolean_t hardforce)
2854fa9e4066Sahrens {
2855394ab0cbSGeorge Wilson 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
2856394ab0cbSGeorge Wilson 	    force, hardforce));
2857fa9e4066Sahrens }
2858fa9e4066Sahrens 
2859ea8dc4b6Seschrock /*
2860ea8dc4b6Seschrock  * Similar to spa_export(), this unloads the spa_t without actually removing it
2861ea8dc4b6Seschrock  * from the namespace in any way.
2862ea8dc4b6Seschrock  */
2863ea8dc4b6Seschrock int
2864ea8dc4b6Seschrock spa_reset(char *pool)
2865ea8dc4b6Seschrock {
286689a89ebfSlling 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
2867394ab0cbSGeorge Wilson 	    B_FALSE, B_FALSE));
2868ea8dc4b6Seschrock }
2869ea8dc4b6Seschrock 
2870fa9e4066Sahrens /*
2871fa9e4066Sahrens  * ==========================================================================
2872fa9e4066Sahrens  * Device manipulation
2873fa9e4066Sahrens  * ==========================================================================
2874fa9e4066Sahrens  */
2875fa9e4066Sahrens 
2876fa9e4066Sahrens /*
28778654d025Sperrin  * Add a device to a storage pool.
2878fa9e4066Sahrens  */
2879fa9e4066Sahrens int
2880fa9e4066Sahrens spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
2881fa9e4066Sahrens {
2882fa9e4066Sahrens 	uint64_t txg;
28838ad4d6ddSJeff Bonwick 	int error;
2884fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
28850e34b6a7Sbonwick 	vdev_t *vd, *tvd;
2886fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
2887fa94a07fSbrendan 	uint_t nspares, nl2cache;
2888fa9e4066Sahrens 
2889fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
2890fa9e4066Sahrens 
289199653d4eSeschrock 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
289299653d4eSeschrock 	    VDEV_ALLOC_ADD)) != 0)
289399653d4eSeschrock 		return (spa_vdev_exit(spa, NULL, txg, error));
2894fa9e4066Sahrens 
2895e14bb325SJeff Bonwick 	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
289699653d4eSeschrock 
2897fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
2898fa94a07fSbrendan 	    &nspares) != 0)
289999653d4eSeschrock 		nspares = 0;
290099653d4eSeschrock 
2901fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
2902fa94a07fSbrendan 	    &nl2cache) != 0)
2903fa94a07fSbrendan 		nl2cache = 0;
2904fa94a07fSbrendan 
2905e14bb325SJeff Bonwick 	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
2906fa9e4066Sahrens 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
2907fa9e4066Sahrens 
2908e14bb325SJeff Bonwick 	if (vd->vdev_children != 0 &&
2909e14bb325SJeff Bonwick 	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
2910e14bb325SJeff Bonwick 		return (spa_vdev_exit(spa, vd, txg, error));
291199653d4eSeschrock 
291239c23413Seschrock 	/*
2913fa94a07fSbrendan 	 * We must validate the spares and l2cache devices after checking the
2914fa94a07fSbrendan 	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
291539c23413Seschrock 	 */
2916e14bb325SJeff Bonwick 	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
291739c23413Seschrock 		return (spa_vdev_exit(spa, vd, txg, error));
291839c23413Seschrock 
291939c23413Seschrock 	/*
292039c23413Seschrock 	 * Transfer each new top-level vdev from vd to rvd.
292139c23413Seschrock 	 */
29228ad4d6ddSJeff Bonwick 	for (int c = 0; c < vd->vdev_children; c++) {
292339c23413Seschrock 		tvd = vd->vdev_child[c];
292439c23413Seschrock 		vdev_remove_child(vd, tvd);
292539c23413Seschrock 		tvd->vdev_id = rvd->vdev_children;
292639c23413Seschrock 		vdev_add_child(rvd, tvd);
292739c23413Seschrock 		vdev_config_dirty(tvd);
292839c23413Seschrock 	}
292939c23413Seschrock 
293099653d4eSeschrock 	if (nspares != 0) {
2931fa94a07fSbrendan 		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
2932fa94a07fSbrendan 		    ZPOOL_CONFIG_SPARES);
293399653d4eSeschrock 		spa_load_spares(spa);
2934fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
2935fa94a07fSbrendan 	}
2936fa94a07fSbrendan 
2937fa94a07fSbrendan 	if (nl2cache != 0) {
2938fa94a07fSbrendan 		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
2939fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE);
2940fa94a07fSbrendan 		spa_load_l2cache(spa);
2941fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
2942fa9e4066Sahrens 	}
2943fa9e4066Sahrens 
2944fa9e4066Sahrens 	/*
29450e34b6a7Sbonwick 	 * We have to be careful when adding new vdevs to an existing pool.
29460e34b6a7Sbonwick 	 * If other threads start allocating from these vdevs before we
29470e34b6a7Sbonwick 	 * sync the config cache, and we lose power, then upon reboot we may
29480e34b6a7Sbonwick 	 * fail to open the pool because there are DVAs that the config cache
29490e34b6a7Sbonwick 	 * can't translate.  Therefore, we first add the vdevs without
29500e34b6a7Sbonwick 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
29510373e76bSbonwick 	 * and then let spa_config_update() initialize the new metaslabs.
29520e34b6a7Sbonwick 	 *
29530e34b6a7Sbonwick 	 * spa_load() checks for added-but-not-initialized vdevs, so that
29540e34b6a7Sbonwick 	 * if we lose power at any point in this sequence, the remaining
29550e34b6a7Sbonwick 	 * steps will be completed the next time we load the pool.
29560e34b6a7Sbonwick 	 */
29570373e76bSbonwick 	(void) spa_vdev_exit(spa, vd, txg, 0);
29580e34b6a7Sbonwick 
29590373e76bSbonwick 	mutex_enter(&spa_namespace_lock);
29600373e76bSbonwick 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
29610373e76bSbonwick 	mutex_exit(&spa_namespace_lock);
2962fa9e4066Sahrens 
29630373e76bSbonwick 	return (0);
2964fa9e4066Sahrens }
2965fa9e4066Sahrens 
/*
 * Attach a device to a mirror.  The arguments are the path to any device
 * in the mirror, and the nvroot for the new device.  If the path specifies
 * a device that is not mirrored, we automatically insert the mirror vdev.
 *
 * If 'replacing' is specified, the new device is intended to replace the
 * existing device; in this case the two devices are made into their own
 * mirror using the 'replacing' vdev, which is functionally identical to
 * the mirror vdev (it actually reuses all the same ops) but has a few
 * extra rules: you can't attach to it after it's been created, and upon
 * completion of resilvering, the first disk (the one being replaced)
 * is automatically detached.
 *
 * 'guid' identifies the existing leaf vdev; 'nvroot' must describe exactly
 * one new leaf device.  Returns 0 on success or an errno-style code via
 * spa_vdev_exit().
 */
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
	uint64_t txg, open_txg;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
	vdev_ops_t *pvops;
	dmu_tx_t *tx;
	char *oldvdpath, *newvdpath;
	int newvd_isspare;
	int error;

	txg = spa_vdev_enter(spa);

	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (oldvd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	/* Can only attach next to a leaf (disk/file), not an interior vdev. */
	if (!oldvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = oldvd->vdev_parent;

	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, EINVAL));

	/* The caller must supply exactly one new device. */
	if (newrootvd->vdev_children != 1)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	newvd = newrootvd->vdev_child[0];

	if (!newvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
		return (spa_vdev_exit(spa, newrootvd, txg, error));

	/*
	 * Spares can't replace logs
	 */
	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

	if (!replacing) {
		/*
		 * For attach, the only allowable parent is a mirror or the root
		 * vdev.
		 */
		if (pvd->vdev_ops != &vdev_mirror_ops &&
		    pvd->vdev_ops != &vdev_root_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		pvops = &vdev_mirror_ops;
	} else {
		/*
		 * Active hot spares can only be replaced by inactive hot
		 * spares.
		 */
		if (pvd->vdev_ops == &vdev_spare_ops &&
		    pvd->vdev_child[1] == oldvd &&
		    !spa_has_spare(spa, newvd->vdev_guid))
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		/*
		 * If the source is a hot spare, and the parent isn't already a
		 * spare, then we want to create a new hot spare.  Otherwise, we
		 * want to create a replacing vdev.  The user is not allowed to
		 * attach to a spared vdev child unless the 'isspare' state is
		 * the same (spare replaces spare, non-spare replaces
		 * non-spare).
		 */
		if (pvd->vdev_ops == &vdev_replacing_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops == &vdev_spare_ops &&
		    newvd->vdev_isspare != oldvd->vdev_isspare)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops != &vdev_spare_ops &&
		    newvd->vdev_isspare)
			pvops = &vdev_spare_ops;
		else
			pvops = &vdev_replacing_ops;
	}

	/*
	 * Compare the new device size with the replaceable/attachable
	 * device size.
	 */
	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));

	/*
	 * The new device cannot have a higher alignment requirement
	 * than the top-level vdev.
	 */
	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));

	/*
	 * If this is an in-place replacement, update oldvd's path and devid
	 * to make it distinguishable from newvd, and unopenable from now on.
	 */
	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
		spa_strfree(oldvd->vdev_path);
		/* "+5" covers the "/" separator, "old", and the NUL. */
		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
		    KM_SLEEP);
		(void) sprintf(oldvd->vdev_path, "%s/%s",
		    newvd->vdev_path, "old");
		if (oldvd->vdev_devid != NULL) {
			spa_strfree(oldvd->vdev_devid);
			oldvd->vdev_devid = NULL;
		}
	}

	/*
	 * If the parent is not a mirror, or if we're replacing, insert the new
	 * mirror/replacing/spare vdev above oldvd.
	 */
	if (pvd->vdev_ops != pvops)
		pvd = vdev_add_parent(oldvd, pvops);

	ASSERT(pvd->vdev_top->vdev_parent == rvd);
	ASSERT(pvd->vdev_ops == pvops);
	ASSERT(oldvd->vdev_parent == pvd);

	/*
	 * Extract the new device from its root and add it to pvd.
	 */
	vdev_remove_child(newrootvd, newvd);
	newvd->vdev_id = pvd->vdev_children;
	vdev_add_child(pvd, newvd);

	/*
	 * If newvd is smaller than oldvd, but larger than its rsize,
	 * the addition of newvd may have decreased our parent's asize.
	 */
	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);

	tvd = newvd->vdev_top;
	ASSERT(pvd->vdev_top == tvd);
	ASSERT(tvd->vdev_parent == rvd);

	vdev_config_dirty(tvd);

	/*
	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
	 */
	open_txg = txg + TXG_CONCURRENT_STATES - 1;

	vdev_dtl_dirty(newvd, DTL_MISSING,
	    TXG_INITIAL, open_txg - TXG_INITIAL + 1);

	if (newvd->vdev_isspare) {
		spa_spare_activate(newvd);
		spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
	}

	/*
	 * Copy the paths before spa_vdev_exit(), which frees newrootvd
	 * (and, via vdev_free, could tear down what it still references).
	 */
	oldvdpath = spa_strdup(oldvd->vdev_path);
	newvdpath = spa_strdup(newvd->vdev_path);
	newvd_isspare = newvd->vdev_isspare;

	/*
	 * Mark newvd's DTL dirty in this txg.
	 */
	vdev_dirty(tvd, VDD_DTL, newvd, txg);

	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);

	/* Log the attach/replace/spare-in to the pool history (best effort). */
	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
		spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
		    CRED(),  "%s vdev=%s %s vdev=%s",
		    replacing && newvd_isspare ? "spare in" :
		    replacing ? "replace" : "attach", newvdpath,
		    replacing ? "for" : "to", oldvdpath);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	spa_strfree(oldvdpath);
	spa_strfree(newvdpath);

	/*
	 * Kick off a resilver to update newvd.
	 */
	VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);

	return (0);
}
3171fa9e4066Sahrens 
/*
 * Detach a device from a mirror or replacing vdev.
 * If 'replace_done' is specified, only detach if the parent
 * is a replacing vdev.
 *
 * 'guid' identifies the leaf vdev to detach; 'pguid' (if nonzero) is the
 * expected parent's guid, used to detect a topology change that raced with
 * the caller's decision.  Returns 0 on success or an errno-style code.
 */
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
	uint64_t txg;
	int error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *pvd, *cvd, *tvd;
	boolean_t unspare = B_FALSE;
	uint64_t unspare_guid;
	size_t len;

	txg = spa_vdev_enter(spa);

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (vd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	/* Only leaf vdevs can be detached. */
	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = vd->vdev_parent;

	/*
	 * If the parent/child relationship is not as expected, don't do it.
	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
	 * vdev that's replacing B with C.  The user's intent in replacing
	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
	 * the replace by detaching C, the expected behavior is to end up
	 * M(A,B).  But suppose that right after deciding to detach C,
	 * the replacement of B completes.  We would have M(A,C), and then
	 * ask to detach C, which would leave us with just A -- not what
	 * the user wanted.  To prevent this, we make sure that the
	 * parent/child relationship hasn't changed -- in this example,
	 * that C's parent is still the replacing vdev R.
	 */
	if (pvd->vdev_guid != pguid && pguid != 0)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	/*
	 * If replace_done is specified, only remove this device if it's
	 * the first child of a replacing vdev.  For the 'spare' vdev, either
	 * disk can be removed.
	 */
	if (replace_done) {
		if (pvd->vdev_ops == &vdev_replacing_ops) {
			if (vd->vdev_id != 0)
				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		} else if (pvd->vdev_ops != &vdev_spare_ops) {
			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		}
	}

	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
	    spa_version(spa) >= SPA_VERSION_SPARES);

	/*
	 * Only mirror, replacing, and spare vdevs support detach.
	 */
	if (pvd->vdev_ops != &vdev_replacing_ops &&
	    pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_spare_ops)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * If this device has the only valid copy of some data,
	 * we cannot safely detach it.
	 */
	if (vdev_dtl_required(vd))
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	ASSERT(pvd->vdev_children >= 2);

	/*
	 * If we are detaching the second disk from a replacing vdev, then
	 * check to see if we changed the original vdev's path to have "/old"
	 * at the end in spa_vdev_attach().  If so, undo that change now.
	 */
	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
	    pvd->vdev_child[0]->vdev_path != NULL &&
	    pvd->vdev_child[1]->vdev_path != NULL) {
		ASSERT(pvd->vdev_child[1] == vd);
		cvd = pvd->vdev_child[0];
		len = strlen(vd->vdev_path);
		if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
		    strcmp(cvd->vdev_path + len, "/old") == 0) {
			spa_strfree(cvd->vdev_path);
			cvd->vdev_path = spa_strdup(vd->vdev_path);
		}
	}

	/*
	 * If we are detaching the original disk from a spare, then it implies
	 * that the spare should become a real disk, and be removed from the
	 * active spare list for the pool.
	 */
	if (pvd->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare)
		unspare = B_TRUE;

	/*
	 * Erase the disk labels so the disk can be used for other things.
	 * This must be done after all other error cases are handled,
	 * but before we disembowel vd (so we can still do I/O to it).
	 * But if we can't do it, don't treat the error as fatal --
	 * it may be that the unwritability of the disk is the reason
	 * it's being detached!
	 */
	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	/*
	 * Remove vd from its parent and compact the parent's children.
	 */
	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	/*
	 * Remember one of the remaining children so we can get tvd below.
	 */
	cvd = pvd->vdev_child[0];

	/*
	 * If we need to remove the remaining child from the list of hot spares,
	 * do it now, marking the vdev as no longer a spare in the process.
	 * We must do this before vdev_remove_parent(), because that can
	 * change the GUID if it creates a new toplevel GUID.  For a similar
	 * reason, we must remove the spare now, in the same txg as the detach;
	 * otherwise someone could attach a new sibling, change the GUID, and
	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
	 */
	if (unspare) {
		ASSERT(cvd->vdev_isspare);
		spa_spare_remove(cvd);
		unspare_guid = cvd->vdev_guid;
		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
	}

	/*
	 * If the parent mirror/replacing vdev only has one child,
	 * the parent is no longer needed.  Remove it from the tree.
	 */
	if (pvd->vdev_children == 1)
		vdev_remove_parent(cvd);

	/*
	 * We don't set tvd until now because the parent we just removed
	 * may have been the previous top-level vdev.
	 */
	tvd = cvd->vdev_top;
	ASSERT(tvd->vdev_parent == rvd);

	/*
	 * Reevaluate the parent vdev state.
	 */
	vdev_propagate_state(cvd);

	/*
	 * If the device we just detached was smaller than the others, it may be
	 * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
	 * can't fail because the existing metaslabs are already in core, so
	 * there's nothing to read from disk.
	 */
	VERIFY(vdev_metaslab_init(tvd, txg) == 0);

	vdev_config_dirty(tvd);

	/*
	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
	 * But first make sure we're not on any *other* txg's DTL list, to
	 * prevent vd from being accessed after it's freed.
	 */
	for (int t = 0; t < TXG_SIZE; t++)
		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
	vd->vdev_detached = B_TRUE;
	vdev_dirty(tvd, VDD_DTL, vd, txg);

	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);

	/* Passing vd here tells spa_vdev_exit() to free it after the sync. */
	error = spa_vdev_exit(spa, vd, txg, 0);

	/*
	 * If this was the removal of the original device in a hot spare vdev,
	 * then we want to go through and remove the device from the hot spare
	 * list of every other pool.
	 */
	if (unspare) {
		spa_t *myspa = spa;
		spa = NULL;
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			if (spa->spa_state != POOL_STATE_ACTIVE)
				continue;
			if (spa == myspa)
				continue;
			/*
			 * Hold the pool and drop the namespace lock while
			 * removing, since spa_vdev_remove() takes it itself.
			 */
			spa_open_ref(spa, FTAG);
			mutex_exit(&spa_namespace_lock);
			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
			mutex_enter(&spa_namespace_lock);
			spa_close(spa, FTAG);
		}
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
338399653d4eSeschrock 
/*
 * Search an array of 'count' config nvlists for the entry whose
 * ZPOOL_CONFIG_GUID equals target_guid.  Returns a pointer to the matching
 * nvlist (no copy is made), or NULL if no entry matches.  Each nvlist is
 * required to carry a GUID; a missing one trips the VERIFY.
 */
3384e14bb325SJeff Bonwick static nvlist_t *
3385e14bb325SJeff Bonwick spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
338699653d4eSeschrock {
3387e14bb325SJeff Bonwick 	for (int i = 0; i < count; i++) {
3388e14bb325SJeff Bonwick 		uint64_t guid;
338999653d4eSeschrock 
3390e14bb325SJeff Bonwick 		VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3391e14bb325SJeff Bonwick 		    &guid) == 0);
339299653d4eSeschrock 
3393e14bb325SJeff Bonwick 		if (guid == target_guid)
3394e14bb325SJeff Bonwick 			return (nvpp[i]);
339599653d4eSeschrock 	}
339699653d4eSeschrock 
3397e14bb325SJeff Bonwick 	return (NULL);
3398fa94a07fSbrendan }
3399fa94a07fSbrendan 
/*
 * Replace the nvlist array named 'name' in 'config' (the spares or l2cache
 * config) with a copy of dev[0..count-1] that omits dev_to_remove.  The
 * caller guarantees dev_to_remove is a member of dev[], so the rebuilt
 * array always has exactly count - 1 entries.
 */
3400e14bb325SJeff Bonwick static void
3401e14bb325SJeff Bonwick spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3402e14bb325SJeff Bonwick 	nvlist_t *dev_to_remove)
3403fa94a07fSbrendan {
3404e14bb325SJeff Bonwick 	nvlist_t **newdev = NULL;
3405fa94a07fSbrendan 
	/*
	 * If count == 1 the only device is the one being removed, so newdev
	 * stays NULL and an empty (NULL, 0) array is written below.
	 */
3406e14bb325SJeff Bonwick 	if (count > 1)
3407e14bb325SJeff Bonwick 		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3408fa94a07fSbrendan 
3409e14bb325SJeff Bonwick 	for (int i = 0, j = 0; i < count; i++) {
3410e14bb325SJeff Bonwick 		if (dev[i] == dev_to_remove)
3411e14bb325SJeff Bonwick 			continue;
3412e14bb325SJeff Bonwick 		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3413fa94a07fSbrendan 	}
3414fa94a07fSbrendan 
3415e14bb325SJeff Bonwick 	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3416e14bb325SJeff Bonwick 	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3417fa94a07fSbrendan 
	/* nvlist_add_nvlist_array() copied the entries; free our duplicates. */
3418e14bb325SJeff Bonwick 	for (int i = 0; i < count - 1; i++)
3419e14bb325SJeff Bonwick 		nvlist_free(newdev[i]);
3420fa94a07fSbrendan 
3421e14bb325SJeff Bonwick 	if (count > 1)
3422e14bb325SJeff Bonwick 		kmem_free(newdev, (count - 1) * sizeof (void *));
3423fa94a07fSbrendan }
3424fa94a07fSbrendan 
3425fa94a07fSbrendan /*
3426fa94a07fSbrendan  * Remove a device from the pool.  Currently, this supports removing only hot
3427fa94a07fSbrendan  * spares and level 2 ARC devices.
 *
 * 'guid' identifies the device to remove.  If 'unspare' is set, a hot spare
 * is removed even while it is actively spared into this pool.  Returns 0 on
 * success; EBUSY if an in-use spare cannot be removed, ENOTSUP for a normal
 * vdev, or ENOENT if no device with that guid exists.
 *
 * May be called with or without spa_namespace_lock held; when the caller
 * already holds it (locked == B_TRUE) we skip spa_vdev_enter/exit().
3428fa94a07fSbrendan  */
3429fa94a07fSbrendan int
3430fa94a07fSbrendan spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3431fa94a07fSbrendan {
3432fa94a07fSbrendan 	vdev_t *vd;
3433e14bb325SJeff Bonwick 	nvlist_t **spares, **l2cache, *nv;
3434fa94a07fSbrendan 	uint_t nspares, nl2cache;
34358ad4d6ddSJeff Bonwick 	uint64_t txg = 0;
3436fa94a07fSbrendan 	int error = 0;
34378ad4d6ddSJeff Bonwick 	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
3438fa94a07fSbrendan 
34398ad4d6ddSJeff Bonwick 	if (!locked)
34408ad4d6ddSJeff Bonwick 		txg = spa_vdev_enter(spa);
3441fa94a07fSbrendan 
	/* NULL here means the guid is not an active vdev in this pool. */
3442c5904d13Seschrock 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3443fa94a07fSbrendan 
3444fa94a07fSbrendan 	if (spa->spa_spares.sav_vdevs != NULL &&
3445fa94a07fSbrendan 	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3446e14bb325SJeff Bonwick 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
3447e14bb325SJeff Bonwick 	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
3448e14bb325SJeff Bonwick 		/*
3449e14bb325SJeff Bonwick 		 * Only remove the hot spare if it's not currently in use
3450e14bb325SJeff Bonwick 		 * in this pool.
3451e14bb325SJeff Bonwick 		 */
3452e14bb325SJeff Bonwick 		if (vd == NULL || unspare) {
3453e14bb325SJeff Bonwick 			spa_vdev_remove_aux(spa->spa_spares.sav_config,
3454e14bb325SJeff Bonwick 			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
3455e14bb325SJeff Bonwick 			spa_load_spares(spa);
3456e14bb325SJeff Bonwick 			spa->spa_spares.sav_sync = B_TRUE;
3457e14bb325SJeff Bonwick 		} else {
3458e14bb325SJeff Bonwick 			error = EBUSY;
3459e14bb325SJeff Bonwick 		}
3460e14bb325SJeff Bonwick 	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
3461fa94a07fSbrendan 	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3462e14bb325SJeff Bonwick 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
3463e14bb325SJeff Bonwick 	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
3464e14bb325SJeff Bonwick 		/*
3465e14bb325SJeff Bonwick 		 * Cache devices can always be removed.
3466e14bb325SJeff Bonwick 		 */
3467e14bb325SJeff Bonwick 		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
3468e14bb325SJeff Bonwick 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
3469fa94a07fSbrendan 		spa_load_l2cache(spa);
3470fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
3471e14bb325SJeff Bonwick 	} else if (vd != NULL) {
3472e14bb325SJeff Bonwick 		/*
3473e14bb325SJeff Bonwick 		 * Normal vdevs cannot be removed (yet).
3474e14bb325SJeff Bonwick 		 */
3475e14bb325SJeff Bonwick 		error = ENOTSUP;
3476e14bb325SJeff Bonwick 	} else {
3477e14bb325SJeff Bonwick 		/*
3478e14bb325SJeff Bonwick 		 * There is no vdev of any kind with the specified guid.
3479e14bb325SJeff Bonwick 		 */
3480e14bb325SJeff Bonwick 		error = ENOENT;
3481fa94a07fSbrendan 	}
348299653d4eSeschrock 
34838ad4d6ddSJeff Bonwick 	if (!locked)
34848ad4d6ddSJeff Bonwick 		return (spa_vdev_exit(spa, NULL, txg, error));
34858ad4d6ddSJeff Bonwick 
34868ad4d6ddSJeff Bonwick 	return (error);
3487fa9e4066Sahrens }
3488fa9e4066Sahrens 
3489fa9e4066Sahrens /*
34903d7072f8Seschrock  * Find any device that's done replacing, or a vdev marked 'unspare' that's
 * currently spared, so we can detach it.  Recursively walks the tree rooted
 * at 'vd' and returns the vdev to detach (the stale half of a replacing or
 * spare pair), or NULL if nothing is ready.
3492fa9e4066Sahrens  */
3493ea8dc4b6Seschrock static vdev_t *
34943d7072f8Seschrock spa_vdev_resilver_done_hunt(vdev_t *vd)
3495fa9e4066Sahrens {
3496ea8dc4b6Seschrock 	vdev_t *newvd, *oldvd;
3497fa9e4066Sahrens 	int c;
3498fa9e4066Sahrens 
	/* Depth-first: report the first completed descendant found. */
3499ea8dc4b6Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
35003d7072f8Seschrock 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3501ea8dc4b6Seschrock 		if (oldvd != NULL)
3502ea8dc4b6Seschrock 			return (oldvd);
3503ea8dc4b6Seschrock 	}
3504fa9e4066Sahrens 
35053d7072f8Seschrock 	/*
35063d7072f8Seschrock 	 * Check for a completed replacement.
	 * For a replacing vdev, child[0] is the original and child[1] is
	 * the replacement; detach the original once the replacement's DTL
	 * is empty and the original is no longer required.
35073d7072f8Seschrock 	 */
3508fa9e4066Sahrens 	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3509ea8dc4b6Seschrock 		oldvd = vd->vdev_child[0];
3510ea8dc4b6Seschrock 		newvd = vd->vdev_child[1];
3511ea8dc4b6Seschrock 
35128ad4d6ddSJeff Bonwick 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
35138ad4d6ddSJeff Bonwick 		    !vdev_dtl_required(oldvd))
3514ea8dc4b6Seschrock 			return (oldvd);
3515fa9e4066Sahrens 	}
3516ea8dc4b6Seschrock 
35173d7072f8Seschrock 	/*
35183d7072f8Seschrock 	 * Check for a completed resilver with the 'unspare' flag set.
	 * Here the roles are reversed: child[0] is the spare (newvd) and
	 * child[1] is the original device to detach.
35193d7072f8Seschrock 	 */
35203d7072f8Seschrock 	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
35213d7072f8Seschrock 		newvd = vd->vdev_child[0];
35223d7072f8Seschrock 		oldvd = vd->vdev_child[1];
35233d7072f8Seschrock 
35243d7072f8Seschrock 		if (newvd->vdev_unspare &&
35258ad4d6ddSJeff Bonwick 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
35268ad4d6ddSJeff Bonwick 		    !vdev_dtl_required(oldvd)) {
35273d7072f8Seschrock 			newvd->vdev_unspare = 0;
35283d7072f8Seschrock 			return (oldvd);
35293d7072f8Seschrock 		}
35303d7072f8Seschrock 	}
35313d7072f8Seschrock 
3532ea8dc4b6Seschrock 	return (NULL);
3533fa9e4066Sahrens }
3534fa9e4066Sahrens 
/*
 * Detach every device that has finished replacing or unsparing.  Called
 * from the async thread after a resilver completes.  Because
 * spa_vdev_detach() takes its own locks, SCL_ALL is dropped around each
 * detach and reacquired before rescanning the tree; only guids (not vdev
 * pointers) are carried across the unlocked window.
 */
3535ea8dc4b6Seschrock static void
35363d7072f8Seschrock spa_vdev_resilver_done(spa_t *spa)
3537fa9e4066Sahrens {
35388ad4d6ddSJeff Bonwick 	vdev_t *vd, *pvd, *ppvd;
35398ad4d6ddSJeff Bonwick 	uint64_t guid, sguid, pguid, ppguid;
3540ea8dc4b6Seschrock 
35418ad4d6ddSJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3542ea8dc4b6Seschrock 
35433d7072f8Seschrock 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
35448ad4d6ddSJeff Bonwick 		pvd = vd->vdev_parent;
35458ad4d6ddSJeff Bonwick 		ppvd = pvd->vdev_parent;
3546ea8dc4b6Seschrock 		guid = vd->vdev_guid;
35478ad4d6ddSJeff Bonwick 		pguid = pvd->vdev_guid;
35488ad4d6ddSJeff Bonwick 		ppguid = ppvd->vdev_guid;
35498ad4d6ddSJeff Bonwick 		sguid = 0;
355099653d4eSeschrock 		/*
355199653d4eSeschrock 		 * If we have just finished replacing a hot spared device, then
355299653d4eSeschrock 		 * we need to detach the parent's first child (the original hot
355399653d4eSeschrock 		 * spare) as well.
355499653d4eSeschrock 		 */
35558ad4d6ddSJeff Bonwick 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
355699653d4eSeschrock 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
35578ad4d6ddSJeff Bonwick 			ASSERT(ppvd->vdev_children == 2);
35588ad4d6ddSJeff Bonwick 			sguid = ppvd->vdev_child[1]->vdev_guid;
355999653d4eSeschrock 		}
		/* Drop SCL_ALL: spa_vdev_detach() acquires its own locks. */
35608ad4d6ddSJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
35618ad4d6ddSJeff Bonwick 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
3562ea8dc4b6Seschrock 			return;
35638ad4d6ddSJeff Bonwick 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
356499653d4eSeschrock 			return;
35658ad4d6ddSJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3566fa9e4066Sahrens 	}
3567fa9e4066Sahrens 
35688ad4d6ddSJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
3569fa9e4066Sahrens }
3570fa9e4066Sahrens 
3571c67d9675Seschrock /*
35726809eb4eSEric Schrock  * Update the stored path or FRU for this vdev.  Dirty the vdev configuration,
35736809eb4eSEric Schrock  * relying on spa_vdev_enter/exit() to synchronize the labels and cache.
 *
 * 'guid' selects the vdev; 'value' is the new path (ispath == B_TRUE) or
 * FRU string (ispath == B_FALSE).  Returns 0 on success, ENOENT if the
 * guid is unknown, or ENOTSUP for a non-leaf vdev.
3574c67d9675Seschrock  */
3575c67d9675Seschrock int
35766809eb4eSEric Schrock spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
35776809eb4eSEric Schrock     boolean_t ispath)
3578c67d9675Seschrock {
3579c5904d13Seschrock 	vdev_t *vd;
3580c67d9675Seschrock 	uint64_t txg;
3581c67d9675Seschrock 
3582c67d9675Seschrock 	txg = spa_vdev_enter(spa);
3583c67d9675Seschrock 
35846809eb4eSEric Schrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3585fa94a07fSbrendan 		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
3586c67d9675Seschrock 
35870e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
35880e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
35890e34b6a7Sbonwick 
35906809eb4eSEric Schrock 	if (ispath) {
		/*
		 * NOTE(review): vdev_path is freed without a NULL check,
		 * unlike vdev_fru below — presumably every leaf vdev has a
		 * path by this point; confirm against vdev creation paths.
		 */
35916809eb4eSEric Schrock 		spa_strfree(vd->vdev_path);
35926809eb4eSEric Schrock 		vd->vdev_path = spa_strdup(value);
35936809eb4eSEric Schrock 	} else {
35946809eb4eSEric Schrock 		if (vd->vdev_fru != NULL)
35956809eb4eSEric Schrock 			spa_strfree(vd->vdev_fru);
35966809eb4eSEric Schrock 		vd->vdev_fru = spa_strdup(value);
35976809eb4eSEric Schrock 	}
3598c67d9675Seschrock 
3599c67d9675Seschrock 	vdev_config_dirty(vd->vdev_top);
3600c67d9675Seschrock 
3601c67d9675Seschrock 	return (spa_vdev_exit(spa, NULL, txg, 0));
3602c67d9675Seschrock }
3603c67d9675Seschrock 
/*
 * Public wrapper: update the stored device path for the vdev with 'guid'.
 */
36046809eb4eSEric Schrock int
36056809eb4eSEric Schrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
36066809eb4eSEric Schrock {
36076809eb4eSEric Schrock 	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
36086809eb4eSEric Schrock }
36096809eb4eSEric Schrock 
/*
 * Public wrapper: update the stored FRU identifier for the vdev with 'guid'.
 */
36106809eb4eSEric Schrock int
36116809eb4eSEric Schrock spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
36126809eb4eSEric Schrock {
36136809eb4eSEric Schrock 	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
36146809eb4eSEric Schrock }
36156809eb4eSEric Schrock 
3616fa9e4066Sahrens /*
3617fa9e4066Sahrens  * ==========================================================================
3618fa9e4066Sahrens  * SPA Scrubbing
3619fa9e4066Sahrens  * ==========================================================================
3620fa9e4066Sahrens  */
3621fa9e4066Sahrens 
/*
 * Start or cancel a scrub/resilver of the given 'type'.  Returns 0 on
 * success (including the no-op resilver case), ENOTSUP for an unknown
 * type, EBUSY if a scrub is requested while a resilver is in progress,
 * or the error from the DSL scrub machinery.
 */
3622ea8dc4b6Seschrock int
3623088f3894Sahrens spa_scrub(spa_t *spa, pool_scrub_type_t type)
3624fa9e4066Sahrens {
3625e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3626bb8b5132Sek 
3627fa9e4066Sahrens 	if ((uint_t)type >= POOL_SCRUB_TYPES)
3628fa9e4066Sahrens 		return (ENOTSUP);
3629fa9e4066Sahrens 
3630fa9e4066Sahrens 	/*
3631088f3894Sahrens 	 * If a resilver was requested, but there is no DTL on a
3632088f3894Sahrens 	 * writeable leaf device, we have nothing to do.
	 * Signal completion so waiters (e.g. the async thread) make
	 * progress anyway.
3633fa9e4066Sahrens 	 */
3634088f3894Sahrens 	if (type == POOL_SCRUB_RESILVER &&
3635088f3894Sahrens 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3636088f3894Sahrens 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3637ea8dc4b6Seschrock 		return (0);
3638ea8dc4b6Seschrock 	}
3639fa9e4066Sahrens 
	/* A full scrub may not preempt an in-progress resilver. */
3640088f3894Sahrens 	if (type == POOL_SCRUB_EVERYTHING &&
3641088f3894Sahrens 	    spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3642088f3894Sahrens 	    spa->spa_dsl_pool->dp_scrub_isresilver)
3643088f3894Sahrens 		return (EBUSY);
3644fa9e4066Sahrens 
3645088f3894Sahrens 	if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3646088f3894Sahrens 		return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3647088f3894Sahrens 	} else if (type == POOL_SCRUB_NONE) {
3648088f3894Sahrens 		return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
3649ea8dc4b6Seschrock 	} else {
3650088f3894Sahrens 		return (EINVAL);
3651fa9e4066Sahrens 	}
3652fa9e4066Sahrens }
3653fa9e4066Sahrens 
3654ea8dc4b6Seschrock /*
3655ea8dc4b6Seschrock  * ==========================================================================
3656ea8dc4b6Seschrock  * SPA async task processing
3657ea8dc4b6Seschrock  * ==========================================================================
3658ea8dc4b6Seschrock  */
3659ea8dc4b6Seschrock 
/*
 * Recursively walk the tree rooted at 'vd', transitioning any vdev with
 * vdev_remove_wanted set to the REMOVED state, clearing its error counts
 * and dirtying its top-level vdev's state.  Runs under
 * spa_vdev_state_enter() from the async thread.
 */
3660ea8dc4b6Seschrock static void
36613d7072f8Seschrock spa_async_remove(spa_t *spa, vdev_t *vd)
3662fa9e4066Sahrens {
366349cf58c0SBrendan Gregg - Sun Microsystems 	if (vd->vdev_remove_wanted) {
366449cf58c0SBrendan Gregg - Sun Microsystems 		vd->vdev_remove_wanted = 0;
366549cf58c0SBrendan Gregg - Sun Microsystems 		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
3666e14bb325SJeff Bonwick 		vdev_clear(spa, vd);
3667e14bb325SJeff Bonwick 		vdev_state_dirty(vd->vdev_top);
3668ea8dc4b6Seschrock 	}
366949cf58c0SBrendan Gregg - Sun Microsystems 
3670e14bb325SJeff Bonwick 	for (int c = 0; c < vd->vdev_children; c++)
367149cf58c0SBrendan Gregg - Sun Microsystems 		spa_async_remove(spa, vd->vdev_child[c]);
3672ea8dc4b6Seschrock }
3673fa9e4066Sahrens 
/*
 * Recursively walk the tree rooted at 'vd', reopening any vdev with
 * vdev_probe_wanted set; the reopen performs the actual device probe.
 */
3674e14bb325SJeff Bonwick static void
3675e14bb325SJeff Bonwick spa_async_probe(spa_t *spa, vdev_t *vd)
3676e14bb325SJeff Bonwick {
3677e14bb325SJeff Bonwick 	if (vd->vdev_probe_wanted) {
3678e14bb325SJeff Bonwick 		vd->vdev_probe_wanted = 0;
3679e14bb325SJeff Bonwick 		vdev_reopen(vd);	/* vdev_open() does the actual probe */
3680e14bb325SJeff Bonwick 	}
3681e14bb325SJeff Bonwick 
3682e14bb325SJeff Bonwick 	for (int c = 0; c < vd->vdev_children; c++)
3683e14bb325SJeff Bonwick 		spa_async_probe(spa, vd->vdev_child[c]);
3684e14bb325SJeff Bonwick }
3685e14bb325SJeff Bonwick 
/*
 * Body of the per-pool async worker thread, created by spa_async_dispatch().
 * Atomically snapshots and clears spa_async_tasks, services each requested
 * task in order, then clears spa_async_thread and broadcasts on
 * spa_async_cv before exiting.
 */
3686ea8dc4b6Seschrock static void
3687ea8dc4b6Seschrock spa_async_thread(spa_t *spa)
3688ea8dc4b6Seschrock {
3689e14bb325SJeff Bonwick 	int tasks;
3690ea8dc4b6Seschrock 
3691ea8dc4b6Seschrock 	ASSERT(spa->spa_sync_on);
3692ea8dc4b6Seschrock 
	/* Snapshot and clear the pending task mask under the async lock. */
3693ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3694ea8dc4b6Seschrock 	tasks = spa->spa_async_tasks;
3695ea8dc4b6Seschrock 	spa->spa_async_tasks = 0;
3696ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3697ea8dc4b6Seschrock 
36980373e76bSbonwick 	/*
36990373e76bSbonwick 	 * See if the config needs to be updated.
37000373e76bSbonwick 	 */
37010373e76bSbonwick 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
37020373e76bSbonwick 		mutex_enter(&spa_namespace_lock);
37030373e76bSbonwick 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
37040373e76bSbonwick 		mutex_exit(&spa_namespace_lock);
37050373e76bSbonwick 	}
37060373e76bSbonwick 
3707ea8dc4b6Seschrock 	/*
37083d7072f8Seschrock 	 * See if any devices need to be marked REMOVED.
	 * Auxiliary (l2cache and spare) vdevs are not part of the root
	 * vdev tree, so they must be walked separately.
3709ea8dc4b6Seschrock 	 */
3710e14bb325SJeff Bonwick 	if (tasks & SPA_ASYNC_REMOVE) {
3711e14bb325SJeff Bonwick 		spa_vdev_state_enter(spa);
37123d7072f8Seschrock 		spa_async_remove(spa, spa->spa_root_vdev);
3713e14bb325SJeff Bonwick 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
371449cf58c0SBrendan Gregg - Sun Microsystems 			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
3715e14bb325SJeff Bonwick 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
371649cf58c0SBrendan Gregg - Sun Microsystems 			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
3717e14bb325SJeff Bonwick 		(void) spa_vdev_state_exit(spa, NULL, 0);
3718e14bb325SJeff Bonwick 	}
3719e14bb325SJeff Bonwick 
3720e14bb325SJeff Bonwick 	/*
3721e14bb325SJeff Bonwick 	 * See if any devices need to be probed.
3722e14bb325SJeff Bonwick 	 */
3723e14bb325SJeff Bonwick 	if (tasks & SPA_ASYNC_PROBE) {
3724e14bb325SJeff Bonwick 		spa_vdev_state_enter(spa);
3725e14bb325SJeff Bonwick 		spa_async_probe(spa, spa->spa_root_vdev);
3726e14bb325SJeff Bonwick 		(void) spa_vdev_state_exit(spa, NULL, 0);
37273d7072f8Seschrock 	}
3728ea8dc4b6Seschrock 
3729ea8dc4b6Seschrock 	/*
3730ea8dc4b6Seschrock 	 * If any devices are done replacing, detach them.
3731ea8dc4b6Seschrock 	 */
37323d7072f8Seschrock 	if (tasks & SPA_ASYNC_RESILVER_DONE)
37333d7072f8Seschrock 		spa_vdev_resilver_done(spa);
3734fa9e4066Sahrens 
3735ea8dc4b6Seschrock 	/*
3736ea8dc4b6Seschrock 	 * Kick off a resilver.
3737ea8dc4b6Seschrock 	 */
3738088f3894Sahrens 	if (tasks & SPA_ASYNC_RESILVER)
3739088f3894Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);
3740ea8dc4b6Seschrock 
3741ea8dc4b6Seschrock 	/*
3742ea8dc4b6Seschrock 	 * Let the world know that we're done.
3743ea8dc4b6Seschrock 	 */
3744ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3745ea8dc4b6Seschrock 	spa->spa_async_thread = NULL;
3746ea8dc4b6Seschrock 	cv_broadcast(&spa->spa_async_cv);
3747ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3748ea8dc4b6Seschrock 	thread_exit();
3749ea8dc4b6Seschrock }
3750ea8dc4b6Seschrock 
/*
 * Suspend async task processing: bump the suspend count (preventing
 * spa_async_dispatch() from starting a new thread) and wait for any
 * currently-running async thread to exit.  Pair with spa_async_resume().
 */
3751ea8dc4b6Seschrock void
3752ea8dc4b6Seschrock spa_async_suspend(spa_t *spa)
3753ea8dc4b6Seschrock {
3754ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3755ea8dc4b6Seschrock 	spa->spa_async_suspended++;
3756ea8dc4b6Seschrock 	while (spa->spa_async_thread != NULL)
3757ea8dc4b6Seschrock 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
3758ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3759ea8dc4b6Seschrock }
3760ea8dc4b6Seschrock 
/*
 * Undo one spa_async_suspend().  Note this does not itself restart the
 * async thread; dispatch happens on the next spa_async_dispatch() call.
 */
3761ea8dc4b6Seschrock void
3762ea8dc4b6Seschrock spa_async_resume(spa_t *spa)
3763ea8dc4b6Seschrock {
3764ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3765ea8dc4b6Seschrock 	ASSERT(spa->spa_async_suspended != 0);
3766ea8dc4b6Seschrock 	spa->spa_async_suspended--;
3767ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3768ea8dc4b6Seschrock }
3769ea8dc4b6Seschrock 
/*
 * Start the async worker thread if there are pending tasks, processing is
 * not suspended, no thread is already running, and the root filesystem is
 * writable (async work may need to update the cache file).
 */
3770ea8dc4b6Seschrock static void
3771ea8dc4b6Seschrock spa_async_dispatch(spa_t *spa)
3772ea8dc4b6Seschrock {
3773ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3774ea8dc4b6Seschrock 	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
37750373e76bSbonwick 	    spa->spa_async_thread == NULL &&
37760373e76bSbonwick 	    rootdir != NULL && !vn_is_readonly(rootdir))
3777ea8dc4b6Seschrock 		spa->spa_async_thread = thread_create(NULL, 0,
3778ea8dc4b6Seschrock 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
3779ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3780ea8dc4b6Seschrock }
3781ea8dc4b6Seschrock 
/*
 * Record one or more SPA_ASYNC_* task bits for the async thread to service.
 * The thread itself is started later by spa_async_dispatch().
 */
3782ea8dc4b6Seschrock void
3783ea8dc4b6Seschrock spa_async_request(spa_t *spa, int task)
3784ea8dc4b6Seschrock {
3785ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3786ea8dc4b6Seschrock 	spa->spa_async_tasks |= task;
3787ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3788fa9e4066Sahrens }
3789fa9e4066Sahrens 
3790fa9e4066Sahrens /*
3791fa9e4066Sahrens  * ==========================================================================
3792fa9e4066Sahrens  * SPA syncing routines
3793fa9e4066Sahrens  * ==========================================================================
3794fa9e4066Sahrens  */
3795fa9e4066Sahrens 
/*
 * Free all blocks accumulated on the deferred-free bplist, then vacate the
 * list in a transaction assigned to 'txg'.  Every block on the list must
 * have been born before this txg.
 */
3796fa9e4066Sahrens static void
3797fa9e4066Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
3798fa9e4066Sahrens {
3799fa9e4066Sahrens 	bplist_t *bpl = &spa->spa_sync_bplist;
3800fa9e4066Sahrens 	dmu_tx_t *tx;
3801fa9e4066Sahrens 	blkptr_t blk;
3802fa9e4066Sahrens 	uint64_t itor = 0;
3803fa9e4066Sahrens 	zio_t *zio;
3804fa9e4066Sahrens 	int error;
3805fa9e4066Sahrens 	uint8_t c = 1;
3806fa9e4066Sahrens 
3807e14bb325SJeff Bonwick 	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
3808fa9e4066Sahrens 
3809e14bb325SJeff Bonwick 	while (bplist_iterate(bpl, &itor, &blk) == 0) {
3810e14bb325SJeff Bonwick 		ASSERT(blk.blk_birth < txg);
3811e14bb325SJeff Bonwick 		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL,
3812e14bb325SJeff Bonwick 		    ZIO_FLAG_MUSTSUCCEED));
3813e14bb325SJeff Bonwick 	}
3814fa9e4066Sahrens 
3815fa9e4066Sahrens 	error = zio_wait(zio);
3816fa9e4066Sahrens 	ASSERT3U(error, ==, 0);
3817fa9e4066Sahrens 
3818fa9e4066Sahrens 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3819fa9e4066Sahrens 	bplist_vacate(bpl, tx);
3820fa9e4066Sahrens 
3821fa9e4066Sahrens 	/*
3822fa9e4066Sahrens 	 * Pre-dirty the first block so we sync to convergence faster.
3823fa9e4066Sahrens 	 * (Usually only the first block is needed.)
3824fa9e4066Sahrens 	 */
3825fa9e4066Sahrens 	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
3826fa9e4066Sahrens 	dmu_tx_commit(tx);
3827fa9e4066Sahrens }
3828fa9e4066Sahrens 
/*
 * Pack nvlist 'nv' in XDR encoding and write it to MOS object 'obj' in
 * transaction 'tx'.  The packed size is recorded in the object's bonus
 * buffer so readers know how many bytes to unpack.
 */
3829fa9e4066Sahrens static void
383099653d4eSeschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3831fa9e4066Sahrens {
3832fa9e4066Sahrens 	char *packed = NULL;
3833f7991ba4STim Haley 	size_t bufsize;
3834fa9e4066Sahrens 	size_t nvsize = 0;
3835fa9e4066Sahrens 	dmu_buf_t *db;
3836fa9e4066Sahrens 
383799653d4eSeschrock 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3838fa9e4066Sahrens 
3839f7991ba4STim Haley 	/*
3840f7991ba4STim Haley 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
3841f7991ba4STim Haley 	 * information.  This avoids the dbuf_will_dirty() path and
3842f7991ba4STim Haley 	 * saves us a pre-read to get data we don't actually care about.
3843f7991ba4STim Haley 	 */
3844f7991ba4STim Haley 	bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
3845f7991ba4STim Haley 	packed = kmem_alloc(bufsize, KM_SLEEP);
3846fa9e4066Sahrens 
384799653d4eSeschrock 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3848ea8dc4b6Seschrock 	    KM_SLEEP) == 0);
	/* Zero the round-up tail so no stale kernel memory hits disk. */
3849f7991ba4STim Haley 	bzero(packed + nvsize, bufsize - nvsize);
3850fa9e4066Sahrens 
3851f7991ba4STim Haley 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
3852fa9e4066Sahrens 
3853f7991ba4STim Haley 	kmem_free(packed, bufsize);
3854fa9e4066Sahrens 
	/* Record the true (unpadded) packed size in the bonus buffer. */
385599653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3856fa9e4066Sahrens 	dmu_buf_will_dirty(db, tx);
3857fa9e4066Sahrens 	*(uint64_t *)db->db_data = nvsize;
3858ea8dc4b6Seschrock 	dmu_buf_rele(db, FTAG);
3859fa9e4066Sahrens }
3860fa9e4066Sahrens 
/*
 * Sync the nvlist describing an auxiliary device class (hot spares or
 * l2cache) to the MOS, if it has been marked dirty via sav_sync.  'config'
 * and 'entry' name the nvlist array and the pool-directory key for this
 * class.  Allocates the backing MOS object on first use.
 */
386199653d4eSeschrock static void
3862fa94a07fSbrendan spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3863fa94a07fSbrendan     const char *config, const char *entry)
386499653d4eSeschrock {
386599653d4eSeschrock 	nvlist_t *nvroot;
3866fa94a07fSbrendan 	nvlist_t **list;
386799653d4eSeschrock 	int i;
386899653d4eSeschrock 
3869fa94a07fSbrendan 	if (!sav->sav_sync)
387099653d4eSeschrock 		return;
387199653d4eSeschrock 
387299653d4eSeschrock 	/*
3873fa94a07fSbrendan 	 * Update the MOS nvlist describing the list of available devices.
3874fa94a07fSbrendan 	 * spa_validate_aux() will have already made sure this nvlist is
38753d7072f8Seschrock 	 * valid and the vdevs are labeled appropriately.
387699653d4eSeschrock 	 */
3877fa94a07fSbrendan 	if (sav->sav_object == 0) {
3878fa94a07fSbrendan 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3879fa94a07fSbrendan 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3880fa94a07fSbrendan 		    sizeof (uint64_t), tx);
388199653d4eSeschrock 		VERIFY(zap_update(spa->spa_meta_objset,
3882fa94a07fSbrendan 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3883fa94a07fSbrendan 		    &sav->sav_object, tx) == 0);
388499653d4eSeschrock 	}
388599653d4eSeschrock 
388699653d4eSeschrock 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3887fa94a07fSbrendan 	if (sav->sav_count == 0) {
		/* No devices left: store an explicitly empty array. */
3888fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
388999653d4eSeschrock 	} else {
3890fa94a07fSbrendan 		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3891fa94a07fSbrendan 		for (i = 0; i < sav->sav_count; i++)
3892fa94a07fSbrendan 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
3893fa94a07fSbrendan 			    B_FALSE, B_FALSE, B_TRUE);
3894fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
3895fa94a07fSbrendan 		    sav->sav_count) == 0);
3896fa94a07fSbrendan 		for (i = 0; i < sav->sav_count; i++)
3897fa94a07fSbrendan 			nvlist_free(list[i]);
3898fa94a07fSbrendan 		kmem_free(list, sav->sav_count * sizeof (void *));
389999653d4eSeschrock 	}
390099653d4eSeschrock 
3901fa94a07fSbrendan 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
390206eeb2adSek 	nvlist_free(nvroot);
390399653d4eSeschrock 
3904fa94a07fSbrendan 	sav->sav_sync = B_FALSE;
390599653d4eSeschrock }
390699653d4eSeschrock 
/*
 * If any top-level vdevs dirtied the pool config, generate a fresh config
 * nvlist and write it to the MOS config object.  The new config is parked
 * in spa_config_syncing until the txg commits.
 */
390799653d4eSeschrock static void
390899653d4eSeschrock spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
390999653d4eSeschrock {
391099653d4eSeschrock 	nvlist_t *config;
391199653d4eSeschrock 
3912e14bb325SJeff Bonwick 	if (list_is_empty(&spa->spa_config_dirty_list))
391399653d4eSeschrock 		return;
391499653d4eSeschrock 
3915e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3916e14bb325SJeff Bonwick 
3917e14bb325SJeff Bonwick 	config = spa_config_generate(spa, spa->spa_root_vdev,
3918e14bb325SJeff Bonwick 	    dmu_tx_get_txg(tx), B_FALSE);
3919e14bb325SJeff Bonwick 
3920e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_STATE, FTAG);
392199653d4eSeschrock 
	/* Supersede any config from an earlier pass of this sync. */
392299653d4eSeschrock 	if (spa->spa_config_syncing)
392399653d4eSeschrock 		nvlist_free(spa->spa_config_syncing);
392499653d4eSeschrock 	spa->spa_config_syncing = config;
392599653d4eSeschrock 
392699653d4eSeschrock 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
392799653d4eSeschrock }
392899653d4eSeschrock 
3929990b4856Slling /*
3930990b4856Slling  * Set zpool properties.
 *
 * Sync-task callback: arg1 is the spa_t, arg2 an nvlist of property
 * name/value pairs.  Persistent properties are written to the pool-props
 * ZAP object (created on first use); a few special-cased properties are
 * handled in-core only.  Runs in syncing context under spa_props_lock.
3931990b4856Slling  */
3932b1b8ab34Slling static void
3933ecd6cf80Smarks spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3934b1b8ab34Slling {
3935b1b8ab34Slling 	spa_t *spa = arg1;
3936b1b8ab34Slling 	objset_t *mos = spa->spa_meta_objset;
3937990b4856Slling 	nvlist_t *nvp = arg2;
3938990b4856Slling 	nvpair_t *elem;
39393d7072f8Seschrock 	uint64_t intval;
3940c5904d13Seschrock 	char *strval;
3941990b4856Slling 	zpool_prop_t prop;
3942990b4856Slling 	const char *propname;
3943990b4856Slling 	zprop_type_t proptype;
3944b1b8ab34Slling 
3945e14bb325SJeff Bonwick 	mutex_enter(&spa->spa_props_lock);
3946e14bb325SJeff Bonwick 
3947990b4856Slling 	elem = NULL;
3948990b4856Slling 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
3949990b4856Slling 		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
3950990b4856Slling 		case ZPOOL_PROP_VERSION:
3951990b4856Slling 			/*
3952990b4856Slling 			 * Only set version for non-zpool-creation cases
3953990b4856Slling 			 * (set/import). spa_create() needs special care
3954990b4856Slling 			 * for version setting.
3955990b4856Slling 			 */
3956990b4856Slling 			if (tx->tx_txg != TXG_INITIAL) {
3957990b4856Slling 				VERIFY(nvpair_value_uint64(elem,
3958990b4856Slling 				    &intval) == 0);
3959990b4856Slling 				ASSERT(intval <= SPA_VERSION);
3960990b4856Slling 				ASSERT(intval >= spa_version(spa));
3961990b4856Slling 				spa->spa_uberblock.ub_version = intval;
3962990b4856Slling 				vdev_config_dirty(spa->spa_root_vdev);
3963990b4856Slling 			}
3964ecd6cf80Smarks 			break;
3965990b4856Slling 
3966990b4856Slling 		case ZPOOL_PROP_ALTROOT:
3967990b4856Slling 			/*
3968990b4856Slling 			 * 'altroot' is a non-persistent property. It should
3969990b4856Slling 			 * have been set temporarily at creation or import time.
3970990b4856Slling 			 */
3971990b4856Slling 			ASSERT(spa->spa_root != NULL);
3972b1b8ab34Slling 			break;
39733d7072f8Seschrock 
39742f8aaab3Seschrock 		case ZPOOL_PROP_CACHEFILE:
3975990b4856Slling 			/*
3976379c004dSEric Schrock 			 * 'cachefile' is also a non-persistent property.
3977990b4856Slling 			 */
39783d7072f8Seschrock 			break;
3979990b4856Slling 		default:
3980990b4856Slling 			/*
3981990b4856Slling 			 * Set pool property values in the poolprops mos object.
3982990b4856Slling 			 */
3983990b4856Slling 			if (spa->spa_pool_props_object == 0) {
3984990b4856Slling 				objset_t *mos = spa->spa_meta_objset;
3985990b4856Slling 
3986990b4856Slling 				VERIFY((spa->spa_pool_props_object =
3987990b4856Slling 				    zap_create(mos, DMU_OT_POOL_PROPS,
3988990b4856Slling 				    DMU_OT_NONE, 0, tx)) > 0);
3989990b4856Slling 
3990990b4856Slling 				VERIFY(zap_update(mos,
3991990b4856Slling 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
3992990b4856Slling 				    8, 1, &spa->spa_pool_props_object, tx)
3993990b4856Slling 				    == 0);
3994990b4856Slling 			}
3995990b4856Slling 
3996990b4856Slling 			/* normalize the property name */
3997990b4856Slling 			propname = zpool_prop_to_name(prop);
3998990b4856Slling 			proptype = zpool_prop_get_type(prop);
3999990b4856Slling 
4000990b4856Slling 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
4001990b4856Slling 				ASSERT(proptype == PROP_TYPE_STRING);
4002990b4856Slling 				VERIFY(nvpair_value_string(elem, &strval) == 0);
4003990b4856Slling 				VERIFY(zap_update(mos,
4004990b4856Slling 				    spa->spa_pool_props_object, propname,
4005990b4856Slling 				    1, strlen(strval) + 1, strval, tx) == 0);
4006990b4856Slling 
4007990b4856Slling 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
4008990b4856Slling 				VERIFY(nvpair_value_uint64(elem, &intval) == 0);
4009990b4856Slling 
4010990b4856Slling 				if (proptype == PROP_TYPE_INDEX) {
4011990b4856Slling 					const char *unused;
					/* Reject values with no index name. */
4012990b4856Slling 					VERIFY(zpool_prop_index_to_string(
4013990b4856Slling 					    prop, intval, &unused) == 0);
4014990b4856Slling 				}
4015990b4856Slling 				VERIFY(zap_update(mos,
4016990b4856Slling 				    spa->spa_pool_props_object, propname,
4017990b4856Slling 				    8, 1, &intval, tx) == 0);
4018990b4856Slling 			} else {
4019990b4856Slling 				ASSERT(0); /* not allowed */
4020990b4856Slling 			}
4021990b4856Slling 
			/* Mirror selected persistent values into the spa_t. */
40220a4e9518Sgw 			switch (prop) {
40230a4e9518Sgw 			case ZPOOL_PROP_DELEGATION:
4024990b4856Slling 				spa->spa_delegation = intval;
40250a4e9518Sgw 				break;
40260a4e9518Sgw 			case ZPOOL_PROP_BOOTFS:
4027990b4856Slling 				spa->spa_bootfs = intval;
40280a4e9518Sgw 				break;
40290a4e9518Sgw 			case ZPOOL_PROP_FAILUREMODE:
40300a4e9518Sgw 				spa->spa_failmode = intval;
40310a4e9518Sgw 				break;
40320a4e9518Sgw 			default:
40330a4e9518Sgw 				break;
40340a4e9518Sgw 			}
4035990b4856Slling 		}
4036990b4856Slling 
4037990b4856Slling 		/* log internal history if this is not a zpool create */
4038990b4856Slling 		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
4039990b4856Slling 		    tx->tx_txg != TXG_INITIAL) {
4040990b4856Slling 			spa_history_internal_log(LOG_POOL_PROPSET,
4041990b4856Slling 			    spa, tx, cr, "%s %lld %s",
4042e14bb325SJeff Bonwick 			    nvpair_name(elem), intval, spa_name(spa));
4043b1b8ab34Slling 		}
4044b1b8ab34Slling 	}
4045e14bb325SJeff Bonwick 
4046e14bb325SJeff Bonwick 	mutex_exit(&spa->spa_props_lock);
4047b1b8ab34Slling }
4048b1b8ab34Slling 
/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 *
 * Called once per txg by the txg sync thread.  On return, the uberblock
 * for 'txg' has been committed to stable storage and spa_ubsync has been
 * advanced to match.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bplist_t *bpl = &spa->spa_sync_bplist;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int dirty_vdevs;
	int error;

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	/*
	 * If there are any pending vdev state changes, convert them
	 * into config changes that go out with this transaction group.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	while (list_head(&spa->spa_state_dirty_list) != NULL) {
		/*
		 * We need the write lock here because, for aux vdevs,
		 * calling vdev_config_dirty() modifies sav_config.
		 * This is ugly and will become unnecessary when we
		 * eliminate the aux vdev wart by integrating all vdevs
		 * into the root vdev tree.
		 */
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
			vdev_state_clean(vd);
			vdev_config_dirty(vd);
		}
		/*
		 * Drop back to readers; the outer while re-checks in case
		 * new state-dirty entries appeared while we held the
		 * write lock.
		 */
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		int i;

		/* Any child with a non-default deflate ratio disqualifies. */
		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	/*
	 * One-time on-disk feature upgrade: create the $ORIGIN dataset
	 * when crossing SPA_VERSION_ORIGIN.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	/*
	 * One-time on-disk feature upgrade: build next-clones bookkeeping
	 * when crossing SPA_VERSION_NEXT_CLONES.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	/*
	 * If anything has changed in this txg, push the deferred frees
	 * from the previous txg.  If not, leave them alone so that we
	 * don't generate work on an otherwise idle system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg))
		spa_sync_deferred_frees(spa, txg);

	/*
	 * Iterate to convergence.  Each pass may dirty more vdevs (new
	 * blocks are allocated while syncing), so we loop until a pass
	 * completes with no dirty vdevs.
	 */
	do {
		spa->spa_sync_pass++;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		dirty_vdevs = 0;
		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
			vdev_sync(vd, txg);
			dirty_vdevs++;
		}

		bplist_sync(bpl, tx);
	} while (dirty_vdevs);

	bplist_close(bpl);

	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for a complete description).
	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
	 */
	for (;;) {
		/*
		 * We hold SCL_STATE to prevent vdev open/close/etc.
		 * while we're attempting to write the vdev labels.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

		if (list_is_empty(&spa->spa_config_dirty_list)) {
			vdev_t *svd[SPA_DVAS_PER_BP];
			int svdcount = 0;
			int children = rvd->vdev_children;
			int c0 = spa_get_random(children);
			int c;

			/*
			 * Pick up to SPA_DVAS_PER_BP suitable top-level
			 * vdevs, starting at a random child so the load
			 * spreads across txgs.  Log vdevs and vdevs with
			 * no metaslab array are skipped.
			 */
			for (c = 0; c < children; c++) {
				vd = rvd->vdev_child[(c0 + c) % children];
				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
					continue;
				svd[svdcount++] = vd;
				if (svdcount == SPA_DVAS_PER_BP)
					break;
			}
			error = vdev_config_sync(svd, svdcount, txg);
		} else {
			error = vdev_config_sync(rvd->vdev_child,
			    rvd->vdev_children, txg);
		}

		spa_config_exit(spa, SCL_STATE, FTAG);

		if (error == 0)
			break;
		/*
		 * Label writes failed: suspend the pool, wait for it to
		 * be resumed, and then retry the config sync.
		 */
		zio_suspend(spa, NULL);
		zio_resume_wait(spa);
	}
	dmu_tx_commit(tx);

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	/* Remember the uberblock we just committed. */
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}
4263fa9e4066Sahrens 
4264fa9e4066Sahrens /*
4265fa9e4066Sahrens  * Sync all pools.  We don't want to hold the namespace lock across these
4266fa9e4066Sahrens  * operations, so we take a reference on the spa_t and drop the lock during the
4267fa9e4066Sahrens  * sync.
4268fa9e4066Sahrens  */
4269fa9e4066Sahrens void
4270fa9e4066Sahrens spa_sync_allpools(void)
4271fa9e4066Sahrens {
4272fa9e4066Sahrens 	spa_t *spa = NULL;
4273fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
4274fa9e4066Sahrens 	while ((spa = spa_next(spa)) != NULL) {
4275e14bb325SJeff Bonwick 		if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
4276fa9e4066Sahrens 			continue;
4277fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
4278fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
4279fa9e4066Sahrens 		txg_wait_synced(spa_get_dsl(spa), 0);
4280fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
4281fa9e4066Sahrens 		spa_close(spa, FTAG);
4282fa9e4066Sahrens 	}
4283fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
4284fa9e4066Sahrens }
4285fa9e4066Sahrens 
4286fa9e4066Sahrens /*
4287fa9e4066Sahrens  * ==========================================================================
4288fa9e4066Sahrens  * Miscellaneous routines
4289fa9e4066Sahrens  * ==========================================================================
4290fa9e4066Sahrens  */
4291fa9e4066Sahrens 
4292fa9e4066Sahrens /*
4293fa9e4066Sahrens  * Remove all pools in the system.
4294fa9e4066Sahrens  */
4295fa9e4066Sahrens void
4296fa9e4066Sahrens spa_evict_all(void)
4297fa9e4066Sahrens {
4298fa9e4066Sahrens 	spa_t *spa;
4299fa9e4066Sahrens 
4300fa9e4066Sahrens 	/*
4301fa9e4066Sahrens 	 * Remove all cached state.  All pools should be closed now,
4302fa9e4066Sahrens 	 * so every spa in the AVL tree should be unreferenced.
4303fa9e4066Sahrens 	 */
4304fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
4305fa9e4066Sahrens 	while ((spa = spa_next(NULL)) != NULL) {
4306fa9e4066Sahrens 		/*
4307ea8dc4b6Seschrock 		 * Stop async tasks.  The async thread may need to detach
4308ea8dc4b6Seschrock 		 * a device that's been replaced, which requires grabbing
4309ea8dc4b6Seschrock 		 * spa_namespace_lock, so we must drop it here.
4310fa9e4066Sahrens 		 */
4311fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
4312fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
4313ea8dc4b6Seschrock 		spa_async_suspend(spa);
4314fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
4315fa9e4066Sahrens 		spa_close(spa, FTAG);
4316fa9e4066Sahrens 
4317fa9e4066Sahrens 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4318fa9e4066Sahrens 			spa_unload(spa);
4319fa9e4066Sahrens 			spa_deactivate(spa);
4320fa9e4066Sahrens 		}
4321fa9e4066Sahrens 		spa_remove(spa);
4322fa9e4066Sahrens 	}
4323fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
4324fa9e4066Sahrens }
4325ea8dc4b6Seschrock 
4326ea8dc4b6Seschrock vdev_t *
43276809eb4eSEric Schrock spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
4328ea8dc4b6Seschrock {
4329c5904d13Seschrock 	vdev_t *vd;
4330c5904d13Seschrock 	int i;
4331c5904d13Seschrock 
4332c5904d13Seschrock 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4333c5904d13Seschrock 		return (vd);
4334c5904d13Seschrock 
43356809eb4eSEric Schrock 	if (aux) {
4336c5904d13Seschrock 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4337c5904d13Seschrock 			vd = spa->spa_l2cache.sav_vdevs[i];
43386809eb4eSEric Schrock 			if (vd->vdev_guid == guid)
43396809eb4eSEric Schrock 				return (vd);
43406809eb4eSEric Schrock 		}
43416809eb4eSEric Schrock 
43426809eb4eSEric Schrock 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
43436809eb4eSEric Schrock 			vd = spa->spa_spares.sav_vdevs[i];
4344c5904d13Seschrock 			if (vd->vdev_guid == guid)
4345c5904d13Seschrock 				return (vd);
4346c5904d13Seschrock 		}
4347c5904d13Seschrock 	}
4348c5904d13Seschrock 
4349c5904d13Seschrock 	return (NULL);
4350ea8dc4b6Seschrock }
4351eaca9bbdSeschrock 
/*
 * Upgrade the pool's on-disk SPA version to 'version'.  Dirties the root
 * vdev config and waits for the change to reach stable storage before
 * returning.
 */
void
spa_upgrade(spa_t *spa, uint64_t version)
{
	/* Write-lock everything while the in-core version is changed. */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	/* Ensure the new version is committed before returning. */
	txg_wait_synced(spa_get_dsl(spa), 0);
}
437299653d4eSeschrock 
437399653d4eSeschrock boolean_t
437499653d4eSeschrock spa_has_spare(spa_t *spa, uint64_t guid)
437599653d4eSeschrock {
437699653d4eSeschrock 	int i;
437739c23413Seschrock 	uint64_t spareguid;
4378fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_spares;
437999653d4eSeschrock 
4380fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++)
4381fa94a07fSbrendan 		if (sav->sav_vdevs[i]->vdev_guid == guid)
438299653d4eSeschrock 			return (B_TRUE);
438399653d4eSeschrock 
4384fa94a07fSbrendan 	for (i = 0; i < sav->sav_npending; i++) {
4385fa94a07fSbrendan 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4386fa94a07fSbrendan 		    &spareguid) == 0 && spareguid == guid)
438739c23413Seschrock 			return (B_TRUE);
438839c23413Seschrock 	}
438939c23413Seschrock 
439099653d4eSeschrock 	return (B_FALSE);
4391eaca9bbdSeschrock }
4392b1b8ab34Slling 
439389a89ebfSlling /*
439489a89ebfSlling  * Check if a pool has an active shared spare device.
439589a89ebfSlling  * Note: reference count of an active spare is 2, as a spare and as a replace
439689a89ebfSlling  */
439789a89ebfSlling static boolean_t
439889a89ebfSlling spa_has_active_shared_spare(spa_t *spa)
439989a89ebfSlling {
440089a89ebfSlling 	int i, refcnt;
440189a89ebfSlling 	uint64_t pool;
440289a89ebfSlling 	spa_aux_vdev_t *sav = &spa->spa_spares;
440389a89ebfSlling 
440489a89ebfSlling 	for (i = 0; i < sav->sav_count; i++) {
440589a89ebfSlling 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
440689a89ebfSlling 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
440789a89ebfSlling 		    refcnt > 2)
440889a89ebfSlling 			return (B_TRUE);
440989a89ebfSlling 	}
441089a89ebfSlling 
441189a89ebfSlling 	return (B_FALSE);
441289a89ebfSlling }
441389a89ebfSlling 
/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 *
 * Best-effort: attribute-allocation failures silently drop the event.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t		*ev;
	sysevent_attr_list_t	*attr = NULL;
	sysevent_value_t	value;
	sysevent_id_t		eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	/* Pool name attribute. */
	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	/* Pool guid attribute. */
	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	/* Vdev guid and (when known) device path, if a vdev was given. */
	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	/*
	 * On successful attach the event takes ownership of the attribute
	 * list, so clear 'attr' to keep the cleanup path from freeing it
	 * twice.
	 */
	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
4471