xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa.c (revision 5679c89f)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
2199653d4eSeschrock 
22fa9e4066Sahrens /*
23379c004dSEric Schrock  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24fa9e4066Sahrens  * Use is subject to license terms.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens /*
28fa9e4066Sahrens  * This file contains all the routines used when modifying on-disk SPA state.
29fa9e4066Sahrens  * This includes opening, importing, destroying, exporting a pool, and syncing a
30fa9e4066Sahrens  * pool.
31fa9e4066Sahrens  */
32fa9e4066Sahrens 
33fa9e4066Sahrens #include <sys/zfs_context.h>
34ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h>
35fa9e4066Sahrens #include <sys/spa_impl.h>
36fa9e4066Sahrens #include <sys/zio.h>
37fa9e4066Sahrens #include <sys/zio_checksum.h>
38fa9e4066Sahrens #include <sys/zio_compress.h>
39fa9e4066Sahrens #include <sys/dmu.h>
40fa9e4066Sahrens #include <sys/dmu_tx.h>
41fa9e4066Sahrens #include <sys/zap.h>
42fa9e4066Sahrens #include <sys/zil.h>
43fa9e4066Sahrens #include <sys/vdev_impl.h>
44fa9e4066Sahrens #include <sys/metaslab.h>
45fa9e4066Sahrens #include <sys/uberblock_impl.h>
46fa9e4066Sahrens #include <sys/txg.h>
47fa9e4066Sahrens #include <sys/avl.h>
48fa9e4066Sahrens #include <sys/dmu_traverse.h>
49b1b8ab34Slling #include <sys/dmu_objset.h>
50fa9e4066Sahrens #include <sys/unique.h>
51fa9e4066Sahrens #include <sys/dsl_pool.h>
52b1b8ab34Slling #include <sys/dsl_dataset.h>
53fa9e4066Sahrens #include <sys/dsl_dir.h>
54fa9e4066Sahrens #include <sys/dsl_prop.h>
55b1b8ab34Slling #include <sys/dsl_synctask.h>
56fa9e4066Sahrens #include <sys/fs/zfs.h>
57fa94a07fSbrendan #include <sys/arc.h>
58fa9e4066Sahrens #include <sys/callb.h>
5995173954Sek #include <sys/systeminfo.h>
6095173954Sek #include <sys/sunddi.h>
61e7cbe64fSgw #include <sys/spa_boot.h>
62fa9e4066Sahrens 
63*5679c89fSjv #ifdef	_KERNEL
64*5679c89fSjv #include <sys/zone.h>
65*5679c89fSjv #endif	/* _KERNEL */
66*5679c89fSjv 
67990b4856Slling #include "zfs_prop.h"
68b7b97454Sperrin #include "zfs_comutil.h"
69990b4856Slling 
/*
 * Thread counts for each zio taskq, indexed by zio type and then by
 * taskq type (the columns are ISSUE and INTR, per the header below).
 * spa_activate() passes these counts to taskq_create().  Reads get
 * extra INTR threads and writes extra ISSUE threads; all other zio
 * types use a single thread of each kind.
 */
int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/*	ISSUE	INTR					*/
	{	1,	1	},	/* ZIO_TYPE_NULL	*/
	{	1,	8	},	/* ZIO_TYPE_READ	*/
	{	8,	1	},	/* ZIO_TYPE_WRITE	*/
	{	1,	1	},	/* ZIO_TYPE_FREE	*/
	{	1,	1	},	/* ZIO_TYPE_CLAIM	*/
	{	1,	1	},	/* ZIO_TYPE_IOCTL	*/
};
79416e0cd8Sek 
80990b4856Slling static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
8189a89ebfSlling static boolean_t spa_has_active_shared_spare(spa_t *spa);
82990b4856Slling 
83990b4856Slling /*
84990b4856Slling  * ==========================================================================
85990b4856Slling  * SPA properties routines
86990b4856Slling  * ==========================================================================
87990b4856Slling  */
88990b4856Slling 
89990b4856Slling /*
90990b4856Slling  * Add a (source=src, propname=propval) list to an nvlist.
91990b4856Slling  */
929d82f4f6Slling static void
93990b4856Slling spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
94990b4856Slling     uint64_t intval, zprop_source_t src)
95990b4856Slling {
96990b4856Slling 	const char *propname = zpool_prop_to_name(prop);
97990b4856Slling 	nvlist_t *propval;
98990b4856Slling 
999d82f4f6Slling 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1009d82f4f6Slling 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
101990b4856Slling 
1029d82f4f6Slling 	if (strval != NULL)
1039d82f4f6Slling 		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
1049d82f4f6Slling 	else
1059d82f4f6Slling 		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
106990b4856Slling 
1079d82f4f6Slling 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
108990b4856Slling 	nvlist_free(propval);
109990b4856Slling }
110990b4856Slling 
/*
 * Get property values from the spa configuration.
 *
 * Fills in *nvp (an already-allocated nvlist) with the properties that
 * are derived from in-core state rather than the on-disk MOS object:
 * name, size/used/available/capacity, health, version, guid, altroot,
 * and cachefile.  Caller must hold spa_props_lock.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size;
	uint64_t used;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	/*
	 * Space and health properties only exist once a vdev tree does.
	 */
	if (spa->spa_root_vdev != NULL) {
		size = spa_get_space(spa);
		used = spa_get_alloc(spa);
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
		    size - used, src);

		/* Capacity is reported as a whole-number percentage. */
		cap = (size == 0) ? 0 : (used * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    spa->spa_root_vdev->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	/*
	 * Report the cachefile only when it is non-default: a NULL
	 * scd_path means caching was explicitly disabled ("none"), and
	 * a path equal to spa_config_path is the default and is omitted.
	 */
	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}
164990b4856Slling 
/*
 * Get zpool property values.
 *
 * On success *nvp is a freshly allocated nvlist holding every pool
 * property: first those derived from in-core spa state (via
 * spa_prop_get_config()), then those persisted in the MOS pool
 * property object.  Returns 0 on success or an errno; on failure
 * *nvp is freed and set to NULL.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.  The loop
	 * terminates when zap_cursor_retrieve() returns non-zero --
	 * ENOENT once the cursor runs off the end (the normal case).
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		/* Skip ZAP entries that don't name a known pool property. */
		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				/*
				 * NOTE(review): on failure this 'break'
				 * only leaves the switch; the loop then
				 * overwrites 'err' via the next
				 * zap_cursor_retrieve(), so the error is
				 * effectively dropped and iteration
				 * continues.  Confirm this is intended.
				 */
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				/* bootfs is reported as a dataset name. */
				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
	/*
	 * Control falls through to this label; no goto targets it
	 * within the function as written.
	 */
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
271990b4856Slling 
/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 *
 * Validation stops at the first bad property and its errno is
 * returned.  As a side effect, a valid bootfs name is translated in
 * place into its dataset object number, since that is what gets
 * stored on disk.  Returns 0 when every property is acceptable.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			/* The version may only move forward. */
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			/* Boolean properties: only 0 or 1 is valid. */
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				/*
				 * An empty name clears bootfs back to
				 * its default object number.
				 */
				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			/* "" (default) and "none" are always acceptable. */
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			/* Otherwise it must be an absolute path ... */
			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			/* ... whose last component is a real file name. */
			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	/*
	 * Replace the bootfs name the caller supplied with the dataset
	 * object number computed above; that is the persisted form.
	 */
	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
418990b4856Slling 
419379c004dSEric Schrock void
420379c004dSEric Schrock spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
421379c004dSEric Schrock {
422379c004dSEric Schrock 	char *cachefile;
423379c004dSEric Schrock 	spa_config_dirent_t *dp;
424379c004dSEric Schrock 
425379c004dSEric Schrock 	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
426379c004dSEric Schrock 	    &cachefile) != 0)
427379c004dSEric Schrock 		return;
428379c004dSEric Schrock 
429379c004dSEric Schrock 	dp = kmem_alloc(sizeof (spa_config_dirent_t),
430379c004dSEric Schrock 	    KM_SLEEP);
431379c004dSEric Schrock 
432379c004dSEric Schrock 	if (cachefile[0] == '\0')
433379c004dSEric Schrock 		dp->scd_path = spa_strdup(spa_config_path);
434379c004dSEric Schrock 	else if (strcmp(cachefile, "none") == 0)
435379c004dSEric Schrock 		dp->scd_path = NULL;
436379c004dSEric Schrock 	else
437379c004dSEric Schrock 		dp->scd_path = spa_strdup(cachefile);
438379c004dSEric Schrock 
439379c004dSEric Schrock 	list_insert_head(&spa->spa_config_list, dp);
440379c004dSEric Schrock 	if (need_sync)
441379c004dSEric Schrock 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
442379c004dSEric Schrock }
443379c004dSEric Schrock 
444990b4856Slling int
445990b4856Slling spa_prop_set(spa_t *spa, nvlist_t *nvp)
446990b4856Slling {
447990b4856Slling 	int error;
448379c004dSEric Schrock 	nvpair_t *elem;
449379c004dSEric Schrock 	boolean_t need_sync = B_FALSE;
450379c004dSEric Schrock 	zpool_prop_t prop;
451990b4856Slling 
452990b4856Slling 	if ((error = spa_prop_validate(spa, nvp)) != 0)
453990b4856Slling 		return (error);
454990b4856Slling 
455379c004dSEric Schrock 	elem = NULL;
456379c004dSEric Schrock 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
457379c004dSEric Schrock 		if ((prop = zpool_name_to_prop(
458379c004dSEric Schrock 		    nvpair_name(elem))) == ZPROP_INVAL)
459379c004dSEric Schrock 			return (EINVAL);
460379c004dSEric Schrock 
461379c004dSEric Schrock 		if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
462379c004dSEric Schrock 			continue;
463379c004dSEric Schrock 
464379c004dSEric Schrock 		need_sync = B_TRUE;
465379c004dSEric Schrock 		break;
466379c004dSEric Schrock 	}
467379c004dSEric Schrock 
468379c004dSEric Schrock 	if (need_sync)
469379c004dSEric Schrock 		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
470379c004dSEric Schrock 		    spa, nvp, 3));
471379c004dSEric Schrock 	else
472379c004dSEric Schrock 		return (0);
473990b4856Slling }
474990b4856Slling 
475990b4856Slling /*
476990b4856Slling  * If the bootfs property value is dsobj, clear it.
477990b4856Slling  */
478990b4856Slling void
479990b4856Slling spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
480990b4856Slling {
481990b4856Slling 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
482990b4856Slling 		VERIFY(zap_remove(spa->spa_meta_objset,
483990b4856Slling 		    spa->spa_pool_props_object,
484990b4856Slling 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
485990b4856Slling 		spa->spa_bootfs = 0;
486990b4856Slling 	}
487990b4856Slling }
488990b4856Slling 
489fa9e4066Sahrens /*
490fa9e4066Sahrens  * ==========================================================================
491fa9e4066Sahrens  * SPA state manipulation (open/create/destroy/import/export)
492fa9e4066Sahrens  * ==========================================================================
493fa9e4066Sahrens  */
494fa9e4066Sahrens 
495ea8dc4b6Seschrock static int
496ea8dc4b6Seschrock spa_error_entry_compare(const void *a, const void *b)
497ea8dc4b6Seschrock {
498ea8dc4b6Seschrock 	spa_error_entry_t *sa = (spa_error_entry_t *)a;
499ea8dc4b6Seschrock 	spa_error_entry_t *sb = (spa_error_entry_t *)b;
500ea8dc4b6Seschrock 	int ret;
501ea8dc4b6Seschrock 
502ea8dc4b6Seschrock 	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
503ea8dc4b6Seschrock 	    sizeof (zbookmark_t));
504ea8dc4b6Seschrock 
505ea8dc4b6Seschrock 	if (ret < 0)
506ea8dc4b6Seschrock 		return (-1);
507ea8dc4b6Seschrock 	else if (ret > 0)
508ea8dc4b6Seschrock 		return (1);
509ea8dc4b6Seschrock 	else
510ea8dc4b6Seschrock 		return (0);
511ea8dc4b6Seschrock }
512ea8dc4b6Seschrock 
513ea8dc4b6Seschrock /*
514ea8dc4b6Seschrock  * Utility function which retrieves copies of the current logs and
515ea8dc4b6Seschrock  * re-initializes them in the process.
516ea8dc4b6Seschrock  */
517ea8dc4b6Seschrock void
518ea8dc4b6Seschrock spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
519ea8dc4b6Seschrock {
520ea8dc4b6Seschrock 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
521ea8dc4b6Seschrock 
522ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
523ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
524ea8dc4b6Seschrock 
525ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_scrub,
526ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
527ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
528ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_last,
529ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
530ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
531ea8dc4b6Seschrock }
532ea8dc4b6Seschrock 
/*
 * Activate an uninitialized pool.
 *
 * Transitions the spa to POOL_STATE_ACTIVE and creates the in-core
 * machinery the pool needs before any vdevs are opened: the normal and
 * log metaslab classes, one taskq per (zio type, taskq type) pair
 * sized by zio_taskq_threads[], the dirty-config and dirty-state vdev
 * lists, the per-txg vdev list, and both error-log AVL trees.
 * 'mode' is the spa_mode (read/write) the pool is being opened with.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
			    zio_taskq_threads[t][q], maxclsyspri, 50,
			    INT_MAX, TASKQ_PREPOPULATE);
		}
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
570fa9e4066Sahrens 
/*
 * Opposite of spa_activate().
 *
 * Tears down everything spa_activate() created -- txg list, dirty
 * lists, taskqs, metaslab classes, and error-log trees -- and returns
 * the spa to POOL_STATE_UNINITIALIZED.  The pool must already be
 * quiesced: syncing stopped, dsl pool closed, vdev tree freed.
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}
612fa9e4066Sahrens 
613fa9e4066Sahrens /*
614fa9e4066Sahrens  * Verify a pool configuration, and construct the vdev tree appropriately.  This
615fa9e4066Sahrens  * will create all the necessary vdevs in the appropriate layout, with each vdev
616fa9e4066Sahrens  * in the CLOSED state.  This will prep the pool before open/creation/import.
617fa9e4066Sahrens  * All vdev validation is done by the vdev_alloc() routine.
618fa9e4066Sahrens  */
61999653d4eSeschrock static int
62099653d4eSeschrock spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
62199653d4eSeschrock     uint_t id, int atype)
622fa9e4066Sahrens {
623fa9e4066Sahrens 	nvlist_t **child;
624fa9e4066Sahrens 	uint_t c, children;
62599653d4eSeschrock 	int error;
626fa9e4066Sahrens 
62799653d4eSeschrock 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
62899653d4eSeschrock 		return (error);
629fa9e4066Sahrens 
63099653d4eSeschrock 	if ((*vdp)->vdev_ops->vdev_op_leaf)
63199653d4eSeschrock 		return (0);
632fa9e4066Sahrens 
633e14bb325SJeff Bonwick 	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
634e14bb325SJeff Bonwick 	    &child, &children);
635e14bb325SJeff Bonwick 
636e14bb325SJeff Bonwick 	if (error == ENOENT)
637e14bb325SJeff Bonwick 		return (0);
638e14bb325SJeff Bonwick 
639e14bb325SJeff Bonwick 	if (error) {
64099653d4eSeschrock 		vdev_free(*vdp);
64199653d4eSeschrock 		*vdp = NULL;
64299653d4eSeschrock 		return (EINVAL);
643fa9e4066Sahrens 	}
644fa9e4066Sahrens 
645fa9e4066Sahrens 	for (c = 0; c < children; c++) {
64699653d4eSeschrock 		vdev_t *vd;
64799653d4eSeschrock 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
64899653d4eSeschrock 		    atype)) != 0) {
64999653d4eSeschrock 			vdev_free(*vdp);
65099653d4eSeschrock 			*vdp = NULL;
65199653d4eSeschrock 			return (error);
652fa9e4066Sahrens 		}
653fa9e4066Sahrens 	}
654fa9e4066Sahrens 
65599653d4eSeschrock 	ASSERT(*vdp != NULL);
65699653d4eSeschrock 
65799653d4eSeschrock 	return (0);
658fa9e4066Sahrens }
659fa9e4066Sahrens 
/*
 * Opposite of spa_load().
 *
 * Quiesces and tears down the loaded pool state in strict order:
 * suspend async tasks, stop the txg sync thread, wait for outstanding
 * async root I/O, close the dsl pool, then (under SCL_ALL) drop the
 * L2ARC, free the vdev tree, and release the spare and l2cache aux
 * vdev arrays and configs.  Caller must hold spa_namespace_lock.
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	mutex_enter(&spa->spa_async_root_lock);
	while (spa->spa_async_root_count != 0)
		cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock);
	mutex_exit(&spa->spa_async_root_lock);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/* The remaining teardown requires all config locks as writer. */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/* Free the hot-spare vdevs, then their array and saved config. */
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	/* Same treatment for the l2cache devices. */
	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa_config_exit(spa, SCL_ALL, FTAG);
}
743fa9e4066Sahrens 
74499653d4eSeschrock /*
74599653d4eSeschrock  * Load (or re-load) the current list of vdevs describing the active spares for
74699653d4eSeschrock  * this pool.  When this is called, we have some form of basic information in
747fa94a07fSbrendan  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
748fa94a07fSbrendan  * then re-generate a more complete list including status information.
74999653d4eSeschrock  */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	/* All the label I/O below requires the full config lock as writer. */
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/*
		 * Undo the call to spa_activate() below: if this guid is
		 * also present in the active configuration, drop its spare
		 * registration before tearing down our copy of the vdev.
		 */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.   For each spare, there is potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		/* A spare has no parent tree; it is its own top-level vdev. */
		vd->vdev_top = vd;

		/* Skip status generation for spares that fail to open. */
		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
857fa94a07fSbrendan 
858fa94a07fSbrendan /*
859fa94a07fSbrendan  * Load (or re-load) the current list of vdevs describing the active l2cache for
860fa94a07fSbrendan  * this pool.  When this is called, we have some form of basic information in
861fa94a07fSbrendan  * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
862fa94a07fSbrendan  * then re-generate a more complete list including status information.
863fa94a07fSbrendan  * Devices which are already active have their details maintained, and are
864fa94a07fSbrendan  * not re-opened.
865fa94a07fSbrendan  */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	/*
	 * Detach the current list; devices still present in the new config
	 * are moved from oldvdevs[] to newvdevs[] below, the rest are purged.
	 */
	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 * NULL the old slot so the purge loop below
				 * doesn't tear it down.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			/* Aux vdevs have no parent tree; vd is its own top. */
			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			/*
			 * Register the usable region (everything past the
			 * front label) with the L2ARC.
			 */
			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	/*
	 * With no config, sav_count is still 0 here, so the cleanup loop
	 * after 'out' does nothing and 'l2cache' is never dereferenced.
	 */
	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
98999653d4eSeschrock 
99099653d4eSeschrock static int
99199653d4eSeschrock load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
99299653d4eSeschrock {
99399653d4eSeschrock 	dmu_buf_t *db;
99499653d4eSeschrock 	char *packed = NULL;
99599653d4eSeschrock 	size_t nvsize = 0;
99699653d4eSeschrock 	int error;
99799653d4eSeschrock 	*value = NULL;
99899653d4eSeschrock 
99999653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
100099653d4eSeschrock 	nvsize = *(uint64_t *)db->db_data;
100199653d4eSeschrock 	dmu_buf_rele(db, FTAG);
100299653d4eSeschrock 
100399653d4eSeschrock 	packed = kmem_alloc(nvsize, KM_SLEEP);
100499653d4eSeschrock 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
100599653d4eSeschrock 	if (error == 0)
100699653d4eSeschrock 		error = nvlist_unpack(packed, nvsize, value, 0);
100799653d4eSeschrock 	kmem_free(packed, nvsize);
100899653d4eSeschrock 
100999653d4eSeschrock 	return (error);
101099653d4eSeschrock }
101199653d4eSeschrock 
10123d7072f8Seschrock /*
10133d7072f8Seschrock  * Checks to see if the given vdev could not be opened, in which case we post a
10143d7072f8Seschrock  * sysevent to notify the autoreplace code that the device has been removed.
10153d7072f8Seschrock  */
10163d7072f8Seschrock static void
10173d7072f8Seschrock spa_check_removed(vdev_t *vd)
10183d7072f8Seschrock {
10193d7072f8Seschrock 	int c;
10203d7072f8Seschrock 
10213d7072f8Seschrock 	for (c = 0; c < vd->vdev_children; c++)
10223d7072f8Seschrock 		spa_check_removed(vd->vdev_child[c]);
10233d7072f8Seschrock 
10243d7072f8Seschrock 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
10253d7072f8Seschrock 		zfs_post_autoreplace(vd->vdev_spa, vd);
10263d7072f8Seschrock 		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
10273d7072f8Seschrock 	}
10283d7072f8Seschrock }
10293d7072f8Seschrock 
1030b87f3af3Sperrin /*
1031b87f3af3Sperrin  * Check for missing log devices
1032b87f3af3Sperrin  */
1033b87f3af3Sperrin int
1034b87f3af3Sperrin spa_check_logs(spa_t *spa)
1035b87f3af3Sperrin {
1036b87f3af3Sperrin 	switch (spa->spa_log_state) {
1037b87f3af3Sperrin 	case SPA_LOG_MISSING:
1038b87f3af3Sperrin 		/* need to recheck in case slog has been restored */
1039b87f3af3Sperrin 	case SPA_LOG_UNKNOWN:
1040b87f3af3Sperrin 		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
1041b87f3af3Sperrin 		    DS_FIND_CHILDREN)) {
1042b87f3af3Sperrin 			spa->spa_log_state = SPA_LOG_MISSING;
1043b87f3af3Sperrin 			return (1);
1044b87f3af3Sperrin 		}
1045b87f3af3Sperrin 		break;
1046b87f3af3Sperrin 
1047b87f3af3Sperrin 	case SPA_LOG_CLEAR:
1048b87f3af3Sperrin 		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
1049b87f3af3Sperrin 		    DS_FIND_CHILDREN);
1050b87f3af3Sperrin 		break;
1051b87f3af3Sperrin 	}
1052b87f3af3Sperrin 	spa->spa_log_state = SPA_LOG_GOOD;
1053b87f3af3Sperrin 	return (0);
1054b87f3af3Sperrin }
1055b87f3af3Sperrin 
1056fa9e4066Sahrens /*
1057fa9e4066Sahrens  * Load an existing storage pool, using the pool's builtin spa_config as a
1058ea8dc4b6Seschrock  * source of configuration information.
1059fa9e4066Sahrens  */
1060fa9e4066Sahrens static int
1061ea8dc4b6Seschrock spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
1062fa9e4066Sahrens {
1063fa9e4066Sahrens 	int error = 0;
1064fa9e4066Sahrens 	nvlist_t *nvroot = NULL;
1065fa9e4066Sahrens 	vdev_t *rvd;
1066fa9e4066Sahrens 	uberblock_t *ub = &spa->spa_uberblock;
10670373e76bSbonwick 	uint64_t config_cache_txg = spa->spa_config_txg;
1068fa9e4066Sahrens 	uint64_t pool_guid;
106999653d4eSeschrock 	uint64_t version;
10703d7072f8Seschrock 	uint64_t autoreplace = 0;
10718ad4d6ddSJeff Bonwick 	int orig_mode = spa->spa_mode;
1072b87f3af3Sperrin 	char *ereport = FM_EREPORT_ZFS_POOL;
1073fa9e4066Sahrens 
10748ad4d6ddSJeff Bonwick 	/*
10758ad4d6ddSJeff Bonwick 	 * If this is an untrusted config, access the pool in read-only mode.
10768ad4d6ddSJeff Bonwick 	 * This prevents things like resilvering recently removed devices.
10778ad4d6ddSJeff Bonwick 	 */
10788ad4d6ddSJeff Bonwick 	if (!mosconfig)
10798ad4d6ddSJeff Bonwick 		spa->spa_mode = FREAD;
10808ad4d6ddSJeff Bonwick 
1081e14bb325SJeff Bonwick 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1082e14bb325SJeff Bonwick 
1083ea8dc4b6Seschrock 	spa->spa_load_state = state;
10840373e76bSbonwick 
1085fa9e4066Sahrens 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
1086a9926bf0Sbonwick 	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
1087ea8dc4b6Seschrock 		error = EINVAL;
1088ea8dc4b6Seschrock 		goto out;
1089ea8dc4b6Seschrock 	}
1090fa9e4066Sahrens 
109199653d4eSeschrock 	/*
109299653d4eSeschrock 	 * Versioning wasn't explicitly added to the label until later, so if
109399653d4eSeschrock 	 * it's not present treat it as the initial version.
109499653d4eSeschrock 	 */
109599653d4eSeschrock 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
1096e7437265Sahrens 		version = SPA_VERSION_INITIAL;
109799653d4eSeschrock 
1098a9926bf0Sbonwick 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
1099a9926bf0Sbonwick 	    &spa->spa_config_txg);
1100a9926bf0Sbonwick 
11010373e76bSbonwick 	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
1102ea8dc4b6Seschrock 	    spa_guid_exists(pool_guid, 0)) {
1103ea8dc4b6Seschrock 		error = EEXIST;
1104ea8dc4b6Seschrock 		goto out;
1105ea8dc4b6Seschrock 	}
1106fa9e4066Sahrens 
1107b5989ec7Seschrock 	spa->spa_load_guid = pool_guid;
1108b5989ec7Seschrock 
1109fa9e4066Sahrens 	/*
111099653d4eSeschrock 	 * Parse the configuration into a vdev tree.  We explicitly set the
111199653d4eSeschrock 	 * value that will be returned by spa_version() since parsing the
111299653d4eSeschrock 	 * configuration requires knowing the version number.
1113fa9e4066Sahrens 	 */
1114e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
111599653d4eSeschrock 	spa->spa_ubsync.ub_version = version;
111699653d4eSeschrock 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
1117e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
1118fa9e4066Sahrens 
111999653d4eSeschrock 	if (error != 0)
1120ea8dc4b6Seschrock 		goto out;
1121fa9e4066Sahrens 
11220e34b6a7Sbonwick 	ASSERT(spa->spa_root_vdev == rvd);
1123fa9e4066Sahrens 	ASSERT(spa_guid(spa) == pool_guid);
1124fa9e4066Sahrens 
1125fa9e4066Sahrens 	/*
1126fa9e4066Sahrens 	 * Try to open all vdevs, loading each label in the process.
1127fa9e4066Sahrens 	 */
1128e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
11290bf246f5Smc 	error = vdev_open(rvd);
1130e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
11310bf246f5Smc 	if (error != 0)
1132ea8dc4b6Seschrock 		goto out;
1133fa9e4066Sahrens 
1134560e6e96Seschrock 	/*
1135560e6e96Seschrock 	 * Validate the labels for all leaf vdevs.  We need to grab the config
1136e14bb325SJeff Bonwick 	 * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER.
1137560e6e96Seschrock 	 */
11388ad4d6ddSJeff Bonwick 	if (mosconfig) {
11398ad4d6ddSJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
11408ad4d6ddSJeff Bonwick 		error = vdev_validate(rvd);
11418ad4d6ddSJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
11428ad4d6ddSJeff Bonwick 		if (error != 0)
11438ad4d6ddSJeff Bonwick 			goto out;
11448ad4d6ddSJeff Bonwick 	}
1145560e6e96Seschrock 
1146560e6e96Seschrock 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1147560e6e96Seschrock 		error = ENXIO;
1148560e6e96Seschrock 		goto out;
1149560e6e96Seschrock 	}
1150560e6e96Seschrock 
1151fa9e4066Sahrens 	/*
1152fa9e4066Sahrens 	 * Find the best uberblock.
1153fa9e4066Sahrens 	 */
1154e14bb325SJeff Bonwick 	vdev_uberblock_load(NULL, rvd, ub);
1155fa9e4066Sahrens 
1156fa9e4066Sahrens 	/*
1157fa9e4066Sahrens 	 * If we weren't able to find a single valid uberblock, return failure.
1158fa9e4066Sahrens 	 */
1159fa9e4066Sahrens 	if (ub->ub_txg == 0) {
1160eaca9bbdSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1161eaca9bbdSeschrock 		    VDEV_AUX_CORRUPT_DATA);
1162ea8dc4b6Seschrock 		error = ENXIO;
1163ea8dc4b6Seschrock 		goto out;
1164ea8dc4b6Seschrock 	}
1165ea8dc4b6Seschrock 
1166ea8dc4b6Seschrock 	/*
1167ea8dc4b6Seschrock 	 * If the pool is newer than the code, we can't open it.
1168ea8dc4b6Seschrock 	 */
1169e7437265Sahrens 	if (ub->ub_version > SPA_VERSION) {
1170eaca9bbdSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1171eaca9bbdSeschrock 		    VDEV_AUX_VERSION_NEWER);
1172ea8dc4b6Seschrock 		error = ENOTSUP;
1173ea8dc4b6Seschrock 		goto out;
1174fa9e4066Sahrens 	}
1175fa9e4066Sahrens 
1176fa9e4066Sahrens 	/*
1177fa9e4066Sahrens 	 * If the vdev guid sum doesn't match the uberblock, we have an
1178fa9e4066Sahrens 	 * incomplete configuration.
1179fa9e4066Sahrens 	 */
1180ecc2d604Sbonwick 	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
1181ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1182ea8dc4b6Seschrock 		    VDEV_AUX_BAD_GUID_SUM);
1183ea8dc4b6Seschrock 		error = ENXIO;
1184ea8dc4b6Seschrock 		goto out;
1185fa9e4066Sahrens 	}
1186fa9e4066Sahrens 
1187fa9e4066Sahrens 	/*
1188fa9e4066Sahrens 	 * Initialize internal SPA structures.
1189fa9e4066Sahrens 	 */
1190fa9e4066Sahrens 	spa->spa_state = POOL_STATE_ACTIVE;
1191fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
1192fa9e4066Sahrens 	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
1193ea8dc4b6Seschrock 	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
1194ea8dc4b6Seschrock 	if (error) {
1195ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1196ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1197ea8dc4b6Seschrock 		goto out;
1198ea8dc4b6Seschrock 	}
1199fa9e4066Sahrens 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
1200fa9e4066Sahrens 
1201ea8dc4b6Seschrock 	if (zap_lookup(spa->spa_meta_objset,
1202fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
1203ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
1204ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1205ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1206ea8dc4b6Seschrock 		error = EIO;
1207ea8dc4b6Seschrock 		goto out;
1208ea8dc4b6Seschrock 	}
1209fa9e4066Sahrens 
1210fa9e4066Sahrens 	if (!mosconfig) {
121199653d4eSeschrock 		nvlist_t *newconfig;
121295173954Sek 		uint64_t hostid;
1213fa9e4066Sahrens 
121499653d4eSeschrock 		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
1215ea8dc4b6Seschrock 			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1216ea8dc4b6Seschrock 			    VDEV_AUX_CORRUPT_DATA);
1217ea8dc4b6Seschrock 			error = EIO;
1218ea8dc4b6Seschrock 			goto out;
1219ea8dc4b6Seschrock 		}
1220fa9e4066Sahrens 
122177650510SLin Ling 		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
122277650510SLin Ling 		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
122395173954Sek 			char *hostname;
122495173954Sek 			unsigned long myhostid = 0;
122595173954Sek 
122695173954Sek 			VERIFY(nvlist_lookup_string(newconfig,
122795173954Sek 			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
122895173954Sek 
1229*5679c89fSjv #ifdef	_KERNEL
1230*5679c89fSjv 			myhostid = zone_get_hostid(NULL);
1231*5679c89fSjv #else	/* _KERNEL */
1232*5679c89fSjv 			/*
1233*5679c89fSjv 			 * We're emulating the system's hostid in userland, so
1234*5679c89fSjv 			 * we can't use zone_get_hostid().
1235*5679c89fSjv 			 */
123695173954Sek 			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
1237*5679c89fSjv #endif	/* _KERNEL */
123817194a52Slling 			if (hostid != 0 && myhostid != 0 &&
1239*5679c89fSjv 			    hostid != myhostid) {
124095173954Sek 				cmn_err(CE_WARN, "pool '%s' could not be "
124195173954Sek 				    "loaded as it was last accessed by "
124277650510SLin Ling 				    "another system (host: %s hostid: 0x%lx). "
124395173954Sek 				    "See: http://www.sun.com/msg/ZFS-8000-EY",
1244e14bb325SJeff Bonwick 				    spa_name(spa), hostname,
124595173954Sek 				    (unsigned long)hostid);
124695173954Sek 				error = EBADF;
124795173954Sek 				goto out;
124895173954Sek 			}
124995173954Sek 		}
125095173954Sek 
1251fa9e4066Sahrens 		spa_config_set(spa, newconfig);
1252fa9e4066Sahrens 		spa_unload(spa);
1253fa9e4066Sahrens 		spa_deactivate(spa);
12548ad4d6ddSJeff Bonwick 		spa_activate(spa, orig_mode);
1255fa9e4066Sahrens 
1256ea8dc4b6Seschrock 		return (spa_load(spa, newconfig, state, B_TRUE));
1257fa9e4066Sahrens 	}
1258fa9e4066Sahrens 
1259ea8dc4b6Seschrock 	if (zap_lookup(spa->spa_meta_objset,
1260fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
1261ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
1262ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1263ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1264ea8dc4b6Seschrock 		error = EIO;
1265ea8dc4b6Seschrock 		goto out;
1266ea8dc4b6Seschrock 	}
1267fa9e4066Sahrens 
126899653d4eSeschrock 	/*
126999653d4eSeschrock 	 * Load the bit that tells us to use the new accounting function
127099653d4eSeschrock 	 * (raid-z deflation).  If we have an older pool, this will not
127199653d4eSeschrock 	 * be present.
127299653d4eSeschrock 	 */
127399653d4eSeschrock 	error = zap_lookup(spa->spa_meta_objset,
127499653d4eSeschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
127599653d4eSeschrock 	    sizeof (uint64_t), 1, &spa->spa_deflate);
127699653d4eSeschrock 	if (error != 0 && error != ENOENT) {
127799653d4eSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
127899653d4eSeschrock 		    VDEV_AUX_CORRUPT_DATA);
127999653d4eSeschrock 		error = EIO;
128099653d4eSeschrock 		goto out;
128199653d4eSeschrock 	}
128299653d4eSeschrock 
1283fa9e4066Sahrens 	/*
1284ea8dc4b6Seschrock 	 * Load the persistent error log.  If we have an older pool, this will
1285ea8dc4b6Seschrock 	 * not be present.
1286fa9e4066Sahrens 	 */
1287ea8dc4b6Seschrock 	error = zap_lookup(spa->spa_meta_objset,
1288ea8dc4b6Seschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
1289ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
1290d80c45e0Sbonwick 	if (error != 0 && error != ENOENT) {
1291ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1292ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1293ea8dc4b6Seschrock 		error = EIO;
1294ea8dc4b6Seschrock 		goto out;
1295ea8dc4b6Seschrock 	}
1296ea8dc4b6Seschrock 
1297ea8dc4b6Seschrock 	error = zap_lookup(spa->spa_meta_objset,
1298ea8dc4b6Seschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
1299ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
1300ea8dc4b6Seschrock 	if (error != 0 && error != ENOENT) {
1301ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1302ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1303ea8dc4b6Seschrock 		error = EIO;
1304ea8dc4b6Seschrock 		goto out;
1305ea8dc4b6Seschrock 	}
1306ea8dc4b6Seschrock 
130706eeb2adSek 	/*
130806eeb2adSek 	 * Load the history object.  If we have an older pool, this
130906eeb2adSek 	 * will not be present.
131006eeb2adSek 	 */
131106eeb2adSek 	error = zap_lookup(spa->spa_meta_objset,
131206eeb2adSek 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
131306eeb2adSek 	    sizeof (uint64_t), 1, &spa->spa_history);
131406eeb2adSek 	if (error != 0 && error != ENOENT) {
131506eeb2adSek 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
131606eeb2adSek 		    VDEV_AUX_CORRUPT_DATA);
131706eeb2adSek 		error = EIO;
131806eeb2adSek 		goto out;
131906eeb2adSek 	}
132006eeb2adSek 
132199653d4eSeschrock 	/*
132299653d4eSeschrock 	 * Load any hot spares for this pool.
132399653d4eSeschrock 	 */
132499653d4eSeschrock 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1325fa94a07fSbrendan 	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
132699653d4eSeschrock 	if (error != 0 && error != ENOENT) {
132799653d4eSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
132899653d4eSeschrock 		    VDEV_AUX_CORRUPT_DATA);
132999653d4eSeschrock 		error = EIO;
133099653d4eSeschrock 		goto out;
133199653d4eSeschrock 	}
133299653d4eSeschrock 	if (error == 0) {
1333e7437265Sahrens 		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
1334fa94a07fSbrendan 		if (load_nvlist(spa, spa->spa_spares.sav_object,
1335fa94a07fSbrendan 		    &spa->spa_spares.sav_config) != 0) {
133699653d4eSeschrock 			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
133799653d4eSeschrock 			    VDEV_AUX_CORRUPT_DATA);
133899653d4eSeschrock 			error = EIO;
133999653d4eSeschrock 			goto out;
134099653d4eSeschrock 		}
134199653d4eSeschrock 
1342e14bb325SJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
134399653d4eSeschrock 		spa_load_spares(spa);
1344e14bb325SJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
134599653d4eSeschrock 	}
134699653d4eSeschrock 
1347fa94a07fSbrendan 	/*
1348fa94a07fSbrendan 	 * Load any level 2 ARC devices for this pool.
1349fa94a07fSbrendan 	 */
1350fa94a07fSbrendan 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1351fa94a07fSbrendan 	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
1352fa94a07fSbrendan 	    &spa->spa_l2cache.sav_object);
1353fa94a07fSbrendan 	if (error != 0 && error != ENOENT) {
1354fa94a07fSbrendan 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1355fa94a07fSbrendan 		    VDEV_AUX_CORRUPT_DATA);
1356fa94a07fSbrendan 		error = EIO;
1357fa94a07fSbrendan 		goto out;
1358fa94a07fSbrendan 	}
1359fa94a07fSbrendan 	if (error == 0) {
1360fa94a07fSbrendan 		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
1361fa94a07fSbrendan 		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
1362fa94a07fSbrendan 		    &spa->spa_l2cache.sav_config) != 0) {
1363fa94a07fSbrendan 			vdev_set_state(rvd, B_TRUE,
1364fa94a07fSbrendan 			    VDEV_STATE_CANT_OPEN,
1365fa94a07fSbrendan 			    VDEV_AUX_CORRUPT_DATA);
1366fa94a07fSbrendan 			error = EIO;
1367fa94a07fSbrendan 			goto out;
1368fa94a07fSbrendan 		}
1369fa94a07fSbrendan 
1370e14bb325SJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1371fa94a07fSbrendan 		spa_load_l2cache(spa);
1372e14bb325SJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
1373fa94a07fSbrendan 	}
1374fa94a07fSbrendan 
1375b87f3af3Sperrin 	if (spa_check_logs(spa)) {
1376b87f3af3Sperrin 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1377b87f3af3Sperrin 		    VDEV_AUX_BAD_LOG);
1378b87f3af3Sperrin 		error = ENXIO;
1379b87f3af3Sperrin 		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
1380b87f3af3Sperrin 		goto out;
1381b87f3af3Sperrin 	}
1382b87f3af3Sperrin 
1383b87f3af3Sperrin 
1384990b4856Slling 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
1385ecd6cf80Smarks 
1386b1b8ab34Slling 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1387b1b8ab34Slling 	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);
1388b1b8ab34Slling 
1389b1b8ab34Slling 	if (error && error != ENOENT) {
1390b1b8ab34Slling 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1391b1b8ab34Slling 		    VDEV_AUX_CORRUPT_DATA);
1392b1b8ab34Slling 		error = EIO;
1393b1b8ab34Slling 		goto out;
1394b1b8ab34Slling 	}
1395b1b8ab34Slling 
1396b1b8ab34Slling 	if (error == 0) {
1397b1b8ab34Slling 		(void) zap_lookup(spa->spa_meta_objset,
1398b1b8ab34Slling 		    spa->spa_pool_props_object,
13993d7072f8Seschrock 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
1400b1b8ab34Slling 		    sizeof (uint64_t), 1, &spa->spa_bootfs);
14013d7072f8Seschrock 		(void) zap_lookup(spa->spa_meta_objset,
14023d7072f8Seschrock 		    spa->spa_pool_props_object,
14033d7072f8Seschrock 		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
14043d7072f8Seschrock 		    sizeof (uint64_t), 1, &autoreplace);
1405ecd6cf80Smarks 		(void) zap_lookup(spa->spa_meta_objset,
1406ecd6cf80Smarks 		    spa->spa_pool_props_object,
1407ecd6cf80Smarks 		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
1408ecd6cf80Smarks 		    sizeof (uint64_t), 1, &spa->spa_delegation);
14090a4e9518Sgw 		(void) zap_lookup(spa->spa_meta_objset,
14100a4e9518Sgw 		    spa->spa_pool_props_object,
14110a4e9518Sgw 		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
14120a4e9518Sgw 		    sizeof (uint64_t), 1, &spa->spa_failmode);
1413b1b8ab34Slling 	}
1414b1b8ab34Slling 
14153d7072f8Seschrock 	/*
14163d7072f8Seschrock 	 * If the 'autoreplace' property is set, then post a resource notifying
14173d7072f8Seschrock 	 * the ZFS DE that it should not issue any faults for unopenable
14183d7072f8Seschrock 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
14193d7072f8Seschrock 	 * unopenable vdevs so that the normal autoreplace handler can take
14203d7072f8Seschrock 	 * over.
14213d7072f8Seschrock 	 */
1422b01c3b58Seschrock 	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
14233d7072f8Seschrock 		spa_check_removed(spa->spa_root_vdev);
14243d7072f8Seschrock 
1425ea8dc4b6Seschrock 	/*
1426560e6e96Seschrock 	 * Load the vdev state for all toplevel vdevs.
1427ea8dc4b6Seschrock 	 */
1428560e6e96Seschrock 	vdev_load(rvd);
14290373e76bSbonwick 
1430fa9e4066Sahrens 	/*
1431fa9e4066Sahrens 	 * Propagate the leaf DTLs we just loaded all the way up the tree.
1432fa9e4066Sahrens 	 */
1433e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1434fa9e4066Sahrens 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
1435e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
1436fa9e4066Sahrens 
1437fa9e4066Sahrens 	/*
1438fa9e4066Sahrens 	 * Check the state of the root vdev.  If it can't be opened, it
1439fa9e4066Sahrens 	 * indicates one or more toplevel vdevs are faulted.
1440fa9e4066Sahrens 	 */
1441ea8dc4b6Seschrock 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1442ea8dc4b6Seschrock 		error = ENXIO;
1443ea8dc4b6Seschrock 		goto out;
1444ea8dc4b6Seschrock 	}
1445fa9e4066Sahrens 
14468ad4d6ddSJeff Bonwick 	if (spa_writeable(spa)) {
14475dabedeeSbonwick 		dmu_tx_t *tx;
14480373e76bSbonwick 		int need_update = B_FALSE;
14498ad4d6ddSJeff Bonwick 
14508ad4d6ddSJeff Bonwick 		ASSERT(state != SPA_LOAD_TRYIMPORT);
14515dabedeeSbonwick 
14520373e76bSbonwick 		/*
14530373e76bSbonwick 		 * Claim log blocks that haven't been committed yet.
14540373e76bSbonwick 		 * This must all happen in a single txg.
14550373e76bSbonwick 		 */
14565dabedeeSbonwick 		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
1457fa9e4066Sahrens 		    spa_first_txg(spa));
1458e14bb325SJeff Bonwick 		(void) dmu_objset_find(spa_name(spa),
14590b69c2f0Sahrens 		    zil_claim, tx, DS_FIND_CHILDREN);
1460fa9e4066Sahrens 		dmu_tx_commit(tx);
1461fa9e4066Sahrens 
1462fa9e4066Sahrens 		spa->spa_sync_on = B_TRUE;
1463fa9e4066Sahrens 		txg_sync_start(spa->spa_dsl_pool);
1464fa9e4066Sahrens 
1465fa9e4066Sahrens 		/*
1466fa9e4066Sahrens 		 * Wait for all claims to sync.
1467fa9e4066Sahrens 		 */
1468fa9e4066Sahrens 		txg_wait_synced(spa->spa_dsl_pool, 0);
14690e34b6a7Sbonwick 
14700e34b6a7Sbonwick 		/*
14710373e76bSbonwick 		 * If the config cache is stale, or we have uninitialized
14720373e76bSbonwick 		 * metaslabs (see spa_vdev_add()), then update the config.
14730e34b6a7Sbonwick 		 */
14740373e76bSbonwick 		if (config_cache_txg != spa->spa_config_txg ||
14750373e76bSbonwick 		    state == SPA_LOAD_IMPORT)
14760373e76bSbonwick 			need_update = B_TRUE;
14770373e76bSbonwick 
14788ad4d6ddSJeff Bonwick 		for (int c = 0; c < rvd->vdev_children; c++)
14790373e76bSbonwick 			if (rvd->vdev_child[c]->vdev_ms_array == 0)
14800373e76bSbonwick 				need_update = B_TRUE;
14810e34b6a7Sbonwick 
14820e34b6a7Sbonwick 		/*
14830373e76bSbonwick 		 * Update the config cache asychronously in case we're the
14840373e76bSbonwick 		 * root pool, in which case the config cache isn't writable yet.
14850e34b6a7Sbonwick 		 */
14860373e76bSbonwick 		if (need_update)
14870373e76bSbonwick 			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
14888ad4d6ddSJeff Bonwick 
14898ad4d6ddSJeff Bonwick 		/*
14908ad4d6ddSJeff Bonwick 		 * Check all DTLs to see if anything needs resilvering.
14918ad4d6ddSJeff Bonwick 		 */
14928ad4d6ddSJeff Bonwick 		if (vdev_resilver_needed(rvd, NULL, NULL))
14938ad4d6ddSJeff Bonwick 			spa_async_request(spa, SPA_ASYNC_RESILVER);
1494fa9e4066Sahrens 	}
1495fa9e4066Sahrens 
1496ea8dc4b6Seschrock 	error = 0;
1497ea8dc4b6Seschrock out:
1498088f3894Sahrens 	spa->spa_minref = refcount_count(&spa->spa_refcount);
149999653d4eSeschrock 	if (error && error != EBADF)
1500b87f3af3Sperrin 		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
1501ea8dc4b6Seschrock 	spa->spa_load_state = SPA_LOAD_NONE;
1502ea8dc4b6Seschrock 	spa->spa_ena = 0;
1503ea8dc4b6Seschrock 
1504ea8dc4b6Seschrock 	return (error);
1505fa9e4066Sahrens }
1506fa9e4066Sahrens 
1507fa9e4066Sahrens /*
1508fa9e4066Sahrens  * Pool Open/Import
1509fa9e4066Sahrens  *
1510fa9e4066Sahrens  * The import case is identical to an open except that the configuration is sent
1511fa9e4066Sahrens  * down from userland, instead of grabbed from the configuration cache.  For the
1512fa9e4066Sahrens  * case of an open, the pool configuration will exist in the
15133d7072f8Seschrock  * POOL_STATE_UNINITIALIZED state.
1514fa9e4066Sahrens  *
1515fa9e4066Sahrens  * The stats information (gen/count/ustats) is used to gather vdev statistics at
1516fa9e4066Sahrens  * the same time open the pool, without having to keep around the spa_t in some
1517fa9e4066Sahrens  * ambiguous state.
1518fa9e4066Sahrens  */
/*
 * Common routine backing spa_open() and spa_get_stats(): look up the named
 * pool, load it from disk if it is not already active, and take an open
 * reference on it.  On success *spapp points to the held spa_t and 0 is
 * returned; on failure *spapp is NULL and an errno is returned.  If 'config'
 * is non-NULL it is set to a freshly generated config nvlist -- even on the
 * generic failure path, so callers can report per-vdev open state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		/*
		 * First open of this pool: activate it and load the on-disk
		 * state from the cached config.
		 */
		spa_activate(spa, spa_mode_global);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it indicates that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL)
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}
	}

	/* Hold the pool open on behalf of 'tag' before dropping the lock. */
	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	return (0);
}
1600fa9e4066Sahrens 
/*
 * Open the named pool and place a hold on it for 'tag'; thin wrapper around
 * spa_open_common() that does not request a config nvlist.
 */
int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}
1606fa9e4066Sahrens 
1607ea8dc4b6Seschrock /*
1608ea8dc4b6Seschrock  * Lookup the given spa_t, incrementing the inject count in the process,
1609ea8dc4b6Seschrock  * preventing it from being exported or destroyed.
1610ea8dc4b6Seschrock  */
1611ea8dc4b6Seschrock spa_t *
1612ea8dc4b6Seschrock spa_inject_addref(char *name)
1613ea8dc4b6Seschrock {
1614ea8dc4b6Seschrock 	spa_t *spa;
1615ea8dc4b6Seschrock 
1616ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1617ea8dc4b6Seschrock 	if ((spa = spa_lookup(name)) == NULL) {
1618ea8dc4b6Seschrock 		mutex_exit(&spa_namespace_lock);
1619ea8dc4b6Seschrock 		return (NULL);
1620ea8dc4b6Seschrock 	}
1621ea8dc4b6Seschrock 	spa->spa_inject_ref++;
1622ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1623ea8dc4b6Seschrock 
1624ea8dc4b6Seschrock 	return (spa);
1625ea8dc4b6Seschrock }
1626ea8dc4b6Seschrock 
/*
 * Drop an inject reference previously taken by spa_inject_addref(); once
 * the count reaches zero the pool may again be exported or destroyed.
 */
void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}
1634ea8dc4b6Seschrock 
1635fa94a07fSbrendan /*
1636fa94a07fSbrendan  * Add spares device information to the nvlist.
1637fa94a07fSbrendan  */
163899653d4eSeschrock static void
163999653d4eSeschrock spa_add_spares(spa_t *spa, nvlist_t *config)
164099653d4eSeschrock {
164199653d4eSeschrock 	nvlist_t **spares;
164299653d4eSeschrock 	uint_t i, nspares;
164399653d4eSeschrock 	nvlist_t *nvroot;
164499653d4eSeschrock 	uint64_t guid;
164599653d4eSeschrock 	vdev_stat_t *vs;
164699653d4eSeschrock 	uint_t vsc;
164739c23413Seschrock 	uint64_t pool;
164899653d4eSeschrock 
1649fa94a07fSbrendan 	if (spa->spa_spares.sav_count == 0)
165099653d4eSeschrock 		return;
165199653d4eSeschrock 
165299653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist(config,
165399653d4eSeschrock 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1654fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
165599653d4eSeschrock 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
165699653d4eSeschrock 	if (nspares != 0) {
165799653d4eSeschrock 		VERIFY(nvlist_add_nvlist_array(nvroot,
165899653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
165999653d4eSeschrock 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
166099653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
166199653d4eSeschrock 
166299653d4eSeschrock 		/*
166399653d4eSeschrock 		 * Go through and find any spares which have since been
166499653d4eSeschrock 		 * repurposed as an active spare.  If this is the case, update
166599653d4eSeschrock 		 * their status appropriately.
166699653d4eSeschrock 		 */
166799653d4eSeschrock 		for (i = 0; i < nspares; i++) {
166899653d4eSeschrock 			VERIFY(nvlist_lookup_uint64(spares[i],
166999653d4eSeschrock 			    ZPOOL_CONFIG_GUID, &guid) == 0);
167089a89ebfSlling 			if (spa_spare_exists(guid, &pool, NULL) &&
167189a89ebfSlling 			    pool != 0ULL) {
167299653d4eSeschrock 				VERIFY(nvlist_lookup_uint64_array(
167399653d4eSeschrock 				    spares[i], ZPOOL_CONFIG_STATS,
167499653d4eSeschrock 				    (uint64_t **)&vs, &vsc) == 0);
167599653d4eSeschrock 				vs->vs_state = VDEV_STATE_CANT_OPEN;
167699653d4eSeschrock 				vs->vs_aux = VDEV_AUX_SPARED;
167799653d4eSeschrock 			}
167899653d4eSeschrock 		}
167999653d4eSeschrock 	}
168099653d4eSeschrock }
168199653d4eSeschrock 
1682fa94a07fSbrendan /*
1683fa94a07fSbrendan  * Add l2cache device information to the nvlist, including vdev stats.
1684fa94a07fSbrendan  */
1685fa94a07fSbrendan static void
1686fa94a07fSbrendan spa_add_l2cache(spa_t *spa, nvlist_t *config)
1687fa94a07fSbrendan {
1688fa94a07fSbrendan 	nvlist_t **l2cache;
1689fa94a07fSbrendan 	uint_t i, j, nl2cache;
1690fa94a07fSbrendan 	nvlist_t *nvroot;
1691fa94a07fSbrendan 	uint64_t guid;
1692fa94a07fSbrendan 	vdev_t *vd;
1693fa94a07fSbrendan 	vdev_stat_t *vs;
1694fa94a07fSbrendan 	uint_t vsc;
1695fa94a07fSbrendan 
1696fa94a07fSbrendan 	if (spa->spa_l2cache.sav_count == 0)
1697fa94a07fSbrendan 		return;
1698fa94a07fSbrendan 
1699e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1700fa94a07fSbrendan 
1701fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist(config,
1702fa94a07fSbrendan 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1703fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
1704fa94a07fSbrendan 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1705fa94a07fSbrendan 	if (nl2cache != 0) {
1706fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot,
1707fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1708fa94a07fSbrendan 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
1709fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1710fa94a07fSbrendan 
1711fa94a07fSbrendan 		/*
1712fa94a07fSbrendan 		 * Update level 2 cache device stats.
1713fa94a07fSbrendan 		 */
1714fa94a07fSbrendan 
1715fa94a07fSbrendan 		for (i = 0; i < nl2cache; i++) {
1716fa94a07fSbrendan 			VERIFY(nvlist_lookup_uint64(l2cache[i],
1717fa94a07fSbrendan 			    ZPOOL_CONFIG_GUID, &guid) == 0);
1718fa94a07fSbrendan 
1719fa94a07fSbrendan 			vd = NULL;
1720fa94a07fSbrendan 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
1721fa94a07fSbrendan 				if (guid ==
1722fa94a07fSbrendan 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
1723fa94a07fSbrendan 					vd = spa->spa_l2cache.sav_vdevs[j];
1724fa94a07fSbrendan 					break;
1725fa94a07fSbrendan 				}
1726fa94a07fSbrendan 			}
1727fa94a07fSbrendan 			ASSERT(vd != NULL);
1728fa94a07fSbrendan 
1729fa94a07fSbrendan 			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
1730fa94a07fSbrendan 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
1731fa94a07fSbrendan 			vdev_get_stats(vd, vs);
1732fa94a07fSbrendan 		}
1733fa94a07fSbrendan 	}
1734fa94a07fSbrendan 
1735e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_CONFIG, FTAG);
1736fa94a07fSbrendan }
1737fa94a07fSbrendan 
1738fa9e4066Sahrens int
1739ea8dc4b6Seschrock spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
1740fa9e4066Sahrens {
1741fa9e4066Sahrens 	int error;
1742fa9e4066Sahrens 	spa_t *spa;
1743fa9e4066Sahrens 
1744fa9e4066Sahrens 	*config = NULL;
1745fa9e4066Sahrens 	error = spa_open_common(name, &spa, FTAG, config);
1746fa9e4066Sahrens 
174799653d4eSeschrock 	if (spa && *config != NULL) {
1748ea8dc4b6Seschrock 		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
1749ea8dc4b6Seschrock 		    spa_get_errlog_size(spa)) == 0);
1750ea8dc4b6Seschrock 
1751e14bb325SJeff Bonwick 		if (spa_suspended(spa))
1752e14bb325SJeff Bonwick 			VERIFY(nvlist_add_uint64(*config,
1753e14bb325SJeff Bonwick 			    ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0);
1754e14bb325SJeff Bonwick 
175599653d4eSeschrock 		spa_add_spares(spa, *config);
1756fa94a07fSbrendan 		spa_add_l2cache(spa, *config);
175799653d4eSeschrock 	}
175899653d4eSeschrock 
1759ea8dc4b6Seschrock 	/*
1760ea8dc4b6Seschrock 	 * We want to get the alternate root even for faulted pools, so we cheat
1761ea8dc4b6Seschrock 	 * and call spa_lookup() directly.
1762ea8dc4b6Seschrock 	 */
1763ea8dc4b6Seschrock 	if (altroot) {
1764ea8dc4b6Seschrock 		if (spa == NULL) {
1765ea8dc4b6Seschrock 			mutex_enter(&spa_namespace_lock);
1766ea8dc4b6Seschrock 			spa = spa_lookup(name);
1767ea8dc4b6Seschrock 			if (spa)
1768ea8dc4b6Seschrock 				spa_altroot(spa, altroot, buflen);
1769ea8dc4b6Seschrock 			else
1770ea8dc4b6Seschrock 				altroot[0] = '\0';
1771ea8dc4b6Seschrock 			spa = NULL;
1772ea8dc4b6Seschrock 			mutex_exit(&spa_namespace_lock);
1773ea8dc4b6Seschrock 		} else {
1774ea8dc4b6Seschrock 			spa_altroot(spa, altroot, buflen);
1775ea8dc4b6Seschrock 		}
1776ea8dc4b6Seschrock 	}
1777ea8dc4b6Seschrock 
1778fa9e4066Sahrens 	if (spa != NULL)
1779fa9e4066Sahrens 		spa_close(spa, FTAG);
1780fa9e4066Sahrens 
1781fa9e4066Sahrens 	return (error);
1782fa9e4066Sahrens }
1783fa9e4066Sahrens 
/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 *
 * Returns 0 on success (including when no devices of this class are
 * present), EINVAL for a malformed list, ENOTSUP if the pool version is too
 * old for this device class, or the error from opening/labeling a device.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	/* An explicitly present but empty array is malformed. */
	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		/* Aux devices must be leaves (no mirrors/raidz of spares). */
		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context.  For user-level testing, we allow it.
		 */
#ifdef _KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}
#endif
		/* An aux device has no parent tree; it is its own top-level. */
		vd->vdev_top = vd;

		/* Record the device's guid in the config once labeled. */
		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		/*
		 * For spare/l2cache allocation (the import path), open or
		 * label failures on an individual device are tolerated and
		 * the error is cleared; otherwise the failure is fatal.
		 */
		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	/* Always clear the pending list, on both success and failure. */
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}
186999653d4eSeschrock 
1870fa94a07fSbrendan static int
1871fa94a07fSbrendan spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1872fa94a07fSbrendan {
1873fa94a07fSbrendan 	int error;
1874fa94a07fSbrendan 
1875e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1876e14bb325SJeff Bonwick 
1877fa94a07fSbrendan 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1878fa94a07fSbrendan 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
1879fa94a07fSbrendan 	    VDEV_LABEL_SPARE)) != 0) {
1880fa94a07fSbrendan 		return (error);
1881fa94a07fSbrendan 	}
1882fa94a07fSbrendan 
1883fa94a07fSbrendan 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1884fa94a07fSbrendan 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
1885fa94a07fSbrendan 	    VDEV_LABEL_L2CACHE));
1886fa94a07fSbrendan }
1887fa94a07fSbrendan 
1888fa94a07fSbrendan static void
1889fa94a07fSbrendan spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
1890fa94a07fSbrendan     const char *config)
1891fa94a07fSbrendan {
1892fa94a07fSbrendan 	int i;
1893fa94a07fSbrendan 
1894fa94a07fSbrendan 	if (sav->sav_config != NULL) {
1895fa94a07fSbrendan 		nvlist_t **olddevs;
1896fa94a07fSbrendan 		uint_t oldndevs;
1897fa94a07fSbrendan 		nvlist_t **newdevs;
1898fa94a07fSbrendan 
1899fa94a07fSbrendan 		/*
1900fa94a07fSbrendan 		 * Generate new dev list by concatentating with the
1901fa94a07fSbrendan 		 * current dev list.
1902fa94a07fSbrendan 		 */
1903fa94a07fSbrendan 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
1904fa94a07fSbrendan 		    &olddevs, &oldndevs) == 0);
1905fa94a07fSbrendan 
1906fa94a07fSbrendan 		newdevs = kmem_alloc(sizeof (void *) *
1907fa94a07fSbrendan 		    (ndevs + oldndevs), KM_SLEEP);
1908fa94a07fSbrendan 		for (i = 0; i < oldndevs; i++)
1909fa94a07fSbrendan 			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
1910fa94a07fSbrendan 			    KM_SLEEP) == 0);
1911fa94a07fSbrendan 		for (i = 0; i < ndevs; i++)
1912fa94a07fSbrendan 			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
1913fa94a07fSbrendan 			    KM_SLEEP) == 0);
1914fa94a07fSbrendan 
1915fa94a07fSbrendan 		VERIFY(nvlist_remove(sav->sav_config, config,
1916fa94a07fSbrendan 		    DATA_TYPE_NVLIST_ARRAY) == 0);
1917fa94a07fSbrendan 
1918fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1919fa94a07fSbrendan 		    config, newdevs, ndevs + oldndevs) == 0);
1920fa94a07fSbrendan 		for (i = 0; i < oldndevs + ndevs; i++)
1921fa94a07fSbrendan 			nvlist_free(newdevs[i]);
1922fa94a07fSbrendan 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
1923fa94a07fSbrendan 	} else {
1924fa94a07fSbrendan 		/*
1925fa94a07fSbrendan 		 * Generate a new dev list.
1926fa94a07fSbrendan 		 */
1927fa94a07fSbrendan 		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
1928fa94a07fSbrendan 		    KM_SLEEP) == 0);
1929fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
1930fa94a07fSbrendan 		    devs, ndevs) == 0);
1931fa94a07fSbrendan 	}
1932fa94a07fSbrendan }
1933fa94a07fSbrendan 
1934fa94a07fSbrendan /*
1935fa94a07fSbrendan  * Stop and drop level 2 ARC devices
1936fa94a07fSbrendan  */
1937fa94a07fSbrendan void
1938fa94a07fSbrendan spa_l2cache_drop(spa_t *spa)
1939fa94a07fSbrendan {
1940fa94a07fSbrendan 	vdev_t *vd;
1941fa94a07fSbrendan 	int i;
1942fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1943fa94a07fSbrendan 
1944fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++) {
1945fa94a07fSbrendan 		uint64_t pool;
1946fa94a07fSbrendan 
1947fa94a07fSbrendan 		vd = sav->sav_vdevs[i];
1948fa94a07fSbrendan 		ASSERT(vd != NULL);
1949fa94a07fSbrendan 
19508ad4d6ddSJeff Bonwick 		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
19518ad4d6ddSJeff Bonwick 		    pool != 0ULL && l2arc_vdev_present(vd))
1952fa94a07fSbrendan 			l2arc_remove_vdev(vd);
1953fa94a07fSbrendan 		if (vd->vdev_isl2cache)
1954fa94a07fSbrendan 			spa_l2cache_remove(vd);
1955fa94a07fSbrendan 		vdev_clear_stats(vd);
1956fa94a07fSbrendan 		(void) vdev_close(vd);
1957fa94a07fSbrendan 	}
1958fa94a07fSbrendan }
1959fa94a07fSbrendan 
1960fa9e4066Sahrens /*
1961fa9e4066Sahrens  * Pool Creation
1962fa9e4066Sahrens  */
1963fa9e4066Sahrens int
1964990b4856Slling spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
19650a48a24eStimh     const char *history_str, nvlist_t *zplprops)
1966fa9e4066Sahrens {
1967fa9e4066Sahrens 	spa_t *spa;
1968990b4856Slling 	char *altroot = NULL;
19690373e76bSbonwick 	vdev_t *rvd;
1970fa9e4066Sahrens 	dsl_pool_t *dp;
1971fa9e4066Sahrens 	dmu_tx_t *tx;
197299653d4eSeschrock 	int c, error = 0;
1973fa9e4066Sahrens 	uint64_t txg = TXG_INITIAL;
1974fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
1975fa94a07fSbrendan 	uint_t nspares, nl2cache;
1976990b4856Slling 	uint64_t version;
1977fa9e4066Sahrens 
1978fa9e4066Sahrens 	/*
1979fa9e4066Sahrens 	 * If this pool already exists, return failure.
1980fa9e4066Sahrens 	 */
1981fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
1982fa9e4066Sahrens 	if (spa_lookup(pool) != NULL) {
1983fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1984fa9e4066Sahrens 		return (EEXIST);
1985fa9e4066Sahrens 	}
1986fa9e4066Sahrens 
1987fa9e4066Sahrens 	/*
1988fa9e4066Sahrens 	 * Allocate a new spa_t structure.
1989fa9e4066Sahrens 	 */
1990990b4856Slling 	(void) nvlist_lookup_string(props,
1991990b4856Slling 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
19920373e76bSbonwick 	spa = spa_add(pool, altroot);
19938ad4d6ddSJeff Bonwick 	spa_activate(spa, spa_mode_global);
1994fa9e4066Sahrens 
1995fa9e4066Sahrens 	spa->spa_uberblock.ub_txg = txg - 1;
1996990b4856Slling 
1997990b4856Slling 	if (props && (error = spa_prop_validate(spa, props))) {
1998990b4856Slling 		spa_unload(spa);
1999990b4856Slling 		spa_deactivate(spa);
2000990b4856Slling 		spa_remove(spa);
2001c5904d13Seschrock 		mutex_exit(&spa_namespace_lock);
2002990b4856Slling 		return (error);
2003990b4856Slling 	}
2004990b4856Slling 
2005990b4856Slling 	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
2006990b4856Slling 	    &version) != 0)
2007990b4856Slling 		version = SPA_VERSION;
2008990b4856Slling 	ASSERT(version <= SPA_VERSION);
2009990b4856Slling 	spa->spa_uberblock.ub_version = version;
2010fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
2011fa9e4066Sahrens 
20120373e76bSbonwick 	/*
20130373e76bSbonwick 	 * Create the root vdev.
20140373e76bSbonwick 	 */
2015e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
20160373e76bSbonwick 
201799653d4eSeschrock 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
20180373e76bSbonwick 
201999653d4eSeschrock 	ASSERT(error != 0 || rvd != NULL);
202099653d4eSeschrock 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
20210373e76bSbonwick 
2022b7b97454Sperrin 	if (error == 0 && !zfs_allocatable_devs(nvroot))
20230373e76bSbonwick 		error = EINVAL;
202499653d4eSeschrock 
202599653d4eSeschrock 	if (error == 0 &&
202699653d4eSeschrock 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
2027fa94a07fSbrendan 	    (error = spa_validate_aux(spa, nvroot, txg,
202899653d4eSeschrock 	    VDEV_ALLOC_ADD)) == 0) {
202999653d4eSeschrock 		for (c = 0; c < rvd->vdev_children; c++)
203099653d4eSeschrock 			vdev_init(rvd->vdev_child[c], txg);
203199653d4eSeschrock 		vdev_config_dirty(rvd);
20320373e76bSbonwick 	}
20330373e76bSbonwick 
2034e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
2035fa9e4066Sahrens 
203699653d4eSeschrock 	if (error != 0) {
2037fa9e4066Sahrens 		spa_unload(spa);
2038fa9e4066Sahrens 		spa_deactivate(spa);
2039fa9e4066Sahrens 		spa_remove(spa);
2040fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2041fa9e4066Sahrens 		return (error);
2042fa9e4066Sahrens 	}
2043fa9e4066Sahrens 
204499653d4eSeschrock 	/*
204599653d4eSeschrock 	 * Get the list of spares, if specified.
204699653d4eSeschrock 	 */
204799653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
204899653d4eSeschrock 	    &spares, &nspares) == 0) {
2049fa94a07fSbrendan 		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
205099653d4eSeschrock 		    KM_SLEEP) == 0);
2051fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
205299653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2053e14bb325SJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
205499653d4eSeschrock 		spa_load_spares(spa);
2055e14bb325SJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
2056fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
2057fa94a07fSbrendan 	}
2058fa94a07fSbrendan 
2059fa94a07fSbrendan 	/*
2060fa94a07fSbrendan 	 * Get the list of level 2 cache devices, if specified.
2061fa94a07fSbrendan 	 */
2062fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2063fa94a07fSbrendan 	    &l2cache, &nl2cache) == 0) {
2064fa94a07fSbrendan 		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2065fa94a07fSbrendan 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2066fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2067fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2068e14bb325SJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2069fa94a07fSbrendan 		spa_load_l2cache(spa);
2070e14bb325SJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
2071fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
207299653d4eSeschrock 	}
207399653d4eSeschrock 
20740a48a24eStimh 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
2075fa9e4066Sahrens 	spa->spa_meta_objset = dp->dp_meta_objset;
2076fa9e4066Sahrens 
2077fa9e4066Sahrens 	tx = dmu_tx_create_assigned(dp, txg);
2078fa9e4066Sahrens 
2079fa9e4066Sahrens 	/*
2080fa9e4066Sahrens 	 * Create the pool config object.
2081fa9e4066Sahrens 	 */
2082fa9e4066Sahrens 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
2083f7991ba4STim Haley 	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
2084fa9e4066Sahrens 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
2085fa9e4066Sahrens 
2086ea8dc4b6Seschrock 	if (zap_add(spa->spa_meta_objset,
2087fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
2088ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
2089ea8dc4b6Seschrock 		cmn_err(CE_PANIC, "failed to add pool config");
2090ea8dc4b6Seschrock 	}
2091fa9e4066Sahrens 
2092990b4856Slling 	/* Newly created pools with the right version are always deflated. */
2093990b4856Slling 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
2094990b4856Slling 		spa->spa_deflate = TRUE;
2095990b4856Slling 		if (zap_add(spa->spa_meta_objset,
2096990b4856Slling 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
2097990b4856Slling 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
2098990b4856Slling 			cmn_err(CE_PANIC, "failed to add deflate");
2099990b4856Slling 		}
210099653d4eSeschrock 	}
210199653d4eSeschrock 
2102fa9e4066Sahrens 	/*
2103fa9e4066Sahrens 	 * Create the deferred-free bplist object.  Turn off compression
2104fa9e4066Sahrens 	 * because sync-to-convergence takes longer if the blocksize
2105fa9e4066Sahrens 	 * keeps changing.
2106fa9e4066Sahrens 	 */
2107fa9e4066Sahrens 	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
2108fa9e4066Sahrens 	    1 << 14, tx);
2109fa9e4066Sahrens 	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
2110fa9e4066Sahrens 	    ZIO_COMPRESS_OFF, tx);
2111fa9e4066Sahrens 
2112ea8dc4b6Seschrock 	if (zap_add(spa->spa_meta_objset,
2113fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
2114ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
2115ea8dc4b6Seschrock 		cmn_err(CE_PANIC, "failed to add bplist");
2116ea8dc4b6Seschrock 	}
2117fa9e4066Sahrens 
211806eeb2adSek 	/*
211906eeb2adSek 	 * Create the pool's history object.
212006eeb2adSek 	 */
2121990b4856Slling 	if (version >= SPA_VERSION_ZPOOL_HISTORY)
2122990b4856Slling 		spa_history_create_obj(spa, tx);
2123990b4856Slling 
2124990b4856Slling 	/*
2125990b4856Slling 	 * Set pool properties.
2126990b4856Slling 	 */
2127990b4856Slling 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
2128990b4856Slling 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
21290a4e9518Sgw 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
2130379c004dSEric Schrock 	if (props != NULL) {
2131379c004dSEric Schrock 		spa_configfile_set(spa, props, B_FALSE);
2132990b4856Slling 		spa_sync_props(spa, props, CRED(), tx);
2133379c004dSEric Schrock 	}
213406eeb2adSek 
2135fa9e4066Sahrens 	dmu_tx_commit(tx);
2136fa9e4066Sahrens 
2137fa9e4066Sahrens 	spa->spa_sync_on = B_TRUE;
2138fa9e4066Sahrens 	txg_sync_start(spa->spa_dsl_pool);
2139fa9e4066Sahrens 
2140fa9e4066Sahrens 	/*
2141fa9e4066Sahrens 	 * We explicitly wait for the first transaction to complete so that our
2142fa9e4066Sahrens 	 * bean counters are appropriately updated.
2143fa9e4066Sahrens 	 */
2144fa9e4066Sahrens 	txg_wait_synced(spa->spa_dsl_pool, txg);
2145fa9e4066Sahrens 
2146c5904d13Seschrock 	spa_config_sync(spa, B_FALSE, B_TRUE);
2147fa9e4066Sahrens 
2148990b4856Slling 	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
2149228975ccSek 		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
2150228975ccSek 
2151fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
2152fa9e4066Sahrens 
2153088f3894Sahrens 	spa->spa_minref = refcount_count(&spa->spa_refcount);
2154088f3894Sahrens 
2155fa9e4066Sahrens 	return (0);
2156fa9e4066Sahrens }
2157fa9e4066Sahrens 
/*
 * Import the given pool into the system.  We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 *
 *	pool		name of the pool to import
 *	config		pool configuration (typically read from the label or
 *			supplied by the user); trusted as the mosconfig for a
 *			non-root import
 *	props		optional pool properties to apply after the import
 *	isroot		B_TRUE when importing the root pool at boot time
 *	allowfaulted	B_TRUE to keep the pool around (cache-file style)
 *			even if spa_load() fails
 *
 * Returns 0 on success, EEXIST if a non-root pool of the same name already
 * exists, or the error from spa_load()/spa_prop_set() otherwise.
 */
static int
spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
    boolean_t isroot, boolean_t allowfaulted)
{
	spa_t *spa;
	char *altroot = NULL;
	int error, loaderr;
	nvlist_t *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	/*
	 * If a pool with this name exists, return failure -- unless this is
	 * a root-pool import, in which case the stale uninitialized entry
	 * is replaced with the config we just read in.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) != NULL) {
		if (isroot) {
			/*
			 * Remove the existing root pool from the
			 * namespace so that we can replace it with
			 * the correct config we just read in.
			 */
			ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
			spa_remove(spa);
		} else {
			mutex_exit(&spa_namespace_lock);
			return (EEXIST);
		}
	}

	/*
	 * Create and initialize the spa structure.  The altroot property,
	 * if present, is plucked out of 'props' before the pool is added.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa, spa_mode_global);

	if (allowfaulted)
		spa->spa_import_faulted = B_TRUE;
	spa->spa_is_root = isroot;

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig (unless this is a root pool) because
	 * the user-supplied config is actually the one to trust when
	 * doing an import.
	 */
	loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity anymore,
	 * and conflicts with spa_has_spare().
	 */
	if (!isroot && spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
		spa_load_spares(spa);
	}
	if (!isroot && spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
		spa_load_l2cache(spa);
	}

	/*
	 * Validate any aux (spare/l2cache) devices in the user-supplied
	 * vdev tree, but only if the load itself succeeded.
	 */
	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_L2CACHE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (props != NULL)
		spa_configfile_set(spa, props, B_FALSE);

	if (error != 0 || (props && spa_writeable(spa) &&
	    (error = spa_prop_set(spa, props)))) {
		if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
			/*
			 * If we failed to load the pool, but 'allowfaulted' is
			 * set, then manually set the config as if the config
			 * passed in was specified in the cache file.
			 */
			error = 0;
			spa->spa_import_faulted = B_FALSE;
			if (spa->spa_config == NULL)
				spa->spa_config = spa_config_generate(spa,
				    NULL, -1ULL, B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_FALSE, B_TRUE);
		} else {
			/* Hard failure: tear the pool back down entirely. */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_remove(spa);
		}
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Override any spares and level 2 cache devices as specified by
	 * the user, as these may have correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_spares.sav_config)
			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		if (spa->spa_l2cache.sav_config)
			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	if (spa_writeable(spa)) {
		/*
		 * Update the config cache to include the newly-imported pool.
		 */
		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot);
	}

	spa->spa_import_faulted = B_FALSE;
	mutex_exit(&spa_namespace_lock);

	return (0);
}
2312fa9e4066Sahrens 
2313e7cbe64fSgw #ifdef _KERNEL
2314e7cbe64fSgw /*
2315e7cbe64fSgw  * Build a "root" vdev for a top level vdev read in from a rootpool
2316e7cbe64fSgw  * device label.
2317e7cbe64fSgw  */
2318e7cbe64fSgw static void
2319e7cbe64fSgw spa_build_rootpool_config(nvlist_t *config)
2320e7cbe64fSgw {
2321e7cbe64fSgw 	nvlist_t *nvtop, *nvroot;
2322e7cbe64fSgw 	uint64_t pgid;
2323e7cbe64fSgw 
2324e7cbe64fSgw 	/*
2325e7cbe64fSgw 	 * Add this top-level vdev to the child array.
2326e7cbe64fSgw 	 */
2327e7cbe64fSgw 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
2328e7cbe64fSgw 	    == 0);
2329e7cbe64fSgw 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
2330e7cbe64fSgw 	    == 0);
2331e7cbe64fSgw 
2332e7cbe64fSgw 	/*
2333e7cbe64fSgw 	 * Put this pool's top-level vdevs into a root vdev.
2334e7cbe64fSgw 	 */
2335e7cbe64fSgw 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2336e7cbe64fSgw 	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
2337e7cbe64fSgw 	    == 0);
2338e7cbe64fSgw 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
2339e7cbe64fSgw 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
2340e7cbe64fSgw 	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2341e7cbe64fSgw 	    &nvtop, 1) == 0);
2342e7cbe64fSgw 
2343e7cbe64fSgw 	/*
2344e7cbe64fSgw 	 * Replace the existing vdev_tree with the new root vdev in
2345e7cbe64fSgw 	 * this pool's configuration (remove the old, add the new).
2346e7cbe64fSgw 	 */
2347e7cbe64fSgw 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2348e7cbe64fSgw 	nvlist_free(nvroot);
2349e7cbe64fSgw }
2350e7cbe64fSgw 
2351e7cbe64fSgw /*
2352e7cbe64fSgw  * Get the root pool information from the root disk, then import the root pool
2353e7cbe64fSgw  * during the system boot up time.
2354e7cbe64fSgw  */
2355f940fbb1SLin Ling extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
2356e7cbe64fSgw 
2357051aabe6Staylor int
2358051aabe6Staylor spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
2359e7cbe64fSgw     uint64_t *besttxg)
2360e7cbe64fSgw {
2361e7cbe64fSgw 	nvlist_t *config;
2362e7cbe64fSgw 	uint64_t txg;
2363f940fbb1SLin Ling 	int error;
2364e7cbe64fSgw 
2365f940fbb1SLin Ling 	if (error = vdev_disk_read_rootlabel(devpath, devid, &config))
2366f940fbb1SLin Ling 		return (error);
2367e7cbe64fSgw 
2368e7cbe64fSgw 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2369e7cbe64fSgw 
2370051aabe6Staylor 	if (bestconf != NULL)
2371e7cbe64fSgw 		*bestconf = config;
2372f940fbb1SLin Ling 	else
2373f940fbb1SLin Ling 		nvlist_free(config);
2374051aabe6Staylor 	*besttxg = txg;
2375051aabe6Staylor 	return (0);
2376e7cbe64fSgw }
2377e7cbe64fSgw 
2378e7cbe64fSgw boolean_t
2379e7cbe64fSgw spa_rootdev_validate(nvlist_t *nv)
2380e7cbe64fSgw {
2381e7cbe64fSgw 	uint64_t ival;
2382e7cbe64fSgw 
2383e7cbe64fSgw 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2384e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2385e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2386e7cbe64fSgw 		return (B_FALSE);
2387e7cbe64fSgw 
2388e7cbe64fSgw 	return (B_TRUE);
2389e7cbe64fSgw }
2390e7cbe64fSgw 
2391051aabe6Staylor 
2392051aabe6Staylor /*
2393051aabe6Staylor  * Given the boot device's physical path or devid, check if the device
2394051aabe6Staylor  * is in a valid state.  If so, return the configuration from the vdev
2395051aabe6Staylor  * label.
2396051aabe6Staylor  */
2397051aabe6Staylor int
2398051aabe6Staylor spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
2399051aabe6Staylor {
2400051aabe6Staylor 	nvlist_t *conf = NULL;
2401051aabe6Staylor 	uint64_t txg = 0;
2402051aabe6Staylor 	nvlist_t *nvtop, **child;
2403051aabe6Staylor 	char *type;
2404051aabe6Staylor 	char *bootpath = NULL;
2405051aabe6Staylor 	uint_t children, c;
2406051aabe6Staylor 	char *tmp;
2407f940fbb1SLin Ling 	int error;
2408051aabe6Staylor 
2409051aabe6Staylor 	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
2410051aabe6Staylor 		*tmp = '\0';
2411f940fbb1SLin Ling 	if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) {
2412051aabe6Staylor 		cmn_err(CE_NOTE, "error reading device label");
2413f940fbb1SLin Ling 		return (error);
2414051aabe6Staylor 	}
2415051aabe6Staylor 	if (txg == 0) {
2416051aabe6Staylor 		cmn_err(CE_NOTE, "this device is detached");
2417051aabe6Staylor 		nvlist_free(conf);
2418051aabe6Staylor 		return (EINVAL);
2419051aabe6Staylor 	}
2420051aabe6Staylor 
2421051aabe6Staylor 	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
2422051aabe6Staylor 	    &nvtop) == 0);
2423051aabe6Staylor 	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);
2424051aabe6Staylor 
2425051aabe6Staylor 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2426051aabe6Staylor 		if (spa_rootdev_validate(nvtop)) {
2427051aabe6Staylor 			goto out;
2428051aabe6Staylor 		} else {
2429051aabe6Staylor 			nvlist_free(conf);
2430051aabe6Staylor 			return (EINVAL);
2431051aabe6Staylor 		}
2432051aabe6Staylor 	}
2433051aabe6Staylor 
2434051aabe6Staylor 	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);
2435051aabe6Staylor 
2436051aabe6Staylor 	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
2437051aabe6Staylor 	    &child, &children) == 0);
2438051aabe6Staylor 
2439051aabe6Staylor 	/*
2440051aabe6Staylor 	 * Go thru vdevs in the mirror to see if the given device
2441051aabe6Staylor 	 * has the most recent txg. Only the device with the most
2442051aabe6Staylor 	 * recent txg has valid information and should be booted.
2443051aabe6Staylor 	 */
2444051aabe6Staylor 	for (c = 0; c < children; c++) {
2445051aabe6Staylor 		char *cdevid, *cpath;
2446051aabe6Staylor 		uint64_t tmptxg;
2447051aabe6Staylor 
2448ffb5616eSLin Ling 		cpath = NULL;
2449ffb5616eSLin Ling 		cdevid = NULL;
2450051aabe6Staylor 		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
2451ffb5616eSLin Ling 		    &cpath) != 0 && nvlist_lookup_string(child[c],
2452ffb5616eSLin Ling 		    ZPOOL_CONFIG_DEVID, &cdevid) != 0)
2453051aabe6Staylor 			return (EINVAL);
2454f4565e39SLin Ling 		if ((spa_check_rootconf(cpath, cdevid, NULL,
2455f4565e39SLin Ling 		    &tmptxg) == 0) && (tmptxg > txg)) {
2456051aabe6Staylor 			txg = tmptxg;
2457051aabe6Staylor 			VERIFY(nvlist_lookup_string(child[c],
2458051aabe6Staylor 			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
2459051aabe6Staylor 		}
2460051aabe6Staylor 	}
2461051aabe6Staylor 
2462051aabe6Staylor 	/* Does the best device match the one we've booted from? */
2463051aabe6Staylor 	if (bootpath) {
2464051aabe6Staylor 		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
2465051aabe6Staylor 		return (EINVAL);
2466051aabe6Staylor 	}
2467051aabe6Staylor out:
2468051aabe6Staylor 	*bestconf = conf;
2469051aabe6Staylor 	return (0);
2470051aabe6Staylor }
2471051aabe6Staylor 
2472e7cbe64fSgw /*
2473e7cbe64fSgw  * Import a root pool.
2474e7cbe64fSgw  *
2475051aabe6Staylor  * For x86. devpath_list will consist of devid and/or physpath name of
2476051aabe6Staylor  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
2477051aabe6Staylor  * The GRUB "findroot" command will return the vdev we should boot.
2478e7cbe64fSgw  *
2479e7cbe64fSgw  * For Sparc, devpath_list consists the physpath name of the booting device
2480e7cbe64fSgw  * no matter the rootpool is a single device pool or a mirrored pool.
2481e7cbe64fSgw  * e.g.
2482e7cbe64fSgw  *	"/pci@1f,0/ide@d/disk@0,0:a"
2483e7cbe64fSgw  */
2484e7cbe64fSgw int
2485051aabe6Staylor spa_import_rootpool(char *devpath, char *devid)
2486e7cbe64fSgw {
2487e7cbe64fSgw 	nvlist_t *conf = NULL;
2488e7cbe64fSgw 	char *pname;
2489e7cbe64fSgw 	int error;
2490e7cbe64fSgw 
2491e7cbe64fSgw 	/*
2492e7cbe64fSgw 	 * Get the vdev pathname and configuation from the most
2493e7cbe64fSgw 	 * recently updated vdev (highest txg).
2494e7cbe64fSgw 	 */
2495051aabe6Staylor 	if (error = spa_get_rootconf(devpath, devid, &conf))
2496e7cbe64fSgw 		goto msg_out;
2497e7cbe64fSgw 
2498e7cbe64fSgw 	/*
2499e7cbe64fSgw 	 * Add type "root" vdev to the config.
2500e7cbe64fSgw 	 */
2501e7cbe64fSgw 	spa_build_rootpool_config(conf);
2502e7cbe64fSgw 
2503e7cbe64fSgw 	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2504e7cbe64fSgw 
2505bf82a41bSeschrock 	/*
2506bf82a41bSeschrock 	 * We specify 'allowfaulted' for this to be treated like spa_open()
2507bf82a41bSeschrock 	 * instead of spa_import().  This prevents us from marking vdevs as
2508bf82a41bSeschrock 	 * persistently unavailable, and generates FMA ereports as if it were a
2509bf82a41bSeschrock 	 * pool open, not import.
2510bf82a41bSeschrock 	 */
2511bf82a41bSeschrock 	error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
251200504c01SLin Ling 	ASSERT(error != EEXIST);
2513e7cbe64fSgw 
2514e7cbe64fSgw 	nvlist_free(conf);
2515e7cbe64fSgw 	return (error);
2516e7cbe64fSgw 
2517e7cbe64fSgw msg_out:
2518051aabe6Staylor 	cmn_err(CE_NOTE, "\n"
2519e7cbe64fSgw 	    "  ***************************************************  \n"
2520e7cbe64fSgw 	    "  *  This device is not bootable!                   *  \n"
2521e7cbe64fSgw 	    "  *  It is either offlined or detached or faulted.  *  \n"
2522e7cbe64fSgw 	    "  *  Please try to boot from a different device.    *  \n"
2523051aabe6Staylor 	    "  ***************************************************  ");
2524e7cbe64fSgw 
2525e7cbe64fSgw 	return (error);
2526e7cbe64fSgw }
2527e7cbe64fSgw #endif
2528e7cbe64fSgw 
2529e7cbe64fSgw /*
2530e7cbe64fSgw  * Import a non-root pool into the system.
2531e7cbe64fSgw  */
2532e7cbe64fSgw int
2533e7cbe64fSgw spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2534e7cbe64fSgw {
2535c5904d13Seschrock 	return (spa_import_common(pool, config, props, B_FALSE, B_FALSE));
2536e7cbe64fSgw }
2537e7cbe64fSgw 
2538c5904d13Seschrock int
2539c5904d13Seschrock spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props)
2540c5904d13Seschrock {
2541c5904d13Seschrock 	return (spa_import_common(pool, config, props, B_FALSE, B_TRUE));
2542c5904d13Seschrock }
2543c5904d13Seschrock 
2544c5904d13Seschrock 
2545fa9e4066Sahrens /*
2546fa9e4066Sahrens  * This (illegal) pool name is used when temporarily importing a spa_t in order
2547fa9e4066Sahrens  * to get the vdev stats associated with the imported devices.
2548fa9e4066Sahrens  */
2549fa9e4066Sahrens #define	TRYIMPORT_NAME	"$import"
2550fa9e4066Sahrens 
/*
 * Probe a candidate pool configuration without actually importing it.
 * The pool is loaded read-only under the reserved name TRYIMPORT_NAME,
 * its current config (plus spares, l2cache, and bootfs information) is
 * generated, and the temporary spa_t is torn down again.
 *
 * Returns the generated config nvlist (caller frees), or NULL if
 * 'tryconfig' lacks a pool name/state or could not be parsed at all.
 */
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = NULL;
	char *poolname;
	spa_t *spa;
	uint64_t state;

	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (NULL);

	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (NULL);

	/*
	 * Create and initialize the spa structure.
	 */
	mutex_enter(&spa_namespace_lock);
	spa = spa_add(TRYIMPORT_NAME, NULL);
	spa_activate(spa, FREAD);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

	/*
	 * If 'tryconfig' was at least parsable, return the current config.
	 */
	if (spa->spa_root_vdev != NULL) {
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		/* Report the caller's pool name/state, not TRYIMPORT_NAME. */
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
		    spa->spa_uberblock.ub_timestamp) == 0);

		/*
		 * If the bootfs property exists on this pool then we
		 * copy it out so that external consumers can tell which
		 * pools are bootable.
		 */
		if (spa->spa_bootfs) {
			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

			/*
			 * We have to play games with the name since the
			 * pool was opened as TRYIMPORT_NAME.
			 */
			if (dsl_dsobj_to_dsname(spa_name(spa),
			    spa->spa_bootfs, tmpname) == 0) {
				char *cp;
				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

				/*
				 * Swap the TRYIMPORT_NAME prefix for the
				 * real pool name before exporting it.
				 */
				cp = strchr(tmpname, '/');
				if (cp == NULL) {
					(void) strlcpy(dsname, tmpname,
					    MAXPATHLEN);
				} else {
					(void) snprintf(dsname, MAXPATHLEN,
					    "%s/%s", poolname, ++cp);
				}
				VERIFY(nvlist_add_string(config,
				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
				kmem_free(dsname, MAXPATHLEN);
			}
			kmem_free(tmpname, MAXPATHLEN);
		}

		/*
		 * Add the list of hot spares and level 2 cache devices.
		 */
		spa_add_spares(spa, config);
		spa_add_l2cache(spa, config);
	}

	/* Tear down the temporary pool; only 'config' survives. */
	spa_unload(spa);
	spa_deactivate(spa);
	spa_remove(spa);
	mutex_exit(&spa_namespace_lock);

	return (config);
}
2637fa9e4066Sahrens 
/*
 * Pool export/destroy
 *
 * The act of destroying or exporting a pool is very simple.  We make sure there
 * is no more pending I/O and any references to the pool are gone.  Then, we
 * update the pool state and sync all the labels to disk, removing the
 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
 * we don't sync the labels or remove the configuration cache.
 *
 *	pool		name of the pool to export/destroy/reset
 *	new_state	POOL_STATE_EXPORTED, POOL_STATE_DESTROYED, or
 *			POOL_STATE_UNINITIALIZED (reset)
 *	oldconfig	if non-NULL, receives a dup of the pool's config
 *	force		export even with an active shared spare
 *	hardforce	skip the final label sync and cache-file update
 *
 * Returns 0, or EROFS/ENOENT/EBUSY/EXDEV on failure.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
    boolean_t force, boolean_t hardforce)
{
	spa_t *spa;

	if (oldconfig)
		*oldconfig = NULL;

	/* Exporting or destroying requires write access to the devices. */
	if (!(spa_mode_global & FWRITE))
		return (EROFS);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}

	/*
	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
	 * reacquire the namespace lock, and see if we can export.
	 */
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
	spa_async_suspend(spa);
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);

	/*
	 * The pool will be in core if it's openable,
	 * in which case we can modify its state.
	 */
	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
		/*
		 * Objsets may be open only because they're dirty, so we
		 * have to force it to sync before checking spa_refcnt.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * A pool cannot be exported or destroyed if there are active
		 * references.  If we are resetting a pool, allow references by
		 * fault injection handlers.
		 */
		if (!spa_refcount_zero(spa) ||
		    (spa->spa_inject_ref != 0 &&
		    new_state != POOL_STATE_UNINITIALIZED)) {
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EBUSY);
		}

		/*
		 * A pool cannot be exported if it has an active shared spare.
		 * This is to prevent other pools stealing the active spare
		 * from an exported pool. At user's own will, such pool can
		 * be forcedly exported.
		 */
		if (!force && new_state == POOL_STATE_EXPORTED &&
		    spa_has_active_shared_spare(spa)) {
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EXDEV);
		}

		/*
		 * We want this to be reflected on every label,
		 * so mark them all dirty.  spa_unload() will do the
		 * final sync that pushes these changes out.
		 */
		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
			spa->spa_state = new_state;
			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
			vdev_config_dirty(spa->spa_root_vdev);
			spa_config_exit(spa, SCL_ALL, FTAG);
		}
	}

	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);

	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
		spa_unload(spa);
		spa_deactivate(spa);
	}

	/* Hand the last config back to the caller, if requested. */
	if (oldconfig && spa->spa_config)
		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

	if (new_state != POOL_STATE_UNINITIALIZED) {
		if (!hardforce)
			spa_config_sync(spa, B_TRUE, B_TRUE);
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);

	return (0);
}
2745fa9e4066Sahrens 
2746fa9e4066Sahrens /*
2747fa9e4066Sahrens  * Destroy a storage pool.
2748fa9e4066Sahrens  */
2749fa9e4066Sahrens int
2750fa9e4066Sahrens spa_destroy(char *pool)
2751fa9e4066Sahrens {
2752394ab0cbSGeorge Wilson 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
2753394ab0cbSGeorge Wilson 	    B_FALSE, B_FALSE));
2754fa9e4066Sahrens }
2755fa9e4066Sahrens 
2756fa9e4066Sahrens /*
2757fa9e4066Sahrens  * Export a storage pool.
2758fa9e4066Sahrens  */
2759fa9e4066Sahrens int
2760394ab0cbSGeorge Wilson spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
2761394ab0cbSGeorge Wilson     boolean_t hardforce)
2762fa9e4066Sahrens {
2763394ab0cbSGeorge Wilson 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
2764394ab0cbSGeorge Wilson 	    force, hardforce));
2765fa9e4066Sahrens }
2766fa9e4066Sahrens 
/*
 * Similar to spa_export(), this unloads the spa_t without actually removing it
 * from the namespace in any way.
 *
 * Implemented as spa_export_common() with new_state =
 * POOL_STATE_UNINITIALIZED, no oldconfig, and no force/hardforce.
 */
int
spa_reset(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
	    B_FALSE, B_FALSE));
}
2777ea8dc4b6Seschrock 
2778fa9e4066Sahrens /*
2779fa9e4066Sahrens  * ==========================================================================
2780fa9e4066Sahrens  * Device manipulation
2781fa9e4066Sahrens  * ==========================================================================
2782fa9e4066Sahrens  */
2783fa9e4066Sahrens 
/*
 * Add a device to a storage pool.
 *
 * 'nvroot' may describe new top-level vdevs, hot spares, and/or L2ARC
 * cache devices.  New top-level vdevs are grafted onto the root vdev;
 * aux devices are appended to the pool's spare/l2cache lists.
 * Returns 0 on success or an errno value; on any error path the
 * partially built tree is torn down via spa_vdev_exit().
 */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
	uint64_t txg;
	int error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *tvd;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	txg = spa_vdev_enter(spa);

	/* Build an unparented vdev tree from the caller-supplied config. */
	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, error));

	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */

	/* Missing spare/l2cache arrays simply mean "none requested". */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
	    &nspares) != 0)
		nspares = 0;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
	    &nl2cache) != 0)
		nl2cache = 0;

	/* The request must add at least one vdev, spare, or cache device. */
	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
		return (spa_vdev_exit(spa, vd, txg, EINVAL));

	if (vd->vdev_children != 0 &&
	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
		return (spa_vdev_exit(spa, vd, txg, error));

	/*
	 * We must validate the spares and l2cache devices after checking the
	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
	 */
	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, vd, txg, error));

	/*
	 * Transfer each new top-level vdev from vd to rvd.
	 */
	for (int c = 0; c < vd->vdev_children; c++) {
		tvd = vd->vdev_child[c];
		vdev_remove_child(vd, tvd);
		tvd->vdev_id = rvd->vdev_children;
		vdev_add_child(rvd, tvd);
		vdev_config_dirty(tvd);
	}

	if (nspares != 0) {
		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
		    ZPOOL_CONFIG_SPARES);
		spa_load_spares(spa);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	if (nl2cache != 0) {
		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
		    ZPOOL_CONFIG_L2CACHE);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	/*
	 * We have to be careful when adding new vdevs to an existing pool.
	 * If other threads start allocating from these vdevs before we
	 * sync the config cache, and we lose power, then upon reboot we may
	 * fail to open the pool because there are DVAs that the config cache
	 * can't translate.  Therefore, we first add the vdevs without
	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
	 * and then let spa_config_update() initialize the new metaslabs.
	 *
	 * spa_load() checks for added-but-not-initialized vdevs, so that
	 * if we lose power at any point in this sequence, the remaining
	 * steps will be completed the next time we load the pool.
	 */
	(void) spa_vdev_exit(spa, vd, txg, 0);

	mutex_enter(&spa_namespace_lock);
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
	mutex_exit(&spa_namespace_lock);

	return (0);
}
2873fa9e4066Sahrens 
/*
 * Attach a device to a mirror.  The arguments are the path to any device
 * in the mirror, and the nvroot for the new device.  If the path specifies
 * a device that is not mirrored, we automatically insert the mirror vdev.
 *
 * If 'replacing' is specified, the new device is intended to replace the
 * existing device; in this case the two devices are made into their own
 * mirror using the 'replacing' vdev, which is functionally identical to
 * the mirror vdev (it actually reuses all the same ops) but has a few
 * extra rules: you can't attach to it after it's been created, and upon
 * completion of resilvering, the first disk (the one being replaced)
 * is automatically detached.
 *
 * 'guid' identifies the existing leaf vdev (oldvd); 'nvroot' must
 * describe exactly one new leaf device.  Returns 0 on success or an
 * errno value.
 */
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
	uint64_t txg, open_txg;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
	vdev_ops_t *pvops;
	dmu_tx_t *tx;
	char *oldvdpath, *newvdpath;
	int newvd_isspare;
	int error;

	txg = spa_vdev_enter(spa);

	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (oldvd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	/* Only leaf vdevs can be attached to. */
	if (!oldvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = oldvd->vdev_parent;

	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, EINVAL));

	/* The caller must supply exactly one new device. */
	if (newrootvd->vdev_children != 1)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	newvd = newrootvd->vdev_child[0];

	if (!newvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
		return (spa_vdev_exit(spa, newrootvd, txg, error));

	/*
	 * Spares can't replace logs
	 */
	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

	if (!replacing) {
		/*
		 * For attach, the only allowable parent is a mirror or the root
		 * vdev.
		 */
		if (pvd->vdev_ops != &vdev_mirror_ops &&
		    pvd->vdev_ops != &vdev_root_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		pvops = &vdev_mirror_ops;
	} else {
		/*
		 * Active hot spares can only be replaced by inactive hot
		 * spares.
		 */
		if (pvd->vdev_ops == &vdev_spare_ops &&
		    pvd->vdev_child[1] == oldvd &&
		    !spa_has_spare(spa, newvd->vdev_guid))
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		/*
		 * If the source is a hot spare, and the parent isn't already a
		 * spare, then we want to create a new hot spare.  Otherwise, we
		 * want to create a replacing vdev.  The user is not allowed to
		 * attach to a spared vdev child unless the 'isspare' state is
		 * the same (spare replaces spare, non-spare replaces
		 * non-spare).
		 */
		if (pvd->vdev_ops == &vdev_replacing_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops == &vdev_spare_ops &&
		    newvd->vdev_isspare != oldvd->vdev_isspare)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops != &vdev_spare_ops &&
		    newvd->vdev_isspare)
			pvops = &vdev_spare_ops;
		else
			pvops = &vdev_replacing_ops;
	}

	/*
	 * Compare the new device size with the replaceable/attachable
	 * device size.
	 */
	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));

	/*
	 * The new device cannot have a higher alignment requirement
	 * than the top-level vdev.
	 */
	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));

	/*
	 * If this is an in-place replacement, update oldvd's path and devid
	 * to make it distinguishable from newvd, and unopenable from now on.
	 */
	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
		spa_strfree(oldvd->vdev_path);
		/* +5 covers the "/old" suffix plus the NUL terminator. */
		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
		    KM_SLEEP);
		(void) sprintf(oldvd->vdev_path, "%s/%s",
		    newvd->vdev_path, "old");
		if (oldvd->vdev_devid != NULL) {
			spa_strfree(oldvd->vdev_devid);
			oldvd->vdev_devid = NULL;
		}
	}

	/*
	 * If the parent is not a mirror, or if we're replacing, insert the new
	 * mirror/replacing/spare vdev above oldvd.
	 */
	if (pvd->vdev_ops != pvops)
		pvd = vdev_add_parent(oldvd, pvops);

	ASSERT(pvd->vdev_top->vdev_parent == rvd);
	ASSERT(pvd->vdev_ops == pvops);
	ASSERT(oldvd->vdev_parent == pvd);

	/*
	 * Extract the new device from its root and add it to pvd.
	 */
	vdev_remove_child(newrootvd, newvd);
	newvd->vdev_id = pvd->vdev_children;
	vdev_add_child(pvd, newvd);

	/*
	 * If newvd is smaller than oldvd, but larger than its rsize,
	 * the addition of newvd may have decreased our parent's asize.
	 */
	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);

	tvd = newvd->vdev_top;
	ASSERT(pvd->vdev_top == tvd);
	ASSERT(tvd->vdev_parent == rvd);

	vdev_config_dirty(tvd);

	/*
	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
	 */
	open_txg = txg + TXG_CONCURRENT_STATES - 1;

	vdev_dtl_dirty(newvd, DTL_MISSING,
	    TXG_INITIAL, open_txg - TXG_INITIAL + 1);

	if (newvd->vdev_isspare)
		spa_spare_activate(newvd);
	/* Snapshot the paths now; the vdevs may be torn down after exit. */
	oldvdpath = spa_strdup(oldvd->vdev_path);
	newvdpath = spa_strdup(newvd->vdev_path);
	newvd_isspare = newvd->vdev_isspare;

	/*
	 * Mark newvd's DTL dirty in this txg.
	 */
	vdev_dirty(tvd, VDD_DTL, newvd, txg);

	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);

	/* Record the attach in the pool history (best effort). */
	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
		spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
		    CRED(),  "%s vdev=%s %s vdev=%s",
		    replacing && newvd_isspare ? "spare in" :
		    replacing ? "replace" : "attach", newvdpath,
		    replacing ? "for" : "to", oldvdpath);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	spa_strfree(oldvdpath);
	spa_strfree(newvdpath);

	/*
	 * Kick off a resilver to update newvd.
	 */
	VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);

	return (0);
}
3076fa9e4066Sahrens 
/*
 * Detach a device from a mirror or replacing vdev.
 * If 'replace_done' is specified, only detach if the parent
 * is a replacing vdev.
 *
 * 'guid' identifies the leaf vdev to detach; 'pguid', if nonzero, is the
 * expected guid of its parent (used to detect a racing topology change --
 * see the comment below).  Returns 0 on success or an errno value.
 */
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
	uint64_t txg;
	int error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *pvd, *cvd, *tvd;
	boolean_t unspare = B_FALSE;
	uint64_t unspare_guid;
	size_t len;

	txg = spa_vdev_enter(spa);

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (vd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = vd->vdev_parent;

	/*
	 * If the parent/child relationship is not as expected, don't do it.
	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
	 * vdev that's replacing B with C.  The user's intent in replacing
	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
	 * the replace by detaching C, the expected behavior is to end up
	 * M(A,B).  But suppose that right after deciding to detach C,
	 * the replacement of B completes.  We would have M(A,C), and then
	 * ask to detach C, which would leave us with just A -- not what
	 * the user wanted.  To prevent this, we make sure that the
	 * parent/child relationship hasn't changed -- in this example,
	 * that C's parent is still the replacing vdev R.
	 */
	if (pvd->vdev_guid != pguid && pguid != 0)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	/*
	 * If replace_done is specified, only remove this device if it's
	 * the first child of a replacing vdev.  For the 'spare' vdev, either
	 * disk can be removed.
	 */
	if (replace_done) {
		if (pvd->vdev_ops == &vdev_replacing_ops) {
			if (vd->vdev_id != 0)
				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		} else if (pvd->vdev_ops != &vdev_spare_ops) {
			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		}
	}

	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
	    spa_version(spa) >= SPA_VERSION_SPARES);

	/*
	 * Only mirror, replacing, and spare vdevs support detach.
	 */
	if (pvd->vdev_ops != &vdev_replacing_ops &&
	    pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_spare_ops)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * If this device has the only valid copy of some data,
	 * we cannot safely detach it.
	 */
	if (vdev_dtl_required(vd))
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	ASSERT(pvd->vdev_children >= 2);

	/*
	 * If we are detaching the second disk from a replacing vdev, then
	 * check to see if we changed the original vdev's path to have "/old"
	 * at the end in spa_vdev_attach().  If so, undo that change now.
	 */
	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
	    pvd->vdev_child[0]->vdev_path != NULL &&
	    pvd->vdev_child[1]->vdev_path != NULL) {
		ASSERT(pvd->vdev_child[1] == vd);
		cvd = pvd->vdev_child[0];
		len = strlen(vd->vdev_path);
		if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
		    strcmp(cvd->vdev_path + len, "/old") == 0) {
			spa_strfree(cvd->vdev_path);
			cvd->vdev_path = spa_strdup(vd->vdev_path);
		}
	}

	/*
	 * If we are detaching the original disk from a spare, then it implies
	 * that the spare should become a real disk, and be removed from the
	 * active spare list for the pool.
	 */
	if (pvd->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare)
		unspare = B_TRUE;

	/*
	 * Erase the disk labels so the disk can be used for other things.
	 * This must be done after all other error cases are handled,
	 * but before we disembowel vd (so we can still do I/O to it).
	 * But if we can't do it, don't treat the error as fatal --
	 * it may be that the unwritability of the disk is the reason
	 * it's being detached!
	 */
	/* NOTE: return value deliberately ignored (see comment above). */
	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	/*
	 * Remove vd from its parent and compact the parent's children.
	 */
	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	/*
	 * Remember one of the remaining children so we can get tvd below.
	 */
	cvd = pvd->vdev_child[0];

	/*
	 * If we need to remove the remaining child from the list of hot spares,
	 * do it now, marking the vdev as no longer a spare in the process.
	 * We must do this before vdev_remove_parent(), because that can
	 * change the GUID if it creates a new toplevel GUID.  For a similar
	 * reason, we must remove the spare now, in the same txg as the detach;
	 * otherwise someone could attach a new sibling, change the GUID, and
	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
	 */
	if (unspare) {
		ASSERT(cvd->vdev_isspare);
		spa_spare_remove(cvd);
		unspare_guid = cvd->vdev_guid;
		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
	}

	/*
	 * If the parent mirror/replacing vdev only has one child,
	 * the parent is no longer needed.  Remove it from the tree.
	 */
	if (pvd->vdev_children == 1)
		vdev_remove_parent(cvd);

	/*
	 * We don't set tvd until now because the parent we just removed
	 * may have been the previous top-level vdev.
	 */
	tvd = cvd->vdev_top;
	ASSERT(tvd->vdev_parent == rvd);

	/*
	 * Reevaluate the parent vdev state.
	 */
	vdev_propagate_state(cvd);

	/*
	 * If the device we just detached was smaller than the others, it may be
	 * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
	 * can't fail because the existing metaslabs are already in core, so
	 * there's nothing to read from disk.
	 */
	VERIFY(vdev_metaslab_init(tvd, txg) == 0);

	vdev_config_dirty(tvd);

	/*
	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
	 * But first make sure we're not on any *other* txg's DTL list, to
	 * prevent vd from being accessed after it's freed.
	 */
	for (int t = 0; t < TXG_SIZE; t++)
		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
	vd->vdev_detached = B_TRUE;
	vdev_dirty(tvd, VDD_DTL, vd, txg);

	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);

	error = spa_vdev_exit(spa, vd, txg, 0);

	/*
	 * If this was the removal of the original device in a hot spare vdev,
	 * then we want to go through and remove the device from the hot spare
	 * list of every other pool.
	 */
	if (unspare) {
		spa_t *myspa = spa;
		spa = NULL;
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			if (spa->spa_state != POOL_STATE_ACTIVE)
				continue;
			if (spa == myspa)
				continue;
			/* Hold a ref so the spa survives dropping the lock. */
			spa_open_ref(spa, FTAG);
			mutex_exit(&spa_namespace_lock);
			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
			mutex_enter(&spa_namespace_lock);
			spa_close(spa, FTAG);
		}
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
328899653d4eSeschrock 
3289e14bb325SJeff Bonwick static nvlist_t *
3290e14bb325SJeff Bonwick spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
329199653d4eSeschrock {
3292e14bb325SJeff Bonwick 	for (int i = 0; i < count; i++) {
3293e14bb325SJeff Bonwick 		uint64_t guid;
329499653d4eSeschrock 
3295e14bb325SJeff Bonwick 		VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3296e14bb325SJeff Bonwick 		    &guid) == 0);
329799653d4eSeschrock 
3298e14bb325SJeff Bonwick 		if (guid == target_guid)
3299e14bb325SJeff Bonwick 			return (nvpp[i]);
330099653d4eSeschrock 	}
330199653d4eSeschrock 
3302e14bb325SJeff Bonwick 	return (NULL);
3303fa94a07fSbrendan }
3304fa94a07fSbrendan 
3305e14bb325SJeff Bonwick static void
3306e14bb325SJeff Bonwick spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3307e14bb325SJeff Bonwick 	nvlist_t *dev_to_remove)
3308fa94a07fSbrendan {
3309e14bb325SJeff Bonwick 	nvlist_t **newdev = NULL;
3310fa94a07fSbrendan 
3311e14bb325SJeff Bonwick 	if (count > 1)
3312e14bb325SJeff Bonwick 		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3313fa94a07fSbrendan 
3314e14bb325SJeff Bonwick 	for (int i = 0, j = 0; i < count; i++) {
3315e14bb325SJeff Bonwick 		if (dev[i] == dev_to_remove)
3316e14bb325SJeff Bonwick 			continue;
3317e14bb325SJeff Bonwick 		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3318fa94a07fSbrendan 	}
3319fa94a07fSbrendan 
3320e14bb325SJeff Bonwick 	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3321e14bb325SJeff Bonwick 	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3322fa94a07fSbrendan 
3323e14bb325SJeff Bonwick 	for (int i = 0; i < count - 1; i++)
3324e14bb325SJeff Bonwick 		nvlist_free(newdev[i]);
3325fa94a07fSbrendan 
3326e14bb325SJeff Bonwick 	if (count > 1)
3327e14bb325SJeff Bonwick 		kmem_free(newdev, (count - 1) * sizeof (void *));
3328fa94a07fSbrendan }
3329fa94a07fSbrendan 
/*
 * Remove a device from the pool.  Currently, this supports removing only hot
 * spares and level 2 ARC devices.
 *
 * 'guid' identifies the device.  If 'unspare' is set, a hot spare that is
 * currently in use in this pool may still be removed.  May be called with
 * or without spa_namespace_lock held: if the caller already holds it, the
 * spa_vdev_enter()/spa_vdev_exit() protocol (and its config sync) is
 * skipped and the error is returned directly.
 */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, **l2cache, *nv;
	uint_t nspares, nl2cache;
	uint64_t txg = 0;
	int error = 0;
	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);

	if (!locked)
		txg = spa_vdev_enter(spa);

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (spa->spa_spares.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
		/*
		 * Only remove the hot spare if it's not currently in use
		 * in this pool.
		 */
		if (vd == NULL || unspare) {
			spa_vdev_remove_aux(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
			spa_load_spares(spa);
			spa->spa_spares.sav_sync = B_TRUE;
		} else {
			error = EBUSY;
		}
	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
		/*
		 * Cache devices can always be removed.
		 */
		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	} else if (vd != NULL) {
		/*
		 * Normal vdevs cannot be removed (yet).
		 */
		error = ENOTSUP;
	} else {
		/*
		 * There is no vdev of any kind with the specified guid.
		 */
		error = ENOENT;
	}

	if (!locked)
		return (spa_vdev_exit(spa, NULL, txg, error));

	return (error);
}
3393fa9e4066Sahrens 
3394fa9e4066Sahrens /*
33953d7072f8Seschrock  * Find any device that's done replacing, or a vdev marked 'unspare' that's
33963d7072f8Seschrock  * current spared, so we can detach it.
3397fa9e4066Sahrens  */
3398ea8dc4b6Seschrock static vdev_t *
33993d7072f8Seschrock spa_vdev_resilver_done_hunt(vdev_t *vd)
3400fa9e4066Sahrens {
3401ea8dc4b6Seschrock 	vdev_t *newvd, *oldvd;
3402fa9e4066Sahrens 	int c;
3403fa9e4066Sahrens 
3404ea8dc4b6Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
34053d7072f8Seschrock 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3406ea8dc4b6Seschrock 		if (oldvd != NULL)
3407ea8dc4b6Seschrock 			return (oldvd);
3408ea8dc4b6Seschrock 	}
3409fa9e4066Sahrens 
34103d7072f8Seschrock 	/*
34113d7072f8Seschrock 	 * Check for a completed replacement.
34123d7072f8Seschrock 	 */
3413fa9e4066Sahrens 	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3414ea8dc4b6Seschrock 		oldvd = vd->vdev_child[0];
3415ea8dc4b6Seschrock 		newvd = vd->vdev_child[1];
3416ea8dc4b6Seschrock 
34178ad4d6ddSJeff Bonwick 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
34188ad4d6ddSJeff Bonwick 		    !vdev_dtl_required(oldvd))
3419ea8dc4b6Seschrock 			return (oldvd);
3420fa9e4066Sahrens 	}
3421ea8dc4b6Seschrock 
34223d7072f8Seschrock 	/*
34233d7072f8Seschrock 	 * Check for a completed resilver with the 'unspare' flag set.
34243d7072f8Seschrock 	 */
34253d7072f8Seschrock 	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
34263d7072f8Seschrock 		newvd = vd->vdev_child[0];
34273d7072f8Seschrock 		oldvd = vd->vdev_child[1];
34283d7072f8Seschrock 
34293d7072f8Seschrock 		if (newvd->vdev_unspare &&
34308ad4d6ddSJeff Bonwick 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
34318ad4d6ddSJeff Bonwick 		    !vdev_dtl_required(oldvd)) {
34323d7072f8Seschrock 			newvd->vdev_unspare = 0;
34333d7072f8Seschrock 			return (oldvd);
34343d7072f8Seschrock 		}
34353d7072f8Seschrock 	}
34363d7072f8Seschrock 
3437ea8dc4b6Seschrock 	return (NULL);
3438fa9e4066Sahrens }
3439fa9e4066Sahrens 
3440ea8dc4b6Seschrock static void
34413d7072f8Seschrock spa_vdev_resilver_done(spa_t *spa)
3442fa9e4066Sahrens {
34438ad4d6ddSJeff Bonwick 	vdev_t *vd, *pvd, *ppvd;
34448ad4d6ddSJeff Bonwick 	uint64_t guid, sguid, pguid, ppguid;
3445ea8dc4b6Seschrock 
34468ad4d6ddSJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3447ea8dc4b6Seschrock 
34483d7072f8Seschrock 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
34498ad4d6ddSJeff Bonwick 		pvd = vd->vdev_parent;
34508ad4d6ddSJeff Bonwick 		ppvd = pvd->vdev_parent;
3451ea8dc4b6Seschrock 		guid = vd->vdev_guid;
34528ad4d6ddSJeff Bonwick 		pguid = pvd->vdev_guid;
34538ad4d6ddSJeff Bonwick 		ppguid = ppvd->vdev_guid;
34548ad4d6ddSJeff Bonwick 		sguid = 0;
345599653d4eSeschrock 		/*
345699653d4eSeschrock 		 * If we have just finished replacing a hot spared device, then
345799653d4eSeschrock 		 * we need to detach the parent's first child (the original hot
345899653d4eSeschrock 		 * spare) as well.
345999653d4eSeschrock 		 */
34608ad4d6ddSJeff Bonwick 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
346199653d4eSeschrock 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
34628ad4d6ddSJeff Bonwick 			ASSERT(ppvd->vdev_children == 2);
34638ad4d6ddSJeff Bonwick 			sguid = ppvd->vdev_child[1]->vdev_guid;
346499653d4eSeschrock 		}
34658ad4d6ddSJeff Bonwick 		spa_config_exit(spa, SCL_ALL, FTAG);
34668ad4d6ddSJeff Bonwick 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
3467ea8dc4b6Seschrock 			return;
34688ad4d6ddSJeff Bonwick 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
346999653d4eSeschrock 			return;
34708ad4d6ddSJeff Bonwick 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3471fa9e4066Sahrens 	}
3472fa9e4066Sahrens 
34738ad4d6ddSJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
3474fa9e4066Sahrens }
3475fa9e4066Sahrens 
3476c67d9675Seschrock /*
3477c67d9675Seschrock  * Update the stored path for this vdev.  Dirty the vdev configuration, relying
3478c67d9675Seschrock  * on spa_vdev_enter/exit() to synchronize the labels and cache.
3479c67d9675Seschrock  */
3480c67d9675Seschrock int
3481c67d9675Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3482c67d9675Seschrock {
3483c5904d13Seschrock 	vdev_t *vd;
3484c67d9675Seschrock 	uint64_t txg;
3485c67d9675Seschrock 
3486c67d9675Seschrock 	txg = spa_vdev_enter(spa);
3487c67d9675Seschrock 
3488c5904d13Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) {
348999653d4eSeschrock 		/*
3490c5904d13Seschrock 		 * Determine if this is a reference to a hot spare device.  If
3491c5904d13Seschrock 		 * it is, update the path manually as there is no associated
3492c5904d13Seschrock 		 * vdev_t that can be synced to disk.
349399653d4eSeschrock 		 */
3494c5904d13Seschrock 		nvlist_t **spares;
3495c5904d13Seschrock 		uint_t i, nspares;
3496fa94a07fSbrendan 
3497fa94a07fSbrendan 		if (spa->spa_spares.sav_config != NULL) {
3498fa94a07fSbrendan 			VERIFY(nvlist_lookup_nvlist_array(
3499fa94a07fSbrendan 			    spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
3500fa94a07fSbrendan 			    &spares, &nspares) == 0);
350199653d4eSeschrock 			for (i = 0; i < nspares; i++) {
350299653d4eSeschrock 				uint64_t theguid;
350399653d4eSeschrock 				VERIFY(nvlist_lookup_uint64(spares[i],
350499653d4eSeschrock 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
3505fa94a07fSbrendan 				if (theguid == guid) {
3506fa94a07fSbrendan 					VERIFY(nvlist_add_string(spares[i],
3507fa94a07fSbrendan 					    ZPOOL_CONFIG_PATH, newpath) == 0);
3508fa94a07fSbrendan 					spa_load_spares(spa);
3509fa94a07fSbrendan 					spa->spa_spares.sav_sync = B_TRUE;
3510fa94a07fSbrendan 					return (spa_vdev_exit(spa, NULL, txg,
3511fa94a07fSbrendan 					    0));
3512fa94a07fSbrendan 				}
351399653d4eSeschrock 			}
3514fa94a07fSbrendan 		}
351599653d4eSeschrock 
3516fa94a07fSbrendan 		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
351799653d4eSeschrock 	}
3518c67d9675Seschrock 
35190e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
35200e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
35210e34b6a7Sbonwick 
3522c67d9675Seschrock 	spa_strfree(vd->vdev_path);
3523c67d9675Seschrock 	vd->vdev_path = spa_strdup(newpath);
3524c67d9675Seschrock 
3525c67d9675Seschrock 	vdev_config_dirty(vd->vdev_top);
3526c67d9675Seschrock 
3527c67d9675Seschrock 	return (spa_vdev_exit(spa, NULL, txg, 0));
3528c67d9675Seschrock }
3529c67d9675Seschrock 
3530fa9e4066Sahrens /*
3531fa9e4066Sahrens  * ==========================================================================
3532fa9e4066Sahrens  * SPA Scrubbing
3533fa9e4066Sahrens  * ==========================================================================
3534fa9e4066Sahrens  */
3535fa9e4066Sahrens 
3536ea8dc4b6Seschrock int
3537088f3894Sahrens spa_scrub(spa_t *spa, pool_scrub_type_t type)
3538fa9e4066Sahrens {
3539e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3540bb8b5132Sek 
3541fa9e4066Sahrens 	if ((uint_t)type >= POOL_SCRUB_TYPES)
3542fa9e4066Sahrens 		return (ENOTSUP);
3543fa9e4066Sahrens 
3544fa9e4066Sahrens 	/*
3545088f3894Sahrens 	 * If a resilver was requested, but there is no DTL on a
3546088f3894Sahrens 	 * writeable leaf device, we have nothing to do.
3547fa9e4066Sahrens 	 */
3548088f3894Sahrens 	if (type == POOL_SCRUB_RESILVER &&
3549088f3894Sahrens 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3550088f3894Sahrens 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3551ea8dc4b6Seschrock 		return (0);
3552ea8dc4b6Seschrock 	}
3553fa9e4066Sahrens 
3554088f3894Sahrens 	if (type == POOL_SCRUB_EVERYTHING &&
3555088f3894Sahrens 	    spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3556088f3894Sahrens 	    spa->spa_dsl_pool->dp_scrub_isresilver)
3557088f3894Sahrens 		return (EBUSY);
3558fa9e4066Sahrens 
3559088f3894Sahrens 	if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3560088f3894Sahrens 		return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3561088f3894Sahrens 	} else if (type == POOL_SCRUB_NONE) {
3562088f3894Sahrens 		return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
3563ea8dc4b6Seschrock 	} else {
3564088f3894Sahrens 		return (EINVAL);
3565fa9e4066Sahrens 	}
3566fa9e4066Sahrens }
3567fa9e4066Sahrens 
3568ea8dc4b6Seschrock /*
3569ea8dc4b6Seschrock  * ==========================================================================
3570ea8dc4b6Seschrock  * SPA async task processing
3571ea8dc4b6Seschrock  * ==========================================================================
3572ea8dc4b6Seschrock  */
3573ea8dc4b6Seschrock 
3574ea8dc4b6Seschrock static void
35753d7072f8Seschrock spa_async_remove(spa_t *spa, vdev_t *vd)
3576fa9e4066Sahrens {
357749cf58c0SBrendan Gregg - Sun Microsystems 	if (vd->vdev_remove_wanted) {
357849cf58c0SBrendan Gregg - Sun Microsystems 		vd->vdev_remove_wanted = 0;
357949cf58c0SBrendan Gregg - Sun Microsystems 		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
3580e14bb325SJeff Bonwick 		vdev_clear(spa, vd);
3581e14bb325SJeff Bonwick 		vdev_state_dirty(vd->vdev_top);
3582ea8dc4b6Seschrock 	}
358349cf58c0SBrendan Gregg - Sun Microsystems 
3584e14bb325SJeff Bonwick 	for (int c = 0; c < vd->vdev_children; c++)
358549cf58c0SBrendan Gregg - Sun Microsystems 		spa_async_remove(spa, vd->vdev_child[c]);
3586ea8dc4b6Seschrock }
3587fa9e4066Sahrens 
3588e14bb325SJeff Bonwick static void
3589e14bb325SJeff Bonwick spa_async_probe(spa_t *spa, vdev_t *vd)
3590e14bb325SJeff Bonwick {
3591e14bb325SJeff Bonwick 	if (vd->vdev_probe_wanted) {
3592e14bb325SJeff Bonwick 		vd->vdev_probe_wanted = 0;
3593e14bb325SJeff Bonwick 		vdev_reopen(vd);	/* vdev_open() does the actual probe */
3594e14bb325SJeff Bonwick 	}
3595e14bb325SJeff Bonwick 
3596e14bb325SJeff Bonwick 	for (int c = 0; c < vd->vdev_children; c++)
3597e14bb325SJeff Bonwick 		spa_async_probe(spa, vd->vdev_child[c]);
3598e14bb325SJeff Bonwick }
3599e14bb325SJeff Bonwick 
3600ea8dc4b6Seschrock static void
3601ea8dc4b6Seschrock spa_async_thread(spa_t *spa)
3602ea8dc4b6Seschrock {
3603e14bb325SJeff Bonwick 	int tasks;
3604ea8dc4b6Seschrock 
3605ea8dc4b6Seschrock 	ASSERT(spa->spa_sync_on);
3606ea8dc4b6Seschrock 
3607ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3608ea8dc4b6Seschrock 	tasks = spa->spa_async_tasks;
3609ea8dc4b6Seschrock 	spa->spa_async_tasks = 0;
3610ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3611ea8dc4b6Seschrock 
36120373e76bSbonwick 	/*
36130373e76bSbonwick 	 * See if the config needs to be updated.
36140373e76bSbonwick 	 */
36150373e76bSbonwick 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
36160373e76bSbonwick 		mutex_enter(&spa_namespace_lock);
36170373e76bSbonwick 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
36180373e76bSbonwick 		mutex_exit(&spa_namespace_lock);
36190373e76bSbonwick 	}
36200373e76bSbonwick 
3621ea8dc4b6Seschrock 	/*
36223d7072f8Seschrock 	 * See if any devices need to be marked REMOVED.
3623ea8dc4b6Seschrock 	 */
3624e14bb325SJeff Bonwick 	if (tasks & SPA_ASYNC_REMOVE) {
3625e14bb325SJeff Bonwick 		spa_vdev_state_enter(spa);
36263d7072f8Seschrock 		spa_async_remove(spa, spa->spa_root_vdev);
3627e14bb325SJeff Bonwick 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
362849cf58c0SBrendan Gregg - Sun Microsystems 			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
3629e14bb325SJeff Bonwick 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
363049cf58c0SBrendan Gregg - Sun Microsystems 			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
3631e14bb325SJeff Bonwick 		(void) spa_vdev_state_exit(spa, NULL, 0);
3632e14bb325SJeff Bonwick 	}
3633e14bb325SJeff Bonwick 
3634e14bb325SJeff Bonwick 	/*
3635e14bb325SJeff Bonwick 	 * See if any devices need to be probed.
3636e14bb325SJeff Bonwick 	 */
3637e14bb325SJeff Bonwick 	if (tasks & SPA_ASYNC_PROBE) {
3638e14bb325SJeff Bonwick 		spa_vdev_state_enter(spa);
3639e14bb325SJeff Bonwick 		spa_async_probe(spa, spa->spa_root_vdev);
3640e14bb325SJeff Bonwick 		(void) spa_vdev_state_exit(spa, NULL, 0);
36413d7072f8Seschrock 	}
3642ea8dc4b6Seschrock 
3643ea8dc4b6Seschrock 	/*
3644ea8dc4b6Seschrock 	 * If any devices are done replacing, detach them.
3645ea8dc4b6Seschrock 	 */
36463d7072f8Seschrock 	if (tasks & SPA_ASYNC_RESILVER_DONE)
36473d7072f8Seschrock 		spa_vdev_resilver_done(spa);
3648fa9e4066Sahrens 
3649ea8dc4b6Seschrock 	/*
3650ea8dc4b6Seschrock 	 * Kick off a resilver.
3651ea8dc4b6Seschrock 	 */
3652088f3894Sahrens 	if (tasks & SPA_ASYNC_RESILVER)
3653088f3894Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);
3654ea8dc4b6Seschrock 
3655ea8dc4b6Seschrock 	/*
3656ea8dc4b6Seschrock 	 * Let the world know that we're done.
3657ea8dc4b6Seschrock 	 */
3658ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3659ea8dc4b6Seschrock 	spa->spa_async_thread = NULL;
3660ea8dc4b6Seschrock 	cv_broadcast(&spa->spa_async_cv);
3661ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3662ea8dc4b6Seschrock 	thread_exit();
3663ea8dc4b6Seschrock }
3664ea8dc4b6Seschrock 
3665ea8dc4b6Seschrock void
3666ea8dc4b6Seschrock spa_async_suspend(spa_t *spa)
3667ea8dc4b6Seschrock {
3668ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3669ea8dc4b6Seschrock 	spa->spa_async_suspended++;
3670ea8dc4b6Seschrock 	while (spa->spa_async_thread != NULL)
3671ea8dc4b6Seschrock 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
3672ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3673ea8dc4b6Seschrock }
3674ea8dc4b6Seschrock 
3675ea8dc4b6Seschrock void
3676ea8dc4b6Seschrock spa_async_resume(spa_t *spa)
3677ea8dc4b6Seschrock {
3678ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3679ea8dc4b6Seschrock 	ASSERT(spa->spa_async_suspended != 0);
3680ea8dc4b6Seschrock 	spa->spa_async_suspended--;
3681ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3682ea8dc4b6Seschrock }
3683ea8dc4b6Seschrock 
3684ea8dc4b6Seschrock static void
3685ea8dc4b6Seschrock spa_async_dispatch(spa_t *spa)
3686ea8dc4b6Seschrock {
3687ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3688ea8dc4b6Seschrock 	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
36890373e76bSbonwick 	    spa->spa_async_thread == NULL &&
36900373e76bSbonwick 	    rootdir != NULL && !vn_is_readonly(rootdir))
3691ea8dc4b6Seschrock 		spa->spa_async_thread = thread_create(NULL, 0,
3692ea8dc4b6Seschrock 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
3693ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3694ea8dc4b6Seschrock }
3695ea8dc4b6Seschrock 
3696ea8dc4b6Seschrock void
3697ea8dc4b6Seschrock spa_async_request(spa_t *spa, int task)
3698ea8dc4b6Seschrock {
3699ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3700ea8dc4b6Seschrock 	spa->spa_async_tasks |= task;
3701ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3702fa9e4066Sahrens }
3703fa9e4066Sahrens 
3704fa9e4066Sahrens /*
3705fa9e4066Sahrens  * ==========================================================================
3706fa9e4066Sahrens  * SPA syncing routines
3707fa9e4066Sahrens  * ==========================================================================
3708fa9e4066Sahrens  */
3709fa9e4066Sahrens 
3710fa9e4066Sahrens static void
3711fa9e4066Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
3712fa9e4066Sahrens {
3713fa9e4066Sahrens 	bplist_t *bpl = &spa->spa_sync_bplist;
3714fa9e4066Sahrens 	dmu_tx_t *tx;
3715fa9e4066Sahrens 	blkptr_t blk;
3716fa9e4066Sahrens 	uint64_t itor = 0;
3717fa9e4066Sahrens 	zio_t *zio;
3718fa9e4066Sahrens 	int error;
3719fa9e4066Sahrens 	uint8_t c = 1;
3720fa9e4066Sahrens 
3721e14bb325SJeff Bonwick 	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
3722fa9e4066Sahrens 
3723e14bb325SJeff Bonwick 	while (bplist_iterate(bpl, &itor, &blk) == 0) {
3724e14bb325SJeff Bonwick 		ASSERT(blk.blk_birth < txg);
3725e14bb325SJeff Bonwick 		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL,
3726e14bb325SJeff Bonwick 		    ZIO_FLAG_MUSTSUCCEED));
3727e14bb325SJeff Bonwick 	}
3728fa9e4066Sahrens 
3729fa9e4066Sahrens 	error = zio_wait(zio);
3730fa9e4066Sahrens 	ASSERT3U(error, ==, 0);
3731fa9e4066Sahrens 
3732fa9e4066Sahrens 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3733fa9e4066Sahrens 	bplist_vacate(bpl, tx);
3734fa9e4066Sahrens 
3735fa9e4066Sahrens 	/*
3736fa9e4066Sahrens 	 * Pre-dirty the first block so we sync to convergence faster.
3737fa9e4066Sahrens 	 * (Usually only the first block is needed.)
3738fa9e4066Sahrens 	 */
3739fa9e4066Sahrens 	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
3740fa9e4066Sahrens 	dmu_tx_commit(tx);
3741fa9e4066Sahrens }
3742fa9e4066Sahrens 
3743fa9e4066Sahrens static void
374499653d4eSeschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3745fa9e4066Sahrens {
3746fa9e4066Sahrens 	char *packed = NULL;
3747f7991ba4STim Haley 	size_t bufsize;
3748fa9e4066Sahrens 	size_t nvsize = 0;
3749fa9e4066Sahrens 	dmu_buf_t *db;
3750fa9e4066Sahrens 
375199653d4eSeschrock 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3752fa9e4066Sahrens 
3753f7991ba4STim Haley 	/*
3754f7991ba4STim Haley 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
3755f7991ba4STim Haley 	 * information.  This avoids the dbuf_will_dirty() path and
3756f7991ba4STim Haley 	 * saves us a pre-read to get data we don't actually care about.
3757f7991ba4STim Haley 	 */
3758f7991ba4STim Haley 	bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
3759f7991ba4STim Haley 	packed = kmem_alloc(bufsize, KM_SLEEP);
3760fa9e4066Sahrens 
376199653d4eSeschrock 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3762ea8dc4b6Seschrock 	    KM_SLEEP) == 0);
3763f7991ba4STim Haley 	bzero(packed + nvsize, bufsize - nvsize);
3764fa9e4066Sahrens 
3765f7991ba4STim Haley 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
3766fa9e4066Sahrens 
3767f7991ba4STim Haley 	kmem_free(packed, bufsize);
3768fa9e4066Sahrens 
376999653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3770fa9e4066Sahrens 	dmu_buf_will_dirty(db, tx);
3771fa9e4066Sahrens 	*(uint64_t *)db->db_data = nvsize;
3772ea8dc4b6Seschrock 	dmu_buf_rele(db, FTAG);
3773fa9e4066Sahrens }
3774fa9e4066Sahrens 
377599653d4eSeschrock static void
3776fa94a07fSbrendan spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3777fa94a07fSbrendan     const char *config, const char *entry)
377899653d4eSeschrock {
377999653d4eSeschrock 	nvlist_t *nvroot;
3780fa94a07fSbrendan 	nvlist_t **list;
378199653d4eSeschrock 	int i;
378299653d4eSeschrock 
3783fa94a07fSbrendan 	if (!sav->sav_sync)
378499653d4eSeschrock 		return;
378599653d4eSeschrock 
378699653d4eSeschrock 	/*
3787fa94a07fSbrendan 	 * Update the MOS nvlist describing the list of available devices.
3788fa94a07fSbrendan 	 * spa_validate_aux() will have already made sure this nvlist is
37893d7072f8Seschrock 	 * valid and the vdevs are labeled appropriately.
379099653d4eSeschrock 	 */
3791fa94a07fSbrendan 	if (sav->sav_object == 0) {
3792fa94a07fSbrendan 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3793fa94a07fSbrendan 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3794fa94a07fSbrendan 		    sizeof (uint64_t), tx);
379599653d4eSeschrock 		VERIFY(zap_update(spa->spa_meta_objset,
3796fa94a07fSbrendan 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3797fa94a07fSbrendan 		    &sav->sav_object, tx) == 0);
379899653d4eSeschrock 	}
379999653d4eSeschrock 
380099653d4eSeschrock 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3801fa94a07fSbrendan 	if (sav->sav_count == 0) {
3802fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
380399653d4eSeschrock 	} else {
3804fa94a07fSbrendan 		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3805fa94a07fSbrendan 		for (i = 0; i < sav->sav_count; i++)
3806fa94a07fSbrendan 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
3807fa94a07fSbrendan 			    B_FALSE, B_FALSE, B_TRUE);
3808fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
3809fa94a07fSbrendan 		    sav->sav_count) == 0);
3810fa94a07fSbrendan 		for (i = 0; i < sav->sav_count; i++)
3811fa94a07fSbrendan 			nvlist_free(list[i]);
3812fa94a07fSbrendan 		kmem_free(list, sav->sav_count * sizeof (void *));
381399653d4eSeschrock 	}
381499653d4eSeschrock 
3815fa94a07fSbrendan 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
381606eeb2adSek 	nvlist_free(nvroot);
381799653d4eSeschrock 
3818fa94a07fSbrendan 	sav->sav_sync = B_FALSE;
381999653d4eSeschrock }
382099653d4eSeschrock 
382199653d4eSeschrock static void
382299653d4eSeschrock spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
382399653d4eSeschrock {
382499653d4eSeschrock 	nvlist_t *config;
382599653d4eSeschrock 
3826e14bb325SJeff Bonwick 	if (list_is_empty(&spa->spa_config_dirty_list))
382799653d4eSeschrock 		return;
382899653d4eSeschrock 
3829e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3830e14bb325SJeff Bonwick 
3831e14bb325SJeff Bonwick 	config = spa_config_generate(spa, spa->spa_root_vdev,
3832e14bb325SJeff Bonwick 	    dmu_tx_get_txg(tx), B_FALSE);
3833e14bb325SJeff Bonwick 
3834e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_STATE, FTAG);
383599653d4eSeschrock 
383699653d4eSeschrock 	if (spa->spa_config_syncing)
383799653d4eSeschrock 		nvlist_free(spa->spa_config_syncing);
383899653d4eSeschrock 	spa->spa_config_syncing = config;
383999653d4eSeschrock 
384099653d4eSeschrock 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
384199653d4eSeschrock }
384299653d4eSeschrock 
3843990b4856Slling /*
3844990b4856Slling  * Set zpool properties.
3845990b4856Slling  */
3846b1b8ab34Slling static void
3847ecd6cf80Smarks spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3848b1b8ab34Slling {
3849b1b8ab34Slling 	spa_t *spa = arg1;
3850b1b8ab34Slling 	objset_t *mos = spa->spa_meta_objset;
3851990b4856Slling 	nvlist_t *nvp = arg2;
3852990b4856Slling 	nvpair_t *elem;
38533d7072f8Seschrock 	uint64_t intval;
3854c5904d13Seschrock 	char *strval;
3855990b4856Slling 	zpool_prop_t prop;
3856990b4856Slling 	const char *propname;
3857990b4856Slling 	zprop_type_t proptype;
3858b1b8ab34Slling 
3859e14bb325SJeff Bonwick 	mutex_enter(&spa->spa_props_lock);
3860e14bb325SJeff Bonwick 
3861990b4856Slling 	elem = NULL;
3862990b4856Slling 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
3863990b4856Slling 		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
3864990b4856Slling 		case ZPOOL_PROP_VERSION:
3865990b4856Slling 			/*
3866990b4856Slling 			 * Only set version for non-zpool-creation cases
3867990b4856Slling 			 * (set/import). spa_create() needs special care
3868990b4856Slling 			 * for version setting.
3869990b4856Slling 			 */
3870990b4856Slling 			if (tx->tx_txg != TXG_INITIAL) {
3871990b4856Slling 				VERIFY(nvpair_value_uint64(elem,
3872990b4856Slling 				    &intval) == 0);
3873990b4856Slling 				ASSERT(intval <= SPA_VERSION);
3874990b4856Slling 				ASSERT(intval >= spa_version(spa));
3875990b4856Slling 				spa->spa_uberblock.ub_version = intval;
3876990b4856Slling 				vdev_config_dirty(spa->spa_root_vdev);
3877990b4856Slling 			}
3878ecd6cf80Smarks 			break;
3879990b4856Slling 
3880990b4856Slling 		case ZPOOL_PROP_ALTROOT:
3881990b4856Slling 			/*
3882990b4856Slling 			 * 'altroot' is a non-persistent property. It should
3883990b4856Slling 			 * have been set temporarily at creation or import time.
3884990b4856Slling 			 */
3885990b4856Slling 			ASSERT(spa->spa_root != NULL);
3886b1b8ab34Slling 			break;
38873d7072f8Seschrock 
38882f8aaab3Seschrock 		case ZPOOL_PROP_CACHEFILE:
3889990b4856Slling 			/*
3890379c004dSEric Schrock 			 * 'cachefile' is also a non-persisitent property.
3891990b4856Slling 			 */
38923d7072f8Seschrock 			break;
3893990b4856Slling 		default:
3894990b4856Slling 			/*
3895990b4856Slling 			 * Set pool property values in the poolprops mos object.
3896990b4856Slling 			 */
3897990b4856Slling 			if (spa->spa_pool_props_object == 0) {
3898990b4856Slling 				objset_t *mos = spa->spa_meta_objset;
3899990b4856Slling 
3900990b4856Slling 				VERIFY((spa->spa_pool_props_object =
3901990b4856Slling 				    zap_create(mos, DMU_OT_POOL_PROPS,
3902990b4856Slling 				    DMU_OT_NONE, 0, tx)) > 0);
3903990b4856Slling 
3904990b4856Slling 				VERIFY(zap_update(mos,
3905990b4856Slling 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
3906990b4856Slling 				    8, 1, &spa->spa_pool_props_object, tx)
3907990b4856Slling 				    == 0);
3908990b4856Slling 			}
3909990b4856Slling 
3910990b4856Slling 			/* normalize the property name */
3911990b4856Slling 			propname = zpool_prop_to_name(prop);
3912990b4856Slling 			proptype = zpool_prop_get_type(prop);
3913990b4856Slling 
3914990b4856Slling 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
3915990b4856Slling 				ASSERT(proptype == PROP_TYPE_STRING);
3916990b4856Slling 				VERIFY(nvpair_value_string(elem, &strval) == 0);
3917990b4856Slling 				VERIFY(zap_update(mos,
3918990b4856Slling 				    spa->spa_pool_props_object, propname,
3919990b4856Slling 				    1, strlen(strval) + 1, strval, tx) == 0);
3920990b4856Slling 
3921990b4856Slling 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
3922990b4856Slling 				VERIFY(nvpair_value_uint64(elem, &intval) == 0);
3923990b4856Slling 
3924990b4856Slling 				if (proptype == PROP_TYPE_INDEX) {
3925990b4856Slling 					const char *unused;
3926990b4856Slling 					VERIFY(zpool_prop_index_to_string(
3927990b4856Slling 					    prop, intval, &unused) == 0);
3928990b4856Slling 				}
3929990b4856Slling 				VERIFY(zap_update(mos,
3930990b4856Slling 				    spa->spa_pool_props_object, propname,
3931990b4856Slling 				    8, 1, &intval, tx) == 0);
3932990b4856Slling 			} else {
3933990b4856Slling 				ASSERT(0); /* not allowed */
3934990b4856Slling 			}
3935990b4856Slling 
39360a4e9518Sgw 			switch (prop) {
39370a4e9518Sgw 			case ZPOOL_PROP_DELEGATION:
3938990b4856Slling 				spa->spa_delegation = intval;
39390a4e9518Sgw 				break;
39400a4e9518Sgw 			case ZPOOL_PROP_BOOTFS:
3941990b4856Slling 				spa->spa_bootfs = intval;
39420a4e9518Sgw 				break;
39430a4e9518Sgw 			case ZPOOL_PROP_FAILUREMODE:
39440a4e9518Sgw 				spa->spa_failmode = intval;
39450a4e9518Sgw 				break;
39460a4e9518Sgw 			default:
39470a4e9518Sgw 				break;
39480a4e9518Sgw 			}
3949990b4856Slling 		}
3950990b4856Slling 
3951990b4856Slling 		/* log internal history if this is not a zpool create */
3952990b4856Slling 		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
3953990b4856Slling 		    tx->tx_txg != TXG_INITIAL) {
3954990b4856Slling 			spa_history_internal_log(LOG_POOL_PROPSET,
3955990b4856Slling 			    spa, tx, cr, "%s %lld %s",
3956e14bb325SJeff Bonwick 			    nvpair_name(elem), intval, spa_name(spa));
3957b1b8ab34Slling 		}
3958b1b8ab34Slling 	}
3959e14bb325SJeff Bonwick 
3960e14bb325SJeff Bonwick 	mutex_exit(&spa->spa_props_lock);
3961b1b8ab34Slling }
3962b1b8ab34Slling 
3963fa9e4066Sahrens /*
3964fa9e4066Sahrens  * Sync the specified transaction group.  New blocks may be dirtied as
3965fa9e4066Sahrens  * part of the process, so we iterate until it converges.
3966fa9e4066Sahrens  */
3967fa9e4066Sahrens void
3968fa9e4066Sahrens spa_sync(spa_t *spa, uint64_t txg)
3969fa9e4066Sahrens {
3970fa9e4066Sahrens 	dsl_pool_t *dp = spa->spa_dsl_pool;
3971fa9e4066Sahrens 	objset_t *mos = spa->spa_meta_objset;
3972fa9e4066Sahrens 	bplist_t *bpl = &spa->spa_sync_bplist;
39730373e76bSbonwick 	vdev_t *rvd = spa->spa_root_vdev;
3974fa9e4066Sahrens 	vdev_t *vd;
3975fa9e4066Sahrens 	dmu_tx_t *tx;
3976fa9e4066Sahrens 	int dirty_vdevs;
3977e14bb325SJeff Bonwick 	int error;
3978fa9e4066Sahrens 
3979fa9e4066Sahrens 	/*
3980fa9e4066Sahrens 	 * Lock out configuration changes.
3981fa9e4066Sahrens 	 */
3982e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3983fa9e4066Sahrens 
3984fa9e4066Sahrens 	spa->spa_syncing_txg = txg;
3985fa9e4066Sahrens 	spa->spa_sync_pass = 0;
3986fa9e4066Sahrens 
3987e14bb325SJeff Bonwick 	/*
3988e14bb325SJeff Bonwick 	 * If there are any pending vdev state changes, convert them
3989e14bb325SJeff Bonwick 	 * into config changes that go out with this transaction group.
3990e14bb325SJeff Bonwick 	 */
3991e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
39928ad4d6ddSJeff Bonwick 	while (list_head(&spa->spa_state_dirty_list) != NULL) {
39938ad4d6ddSJeff Bonwick 		/*
39948ad4d6ddSJeff Bonwick 		 * We need the write lock here because, for aux vdevs,
39958ad4d6ddSJeff Bonwick 		 * calling vdev_config_dirty() modifies sav_config.
39968ad4d6ddSJeff Bonwick 		 * This is ugly and will become unnecessary when we
39978ad4d6ddSJeff Bonwick 		 * eliminate the aux vdev wart by integrating all vdevs
39988ad4d6ddSJeff Bonwick 		 * into the root vdev tree.
39998ad4d6ddSJeff Bonwick 		 */
40008ad4d6ddSJeff Bonwick 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
40018ad4d6ddSJeff Bonwick 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
40028ad4d6ddSJeff Bonwick 		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
40038ad4d6ddSJeff Bonwick 			vdev_state_clean(vd);
40048ad4d6ddSJeff Bonwick 			vdev_config_dirty(vd);
40058ad4d6ddSJeff Bonwick 		}
40068ad4d6ddSJeff Bonwick 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
40078ad4d6ddSJeff Bonwick 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
4008e14bb325SJeff Bonwick 	}
4009e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_STATE, FTAG);
4010e14bb325SJeff Bonwick 
4011ea8dc4b6Seschrock 	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
4012fa9e4066Sahrens 
401399653d4eSeschrock 	tx = dmu_tx_create_assigned(dp, txg);
401499653d4eSeschrock 
401599653d4eSeschrock 	/*
4016e7437265Sahrens 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
401799653d4eSeschrock 	 * set spa_deflate if we have no raid-z vdevs.
401899653d4eSeschrock 	 */
4019e7437265Sahrens 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
4020e7437265Sahrens 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
402199653d4eSeschrock 		int i;
402299653d4eSeschrock 
402399653d4eSeschrock 		for (i = 0; i < rvd->vdev_children; i++) {
402499653d4eSeschrock 			vd = rvd->vdev_child[i];
402599653d4eSeschrock 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
402699653d4eSeschrock 				break;
402799653d4eSeschrock 		}
402899653d4eSeschrock 		if (i == rvd->vdev_children) {
402999653d4eSeschrock 			spa->spa_deflate = TRUE;
403099653d4eSeschrock 			VERIFY(0 == zap_add(spa->spa_meta_objset,
403199653d4eSeschrock 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
403299653d4eSeschrock 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
403399653d4eSeschrock 		}
403499653d4eSeschrock 	}
403599653d4eSeschrock 
4036088f3894Sahrens 	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
4037088f3894Sahrens 	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
4038088f3894Sahrens 		dsl_pool_create_origin(dp, tx);
4039088f3894Sahrens 
4040088f3894Sahrens 		/* Keeping the origin open increases spa_minref */
4041088f3894Sahrens 		spa->spa_minref += 3;
4042088f3894Sahrens 	}
4043088f3894Sahrens 
4044088f3894Sahrens 	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
4045088f3894Sahrens 	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
4046088f3894Sahrens 		dsl_pool_upgrade_clones(dp, tx);
4047088f3894Sahrens 	}
4048088f3894Sahrens 
4049fa9e4066Sahrens 	/*
4050fa9e4066Sahrens 	 * If anything has changed in this txg, push the deferred frees
4051fa9e4066Sahrens 	 * from the previous txg.  If not, leave them alone so that we
4052fa9e4066Sahrens 	 * don't generate work on an otherwise idle system.
4053fa9e4066Sahrens 	 */
4054fa9e4066Sahrens 	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
40551615a317Sek 	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
40561615a317Sek 	    !txg_list_empty(&dp->dp_sync_tasks, txg))
4057fa9e4066Sahrens 		spa_sync_deferred_frees(spa, txg);
4058fa9e4066Sahrens 
4059fa9e4066Sahrens 	/*
4060fa9e4066Sahrens 	 * Iterate to convergence.
4061fa9e4066Sahrens 	 */
4062fa9e4066Sahrens 	do {
4063fa9e4066Sahrens 		spa->spa_sync_pass++;
4064fa9e4066Sahrens 
4065fa9e4066Sahrens 		spa_sync_config_object(spa, tx);
4066fa94a07fSbrendan 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
4067fa94a07fSbrendan 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
4068fa94a07fSbrendan 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
4069fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
4070ea8dc4b6Seschrock 		spa_errlog_sync(spa, txg);
4071fa9e4066Sahrens 		dsl_pool_sync(dp, txg);
4072fa9e4066Sahrens 
4073fa9e4066Sahrens 		dirty_vdevs = 0;
4074fa9e4066Sahrens 		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
4075fa9e4066Sahrens 			vdev_sync(vd, txg);
4076fa9e4066Sahrens 			dirty_vdevs++;
4077fa9e4066Sahrens 		}
4078fa9e4066Sahrens 
4079fa9e4066Sahrens 		bplist_sync(bpl, tx);
4080fa9e4066Sahrens 	} while (dirty_vdevs);
4081fa9e4066Sahrens 
4082fa9e4066Sahrens 	bplist_close(bpl);
4083fa9e4066Sahrens 
4084fa9e4066Sahrens 	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
4085fa9e4066Sahrens 
4086fa9e4066Sahrens 	/*
4087fa9e4066Sahrens 	 * Rewrite the vdev configuration (which includes the uberblock)
4088fa9e4066Sahrens 	 * to commit the transaction group.
40890373e76bSbonwick 	 *
409017f17c2dSbonwick 	 * If there are no dirty vdevs, we sync the uberblock to a few
409117f17c2dSbonwick 	 * random top-level vdevs that are known to be visible in the
4092e14bb325SJeff Bonwick 	 * config cache (see spa_vdev_add() for a complete description).
4093e14bb325SJeff Bonwick 	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
40940373e76bSbonwick 	 */
4095e14bb325SJeff Bonwick 	for (;;) {
4096e14bb325SJeff Bonwick 		/*
4097e14bb325SJeff Bonwick 		 * We hold SCL_STATE to prevent vdev open/close/etc.
4098e14bb325SJeff Bonwick 		 * while we're attempting to write the vdev labels.
4099e14bb325SJeff Bonwick 		 */
4100e14bb325SJeff Bonwick 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4101e14bb325SJeff Bonwick 
4102e14bb325SJeff Bonwick 		if (list_is_empty(&spa->spa_config_dirty_list)) {
4103e14bb325SJeff Bonwick 			vdev_t *svd[SPA_DVAS_PER_BP];
4104e14bb325SJeff Bonwick 			int svdcount = 0;
4105e14bb325SJeff Bonwick 			int children = rvd->vdev_children;
4106e14bb325SJeff Bonwick 			int c0 = spa_get_random(children);
4107e14bb325SJeff Bonwick 			int c;
4108e14bb325SJeff Bonwick 
4109e14bb325SJeff Bonwick 			for (c = 0; c < children; c++) {
4110e14bb325SJeff Bonwick 				vd = rvd->vdev_child[(c0 + c) % children];
4111e14bb325SJeff Bonwick 				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
4112e14bb325SJeff Bonwick 					continue;
4113e14bb325SJeff Bonwick 				svd[svdcount++] = vd;
4114e14bb325SJeff Bonwick 				if (svdcount == SPA_DVAS_PER_BP)
4115e14bb325SJeff Bonwick 					break;
4116e14bb325SJeff Bonwick 			}
4117e14bb325SJeff Bonwick 			error = vdev_config_sync(svd, svdcount, txg);
4118e14bb325SJeff Bonwick 		} else {
4119e14bb325SJeff Bonwick 			error = vdev_config_sync(rvd->vdev_child,
4120e14bb325SJeff Bonwick 			    rvd->vdev_children, txg);
41210373e76bSbonwick 		}
4122e14bb325SJeff Bonwick 
4123e14bb325SJeff Bonwick 		spa_config_exit(spa, SCL_STATE, FTAG);
4124e14bb325SJeff Bonwick 
4125e14bb325SJeff Bonwick 		if (error == 0)
4126e14bb325SJeff Bonwick 			break;
4127e14bb325SJeff Bonwick 		zio_suspend(spa, NULL);
4128e14bb325SJeff Bonwick 		zio_resume_wait(spa);
41290373e76bSbonwick 	}
413099653d4eSeschrock 	dmu_tx_commit(tx);
413199653d4eSeschrock 
41320373e76bSbonwick 	/*
41330373e76bSbonwick 	 * Clear the dirty config list.
4134fa9e4066Sahrens 	 */
4135e14bb325SJeff Bonwick 	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
41360373e76bSbonwick 		vdev_config_clean(vd);
41370373e76bSbonwick 
41380373e76bSbonwick 	/*
41390373e76bSbonwick 	 * Now that the new config has synced transactionally,
41400373e76bSbonwick 	 * let it become visible to the config cache.
41410373e76bSbonwick 	 */
41420373e76bSbonwick 	if (spa->spa_config_syncing != NULL) {
41430373e76bSbonwick 		spa_config_set(spa, spa->spa_config_syncing);
41440373e76bSbonwick 		spa->spa_config_txg = txg;
41450373e76bSbonwick 		spa->spa_config_syncing = NULL;
41460373e76bSbonwick 	}
4147fa9e4066Sahrens 
4148fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
4149fa9e4066Sahrens 
4150fa9e4066Sahrens 	/*
4151fa9e4066Sahrens 	 * Clean up the ZIL records for the synced txg.
4152fa9e4066Sahrens 	 */
4153fa9e4066Sahrens 	dsl_pool_zil_clean(dp);
4154fa9e4066Sahrens 
4155fa9e4066Sahrens 	/*
4156fa9e4066Sahrens 	 * Update usable space statistics.
4157fa9e4066Sahrens 	 */
4158fa9e4066Sahrens 	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
4159fa9e4066Sahrens 		vdev_sync_done(vd, txg);
4160fa9e4066Sahrens 
4161fa9e4066Sahrens 	/*
4162fa9e4066Sahrens 	 * It had better be the case that we didn't dirty anything
416399653d4eSeschrock 	 * since vdev_config_sync().
4164fa9e4066Sahrens 	 */
4165fa9e4066Sahrens 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4166fa9e4066Sahrens 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4167fa9e4066Sahrens 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4168fa9e4066Sahrens 	ASSERT(bpl->bpl_queue == NULL);
4169fa9e4066Sahrens 
4170e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_CONFIG, FTAG);
4171ea8dc4b6Seschrock 
4172ea8dc4b6Seschrock 	/*
4173ea8dc4b6Seschrock 	 * If any async tasks have been requested, kick them off.
4174ea8dc4b6Seschrock 	 */
4175ea8dc4b6Seschrock 	spa_async_dispatch(spa);
4176fa9e4066Sahrens }
4177fa9e4066Sahrens 
4178fa9e4066Sahrens /*
4179fa9e4066Sahrens  * Sync all pools.  We don't want to hold the namespace lock across these
4180fa9e4066Sahrens  * operations, so we take a reference on the spa_t and drop the lock during the
4181fa9e4066Sahrens  * sync.
4182fa9e4066Sahrens  */
4183fa9e4066Sahrens void
4184fa9e4066Sahrens spa_sync_allpools(void)
4185fa9e4066Sahrens {
4186fa9e4066Sahrens 	spa_t *spa = NULL;
4187fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
4188fa9e4066Sahrens 	while ((spa = spa_next(spa)) != NULL) {
4189e14bb325SJeff Bonwick 		if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
4190fa9e4066Sahrens 			continue;
4191fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
4192fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
4193fa9e4066Sahrens 		txg_wait_synced(spa_get_dsl(spa), 0);
4194fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
4195fa9e4066Sahrens 		spa_close(spa, FTAG);
4196fa9e4066Sahrens 	}
4197fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
4198fa9e4066Sahrens }
4199fa9e4066Sahrens 
4200fa9e4066Sahrens /*
4201fa9e4066Sahrens  * ==========================================================================
4202fa9e4066Sahrens  * Miscellaneous routines
4203fa9e4066Sahrens  * ==========================================================================
4204fa9e4066Sahrens  */
4205fa9e4066Sahrens 
4206fa9e4066Sahrens /*
4207fa9e4066Sahrens  * Remove all pools in the system.
4208fa9e4066Sahrens  */
4209fa9e4066Sahrens void
4210fa9e4066Sahrens spa_evict_all(void)
4211fa9e4066Sahrens {
4212fa9e4066Sahrens 	spa_t *spa;
4213fa9e4066Sahrens 
4214fa9e4066Sahrens 	/*
4215fa9e4066Sahrens 	 * Remove all cached state.  All pools should be closed now,
4216fa9e4066Sahrens 	 * so every spa in the AVL tree should be unreferenced.
4217fa9e4066Sahrens 	 */
4218fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
4219fa9e4066Sahrens 	while ((spa = spa_next(NULL)) != NULL) {
4220fa9e4066Sahrens 		/*
4221ea8dc4b6Seschrock 		 * Stop async tasks.  The async thread may need to detach
4222ea8dc4b6Seschrock 		 * a device that's been replaced, which requires grabbing
4223ea8dc4b6Seschrock 		 * spa_namespace_lock, so we must drop it here.
4224fa9e4066Sahrens 		 */
4225fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
4226fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
4227ea8dc4b6Seschrock 		spa_async_suspend(spa);
4228fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
4229fa9e4066Sahrens 		spa_close(spa, FTAG);
4230fa9e4066Sahrens 
4231fa9e4066Sahrens 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4232fa9e4066Sahrens 			spa_unload(spa);
4233fa9e4066Sahrens 			spa_deactivate(spa);
4234fa9e4066Sahrens 		}
4235fa9e4066Sahrens 		spa_remove(spa);
4236fa9e4066Sahrens 	}
4237fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
4238fa9e4066Sahrens }
4239ea8dc4b6Seschrock 
4240ea8dc4b6Seschrock vdev_t *
4241c5904d13Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
4242ea8dc4b6Seschrock {
4243c5904d13Seschrock 	vdev_t *vd;
4244c5904d13Seschrock 	int i;
4245c5904d13Seschrock 
4246c5904d13Seschrock 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4247c5904d13Seschrock 		return (vd);
4248c5904d13Seschrock 
4249c5904d13Seschrock 	if (l2cache) {
4250c5904d13Seschrock 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4251c5904d13Seschrock 			vd = spa->spa_l2cache.sav_vdevs[i];
4252c5904d13Seschrock 			if (vd->vdev_guid == guid)
4253c5904d13Seschrock 				return (vd);
4254c5904d13Seschrock 		}
4255c5904d13Seschrock 	}
4256c5904d13Seschrock 
4257c5904d13Seschrock 	return (NULL);
4258ea8dc4b6Seschrock }
4259eaca9bbdSeschrock 
4260eaca9bbdSeschrock void
4261990b4856Slling spa_upgrade(spa_t *spa, uint64_t version)
4262eaca9bbdSeschrock {
4263e14bb325SJeff Bonwick 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4264eaca9bbdSeschrock 
4265eaca9bbdSeschrock 	/*
4266eaca9bbdSeschrock 	 * This should only be called for a non-faulted pool, and since a
4267eaca9bbdSeschrock 	 * future version would result in an unopenable pool, this shouldn't be
4268eaca9bbdSeschrock 	 * possible.
4269eaca9bbdSeschrock 	 */
4270e7437265Sahrens 	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4271990b4856Slling 	ASSERT(version >= spa->spa_uberblock.ub_version);
4272eaca9bbdSeschrock 
4273990b4856Slling 	spa->spa_uberblock.ub_version = version;
4274eaca9bbdSeschrock 	vdev_config_dirty(spa->spa_root_vdev);
4275eaca9bbdSeschrock 
4276e14bb325SJeff Bonwick 	spa_config_exit(spa, SCL_ALL, FTAG);
427799653d4eSeschrock 
427899653d4eSeschrock 	txg_wait_synced(spa_get_dsl(spa), 0);
427999653d4eSeschrock }
428099653d4eSeschrock 
428199653d4eSeschrock boolean_t
428299653d4eSeschrock spa_has_spare(spa_t *spa, uint64_t guid)
428399653d4eSeschrock {
428499653d4eSeschrock 	int i;
428539c23413Seschrock 	uint64_t spareguid;
4286fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_spares;
428799653d4eSeschrock 
4288fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++)
4289fa94a07fSbrendan 		if (sav->sav_vdevs[i]->vdev_guid == guid)
429099653d4eSeschrock 			return (B_TRUE);
429199653d4eSeschrock 
4292fa94a07fSbrendan 	for (i = 0; i < sav->sav_npending; i++) {
4293fa94a07fSbrendan 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4294fa94a07fSbrendan 		    &spareguid) == 0 && spareguid == guid)
429539c23413Seschrock 			return (B_TRUE);
429639c23413Seschrock 	}
429739c23413Seschrock 
429899653d4eSeschrock 	return (B_FALSE);
4299eaca9bbdSeschrock }
4300b1b8ab34Slling 
430189a89ebfSlling /*
430289a89ebfSlling  * Check if a pool has an active shared spare device.
430389a89ebfSlling  * Note: reference count of an active spare is 2, as a spare and as a replace
430489a89ebfSlling  */
430589a89ebfSlling static boolean_t
430689a89ebfSlling spa_has_active_shared_spare(spa_t *spa)
430789a89ebfSlling {
430889a89ebfSlling 	int i, refcnt;
430989a89ebfSlling 	uint64_t pool;
431089a89ebfSlling 	spa_aux_vdev_t *sav = &spa->spa_spares;
431189a89ebfSlling 
431289a89ebfSlling 	for (i = 0; i < sav->sav_count; i++) {
431389a89ebfSlling 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
431489a89ebfSlling 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
431589a89ebfSlling 		    refcnt > 2)
431689a89ebfSlling 			return (B_TRUE);
431789a89ebfSlling 	}
431889a89ebfSlling 
431989a89ebfSlling 	return (B_FALSE);
432089a89ebfSlling }
432189a89ebfSlling 
43223d7072f8Seschrock /*
43233d7072f8Seschrock  * Post a sysevent corresponding to the given event.  The 'name' must be one of
43243d7072f8Seschrock  * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
43253d7072f8Seschrock  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
43263d7072f8Seschrock  * in the userland libzpool, as we don't want consumers to misinterpret ztest
43273d7072f8Seschrock  * or zdb as real changes.
43283d7072f8Seschrock  */
43293d7072f8Seschrock void
43303d7072f8Seschrock spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
43313d7072f8Seschrock {
43323d7072f8Seschrock #ifdef _KERNEL
43333d7072f8Seschrock 	sysevent_t		*ev;
43343d7072f8Seschrock 	sysevent_attr_list_t	*attr = NULL;
43353d7072f8Seschrock 	sysevent_value_t	value;
43363d7072f8Seschrock 	sysevent_id_t		eid;
43373d7072f8Seschrock 
43383d7072f8Seschrock 	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
43393d7072f8Seschrock 	    SE_SLEEP);
43403d7072f8Seschrock 
43413d7072f8Seschrock 	value.value_type = SE_DATA_TYPE_STRING;
43423d7072f8Seschrock 	value.value.sv_string = spa_name(spa);
43433d7072f8Seschrock 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
43443d7072f8Seschrock 		goto done;
43453d7072f8Seschrock 
43463d7072f8Seschrock 	value.value_type = SE_DATA_TYPE_UINT64;
43473d7072f8Seschrock 	value.value.sv_uint64 = spa_guid(spa);
43483d7072f8Seschrock 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
43493d7072f8Seschrock 		goto done;
43503d7072f8Seschrock 
43513d7072f8Seschrock 	if (vd) {
43523d7072f8Seschrock 		value.value_type = SE_DATA_TYPE_UINT64;
43533d7072f8Seschrock 		value.value.sv_uint64 = vd->vdev_guid;
43543d7072f8Seschrock 		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
43553d7072f8Seschrock 		    SE_SLEEP) != 0)
43563d7072f8Seschrock 			goto done;
43573d7072f8Seschrock 
43583d7072f8Seschrock 		if (vd->vdev_path) {
43593d7072f8Seschrock 			value.value_type = SE_DATA_TYPE_STRING;
43603d7072f8Seschrock 			value.value.sv_string = vd->vdev_path;
43613d7072f8Seschrock 			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
43623d7072f8Seschrock 			    &value, SE_SLEEP) != 0)
43633d7072f8Seschrock 				goto done;
43643d7072f8Seschrock 		}
43653d7072f8Seschrock 	}
43663d7072f8Seschrock 
4367b01c3b58Seschrock 	if (sysevent_attach_attributes(ev, attr) != 0)
4368b01c3b58Seschrock 		goto done;
4369b01c3b58Seschrock 	attr = NULL;
4370b01c3b58Seschrock 
43713d7072f8Seschrock 	(void) log_sysevent(ev, SE_SLEEP, &eid);
43723d7072f8Seschrock 
43733d7072f8Seschrock done:
43743d7072f8Seschrock 	if (attr)
43753d7072f8Seschrock 		sysevent_free_attr(attr);
43763d7072f8Seschrock 	sysevent_free(ev);
43773d7072f8Seschrock #endif
43783d7072f8Seschrock }
4379