xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa.c (revision 745cd3c5)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
2199653d4eSeschrock 
22fa9e4066Sahrens /*
23b01c3b58Seschrock  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24fa9e4066Sahrens  * Use is subject to license terms.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
28fa9e4066Sahrens 
29fa9e4066Sahrens /*
30fa9e4066Sahrens  * This file contains all the routines used when modifying on-disk SPA state.
31fa9e4066Sahrens  * This includes opening, importing, destroying, exporting a pool, and syncing a
32fa9e4066Sahrens  * pool.
33fa9e4066Sahrens  */
34fa9e4066Sahrens 
35fa9e4066Sahrens #include <sys/zfs_context.h>
36ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h>
37fa9e4066Sahrens #include <sys/spa_impl.h>
38fa9e4066Sahrens #include <sys/zio.h>
39fa9e4066Sahrens #include <sys/zio_checksum.h>
40fa9e4066Sahrens #include <sys/zio_compress.h>
41fa9e4066Sahrens #include <sys/dmu.h>
42fa9e4066Sahrens #include <sys/dmu_tx.h>
43fa9e4066Sahrens #include <sys/zap.h>
44fa9e4066Sahrens #include <sys/zil.h>
45fa9e4066Sahrens #include <sys/vdev_impl.h>
46fa9e4066Sahrens #include <sys/metaslab.h>
47fa9e4066Sahrens #include <sys/uberblock_impl.h>
48fa9e4066Sahrens #include <sys/txg.h>
49fa9e4066Sahrens #include <sys/avl.h>
50fa9e4066Sahrens #include <sys/dmu_traverse.h>
51b1b8ab34Slling #include <sys/dmu_objset.h>
52fa9e4066Sahrens #include <sys/unique.h>
53fa9e4066Sahrens #include <sys/dsl_pool.h>
54b1b8ab34Slling #include <sys/dsl_dataset.h>
55fa9e4066Sahrens #include <sys/dsl_dir.h>
56fa9e4066Sahrens #include <sys/dsl_prop.h>
57b1b8ab34Slling #include <sys/dsl_synctask.h>
58fa9e4066Sahrens #include <sys/fs/zfs.h>
59fa94a07fSbrendan #include <sys/arc.h>
60fa9e4066Sahrens #include <sys/callb.h>
6195173954Sek #include <sys/systeminfo.h>
6295173954Sek #include <sys/sunddi.h>
63e7cbe64fSgw #include <sys/spa_boot.h>
64fa9e4066Sahrens 
65990b4856Slling #include "zfs_prop.h"
66b7b97454Sperrin #include "zfs_comutil.h"
67990b4856Slling 
/*
 * Number of worker threads in each of the per-zio-type taskqs created by
 * spa_activate() (one "issue" and one "intr" taskq per zio type).
 */
int zio_taskq_threads = 8;

/* Synctask callback that writes validated property values; defined below. */
static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
71990b4856Slling 
72990b4856Slling /*
73990b4856Slling  * ==========================================================================
74990b4856Slling  * SPA properties routines
75990b4856Slling  * ==========================================================================
76990b4856Slling  */
77990b4856Slling 
78990b4856Slling /*
79990b4856Slling  * Add a (source=src, propname=propval) list to an nvlist.
80990b4856Slling  */
819d82f4f6Slling static void
82990b4856Slling spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
83990b4856Slling     uint64_t intval, zprop_source_t src)
84990b4856Slling {
85990b4856Slling 	const char *propname = zpool_prop_to_name(prop);
86990b4856Slling 	nvlist_t *propval;
87990b4856Slling 
889d82f4f6Slling 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
899d82f4f6Slling 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
90990b4856Slling 
919d82f4f6Slling 	if (strval != NULL)
929d82f4f6Slling 		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
939d82f4f6Slling 	else
949d82f4f6Slling 		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
95990b4856Slling 
969d82f4f6Slling 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
97990b4856Slling 	nvlist_free(propval);
98990b4856Slling }
99990b4856Slling 
/*
 * Get property values from the spa configuration.
 *
 * Adds the read-only and config-derived properties to *nvp, which the
 * caller must already have allocated.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size = spa_get_space(spa);
	uint64_t used = spa_get_alloc(spa);
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	/*
	 * readonly properties
	 */
	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa->spa_name, 0, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);

	/* Capacity as a whole-number percentage; guard size == 0. */
	cap = (size == 0) ? 0 : (used * 100 / size);
	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
	spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
	    spa->spa_root_vdev->vdev_state, src);

	/*
	 * settable properties that are not stored in the pool property object.
	 */
	version = spa_version(spa);
	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
		src = ZPROP_SRC_DEFAULT;
	else
		src = ZPROP_SRC_LOCAL;
	spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	/*
	 * Report the cachefile only when it differs from the default
	 * spa_config_path; a NULL scd_path is reported as "none"
	 * (presumably meaning config caching is disabled — see
	 * spa_config_list maintenance elsewhere in this file).
	 */
	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}
151990b4856Slling 
152990b4856Slling /*
153990b4856Slling  * Get zpool property values.
154990b4856Slling  */
155990b4856Slling int
156990b4856Slling spa_prop_get(spa_t *spa, nvlist_t **nvp)
157990b4856Slling {
158990b4856Slling 	zap_cursor_t zc;
159990b4856Slling 	zap_attribute_t za;
160990b4856Slling 	objset_t *mos = spa->spa_meta_objset;
161990b4856Slling 	int err;
162990b4856Slling 
1639d82f4f6Slling 	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
164990b4856Slling 
165990b4856Slling 	/*
166990b4856Slling 	 * Get properties from the spa config.
167990b4856Slling 	 */
1689d82f4f6Slling 	spa_prop_get_config(spa, nvp);
169990b4856Slling 
170990b4856Slling 	mutex_enter(&spa->spa_props_lock);
171990b4856Slling 	/* If no pool property object, no more prop to get. */
172990b4856Slling 	if (spa->spa_pool_props_object == 0) {
173990b4856Slling 		mutex_exit(&spa->spa_props_lock);
174990b4856Slling 		return (0);
175990b4856Slling 	}
176990b4856Slling 
177990b4856Slling 	/*
178990b4856Slling 	 * Get properties from the MOS pool property object.
179990b4856Slling 	 */
180990b4856Slling 	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
181990b4856Slling 	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
182990b4856Slling 	    zap_cursor_advance(&zc)) {
183990b4856Slling 		uint64_t intval = 0;
184990b4856Slling 		char *strval = NULL;
185990b4856Slling 		zprop_source_t src = ZPROP_SRC_DEFAULT;
186990b4856Slling 		zpool_prop_t prop;
187990b4856Slling 
188990b4856Slling 		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
189990b4856Slling 			continue;
190990b4856Slling 
191990b4856Slling 		switch (za.za_integer_length) {
192990b4856Slling 		case 8:
193990b4856Slling 			/* integer property */
194990b4856Slling 			if (za.za_first_integer !=
195990b4856Slling 			    zpool_prop_default_numeric(prop))
196990b4856Slling 				src = ZPROP_SRC_LOCAL;
197990b4856Slling 
198990b4856Slling 			if (prop == ZPOOL_PROP_BOOTFS) {
199990b4856Slling 				dsl_pool_t *dp;
200990b4856Slling 				dsl_dataset_t *ds = NULL;
201990b4856Slling 
202990b4856Slling 				dp = spa_get_dsl(spa);
203990b4856Slling 				rw_enter(&dp->dp_config_rwlock, RW_READER);
204*745cd3c5Smaybee 				if (err = dsl_dataset_hold_obj(dp,
205*745cd3c5Smaybee 				    za.za_first_integer, FTAG, &ds)) {
206990b4856Slling 					rw_exit(&dp->dp_config_rwlock);
207990b4856Slling 					break;
208990b4856Slling 				}
209990b4856Slling 
210990b4856Slling 				strval = kmem_alloc(
211990b4856Slling 				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
212990b4856Slling 				    KM_SLEEP);
213990b4856Slling 				dsl_dataset_name(ds, strval);
214*745cd3c5Smaybee 				dsl_dataset_rele(ds, FTAG);
215990b4856Slling 				rw_exit(&dp->dp_config_rwlock);
216990b4856Slling 			} else {
217990b4856Slling 				strval = NULL;
218990b4856Slling 				intval = za.za_first_integer;
219990b4856Slling 			}
220990b4856Slling 
2219d82f4f6Slling 			spa_prop_add_list(*nvp, prop, strval, intval, src);
222990b4856Slling 
223990b4856Slling 			if (strval != NULL)
224990b4856Slling 				kmem_free(strval,
225990b4856Slling 				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
226990b4856Slling 
227990b4856Slling 			break;
228990b4856Slling 
229990b4856Slling 		case 1:
230990b4856Slling 			/* string property */
231990b4856Slling 			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
232990b4856Slling 			err = zap_lookup(mos, spa->spa_pool_props_object,
233990b4856Slling 			    za.za_name, 1, za.za_num_integers, strval);
234990b4856Slling 			if (err) {
235990b4856Slling 				kmem_free(strval, za.za_num_integers);
236990b4856Slling 				break;
237990b4856Slling 			}
2389d82f4f6Slling 			spa_prop_add_list(*nvp, prop, strval, 0, src);
239990b4856Slling 			kmem_free(strval, za.za_num_integers);
240990b4856Slling 			break;
241990b4856Slling 
242990b4856Slling 		default:
243990b4856Slling 			break;
244990b4856Slling 		}
245990b4856Slling 	}
246990b4856Slling 	zap_cursor_fini(&zc);
247990b4856Slling 	mutex_exit(&spa->spa_props_lock);
248990b4856Slling out:
249990b4856Slling 	if (err && err != ENOENT) {
250990b4856Slling 		nvlist_free(*nvp);
2519d82f4f6Slling 		*nvp = NULL;
252990b4856Slling 		return (err);
253990b4856Slling 	}
254990b4856Slling 
255990b4856Slling 	return (0);
256990b4856Slling }
257990b4856Slling 
/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 *
 * Returns 0 if every property is acceptable, or an errno on the first
 * failure.  As a side effect, a bootfs name in 'props' is replaced by
 * its dataset object number (see the end of this function).
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		vdev_t *rvdev;
		char *vdev_type;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		/* Reject names that aren't known pool properties. */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			/* Version can only move forward, up to SPA_VERSION. */
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
			/* Boolean properties: only 0 or 1 are valid. */
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * A bootable filesystem can not be on a RAIDZ pool
			 * nor a striped pool with more than 1 device.
			 */
			rvdev = spa->spa_root_vdev;
			vdev_type =
			    rvdev->vdev_child[0]->vdev_ops->vdev_op_type;
			if (rvdev->vdev_children > 1 ||
			    strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
			    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				/*
				 * An empty name resets bootfs to its
				 * default object number.
				 */
				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				/*
				 * Verify the named dataset exists and is a
				 * ZFS filesystem, and capture its object
				 * number for the rewrite below.
				 */
				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;
				objnum = dmu_objset_id(os);
				dmu_objset_close(os);
			}
			break;
		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_state(spa) == POOL_STATE_IO_FAILURE) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			/* "" (use default) and "none" are accepted as-is. */
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			/* Any other cachefile must be an absolute path ... */
			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			/* ... naming a file, not a directory, "." or "..". */
			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}

	/*
	 * Replace the bootfs string the user supplied with the dataset
	 * object number captured above, so the value to be set is numeric.
	 */
	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
399990b4856Slling 
400990b4856Slling int
401990b4856Slling spa_prop_set(spa_t *spa, nvlist_t *nvp)
402990b4856Slling {
403990b4856Slling 	int error;
404990b4856Slling 
405990b4856Slling 	if ((error = spa_prop_validate(spa, nvp)) != 0)
406990b4856Slling 		return (error);
407990b4856Slling 
408990b4856Slling 	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
409990b4856Slling 	    spa, nvp, 3));
410990b4856Slling }
411990b4856Slling 
412990b4856Slling /*
413990b4856Slling  * If the bootfs property value is dsobj, clear it.
414990b4856Slling  */
415990b4856Slling void
416990b4856Slling spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
417990b4856Slling {
418990b4856Slling 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
419990b4856Slling 		VERIFY(zap_remove(spa->spa_meta_objset,
420990b4856Slling 		    spa->spa_pool_props_object,
421990b4856Slling 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
422990b4856Slling 		spa->spa_bootfs = 0;
423990b4856Slling 	}
424990b4856Slling }
425990b4856Slling 
426fa9e4066Sahrens /*
427fa9e4066Sahrens  * ==========================================================================
428fa9e4066Sahrens  * SPA state manipulation (open/create/destroy/import/export)
429fa9e4066Sahrens  * ==========================================================================
430fa9e4066Sahrens  */
431fa9e4066Sahrens 
432ea8dc4b6Seschrock static int
433ea8dc4b6Seschrock spa_error_entry_compare(const void *a, const void *b)
434ea8dc4b6Seschrock {
435ea8dc4b6Seschrock 	spa_error_entry_t *sa = (spa_error_entry_t *)a;
436ea8dc4b6Seschrock 	spa_error_entry_t *sb = (spa_error_entry_t *)b;
437ea8dc4b6Seschrock 	int ret;
438ea8dc4b6Seschrock 
439ea8dc4b6Seschrock 	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
440ea8dc4b6Seschrock 	    sizeof (zbookmark_t));
441ea8dc4b6Seschrock 
442ea8dc4b6Seschrock 	if (ret < 0)
443ea8dc4b6Seschrock 		return (-1);
444ea8dc4b6Seschrock 	else if (ret > 0)
445ea8dc4b6Seschrock 		return (1);
446ea8dc4b6Seschrock 	else
447ea8dc4b6Seschrock 		return (0);
448ea8dc4b6Seschrock }
449ea8dc4b6Seschrock 
/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 *
 * The caller takes ownership of the trees (and their entries) copied
 * into 'last' and 'scrub'; spa_errlist_lock must be held.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	/* Hand the current trees, contents included, to the caller. */
	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	/* Re-create empty trees so the spa can keep accumulating errors. */
	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
469ea8dc4b6Seschrock 
/*
 * Activate an uninitialized pool.
 *
 * Allocates the metaslab classes, taskqs, lists, and error trees that
 * spa_deactivate() later tears down.
 */
static void
spa_activate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	/* Separate allocation classes for normal data and the intent log. */
	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	/* One "issue" and one "intr" taskq per zio type. */
	for (t = 0; t < ZIO_TYPES; t++) {
		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
	}

	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_dirty_node));
	list_create(&spa->spa_zio_list, sizeof (zio_t),
	    offsetof(zio_t, zio_link_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	/* In-core error lists (scrub-time and most-recent). */
	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
509fa9e4066Sahrens 
/*
 * Opposite of spa_activate().
 *
 * Destroys everything spa_activate() created; the pool must already be
 * unloaded (no syncing, no dsl pool, no vdev tree).
 */
static void
spa_deactivate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_dirty_list);
	list_destroy(&spa->spa_zio_list);

	/* Tear down the per-zio-type taskqs created by spa_activate(). */
	for (t = 0; t < ZIO_TYPES; t++) {
		taskq_destroy(spa->spa_zio_issue_taskq[t]);
		taskq_destroy(spa->spa_zio_intr_taskq[t]);
		spa->spa_zio_issue_taskq[t] = NULL;
		spa->spa_zio_intr_taskq[t] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}
553fa9e4066Sahrens 
554fa9e4066Sahrens /*
555fa9e4066Sahrens  * Verify a pool configuration, and construct the vdev tree appropriately.  This
556fa9e4066Sahrens  * will create all the necessary vdevs in the appropriate layout, with each vdev
557fa9e4066Sahrens  * in the CLOSED state.  This will prep the pool before open/creation/import.
558fa9e4066Sahrens  * All vdev validation is done by the vdev_alloc() routine.
559fa9e4066Sahrens  */
56099653d4eSeschrock static int
56199653d4eSeschrock spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
56299653d4eSeschrock     uint_t id, int atype)
563fa9e4066Sahrens {
564fa9e4066Sahrens 	nvlist_t **child;
565fa9e4066Sahrens 	uint_t c, children;
56699653d4eSeschrock 	int error;
567fa9e4066Sahrens 
56899653d4eSeschrock 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
56999653d4eSeschrock 		return (error);
570fa9e4066Sahrens 
57199653d4eSeschrock 	if ((*vdp)->vdev_ops->vdev_op_leaf)
57299653d4eSeschrock 		return (0);
573fa9e4066Sahrens 
574fa9e4066Sahrens 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
575fa9e4066Sahrens 	    &child, &children) != 0) {
57699653d4eSeschrock 		vdev_free(*vdp);
57799653d4eSeschrock 		*vdp = NULL;
57899653d4eSeschrock 		return (EINVAL);
579fa9e4066Sahrens 	}
580fa9e4066Sahrens 
581fa9e4066Sahrens 	for (c = 0; c < children; c++) {
58299653d4eSeschrock 		vdev_t *vd;
58399653d4eSeschrock 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
58499653d4eSeschrock 		    atype)) != 0) {
58599653d4eSeschrock 			vdev_free(*vdp);
58699653d4eSeschrock 			*vdp = NULL;
58799653d4eSeschrock 			return (error);
588fa9e4066Sahrens 		}
589fa9e4066Sahrens 	}
590fa9e4066Sahrens 
59199653d4eSeschrock 	ASSERT(*vdp != NULL);
59299653d4eSeschrock 
59399653d4eSeschrock 	return (0);
594fa9e4066Sahrens }
595fa9e4066Sahrens 
/*
 * Opposite of spa_load().
 *
 * Order matters here: async tasks and syncing are stopped before the
 * dsl pool and vdev trees are torn down.
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/* Free the hot-spare aux vdevs, their array, and cached config. */
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}

	/* Likewise for the level 2 cache devices. */
	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}

	spa->spa_async_suspended = 0;
}
669fa9e4066Sahrens 
67099653d4eSeschrock /*
67199653d4eSeschrock  * Load (or re-load) the current list of vdevs describing the active spares for
67299653d4eSeschrock  * this pool.  When this is called, we have some form of basic information in
673fa94a07fSbrendan  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
674fa94a07fSbrendan  * then re-generate a more complete list including status information.
67599653d4eSeschrock  */
67699653d4eSeschrock static void
67799653d4eSeschrock spa_load_spares(spa_t *spa)
67899653d4eSeschrock {
67999653d4eSeschrock 	nvlist_t **spares;
68099653d4eSeschrock 	uint_t nspares;
68199653d4eSeschrock 	int i;
68239c23413Seschrock 	vdev_t *vd, *tvd;
68399653d4eSeschrock 
68499653d4eSeschrock 	/*
68599653d4eSeschrock 	 * First, close and free any existing spare vdevs.
68699653d4eSeschrock 	 */
687fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
688fa94a07fSbrendan 		vd = spa->spa_spares.sav_vdevs[i];
68939c23413Seschrock 
69039c23413Seschrock 		/* Undo the call to spa_activate() below */
691c5904d13Seschrock 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
692c5904d13Seschrock 		    B_FALSE)) != NULL && tvd->vdev_isspare)
69339c23413Seschrock 			spa_spare_remove(tvd);
69439c23413Seschrock 		vdev_close(vd);
69539c23413Seschrock 		vdev_free(vd);
69699653d4eSeschrock 	}
69739c23413Seschrock 
698fa94a07fSbrendan 	if (spa->spa_spares.sav_vdevs)
699fa94a07fSbrendan 		kmem_free(spa->spa_spares.sav_vdevs,
700fa94a07fSbrendan 		    spa->spa_spares.sav_count * sizeof (void *));
70199653d4eSeschrock 
702fa94a07fSbrendan 	if (spa->spa_spares.sav_config == NULL)
70399653d4eSeschrock 		nspares = 0;
70499653d4eSeschrock 	else
705fa94a07fSbrendan 		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
70699653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
70799653d4eSeschrock 
708fa94a07fSbrendan 	spa->spa_spares.sav_count = (int)nspares;
709fa94a07fSbrendan 	spa->spa_spares.sav_vdevs = NULL;
71099653d4eSeschrock 
71199653d4eSeschrock 	if (nspares == 0)
71299653d4eSeschrock 		return;
71399653d4eSeschrock 
71499653d4eSeschrock 	/*
71599653d4eSeschrock 	 * Construct the array of vdevs, opening them to get status in the
71639c23413Seschrock 	 * process.   For each spare, there is potentially two different vdev_t
71739c23413Seschrock 	 * structures associated with it: one in the list of spares (used only
71839c23413Seschrock 	 * for basic validation purposes) and one in the active vdev
71939c23413Seschrock 	 * configuration (if it's spared in).  During this phase we open and
72039c23413Seschrock 	 * validate each vdev on the spare list.  If the vdev also exists in the
72139c23413Seschrock 	 * active configuration, then we also mark this vdev as an active spare.
72299653d4eSeschrock 	 */
723fa94a07fSbrendan 	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
724fa94a07fSbrendan 	    KM_SLEEP);
725fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
72699653d4eSeschrock 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
72799653d4eSeschrock 		    VDEV_ALLOC_SPARE) == 0);
72899653d4eSeschrock 		ASSERT(vd != NULL);
72999653d4eSeschrock 
730fa94a07fSbrendan 		spa->spa_spares.sav_vdevs[i] = vd;
73199653d4eSeschrock 
732c5904d13Seschrock 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
733c5904d13Seschrock 		    B_FALSE)) != NULL) {
73439c23413Seschrock 			if (!tvd->vdev_isspare)
73539c23413Seschrock 				spa_spare_add(tvd);
73639c23413Seschrock 
73739c23413Seschrock 			/*
73839c23413Seschrock 			 * We only mark the spare active if we were successfully
73939c23413Seschrock 			 * able to load the vdev.  Otherwise, importing a pool
74039c23413Seschrock 			 * with a bad active spare would result in strange
74139c23413Seschrock 			 * behavior, because multiple pool would think the spare
74239c23413Seschrock 			 * is actively in use.
74339c23413Seschrock 			 *
74439c23413Seschrock 			 * There is a vulnerability here to an equally bizarre
74539c23413Seschrock 			 * circumstance, where a dead active spare is later
74639c23413Seschrock 			 * brought back to life (onlined or otherwise).  Given
74739c23413Seschrock 			 * the rarity of this scenario, and the extra complexity
74839c23413Seschrock 			 * it adds, we ignore the possibility.
74939c23413Seschrock 			 */
75039c23413Seschrock 			if (!vdev_is_dead(tvd))
75139c23413Seschrock 				spa_spare_activate(tvd);
75239c23413Seschrock 		}
75339c23413Seschrock 
75499653d4eSeschrock 		if (vdev_open(vd) != 0)
75599653d4eSeschrock 			continue;
75699653d4eSeschrock 
75799653d4eSeschrock 		vd->vdev_top = vd;
758fa94a07fSbrendan 		if (vdev_validate_aux(vd) == 0)
759fa94a07fSbrendan 			spa_spare_add(vd);
76099653d4eSeschrock 	}
76199653d4eSeschrock 
76299653d4eSeschrock 	/*
76399653d4eSeschrock 	 * Recompute the stashed list of spares, with status information
76499653d4eSeschrock 	 * this time.
76599653d4eSeschrock 	 */
766fa94a07fSbrendan 	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
76799653d4eSeschrock 	    DATA_TYPE_NVLIST_ARRAY) == 0);
76899653d4eSeschrock 
769fa94a07fSbrendan 	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
770fa94a07fSbrendan 	    KM_SLEEP);
771fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++)
772fa94a07fSbrendan 		spares[i] = vdev_config_generate(spa,
773fa94a07fSbrendan 		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
774fa94a07fSbrendan 	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
775fa94a07fSbrendan 	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
776fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++)
77799653d4eSeschrock 		nvlist_free(spares[i]);
778fa94a07fSbrendan 	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
779fa94a07fSbrendan }
780fa94a07fSbrendan 
/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		/*
		 * No config: 'newvdevs' and 'l2cache' are left uninitialized,
		 * but nl2cache == 0 here (and sav_count == 0 at the 'out'
		 * label) guarantees neither pointer is ever dereferenced
		 * or freed on this path.
		 */
		nl2cache = 0;
	}

	/*
	 * Steal the current vdev array so it can be rebuilt below, re-using
	 * any device that also appears in the new config.
	 */
	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 * Clearing the old slot excludes it from the
				 * purge loop below.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			/*
			 * Only devices that opened healthy are handed to the
			 * l2arc; the usable region starts past the front
			 * vdev labels.
			 */
			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			/*
			 * Detach from the l2arc only when the pool is open
			 * for write and the l2arc actually tracks this vdev.
			 * NOTE(review): 'pool != 0ULL' presumably means the
			 * device is still owned by some pool — confirm
			 * against spa_l2cache_exists().
			 */
			if (spa_mode & FWRITE &&
			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL &&
			    l2arc_vdev_present(vd)) {
				l2arc_remove_vdev(vd);
			}
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	/*
	 * On the goto path sav_count is still 0, so the (then-uninitialized)
	 * 'l2cache' pointer is never read or freed here.
	 */
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
91399653d4eSeschrock 
91499653d4eSeschrock static int
91599653d4eSeschrock load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
91699653d4eSeschrock {
91799653d4eSeschrock 	dmu_buf_t *db;
91899653d4eSeschrock 	char *packed = NULL;
91999653d4eSeschrock 	size_t nvsize = 0;
92099653d4eSeschrock 	int error;
92199653d4eSeschrock 	*value = NULL;
92299653d4eSeschrock 
92399653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
92499653d4eSeschrock 	nvsize = *(uint64_t *)db->db_data;
92599653d4eSeschrock 	dmu_buf_rele(db, FTAG);
92699653d4eSeschrock 
92799653d4eSeschrock 	packed = kmem_alloc(nvsize, KM_SLEEP);
92899653d4eSeschrock 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
92999653d4eSeschrock 	if (error == 0)
93099653d4eSeschrock 		error = nvlist_unpack(packed, nvsize, value, 0);
93199653d4eSeschrock 	kmem_free(packed, nvsize);
93299653d4eSeschrock 
93399653d4eSeschrock 	return (error);
93499653d4eSeschrock }
93599653d4eSeschrock 
9363d7072f8Seschrock /*
9373d7072f8Seschrock  * Checks to see if the given vdev could not be opened, in which case we post a
9383d7072f8Seschrock  * sysevent to notify the autoreplace code that the device has been removed.
9393d7072f8Seschrock  */
9403d7072f8Seschrock static void
9413d7072f8Seschrock spa_check_removed(vdev_t *vd)
9423d7072f8Seschrock {
9433d7072f8Seschrock 	int c;
9443d7072f8Seschrock 
9453d7072f8Seschrock 	for (c = 0; c < vd->vdev_children; c++)
9463d7072f8Seschrock 		spa_check_removed(vd->vdev_child[c]);
9473d7072f8Seschrock 
9483d7072f8Seschrock 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
9493d7072f8Seschrock 		zfs_post_autoreplace(vd->vdev_spa, vd);
9503d7072f8Seschrock 		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
9513d7072f8Seschrock 	}
9523d7072f8Seschrock }
9533d7072f8Seschrock 
/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 *
 * 'config' is the nvlist describing the pool; 'state' records why we are
 * loading (open, import, or tryimport) and is published in
 * spa->spa_load_state for the duration of the load.  'mosconfig' is B_TRUE
 * once 'config' is known to be the trusted copy stored in the MOS; on the
 * first pass it is B_FALSE, and after enough of the pool is open to read
 * the MOS copy, spa_load() reactivates the spa and re-invokes itself with
 * that config and mosconfig == B_TRUE.
 *
 * Returns 0 on success or an errno on failure.  Any failure other than
 * EBADF (which vdev_validate uses to signal an exported/destroyed pool)
 * posts an FMA ereport before returning.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;
	uint64_t autoreplace = 0;

	spa->spa_load_state = state;

	/* A config without a vdev tree or pool guid is unusable. */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	/* Refuse to import a pool whose guid is already active. */
	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	error = vdev_open(rvd);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.  (Only enforced once we are working
	 * from the trusted MOS config.)
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	/* Locate the MOS object holding the trusted copy of the config. */
	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		/*
		 * If the MOS config records a hostid, refuse to load a pool
		 * that was last accessed by a different system.
		 */
		if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID,
		    &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx).  "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa->spa_name, hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		/*
		 * Restart the load with the trusted config read from the MOS.
		 */
		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_l2cache(spa);
		spa_config_exit(spa, FTAG);
	}

	/* Default, in case the props object below has no delegation entry. */
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		/* Individual property lookups are best-effort. */
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	/*
	 * EBADF means vdev_validate saw the pool as exported/destroyed;
	 * the caller handles that case, so don't post an ereport for it.
	 */
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}
1372fa9e4066Sahrens 
1373fa9e4066Sahrens /*
1374fa9e4066Sahrens  * Pool Open/Import
1375fa9e4066Sahrens  *
1376fa9e4066Sahrens  * The import case is identical to an open except that the configuration is sent
1377fa9e4066Sahrens  * down from userland, instead of grabbed from the configuration cache.  For the
1378fa9e4066Sahrens  * case of an open, the pool configuration will exist in the
13793d7072f8Seschrock  * POOL_STATE_UNINITIALIZED state.
1380fa9e4066Sahrens  *
1381fa9e4066Sahrens  * The stats information (gen/count/ustats) is used to gather vdev statistics at
1382fa9e4066Sahrens  * the same time open the pool, without having to keep around the spa_t in some
1383fa9e4066Sahrens  * ambiguous state.
1384fa9e4066Sahrens  */
/*
 * Open the pool named 'pool'.  If the pool is still POOL_STATE_UNINITIALIZED,
 * activate it and load it from its cached configuration first.  On success,
 * a held reference (tagged with 'tag', released via spa_close()) is stored in
 * *spapp, and if 'config' is non-NULL a freshly generated config nvlist is
 * stored in *config.  Returns ENOENT if the pool is not in the namespace (or
 * its cached config proves it was exported/destroyed), or the spa_load()
 * error on a failed load.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int loaded = B_FALSE;	/* did this call perform the load? */
	int locked = B_FALSE;	/* did this call take spa_namespace_lock? */

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it indicates that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL) {
				spa_config_enter(spa, RW_READER, FTAG);
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
				spa_config_exit(spa, FTAG);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			spa->spa_last_open_failed = B_FALSE;
		}

		loaded = B_TRUE;
	}

	/* Take the caller's reference before dropping the namespace lock. */
	spa_open_ref(spa, tag);

	/*
	 * If we just loaded the pool, resilver anything that's out of date.
	 */
	if (loaded && (spa_mode & FWRITE))
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
	}

	return (0);
}
1481fa9e4066Sahrens 
1482fa9e4066Sahrens int
1483fa9e4066Sahrens spa_open(const char *name, spa_t **spapp, void *tag)
1484fa9e4066Sahrens {
1485fa9e4066Sahrens 	return (spa_open_common(name, spapp, tag, NULL));
1486fa9e4066Sahrens }
1487fa9e4066Sahrens 
1488ea8dc4b6Seschrock /*
1489ea8dc4b6Seschrock  * Lookup the given spa_t, incrementing the inject count in the process,
1490ea8dc4b6Seschrock  * preventing it from being exported or destroyed.
1491ea8dc4b6Seschrock  */
1492ea8dc4b6Seschrock spa_t *
1493ea8dc4b6Seschrock spa_inject_addref(char *name)
1494ea8dc4b6Seschrock {
1495ea8dc4b6Seschrock 	spa_t *spa;
1496ea8dc4b6Seschrock 
1497ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1498ea8dc4b6Seschrock 	if ((spa = spa_lookup(name)) == NULL) {
1499ea8dc4b6Seschrock 		mutex_exit(&spa_namespace_lock);
1500ea8dc4b6Seschrock 		return (NULL);
1501ea8dc4b6Seschrock 	}
1502ea8dc4b6Seschrock 	spa->spa_inject_ref++;
1503ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1504ea8dc4b6Seschrock 
1505ea8dc4b6Seschrock 	return (spa);
1506ea8dc4b6Seschrock }
1507ea8dc4b6Seschrock 
/*
 * Drop a reference obtained via spa_inject_addref(), re-allowing the pool
 * to be exported or destroyed once the count reaches zero.
 */
void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}
1515ea8dc4b6Seschrock 
/*
 * Add spares device information to the nvlist.  The pool's spare list is
 * copied into the ZPOOL_CONFIG_VDEV_TREE of 'config'; any spare that is
 * currently in use by another pool (spa_spare_exists() with a nonzero pool
 * guid) has its stats overwritten to VDEV_STATE_CANT_OPEN/VDEV_AUX_SPARED
 * so userland reports it as spared.
 *
 * NOTE(review): unlike spa_add_l2cache(), this does not take the spa_config
 * lock itself -- confirm the caller's locking is sufficient.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	/* Nothing to do if the pool has no spares configured. */
	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		/*
		 * Re-lookup so 'spares' points at the copies now owned by
		 * 'nvroot', which are the ones we are allowed to modify.
		 */
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool) && pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}
156199653d4eSeschrock 
/*
 * Add l2cache device information to the nvlist, including vdev stats.
 * The pool's l2cache list is copied into the ZPOOL_CONFIG_VDEV_TREE of
 * 'config', and each entry's ZPOOL_CONFIG_STATS array is refreshed from
 * the matching vdev via vdev_get_stats().  Runs under the spa_config
 * lock (reader) for the duration.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	/* Nothing to do if the pool has no cache devices configured. */
	if (spa->spa_l2cache.sav_count == 0)
		return;

	spa_config_enter(spa, RW_READER, FTAG);

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		/*
		 * Re-lookup so 'l2cache' points at the copies now owned by
		 * 'nvroot', which are the ones we are allowed to modify.
		 */
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */

		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			/* Find the cache vdev whose guid matches this entry. */
			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}

	spa_config_exit(spa, FTAG);
}
1617fa94a07fSbrendan 
/*
 * Retrieve the configuration and statistics for the named pool.  On return
 * *config holds a generated config nvlist (annotated with the error-log
 * count plus spare and l2cache info) or NULL if no config was available.
 * If 'altroot' is non-NULL, up to 'buflen' bytes of the pool's alternate
 * root are copied into it, even for pools that failed to open.  Returns
 * the spa_open_common() error code.
 */
int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	/* Even a failed open may have produced a config worth annotating. */
	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		spa_add_spares(spa, *config);
		spa_add_l2cache(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			/* Lookup took no hold; don't spa_close() this one. */
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}
1659fa9e4066Sahrens 
/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 *
 * 'config' names the nvlist array to check (ZPOOL_CONFIG_SPARES or
 * ZPOOL_CONFIG_L2CACHE), 'version' is the minimum pool version that
 * supports this device class, and 'label' is the label type written by
 * vdev_label_init().  Returns 0 on success, EINVAL for a malformed array
 * or non-leaf vdev, ENOTSUP if the pool version is too old, or ENOTBLK
 * for a non-disk l2cache device.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices.
		 */
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}

		vd->vdev_top = vd;

		/* Record the guid assigned during labeling back in the dev. */
		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		/*
		 * On import (SPARE/L2CACHE alloc modes), open/label failures
		 * are tolerated; otherwise they abort validation.
		 */
		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	/* Always clear the pending list, on both success and failure. */
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}
174199653d4eSeschrock 
1742fa94a07fSbrendan static int
1743fa94a07fSbrendan spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1744fa94a07fSbrendan {
1745fa94a07fSbrendan 	int error;
1746fa94a07fSbrendan 
1747fa94a07fSbrendan 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1748fa94a07fSbrendan 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
1749fa94a07fSbrendan 	    VDEV_LABEL_SPARE)) != 0) {
1750fa94a07fSbrendan 		return (error);
1751fa94a07fSbrendan 	}
1752fa94a07fSbrendan 
1753fa94a07fSbrendan 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1754fa94a07fSbrendan 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
1755fa94a07fSbrendan 	    VDEV_LABEL_L2CACHE));
1756fa94a07fSbrendan }
1757fa94a07fSbrendan 
/*
 * Install the array of aux-device nvlists 'devs' into 'sav'.  If the aux
 * config already exists, the new devices are concatenated onto the current
 * list; otherwise a fresh config is allocated.  'config' names the nvlist
 * array being set (ZPOOL_CONFIG_SPARES or ZPOOL_CONFIG_L2CACHE).
 */
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		/* Old entries first, then the new ones appended after them. */
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		/* Drop the old array before installing the merged one. */
		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		/* nvlist_add_nvlist_array copies, so free our temp dups. */
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}
1803fa94a07fSbrendan 
/*
 * Stop and drop level 2 ARC devices
 *
 * For each configured l2cache vdev: if the pool is writable and the device
 * is known to the L2ARC and registered there, remove it from the L2ARC;
 * then remove it from the pool's l2cache accounting if flagged, clear its
 * stats, and close it.
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if (spa_mode & FWRITE &&
		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
		    l2arc_vdev_present(vd)) {
			l2arc_remove_vdev(vd);
		}
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}
1831fa94a07fSbrendan 
/*
 * Pool Creation
 *
 * Create a new pool named 'pool' from the vdev tree in 'nvroot'.  Optional
 * 'props' supplies initial pool properties (validated before use) and may
 * carry the altroot; 'history_str' is the command recorded in the pool
 * history, if the version supports it.  Returns 0 on success, EEXIST if
 * the name is taken, or a property/vdev validation error.  On success the
 * pool is left active with sync started and its config cached on disk.
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;

	/* Reject bad properties before any on-disk state is created. */
	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/* Default to the current SPA version unless one was requested. */
	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_l2cache(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* All initial MOS setup happens in the pool's first assigned txg. */
	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, 1 << 14,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props)
		spa_sync_props(spa, props, CRED(), tx);

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync(spa, B_FALSE, B_TRUE);

	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);

	mutex_exit(&spa_namespace_lock);

	return (0);
}
2025fa9e4066Sahrens 
2026fa9e4066Sahrens /*
2027fa9e4066Sahrens  * Import the given pool into the system.  We set up the necessary spa_t and
2028fa9e4066Sahrens  * then call spa_load() to do the dirty work.
2029fa9e4066Sahrens  */
2030e7cbe64fSgw static int
2031e7cbe64fSgw spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
2032c5904d13Seschrock     boolean_t isroot, boolean_t allowfaulted)
2033fa9e4066Sahrens {
2034fa9e4066Sahrens 	spa_t *spa;
2035990b4856Slling 	char *altroot = NULL;
2036c5904d13Seschrock 	int error, loaderr;
203799653d4eSeschrock 	nvlist_t *nvroot;
2038fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
2039fa94a07fSbrendan 	uint_t nspares, nl2cache;
2040e7cbe64fSgw 	int mosconfig = isroot? B_FALSE : B_TRUE;
2041fa9e4066Sahrens 
2042fa9e4066Sahrens 	/*
2043fa9e4066Sahrens 	 * If a pool with this name exists, return failure.
2044fa9e4066Sahrens 	 */
2045fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
2046fa9e4066Sahrens 	if (spa_lookup(pool) != NULL) {
2047fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2048fa9e4066Sahrens 		return (EEXIST);
2049fa9e4066Sahrens 	}
2050fa9e4066Sahrens 
2051fa9e4066Sahrens 	/*
20520373e76bSbonwick 	 * Create and initialize the spa structure.
2053fa9e4066Sahrens 	 */
2054990b4856Slling 	(void) nvlist_lookup_string(props,
2055990b4856Slling 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
20560373e76bSbonwick 	spa = spa_add(pool, altroot);
2057fa9e4066Sahrens 	spa_activate(spa);
2058fa9e4066Sahrens 
2059c5904d13Seschrock 	if (allowfaulted)
2060c5904d13Seschrock 		spa->spa_import_faulted = B_TRUE;
2061bf82a41bSeschrock 	spa->spa_is_root = isroot;
2062c5904d13Seschrock 
20635dabedeeSbonwick 	/*
20640373e76bSbonwick 	 * Pass off the heavy lifting to spa_load().
2065ecc2d604Sbonwick 	 * Pass TRUE for mosconfig because the user-supplied config
2066ecc2d604Sbonwick 	 * is actually the one to trust when doing an import.
20675dabedeeSbonwick 	 */
2068c5904d13Seschrock 	loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, mosconfig);
2069fa9e4066Sahrens 
207099653d4eSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
207199653d4eSeschrock 	/*
207299653d4eSeschrock 	 * Toss any existing sparelist, as it doesn't have any validity anymore,
207399653d4eSeschrock 	 * and conflicts with spa_has_spare().
207499653d4eSeschrock 	 */
2075e7cbe64fSgw 	if (!isroot && spa->spa_spares.sav_config) {
2076fa94a07fSbrendan 		nvlist_free(spa->spa_spares.sav_config);
2077fa94a07fSbrendan 		spa->spa_spares.sav_config = NULL;
207899653d4eSeschrock 		spa_load_spares(spa);
207999653d4eSeschrock 	}
2080e7cbe64fSgw 	if (!isroot && spa->spa_l2cache.sav_config) {
2081fa94a07fSbrendan 		nvlist_free(spa->spa_l2cache.sav_config);
2082fa94a07fSbrendan 		spa->spa_l2cache.sav_config = NULL;
2083fa94a07fSbrendan 		spa_load_l2cache(spa);
2084fa94a07fSbrendan 	}
208599653d4eSeschrock 
208699653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
208799653d4eSeschrock 	    &nvroot) == 0);
2088fa94a07fSbrendan 	if (error == 0)
2089fa94a07fSbrendan 		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
2090fa94a07fSbrendan 	if (error == 0)
2091fa94a07fSbrendan 		error = spa_validate_aux(spa, nvroot, -1ULL,
2092fa94a07fSbrendan 		    VDEV_ALLOC_L2CACHE);
209399653d4eSeschrock 	spa_config_exit(spa, FTAG);
209499653d4eSeschrock 
2095990b4856Slling 	if (error != 0 || (props && (error = spa_prop_set(spa, props)))) {
2096c5904d13Seschrock 		if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
2097c5904d13Seschrock 			/*
2098c5904d13Seschrock 			 * If we failed to load the pool, but 'allowfaulted' is
2099c5904d13Seschrock 			 * set, then manually set the config as if the config
2100c5904d13Seschrock 			 * passed in was specified in the cache file.
2101c5904d13Seschrock 			 */
2102c5904d13Seschrock 			error = 0;
2103c5904d13Seschrock 			spa->spa_import_faulted = B_FALSE;
2104c5904d13Seschrock 			if (spa->spa_config == NULL) {
2105c5904d13Seschrock 				spa_config_enter(spa, RW_READER, FTAG);
2106c5904d13Seschrock 				spa->spa_config = spa_config_generate(spa,
2107c5904d13Seschrock 				    NULL, -1ULL, B_TRUE);
2108c5904d13Seschrock 				spa_config_exit(spa, FTAG);
2109c5904d13Seschrock 			}
2110c5904d13Seschrock 			spa_unload(spa);
2111c5904d13Seschrock 			spa_deactivate(spa);
2112c5904d13Seschrock 			spa_config_sync(spa, B_FALSE, B_TRUE);
2113c5904d13Seschrock 		} else {
2114c5904d13Seschrock 			spa_unload(spa);
2115c5904d13Seschrock 			spa_deactivate(spa);
2116c5904d13Seschrock 			spa_remove(spa);
2117c5904d13Seschrock 		}
2118fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2119fa9e4066Sahrens 		return (error);
2120fa9e4066Sahrens 	}
2121fa9e4066Sahrens 
212299653d4eSeschrock 	/*
2123fa94a07fSbrendan 	 * Override any spares and level 2 cache devices as specified by
2124fa94a07fSbrendan 	 * the user, as these may have correct device names/devids, etc.
212599653d4eSeschrock 	 */
212699653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
212799653d4eSeschrock 	    &spares, &nspares) == 0) {
2128fa94a07fSbrendan 		if (spa->spa_spares.sav_config)
2129fa94a07fSbrendan 			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
213099653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
213199653d4eSeschrock 		else
2132fa94a07fSbrendan 			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
213399653d4eSeschrock 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2134fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
213599653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
213699653d4eSeschrock 		spa_config_enter(spa, RW_WRITER, FTAG);
213799653d4eSeschrock 		spa_load_spares(spa);
213899653d4eSeschrock 		spa_config_exit(spa, FTAG);
2139fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
2140fa94a07fSbrendan 	}
2141fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2142fa94a07fSbrendan 	    &l2cache, &nl2cache) == 0) {
2143fa94a07fSbrendan 		if (spa->spa_l2cache.sav_config)
2144fa94a07fSbrendan 			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
2145fa94a07fSbrendan 			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
2146fa94a07fSbrendan 		else
2147fa94a07fSbrendan 			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2148fa94a07fSbrendan 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2149fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2150fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2151fa94a07fSbrendan 		spa_config_enter(spa, RW_WRITER, FTAG);
2152fa94a07fSbrendan 		spa_load_l2cache(spa);
2153fa94a07fSbrendan 		spa_config_exit(spa, FTAG);
2154fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
215599653d4eSeschrock 	}
215699653d4eSeschrock 
2157c5904d13Seschrock 	if (spa_mode & FWRITE) {
2158c5904d13Seschrock 		/*
2159c5904d13Seschrock 		 * Update the config cache to include the newly-imported pool.
2160c5904d13Seschrock 		 */
2161e7cbe64fSgw 		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot);
21620373e76bSbonwick 
2163c5904d13Seschrock 		/*
2164c5904d13Seschrock 		 * Resilver anything that's out of date.
2165c5904d13Seschrock 		 */
2166c5904d13Seschrock 		if (!isroot)
2167c5904d13Seschrock 			VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER,
2168c5904d13Seschrock 			    B_TRUE) == 0);
2169c5904d13Seschrock 	}
2170fa9e4066Sahrens 
2171c5904d13Seschrock 	spa->spa_import_faulted = B_FALSE;
21723d7072f8Seschrock 	mutex_exit(&spa_namespace_lock);
21733d7072f8Seschrock 
2174fa9e4066Sahrens 	return (0);
2175fa9e4066Sahrens }
2176fa9e4066Sahrens 
2177e7cbe64fSgw #ifdef _KERNEL
2178e7cbe64fSgw /*
2179e7cbe64fSgw  * Build a "root" vdev for a top level vdev read in from a rootpool
2180e7cbe64fSgw  * device label.
2181e7cbe64fSgw  */
2182e7cbe64fSgw static void
2183e7cbe64fSgw spa_build_rootpool_config(nvlist_t *config)
2184e7cbe64fSgw {
2185e7cbe64fSgw 	nvlist_t *nvtop, *nvroot;
2186e7cbe64fSgw 	uint64_t pgid;
2187e7cbe64fSgw 
2188e7cbe64fSgw 	/*
2189e7cbe64fSgw 	 * Add this top-level vdev to the child array.
2190e7cbe64fSgw 	 */
2191e7cbe64fSgw 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
2192e7cbe64fSgw 	    == 0);
2193e7cbe64fSgw 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
2194e7cbe64fSgw 	    == 0);
2195e7cbe64fSgw 
2196e7cbe64fSgw 	/*
2197e7cbe64fSgw 	 * Put this pool's top-level vdevs into a root vdev.
2198e7cbe64fSgw 	 */
2199e7cbe64fSgw 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2200e7cbe64fSgw 	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
2201e7cbe64fSgw 	    == 0);
2202e7cbe64fSgw 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
2203e7cbe64fSgw 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
2204e7cbe64fSgw 	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2205e7cbe64fSgw 	    &nvtop, 1) == 0);
2206e7cbe64fSgw 
2207e7cbe64fSgw 	/*
2208e7cbe64fSgw 	 * Replace the existing vdev_tree with the new root vdev in
2209e7cbe64fSgw 	 * this pool's configuration (remove the old, add the new).
2210e7cbe64fSgw 	 */
2211e7cbe64fSgw 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2212e7cbe64fSgw 	nvlist_free(nvroot);
2213e7cbe64fSgw }
2214e7cbe64fSgw 
2215e7cbe64fSgw /*
2216e7cbe64fSgw  * Get the root pool information from the root disk, then import the root pool
2217e7cbe64fSgw  * during the system boot up time.
2218e7cbe64fSgw  */
2219e7cbe64fSgw extern nvlist_t *vdev_disk_read_rootlabel(char *);
2220e7cbe64fSgw 
2221e7cbe64fSgw void
2222e7cbe64fSgw spa_check_rootconf(char *devpath, char **bestdev, nvlist_t **bestconf,
2223e7cbe64fSgw     uint64_t *besttxg)
2224e7cbe64fSgw {
2225e7cbe64fSgw 	nvlist_t *config;
2226e7cbe64fSgw 	uint64_t txg;
2227e7cbe64fSgw 
2228e7cbe64fSgw 	if ((config = vdev_disk_read_rootlabel(devpath)) == NULL)
2229e7cbe64fSgw 		return;
2230e7cbe64fSgw 
2231e7cbe64fSgw 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2232e7cbe64fSgw 
2233e7cbe64fSgw 	if (txg > *besttxg) {
2234e7cbe64fSgw 		*besttxg = txg;
2235e7cbe64fSgw 		if (*bestconf != NULL)
2236e7cbe64fSgw 			nvlist_free(*bestconf);
2237e7cbe64fSgw 		*bestconf = config;
2238e7cbe64fSgw 		*bestdev = devpath;
2239e7cbe64fSgw 	}
2240e7cbe64fSgw }
2241e7cbe64fSgw 
2242e7cbe64fSgw boolean_t
2243e7cbe64fSgw spa_rootdev_validate(nvlist_t *nv)
2244e7cbe64fSgw {
2245e7cbe64fSgw 	uint64_t ival;
2246e7cbe64fSgw 
2247e7cbe64fSgw 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2248e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2249e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, &ival) == 0 ||
2250e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2251e7cbe64fSgw 		return (B_FALSE);
2252e7cbe64fSgw 
2253e7cbe64fSgw 	return (B_TRUE);
2254e7cbe64fSgw }
2255e7cbe64fSgw 
/*
 * Import a root pool.
 *
 * For x86, devpath_list will consist of the physpath name of the vdev in a
 * single-disk root pool, or a list of physpath names for the vdevs in a
 * mirrored rootpool, e.g.
 *	"/pci@1f,0/ide@d/disk@0,0:a /pci@1f,0/ide@d/disk@2,0:a"
 *
 * For Sparc, devpath_list consists of the physpath name of the booting device
 * regardless of whether the rootpool is a single-device pool or a mirrored
 * pool, e.g.
 *	"/pci@1f,0/ide@d/disk@0,0:a"
 */
2269e7cbe64fSgw int
2270e7cbe64fSgw spa_import_rootpool(char *devpath_list)
2271e7cbe64fSgw {
2272e7cbe64fSgw 	nvlist_t *conf = NULL;
2273e7cbe64fSgw 	char *dev = NULL;
2274e7cbe64fSgw 	char *pname;
2275e7cbe64fSgw 	int error;
2276e7cbe64fSgw 
2277e7cbe64fSgw 	/*
2278e7cbe64fSgw 	 * Get the vdev pathname and configuation from the most
2279e7cbe64fSgw 	 * recently updated vdev (highest txg).
2280e7cbe64fSgw 	 */
2281e7cbe64fSgw 	if (error = spa_get_rootconf(devpath_list, &dev, &conf))
2282e7cbe64fSgw 		goto msg_out;
2283e7cbe64fSgw 
2284e7cbe64fSgw 	/*
2285e7cbe64fSgw 	 * Add type "root" vdev to the config.
2286e7cbe64fSgw 	 */
2287e7cbe64fSgw 	spa_build_rootpool_config(conf);
2288e7cbe64fSgw 
2289e7cbe64fSgw 	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2290e7cbe64fSgw 
2291bf82a41bSeschrock 	/*
2292bf82a41bSeschrock 	 * We specify 'allowfaulted' for this to be treated like spa_open()
2293bf82a41bSeschrock 	 * instead of spa_import().  This prevents us from marking vdevs as
2294bf82a41bSeschrock 	 * persistently unavailable, and generates FMA ereports as if it were a
2295bf82a41bSeschrock 	 * pool open, not import.
2296bf82a41bSeschrock 	 */
2297bf82a41bSeschrock 	error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
2298e7cbe64fSgw 	if (error == EEXIST)
2299e7cbe64fSgw 		error = 0;
2300e7cbe64fSgw 
2301e7cbe64fSgw 	nvlist_free(conf);
2302e7cbe64fSgw 	return (error);
2303e7cbe64fSgw 
2304e7cbe64fSgw msg_out:
2305e7cbe64fSgw 	cmn_err(CE_NOTE, "\n\n"
2306e7cbe64fSgw 	    "  ***************************************************  \n"
2307e7cbe64fSgw 	    "  *  This device is not bootable!                   *  \n"
2308e7cbe64fSgw 	    "  *  It is either offlined or detached or faulted.  *  \n"
2309e7cbe64fSgw 	    "  *  Please try to boot from a different device.    *  \n"
2310e7cbe64fSgw 	    "  ***************************************************  \n\n");
2311e7cbe64fSgw 
2312e7cbe64fSgw 	return (error);
2313e7cbe64fSgw }
2314e7cbe64fSgw #endif
2315e7cbe64fSgw 
2316e7cbe64fSgw /*
2317e7cbe64fSgw  * Import a non-root pool into the system.
2318e7cbe64fSgw  */
2319e7cbe64fSgw int
2320e7cbe64fSgw spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2321e7cbe64fSgw {
2322c5904d13Seschrock 	return (spa_import_common(pool, config, props, B_FALSE, B_FALSE));
2323e7cbe64fSgw }
2324e7cbe64fSgw 
2325c5904d13Seschrock int
2326c5904d13Seschrock spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props)
2327c5904d13Seschrock {
2328c5904d13Seschrock 	return (spa_import_common(pool, config, props, B_FALSE, B_TRUE));
2329c5904d13Seschrock }
2330c5904d13Seschrock 
2331c5904d13Seschrock 
2332fa9e4066Sahrens /*
2333fa9e4066Sahrens  * This (illegal) pool name is used when temporarily importing a spa_t in order
2334fa9e4066Sahrens  * to get the vdev stats associated with the imported devices.
2335fa9e4066Sahrens  */
2336fa9e4066Sahrens #define	TRYIMPORT_NAME	"$import"
2337fa9e4066Sahrens 
/*
 * Probe the pool described by 'tryconfig' without actually importing it:
 * load it under the reserved name TRYIMPORT_NAME, generate a current
 * config (including spares, l2cache and, if set, the bootfs name), then
 * tear the temporary spa_t back down.  Returns NULL if 'tryconfig' lacks
 * a pool name or state, otherwise the generated config (caller frees).
 */
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = NULL;
	char *poolname;
	spa_t *spa;
	uint64_t state;

	/* The supplied config must at least name the pool ... */
	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (NULL);

	/* ... and carry its last-known pool state. */
	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (NULL);

	/*
	 * Create and initialize the spa structure.
	 */
	mutex_enter(&spa_namespace_lock);
	spa = spa_add(TRYIMPORT_NAME, NULL);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

	/*
	 * If 'tryconfig' was at least parsable, return the current config.
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
		/*
		 * Report the caller's pool name and state rather than the
		 * temporary TRYIMPORT_NAME identity used while loading.
		 */
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
		    spa->spa_uberblock.ub_timestamp) == 0);

		/*
		 * If the bootfs property exists on this pool then we
		 * copy it out so that external consumers can tell which
		 * pools are bootable.
		 */
		if (spa->spa_bootfs) {
			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

			/*
			 * We have to play games with the name since the
			 * pool was opened as TRYIMPORT_NAME.
			 */
			if (dsl_dsobj_to_dsname(spa->spa_name,
			    spa->spa_bootfs, tmpname) == 0) {
				char *cp;
				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

				cp = strchr(tmpname, '/');
				if (cp == NULL) {
					(void) strlcpy(dsname, tmpname,
					    MAXPATHLEN);
				} else {
					/*
					 * Rewrite "<tmpname pool>/<ds>" as
					 * "<real pool>/<ds>".
					 */
					(void) snprintf(dsname, MAXPATHLEN,
					    "%s/%s", poolname, ++cp);
				}
				VERIFY(nvlist_add_string(config,
				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
				kmem_free(dsname, MAXPATHLEN);
			}
			kmem_free(tmpname, MAXPATHLEN);
		}

		/*
		 * Add the list of hot spares and level 2 cache devices.
		 */
		spa_add_spares(spa, config);
		spa_add_l2cache(spa, config);
	}

	/* Dispose of the temporary spa_t in all cases. */
	spa_unload(spa);
	spa_deactivate(spa);
	spa_remove(spa);
	mutex_exit(&spa_namespace_lock);

	return (config);
}
2426fa9e4066Sahrens 
2427fa9e4066Sahrens /*
2428fa9e4066Sahrens  * Pool export/destroy
2429fa9e4066Sahrens  *
2430fa9e4066Sahrens  * The act of destroying or exporting a pool is very simple.  We make sure there
2431fa9e4066Sahrens  * is no more pending I/O and any references to the pool are gone.  Then, we
2432fa9e4066Sahrens  * update the pool state and sync all the labels to disk, removing the
2433fa9e4066Sahrens  * configuration from the cache afterwards.
2434fa9e4066Sahrens  */
/*
 * Common routine behind spa_export(), spa_destroy() and spa_reset():
 * quiesce the named pool, verify it has no active references, move it
 * to 'new_state' (POOL_STATE_EXPORTED, POOL_STATE_DESTROYED, or
 * POOL_STATE_UNINITIALIZED for a reset), and for the non-reset cases
 * sync the final config and remove the pool from the namespace.
 *
 * If 'oldconfig' is non-NULL, *oldconfig receives a duplicate of the
 * pool's final config (caller must free) when one is available.
 *
 * Returns 0 on success, EROFS if ZFS is read-only, ENOENT if no such
 * pool, or EBUSY if the pool still has active references.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
{
	spa_t *spa;

	if (oldconfig)
		*oldconfig = NULL;

	/* Exporting modifies on-disk state, so write access is required. */
	if (!(spa_mode & FWRITE))
		return (EROFS);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}

	/*
	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
	 * reacquire the namespace lock, and see if we can export.
	 */
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
	spa_async_suspend(spa);
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);

	/*
	 * The pool will be in core if it's openable,
	 * in which case we can modify its state.
	 */
	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
		/*
		 * Objsets may be open only because they're dirty, so we
		 * have to force it to sync before checking spa_refcnt.
		 */
		spa_scrub_suspend(spa);
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * A pool cannot be exported or destroyed if there are active
		 * references.  If we are resetting a pool, allow references by
		 * fault injection handlers.
		 */
		if (!spa_refcount_zero(spa) ||
		    (spa->spa_inject_ref != 0 &&
		    new_state != POOL_STATE_UNINITIALIZED)) {
			spa_scrub_resume(spa);
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EBUSY);
		}

		spa_scrub_resume(spa);
		/* Stop any in-progress scrub before tearing the pool down. */
		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);

		/*
		 * We want this to be reflected on every label,
		 * so mark them all dirty.  spa_unload() will do the
		 * final sync that pushes these changes out.
		 */
		if (new_state != POOL_STATE_UNINITIALIZED) {
			spa_config_enter(spa, RW_WRITER, FTAG);
			spa->spa_state = new_state;
			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
			vdev_config_dirty(spa->spa_root_vdev);
			spa_config_exit(spa, FTAG);
		}
	}

	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);

	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
		spa_unload(spa);
		spa_deactivate(spa);
	}

	/* Hand the final config back to the caller if requested. */
	if (oldconfig && spa->spa_config)
		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

	if (new_state != POOL_STATE_UNINITIALIZED) {
		/* Drop the pool from the config cache and the namespace. */
		spa_config_sync(spa, B_TRUE, B_TRUE);
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);

	return (0);
}
2523fa9e4066Sahrens 
2524fa9e4066Sahrens /*
2525fa9e4066Sahrens  * Destroy a storage pool.
2526fa9e4066Sahrens  */
2527fa9e4066Sahrens int
2528fa9e4066Sahrens spa_destroy(char *pool)
2529fa9e4066Sahrens {
253044cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
2531fa9e4066Sahrens }
2532fa9e4066Sahrens 
2533fa9e4066Sahrens /*
2534fa9e4066Sahrens  * Export a storage pool.
2535fa9e4066Sahrens  */
2536fa9e4066Sahrens int
253744cd46caSbillm spa_export(char *pool, nvlist_t **oldconfig)
2538fa9e4066Sahrens {
253944cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
2540fa9e4066Sahrens }
2541fa9e4066Sahrens 
2542ea8dc4b6Seschrock /*
2543ea8dc4b6Seschrock  * Similar to spa_export(), this unloads the spa_t without actually removing it
2544ea8dc4b6Seschrock  * from the namespace in any way.
2545ea8dc4b6Seschrock  */
2546ea8dc4b6Seschrock int
2547ea8dc4b6Seschrock spa_reset(char *pool)
2548ea8dc4b6Seschrock {
254944cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
2550ea8dc4b6Seschrock }
2551ea8dc4b6Seschrock 
2552ea8dc4b6Seschrock 
2553fa9e4066Sahrens /*
2554fa9e4066Sahrens  * ==========================================================================
2555fa9e4066Sahrens  * Device manipulation
2556fa9e4066Sahrens  * ==========================================================================
2557fa9e4066Sahrens  */
2558fa9e4066Sahrens 
/*
 * Add a device to a storage pool.  'nvroot' may describe new top-level
 * vdevs and/or hot spares and level 2 cache devices.  Serialized via
 * spa_vdev_enter()/spa_vdev_exit().
 */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
	uint64_t txg;
	int c, error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *tvd;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	txg = spa_vdev_enter(spa);

	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, error));

	/*
	 * Expose the in-flight vdev tree while we validate and create the
	 * new devices; cleared again on every exit path below.
	 */
	spa->spa_pending_vdev = vd;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
	    &nspares) != 0)
		nspares = 0;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
	    &nl2cache) != 0)
		nl2cache = 0;

	/* The request must add at least one vdev, spare, or cache device. */
	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) {
		spa->spa_pending_vdev = NULL;
		return (spa_vdev_exit(spa, vd, txg, EINVAL));
	}

	if (vd->vdev_children != 0) {
		if ((error = vdev_create(vd, txg, B_FALSE)) != 0) {
			spa->spa_pending_vdev = NULL;
			return (spa_vdev_exit(spa, vd, txg, error));
		}
	}

	/*
	 * We must validate the spares and l2cache devices after checking the
	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
	 */
	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) {
		spa->spa_pending_vdev = NULL;
		return (spa_vdev_exit(spa, vd, txg, error));
	}

	spa->spa_pending_vdev = NULL;

	/*
	 * Transfer each new top-level vdev from vd to rvd.
	 */
	for (c = 0; c < vd->vdev_children; c++) {
		tvd = vd->vdev_child[c];
		vdev_remove_child(vd, tvd);
		tvd->vdev_id = rvd->vdev_children;
		vdev_add_child(rvd, tvd);
		vdev_config_dirty(tvd);
	}

	if (nspares != 0) {
		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
		    ZPOOL_CONFIG_SPARES);
		spa_load_spares(spa);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	if (nl2cache != 0) {
		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
		    ZPOOL_CONFIG_L2CACHE);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	/*
	 * We have to be careful when adding new vdevs to an existing pool.
	 * If other threads start allocating from these vdevs before we
	 * sync the config cache, and we lose power, then upon reboot we may
	 * fail to open the pool because there are DVAs that the config cache
	 * can't translate.  Therefore, we first add the vdevs without
	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
	 * and then let spa_config_update() initialize the new metaslabs.
	 *
	 * spa_load() checks for added-but-not-initialized vdevs, so that
	 * if we lose power at any point in this sequence, the remaining
	 * steps will be completed the next time we load the pool.
	 */
	(void) spa_vdev_exit(spa, vd, txg, 0);

	mutex_enter(&spa_namespace_lock);
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
	mutex_exit(&spa_namespace_lock);

	return (0);
}
2657fa9e4066Sahrens 
2658fa9e4066Sahrens /*
2659fa9e4066Sahrens  * Attach a device to a mirror.  The arguments are the path to any device
2660fa9e4066Sahrens  * in the mirror, and the nvroot for the new device.  If the path specifies
2661fa9e4066Sahrens  * a device that is not mirrored, we automatically insert the mirror vdev.
2662fa9e4066Sahrens  *
2663fa9e4066Sahrens  * If 'replacing' is specified, the new device is intended to replace the
2664fa9e4066Sahrens  * existing device; in this case the two devices are made into their own
26653d7072f8Seschrock  * mirror using the 'replacing' vdev, which is functionally identical to
2666fa9e4066Sahrens  * the mirror vdev (it actually reuses all the same ops) but has a few
2667fa9e4066Sahrens  * extra rules: you can't attach to it after it's been created, and upon
2668fa9e4066Sahrens  * completion of resilvering, the first disk (the one being replaced)
2669fa9e4066Sahrens  * is automatically detached.
2670fa9e4066Sahrens  */
2671fa9e4066Sahrens int
2672ea8dc4b6Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
2673fa9e4066Sahrens {
2674fa9e4066Sahrens 	uint64_t txg, open_txg;
2675fa9e4066Sahrens 	int error;
2676fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2677fa9e4066Sahrens 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
267899653d4eSeschrock 	vdev_ops_t *pvops;
26798654d025Sperrin 	int is_log;
2680fa9e4066Sahrens 
2681fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
2682fa9e4066Sahrens 
2683c5904d13Seschrock 	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
2684fa9e4066Sahrens 
2685fa9e4066Sahrens 	if (oldvd == NULL)
2686fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
2687fa9e4066Sahrens 
26880e34b6a7Sbonwick 	if (!oldvd->vdev_ops->vdev_op_leaf)
26890e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
26900e34b6a7Sbonwick 
2691fa9e4066Sahrens 	pvd = oldvd->vdev_parent;
2692fa9e4066Sahrens 
269399653d4eSeschrock 	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
26943d7072f8Seschrock 	    VDEV_ALLOC_ADD)) != 0)
26953d7072f8Seschrock 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
26963d7072f8Seschrock 
26973d7072f8Seschrock 	if (newrootvd->vdev_children != 1)
2698fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2699fa9e4066Sahrens 
2700fa9e4066Sahrens 	newvd = newrootvd->vdev_child[0];
2701fa9e4066Sahrens 
2702fa9e4066Sahrens 	if (!newvd->vdev_ops->vdev_op_leaf)
2703fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2704fa9e4066Sahrens 
270599653d4eSeschrock 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
2706fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, error));
2707fa9e4066Sahrens 
27088654d025Sperrin 	/*
27098654d025Sperrin 	 * Spares can't replace logs
27108654d025Sperrin 	 */
27118654d025Sperrin 	is_log = oldvd->vdev_islog;
27128654d025Sperrin 	if (is_log && newvd->vdev_isspare)
27138654d025Sperrin 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
27148654d025Sperrin 
271599653d4eSeschrock 	if (!replacing) {
271699653d4eSeschrock 		/*
271799653d4eSeschrock 		 * For attach, the only allowable parent is a mirror or the root
271899653d4eSeschrock 		 * vdev.
271999653d4eSeschrock 		 */
272099653d4eSeschrock 		if (pvd->vdev_ops != &vdev_mirror_ops &&
272199653d4eSeschrock 		    pvd->vdev_ops != &vdev_root_ops)
272299653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
272399653d4eSeschrock 
272499653d4eSeschrock 		pvops = &vdev_mirror_ops;
272599653d4eSeschrock 	} else {
272699653d4eSeschrock 		/*
272799653d4eSeschrock 		 * Active hot spares can only be replaced by inactive hot
272899653d4eSeschrock 		 * spares.
272999653d4eSeschrock 		 */
273099653d4eSeschrock 		if (pvd->vdev_ops == &vdev_spare_ops &&
273199653d4eSeschrock 		    pvd->vdev_child[1] == oldvd &&
273299653d4eSeschrock 		    !spa_has_spare(spa, newvd->vdev_guid))
273399653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
273499653d4eSeschrock 
273599653d4eSeschrock 		/*
273699653d4eSeschrock 		 * If the source is a hot spare, and the parent isn't already a
273799653d4eSeschrock 		 * spare, then we want to create a new hot spare.  Otherwise, we
273839c23413Seschrock 		 * want to create a replacing vdev.  The user is not allowed to
273939c23413Seschrock 		 * attach to a spared vdev child unless the 'isspare' state is
274039c23413Seschrock 		 * the same (spare replaces spare, non-spare replaces
274139c23413Seschrock 		 * non-spare).
274299653d4eSeschrock 		 */
274399653d4eSeschrock 		if (pvd->vdev_ops == &vdev_replacing_ops)
274499653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
274539c23413Seschrock 		else if (pvd->vdev_ops == &vdev_spare_ops &&
274639c23413Seschrock 		    newvd->vdev_isspare != oldvd->vdev_isspare)
274739c23413Seschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
274899653d4eSeschrock 		else if (pvd->vdev_ops != &vdev_spare_ops &&
274999653d4eSeschrock 		    newvd->vdev_isspare)
275099653d4eSeschrock 			pvops = &vdev_spare_ops;
275199653d4eSeschrock 		else
275299653d4eSeschrock 			pvops = &vdev_replacing_ops;
275399653d4eSeschrock 	}
275499653d4eSeschrock 
27552a79c5feSlling 	/*
27562a79c5feSlling 	 * Compare the new device size with the replaceable/attachable
27572a79c5feSlling 	 * device size.
27582a79c5feSlling 	 */
27592a79c5feSlling 	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
2760fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
2761fa9e4066Sahrens 
2762ecc2d604Sbonwick 	/*
2763ecc2d604Sbonwick 	 * The new device cannot have a higher alignment requirement
2764ecc2d604Sbonwick 	 * than the top-level vdev.
2765ecc2d604Sbonwick 	 */
2766ecc2d604Sbonwick 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
2767fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
2768fa9e4066Sahrens 
2769fa9e4066Sahrens 	/*
2770fa9e4066Sahrens 	 * If this is an in-place replacement, update oldvd's path and devid
2771fa9e4066Sahrens 	 * to make it distinguishable from newvd, and unopenable from now on.
2772fa9e4066Sahrens 	 */
2773fa9e4066Sahrens 	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
2774fa9e4066Sahrens 		spa_strfree(oldvd->vdev_path);
2775fa9e4066Sahrens 		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
2776fa9e4066Sahrens 		    KM_SLEEP);
2777fa9e4066Sahrens 		(void) sprintf(oldvd->vdev_path, "%s/%s",
2778fa9e4066Sahrens 		    newvd->vdev_path, "old");
2779fa9e4066Sahrens 		if (oldvd->vdev_devid != NULL) {
2780fa9e4066Sahrens 			spa_strfree(oldvd->vdev_devid);
2781fa9e4066Sahrens 			oldvd->vdev_devid = NULL;
2782fa9e4066Sahrens 		}
2783fa9e4066Sahrens 	}
2784fa9e4066Sahrens 
2785fa9e4066Sahrens 	/*
278699653d4eSeschrock 	 * If the parent is not a mirror, or if we're replacing, insert the new
278799653d4eSeschrock 	 * mirror/replacing/spare vdev above oldvd.
2788fa9e4066Sahrens 	 */
2789fa9e4066Sahrens 	if (pvd->vdev_ops != pvops)
2790fa9e4066Sahrens 		pvd = vdev_add_parent(oldvd, pvops);
2791fa9e4066Sahrens 
2792fa9e4066Sahrens 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
2793fa9e4066Sahrens 	ASSERT(pvd->vdev_ops == pvops);
2794fa9e4066Sahrens 	ASSERT(oldvd->vdev_parent == pvd);
2795fa9e4066Sahrens 
2796fa9e4066Sahrens 	/*
2797fa9e4066Sahrens 	 * Extract the new device from its root and add it to pvd.
2798fa9e4066Sahrens 	 */
2799fa9e4066Sahrens 	vdev_remove_child(newrootvd, newvd);
2800fa9e4066Sahrens 	newvd->vdev_id = pvd->vdev_children;
2801fa9e4066Sahrens 	vdev_add_child(pvd, newvd);
2802fa9e4066Sahrens 
2803ea8dc4b6Seschrock 	/*
2804ea8dc4b6Seschrock 	 * If newvd is smaller than oldvd, but larger than its rsize,
2805ea8dc4b6Seschrock 	 * the addition of newvd may have decreased our parent's asize.
2806ea8dc4b6Seschrock 	 */
2807ea8dc4b6Seschrock 	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
2808ea8dc4b6Seschrock 
2809fa9e4066Sahrens 	tvd = newvd->vdev_top;
2810fa9e4066Sahrens 	ASSERT(pvd->vdev_top == tvd);
2811fa9e4066Sahrens 	ASSERT(tvd->vdev_parent == rvd);
2812fa9e4066Sahrens 
2813fa9e4066Sahrens 	vdev_config_dirty(tvd);
2814fa9e4066Sahrens 
2815fa9e4066Sahrens 	/*
2816fa9e4066Sahrens 	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
2817fa9e4066Sahrens 	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
2818fa9e4066Sahrens 	 */
2819fa9e4066Sahrens 	open_txg = txg + TXG_CONCURRENT_STATES - 1;
2820fa9e4066Sahrens 
2821fa9e4066Sahrens 	mutex_enter(&newvd->vdev_dtl_lock);
2822fa9e4066Sahrens 	space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
2823fa9e4066Sahrens 	    open_txg - TXG_INITIAL + 1);
2824fa9e4066Sahrens 	mutex_exit(&newvd->vdev_dtl_lock);
2825fa9e4066Sahrens 
282639c23413Seschrock 	if (newvd->vdev_isspare)
282739c23413Seschrock 		spa_spare_activate(newvd);
2828ea8dc4b6Seschrock 
2829fa9e4066Sahrens 	/*
2830fa9e4066Sahrens 	 * Mark newvd's DTL dirty in this txg.
2831fa9e4066Sahrens 	 */
2832ecc2d604Sbonwick 	vdev_dirty(tvd, VDD_DTL, newvd, txg);
2833fa9e4066Sahrens 
2834fa9e4066Sahrens 	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
2835fa9e4066Sahrens 
2836fa9e4066Sahrens 	/*
28373d7072f8Seschrock 	 * Kick off a resilver to update newvd.  We need to grab the namespace
28383d7072f8Seschrock 	 * lock because spa_scrub() needs to post a sysevent with the pool name.
2839fa9e4066Sahrens 	 */
28403d7072f8Seschrock 	mutex_enter(&spa_namespace_lock);
2841fa9e4066Sahrens 	VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
28423d7072f8Seschrock 	mutex_exit(&spa_namespace_lock);
2843fa9e4066Sahrens 
2844fa9e4066Sahrens 	return (0);
2845fa9e4066Sahrens }
2846fa9e4066Sahrens 
2847fa9e4066Sahrens /*
2848fa9e4066Sahrens  * Detach a device from a mirror or replacing vdev.
2849fa9e4066Sahrens  * If 'replace_done' is specified, only detach if the parent
2850fa9e4066Sahrens  * is a replacing vdev.
 *
 * Returns 0 on success or an errno (ENODEV, ENOTSUP, EBUSY) via
 * spa_vdev_exit().  NOTE(review): on the success path vd is handed to
 * spa_vdev_exit() -- the comment below implies vd is freed in syncing
 * context, so it must not be referenced after that call; confirm.
2851fa9e4066Sahrens  */
2852fa9e4066Sahrens int
2853ea8dc4b6Seschrock spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
2854fa9e4066Sahrens {
2855fa9e4066Sahrens 	uint64_t txg;
2856fa9e4066Sahrens 	int c, t, error;
2857fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2858fa9e4066Sahrens 	vdev_t *vd, *pvd, *cvd, *tvd;
285999653d4eSeschrock 	boolean_t unspare = B_FALSE;
286099653d4eSeschrock 	uint64_t unspare_guid;
2861bf82a41bSeschrock 	size_t len;
2862fa9e4066Sahrens 
2863fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
2864fa9e4066Sahrens 
2865c5904d13Seschrock 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
2866fa9e4066Sahrens 
2867fa9e4066Sahrens 	if (vd == NULL)
2868fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
2869fa9e4066Sahrens 
	/* Only leaf vdevs (actual disks/files) can be detached. */
28700e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
28710e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
28720e34b6a7Sbonwick 
2873fa9e4066Sahrens 	pvd = vd->vdev_parent;
2874fa9e4066Sahrens 
2875fa9e4066Sahrens 	/*
2876fa9e4066Sahrens 	 * If replace_done is specified, only remove this device if it's
287799653d4eSeschrock 	 * the first child of a replacing vdev.  For the 'spare' vdev, either
287899653d4eSeschrock 	 * disk can be removed.
287999653d4eSeschrock 	 */
288099653d4eSeschrock 	if (replace_done) {
288199653d4eSeschrock 		if (pvd->vdev_ops == &vdev_replacing_ops) {
288299653d4eSeschrock 			if (vd->vdev_id != 0)
288399653d4eSeschrock 				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
288499653d4eSeschrock 		} else if (pvd->vdev_ops != &vdev_spare_ops) {
288599653d4eSeschrock 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
288699653d4eSeschrock 		}
288799653d4eSeschrock 	}
288899653d4eSeschrock 
	/* A spare parent implies the pool version already supports spares. */
288999653d4eSeschrock 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
2890e7437265Sahrens 	    spa_version(spa) >= SPA_VERSION_SPARES);
2891fa9e4066Sahrens 
2892fa9e4066Sahrens 	/*
289399653d4eSeschrock 	 * Only mirror, replacing, and spare vdevs support detach.
2894fa9e4066Sahrens 	 */
2895fa9e4066Sahrens 	if (pvd->vdev_ops != &vdev_replacing_ops &&
289699653d4eSeschrock 	    pvd->vdev_ops != &vdev_mirror_ops &&
289799653d4eSeschrock 	    pvd->vdev_ops != &vdev_spare_ops)
2898fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
2899fa9e4066Sahrens 
2900fa9e4066Sahrens 	/*
2901fa9e4066Sahrens 	 * If there's only one replica, you can't detach it.
2902fa9e4066Sahrens 	 */
2903fa9e4066Sahrens 	if (pvd->vdev_children <= 1)
2904fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
2905fa9e4066Sahrens 
2906fa9e4066Sahrens 	/*
2907fa9e4066Sahrens 	 * If all siblings have non-empty DTLs, this device may have the only
2908fa9e4066Sahrens 	 * valid copy of the data, which means we cannot safely detach it.
2909fa9e4066Sahrens 	 *
2910fa9e4066Sahrens 	 * XXX -- as in the vdev_offline() case, we really want a more
2911fa9e4066Sahrens 	 * precise DTL check.
2912fa9e4066Sahrens 	 */
2913fa9e4066Sahrens 	for (c = 0; c < pvd->vdev_children; c++) {
2914fa9e4066Sahrens 		uint64_t dirty;
2915fa9e4066Sahrens 
2916fa9e4066Sahrens 		cvd = pvd->vdev_child[c];
2917fa9e4066Sahrens 		if (cvd == vd)
2918fa9e4066Sahrens 			continue;
2919fa9e4066Sahrens 		if (vdev_is_dead(cvd))
2920fa9e4066Sahrens 			continue;
2921fa9e4066Sahrens 		mutex_enter(&cvd->vdev_dtl_lock);
2922fa9e4066Sahrens 		dirty = cvd->vdev_dtl_map.sm_space |
2923fa9e4066Sahrens 		    cvd->vdev_dtl_scrub.sm_space;
2924fa9e4066Sahrens 		mutex_exit(&cvd->vdev_dtl_lock);
2925fa9e4066Sahrens 		if (!dirty)
2926fa9e4066Sahrens 			break;
2927fa9e4066Sahrens 	}
292899653d4eSeschrock 
292999653d4eSeschrock 	/*
293099653d4eSeschrock 	 * If we are a replacing or spare vdev, then we can always detach the
293199653d4eSeschrock 	 * latter child, as that is how one cancels the operation.
293299653d4eSeschrock 	 */
	/* c == pvd->vdev_children iff the loop above found no clean sibling. */
293399653d4eSeschrock 	if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) &&
293499653d4eSeschrock 	    c == pvd->vdev_children)
2935fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
2936fa9e4066Sahrens 
2937bf82a41bSeschrock 	/*
2938bf82a41bSeschrock 	 * If we are detaching the second disk from a replacing vdev, then
2939bf82a41bSeschrock 	 * check to see if we changed the original vdev's path to have "/old"
2940bf82a41bSeschrock 	 * at the end in spa_vdev_attach().  If so, undo that change now.
2941bf82a41bSeschrock 	 */
2942bf82a41bSeschrock 	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
2943bf82a41bSeschrock 	    pvd->vdev_child[0]->vdev_path != NULL &&
2944bf82a41bSeschrock 	    pvd->vdev_child[1]->vdev_path != NULL) {
2945bf82a41bSeschrock 		ASSERT(pvd->vdev_child[1] == vd);
2946bf82a41bSeschrock 		cvd = pvd->vdev_child[0];
2947bf82a41bSeschrock 		len = strlen(vd->vdev_path);
2948bf82a41bSeschrock 		if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
2949bf82a41bSeschrock 		    strcmp(cvd->vdev_path + len, "/old") == 0) {
2950bf82a41bSeschrock 			spa_strfree(cvd->vdev_path);
2951bf82a41bSeschrock 			cvd->vdev_path = spa_strdup(vd->vdev_path);
2952bf82a41bSeschrock 		}
2953bf82a41bSeschrock 	}
2954bf82a41bSeschrock 
295599653d4eSeschrock 	/*
295699653d4eSeschrock 	 * If we are detaching the original disk from a spare, then it implies
295799653d4eSeschrock 	 * that the spare should become a real disk, and be removed from the
295899653d4eSeschrock 	 * active spare list for the pool.
295999653d4eSeschrock 	 */
296099653d4eSeschrock 	if (pvd->vdev_ops == &vdev_spare_ops &&
296199653d4eSeschrock 	    vd->vdev_id == 0)
296299653d4eSeschrock 		unspare = B_TRUE;
296399653d4eSeschrock 
2964fa9e4066Sahrens 	/*
2965fa9e4066Sahrens 	 * Erase the disk labels so the disk can be used for other things.
2966fa9e4066Sahrens 	 * This must be done after all other error cases are handled,
2967fa9e4066Sahrens 	 * but before we disembowel vd (so we can still do I/O to it).
2968fa9e4066Sahrens 	 * But if we can't do it, don't treat the error as fatal --
2969fa9e4066Sahrens 	 * it may be that the unwritability of the disk is the reason
2970fa9e4066Sahrens 	 * it's being detached!
2971fa9e4066Sahrens 	 */
297239c23413Seschrock 	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
2973fa9e4066Sahrens 
2974fa9e4066Sahrens 	/*
2975fa9e4066Sahrens 	 * Remove vd from its parent and compact the parent's children.
2976fa9e4066Sahrens 	 */
2977fa9e4066Sahrens 	vdev_remove_child(pvd, vd);
2978fa9e4066Sahrens 	vdev_compact_children(pvd);
2979fa9e4066Sahrens 
2980fa9e4066Sahrens 	/*
2981fa9e4066Sahrens 	 * Remember one of the remaining children so we can get tvd below.
2982fa9e4066Sahrens 	 */
2983fa9e4066Sahrens 	cvd = pvd->vdev_child[0];
2984fa9e4066Sahrens 
298599653d4eSeschrock 	/*
298699653d4eSeschrock 	 * If we need to remove the remaining child from the list of hot spares,
298799653d4eSeschrock 	 * do it now, marking the vdev as no longer a spare in the process.  We
298899653d4eSeschrock 	 * must do this before vdev_remove_parent(), because that can change the
298999653d4eSeschrock 	 * GUID if it creates a new toplevel GUID.
299099653d4eSeschrock 	 */
299199653d4eSeschrock 	if (unspare) {
299299653d4eSeschrock 		ASSERT(cvd->vdev_isspare);
299339c23413Seschrock 		spa_spare_remove(cvd);
299499653d4eSeschrock 		unspare_guid = cvd->vdev_guid;
299599653d4eSeschrock 	}
299699653d4eSeschrock 
2997fa9e4066Sahrens 	/*
2998fa9e4066Sahrens 	 * If the parent mirror/replacing vdev only has one child,
2999fa9e4066Sahrens 	 * the parent is no longer needed.  Remove it from the tree.
3000fa9e4066Sahrens 	 */
3001fa9e4066Sahrens 	if (pvd->vdev_children == 1)
3002fa9e4066Sahrens 		vdev_remove_parent(cvd);
3003fa9e4066Sahrens 
3004fa9e4066Sahrens 	/*
3005fa9e4066Sahrens 	 * We don't set tvd until now because the parent we just removed
3006fa9e4066Sahrens 	 * may have been the previous top-level vdev.
3007fa9e4066Sahrens 	 */
3008fa9e4066Sahrens 	tvd = cvd->vdev_top;
3009fa9e4066Sahrens 	ASSERT(tvd->vdev_parent == rvd);
3010fa9e4066Sahrens 
3011fa9e4066Sahrens 	/*
301239c23413Seschrock 	 * Reevaluate the parent vdev state.
3013fa9e4066Sahrens 	 */
30143d7072f8Seschrock 	vdev_propagate_state(cvd);
3015fa9e4066Sahrens 
3016fa9e4066Sahrens 	/*
301739c23413Seschrock 	 * If the device we just detached was smaller than the others, it may be
301839c23413Seschrock 	 * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
301939c23413Seschrock 	 * can't fail because the existing metaslabs are already in core, so
302039c23413Seschrock 	 * there's nothing to read from disk.
3021fa9e4066Sahrens 	 */
3022ecc2d604Sbonwick 	VERIFY(vdev_metaslab_init(tvd, txg) == 0);
3023fa9e4066Sahrens 
3024fa9e4066Sahrens 	vdev_config_dirty(tvd);
3025fa9e4066Sahrens 
3026fa9e4066Sahrens 	/*
302739c23413Seschrock 	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
302839c23413Seschrock 	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
302939c23413Seschrock 	 * But first make sure we're not on any *other* txg's DTL list, to
303039c23413Seschrock 	 * prevent vd from being accessed after it's freed.
3031fa9e4066Sahrens 	 */
3032fa9e4066Sahrens 	for (t = 0; t < TXG_SIZE; t++)
3033fa9e4066Sahrens 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
3034ecc2d604Sbonwick 	vd->vdev_detached = B_TRUE;
3035ecc2d604Sbonwick 	vdev_dirty(tvd, VDD_DTL, vd, txg);
3036fa9e4066Sahrens 
30373d7072f8Seschrock 	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
30383d7072f8Seschrock 
303999653d4eSeschrock 	error = spa_vdev_exit(spa, vd, txg, 0);
304099653d4eSeschrock 
304199653d4eSeschrock 	/*
304239c23413Seschrock 	 * If this was the removal of the original device in a hot spare vdev,
304339c23413Seschrock 	 * then we want to go through and remove the device from the hot spare
304439c23413Seschrock 	 * list of every other pool.
304599653d4eSeschrock 	 */
304699653d4eSeschrock 	if (unspare) {
304799653d4eSeschrock 		spa = NULL;
304899653d4eSeschrock 		mutex_enter(&spa_namespace_lock);
304999653d4eSeschrock 		while ((spa = spa_next(spa)) != NULL) {
305099653d4eSeschrock 			if (spa->spa_state != POOL_STATE_ACTIVE)
305199653d4eSeschrock 				continue;
305299653d4eSeschrock 
305399653d4eSeschrock 			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
305499653d4eSeschrock 		}
305599653d4eSeschrock 		mutex_exit(&spa_namespace_lock);
305699653d4eSeschrock 	}
305799653d4eSeschrock 
305899653d4eSeschrock 	return (error);
305999653d4eSeschrock }
306099653d4eSeschrock 
306199653d4eSeschrock /*
3062fa94a07fSbrendan  * Remove a spares vdev from the nvlist config.
 *
 * 'guid' names the spare to remove from sav->sav_config; 'vd' is non-NULL
 * when the device is currently open in this pool.  Outcomes:
 *   - not in config, not open:             ENOENT
 *   - not in config but open:              ENOTSUP
 *   - in config and open without 'unspare': EBUSY
 *   - otherwise the spare's entry is dropped from the config and 0 returned.
306399653d4eSeschrock  */
3064fa94a07fSbrendan static int
3065fa94a07fSbrendan spa_remove_spares(spa_aux_vdev_t *sav, uint64_t guid, boolean_t unspare,
3066fa94a07fSbrendan     nvlist_t **spares, int nspares, vdev_t *vd)
306799653d4eSeschrock {
3068fa94a07fSbrendan 	nvlist_t *nv, **newspares;
3069fa94a07fSbrendan 	int i, j;
307099653d4eSeschrock 
	/* Find this guid's entry in the current spares nvlist array. */
307199653d4eSeschrock 	nv = NULL;
3072fa94a07fSbrendan 	for (i = 0; i < nspares; i++) {
3073fa94a07fSbrendan 		uint64_t theguid;
307499653d4eSeschrock 
3075fa94a07fSbrendan 		VERIFY(nvlist_lookup_uint64(spares[i],
3076fa94a07fSbrendan 		    ZPOOL_CONFIG_GUID, &theguid) == 0);
3077fa94a07fSbrendan 		if (theguid == guid) {
3078fa94a07fSbrendan 			nv = spares[i];
3079fa94a07fSbrendan 			break;
308099653d4eSeschrock 		}
308199653d4eSeschrock 	}
308299653d4eSeschrock 
308399653d4eSeschrock 	/*
3084fa94a07fSbrendan 	 * Only remove the hot spare if it's not currently in use in this pool.
308599653d4eSeschrock 	 */
3086fa94a07fSbrendan 	if (nv == NULL && vd == NULL)
3087fa94a07fSbrendan 		return (ENOENT);
308899653d4eSeschrock 
3089fa94a07fSbrendan 	if (nv == NULL && vd != NULL)
3090fa94a07fSbrendan 		return (ENOTSUP);
309199653d4eSeschrock 
3092fa94a07fSbrendan 	if (!unspare && nv != NULL && vd != NULL)
3093fa94a07fSbrendan 		return (EBUSY);
309499653d4eSeschrock 
	/* Build a copy of the spares array with the matching entry omitted. */
309599653d4eSeschrock 	if (nspares == 1) {
309699653d4eSeschrock 		newspares = NULL;
309799653d4eSeschrock 	} else {
309899653d4eSeschrock 		newspares = kmem_alloc((nspares - 1) * sizeof (void *),
309999653d4eSeschrock 		    KM_SLEEP);
310099653d4eSeschrock 		for (i = 0, j = 0; i < nspares; i++) {
310199653d4eSeschrock 			if (spares[i] != nv)
310299653d4eSeschrock 				VERIFY(nvlist_dup(spares[i],
310399653d4eSeschrock 				    &newspares[j++], KM_SLEEP) == 0);
310499653d4eSeschrock 		}
310599653d4eSeschrock 	}
310699653d4eSeschrock 
3107fa94a07fSbrendan 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_SPARES,
310899653d4eSeschrock 	    DATA_TYPE_NVLIST_ARRAY) == 0);
3109fa94a07fSbrendan 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3110fa94a07fSbrendan 	    ZPOOL_CONFIG_SPARES, newspares, nspares - 1) == 0);
	/*
	 * NOTE(review): when nspares == 1 this frees a NULL array with size
	 * 0 -- presumably kmem_free(NULL, 0) is a no-op here; confirm.
	 */
311199653d4eSeschrock 	for (i = 0; i < nspares - 1; i++)
311299653d4eSeschrock 		nvlist_free(newspares[i]);
311399653d4eSeschrock 	kmem_free(newspares, (nspares - 1) * sizeof (void *));
3114fa94a07fSbrendan 
3115fa94a07fSbrendan 	return (0);
3116fa94a07fSbrendan }
3117fa94a07fSbrendan 
3118fa94a07fSbrendan /*
3119fa94a07fSbrendan  * Remove an l2cache vdev from the nvlist config.
 *
 * Mirrors spa_remove_spares(): returns ENOENT when 'guid' is neither in
 * the config nor open, ENOTSUP when open but not in the config, and 0
 * after dropping the matching entry from sav->sav_config.  Unlike the
 * spares case there is no EBUSY check -- an open l2cache device may be
 * removed directly.
3120fa94a07fSbrendan  */
3121fa94a07fSbrendan static int
3122fa94a07fSbrendan spa_remove_l2cache(spa_aux_vdev_t *sav, uint64_t guid, nvlist_t **l2cache,
3123fa94a07fSbrendan     int nl2cache, vdev_t *vd)
3124fa94a07fSbrendan {
3125fa94a07fSbrendan 	nvlist_t *nv, **newl2cache;
3126fa94a07fSbrendan 	int i, j;
3127fa94a07fSbrendan 
	/* Find this guid's entry in the current l2cache nvlist array. */
3128fa94a07fSbrendan 	nv = NULL;
3129fa94a07fSbrendan 	for (i = 0; i < nl2cache; i++) {
3130fa94a07fSbrendan 		uint64_t theguid;
3131fa94a07fSbrendan 
3132fa94a07fSbrendan 		VERIFY(nvlist_lookup_uint64(l2cache[i],
3133fa94a07fSbrendan 		    ZPOOL_CONFIG_GUID, &theguid) == 0);
3134fa94a07fSbrendan 		if (theguid == guid) {
3135fa94a07fSbrendan 			nv = l2cache[i];
3136fa94a07fSbrendan 			break;
3137fa94a07fSbrendan 		}
3138fa94a07fSbrendan 	}
3139fa94a07fSbrendan 
	/*
	 * Fall back to searching the in-core aux vdev array.
	 * NOTE(review): this indexes sav->sav_vdevs with the nvlist count
	 * nl2cache -- assumes the two arrays have the same length; confirm.
	 */
3140fa94a07fSbrendan 	if (vd == NULL) {
3141fa94a07fSbrendan 		for (i = 0; i < nl2cache; i++) {
3142fa94a07fSbrendan 			if (sav->sav_vdevs[i]->vdev_guid == guid) {
3143fa94a07fSbrendan 				vd = sav->sav_vdevs[i];
3144fa94a07fSbrendan 				break;
3145fa94a07fSbrendan 			}
3146fa94a07fSbrendan 		}
3147fa94a07fSbrendan 	}
3148fa94a07fSbrendan 
3149fa94a07fSbrendan 	if (nv == NULL && vd == NULL)
3150fa94a07fSbrendan 		return (ENOENT);
3151fa94a07fSbrendan 
3152fa94a07fSbrendan 	if (nv == NULL && vd != NULL)
3153fa94a07fSbrendan 		return (ENOTSUP);
3154fa94a07fSbrendan 
	/* Build a copy of the l2cache array with the matching entry omitted. */
3155fa94a07fSbrendan 	if (nl2cache == 1) {
3156fa94a07fSbrendan 		newl2cache = NULL;
3157fa94a07fSbrendan 	} else {
3158fa94a07fSbrendan 		newl2cache = kmem_alloc((nl2cache - 1) * sizeof (void *),
3159fa94a07fSbrendan 		    KM_SLEEP);
3160fa94a07fSbrendan 		for (i = 0, j = 0; i < nl2cache; i++) {
3161fa94a07fSbrendan 			if (l2cache[i] != nv)
3162fa94a07fSbrendan 				VERIFY(nvlist_dup(l2cache[i],
3163fa94a07fSbrendan 				    &newl2cache[j++], KM_SLEEP) == 0);
3164fa94a07fSbrendan 		}
3165fa94a07fSbrendan 	}
3166fa94a07fSbrendan 
3167fa94a07fSbrendan 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
3168fa94a07fSbrendan 	    DATA_TYPE_NVLIST_ARRAY) == 0);
3169fa94a07fSbrendan 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3170fa94a07fSbrendan 	    ZPOOL_CONFIG_L2CACHE, newl2cache, nl2cache - 1) == 0);
3171fa94a07fSbrendan 	for (i = 0; i < nl2cache - 1; i++)
3172fa94a07fSbrendan 		nvlist_free(newl2cache[i]);
3173fa94a07fSbrendan 	kmem_free(newl2cache, (nl2cache - 1) * sizeof (void *));
3174fa94a07fSbrendan 
3175fa94a07fSbrendan 	return (0);
3176fa94a07fSbrendan }
3177fa94a07fSbrendan 
3178fa94a07fSbrendan /*
3179fa94a07fSbrendan  * Remove a device from the pool.  Currently, this supports removing only hot
3180fa94a07fSbrendan  * spares and level 2 ARC devices.
 *
 * NOTE(review): if 'guid' matches neither a spare nor an l2cache device,
 * both branches are skipped and this returns 0 (error stays initialized
 * to 0) without removing anything -- confirm that silent success is the
 * intended contract for callers.
3181fa94a07fSbrendan  */
3182fa94a07fSbrendan int
3183fa94a07fSbrendan spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3184fa94a07fSbrendan {
3185fa94a07fSbrendan 	vdev_t *vd;
3186fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
3187fa94a07fSbrendan 	uint_t nspares, nl2cache;
3188fa94a07fSbrendan 	int error = 0;
3189fa94a07fSbrendan 
3190fa94a07fSbrendan 	spa_config_enter(spa, RW_WRITER, FTAG);
3191fa94a07fSbrendan 
	/* vd is NULL if the device isn't currently open in this pool. */
3192c5904d13Seschrock 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3193fa94a07fSbrendan 
3194fa94a07fSbrendan 	if (spa->spa_spares.sav_vdevs != NULL &&
3195fa94a07fSbrendan 	    spa_spare_exists(guid, NULL) &&
3196fa94a07fSbrendan 	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3197fa94a07fSbrendan 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
3198fa94a07fSbrendan 		if ((error = spa_remove_spares(&spa->spa_spares, guid, unspare,
3199fa94a07fSbrendan 		    spares, nspares, vd)) != 0)
3200fa94a07fSbrendan 			goto out;
3201fa94a07fSbrendan 		spa_load_spares(spa);
3202fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
3203fa94a07fSbrendan 		goto out;
3204fa94a07fSbrendan 	}
3205fa94a07fSbrendan 
3206fa94a07fSbrendan 	if (spa->spa_l2cache.sav_vdevs != NULL &&
3207fa94a07fSbrendan 	    spa_l2cache_exists(guid, NULL) &&
3208fa94a07fSbrendan 	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3209fa94a07fSbrendan 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) {
3210fa94a07fSbrendan 		if ((error = spa_remove_l2cache(&spa->spa_l2cache, guid,
3211fa94a07fSbrendan 		    l2cache, nl2cache, vd)) != 0)
3212fa94a07fSbrendan 			goto out;
3213fa94a07fSbrendan 		spa_load_l2cache(spa);
3214fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
3215fa94a07fSbrendan 	}
321699653d4eSeschrock 
321799653d4eSeschrock out:
321899653d4eSeschrock 	spa_config_exit(spa, FTAG);
3219fa94a07fSbrendan 	return (error);
3220fa9e4066Sahrens }
3221fa9e4066Sahrens 
3222fa9e4066Sahrens /*
32233d7072f8Seschrock  * Find any device that's done replacing, or a vdev marked 'unspare' that's
32243d7072f8Seschrock  * current spared, so we can detach it.
 *
 * Recursively searches vd's subtree (children first) and returns the
 * vdev that should be detached, or NULL if nothing is ready.  "Done" is
 * judged by both of the new device's DTL space maps being empty.
3225fa9e4066Sahrens  */
3226ea8dc4b6Seschrock static vdev_t *
32273d7072f8Seschrock spa_vdev_resilver_done_hunt(vdev_t *vd)
3228fa9e4066Sahrens {
3229ea8dc4b6Seschrock 	vdev_t *newvd, *oldvd;
3230fa9e4066Sahrens 	int c;
3231fa9e4066Sahrens 
3232ea8dc4b6Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
32333d7072f8Seschrock 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3234ea8dc4b6Seschrock 		if (oldvd != NULL)
3235ea8dc4b6Seschrock 			return (oldvd);
3236ea8dc4b6Seschrock 	}
3237fa9e4066Sahrens 
32383d7072f8Seschrock 	/*
32393d7072f8Seschrock 	 * Check for a completed replacement.
32403d7072f8Seschrock 	 */
	/* For a replacing vdev, child[0] is the old disk, child[1] the new. */
3241fa9e4066Sahrens 	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3242ea8dc4b6Seschrock 		oldvd = vd->vdev_child[0];
3243ea8dc4b6Seschrock 		newvd = vd->vdev_child[1];
3244ea8dc4b6Seschrock 
3245ea8dc4b6Seschrock 		mutex_enter(&newvd->vdev_dtl_lock);
3246ea8dc4b6Seschrock 		if (newvd->vdev_dtl_map.sm_space == 0 &&
3247ea8dc4b6Seschrock 		    newvd->vdev_dtl_scrub.sm_space == 0) {
3248ea8dc4b6Seschrock 			mutex_exit(&newvd->vdev_dtl_lock);
3249ea8dc4b6Seschrock 			return (oldvd);
3250fa9e4066Sahrens 		}
3251ea8dc4b6Seschrock 		mutex_exit(&newvd->vdev_dtl_lock);
3252fa9e4066Sahrens 	}
3253ea8dc4b6Seschrock 
32543d7072f8Seschrock 	/*
32553d7072f8Seschrock 	 * Check for a completed resilver with the 'unspare' flag set.
32563d7072f8Seschrock 	 */
	/*
	 * Here the roles are reversed: child[0] carries the vdev_unspare
	 * flag and child[1] (the spare) is the one returned for detach.
	 * The flag is consumed (cleared) so the spare is detached only once.
	 */
32573d7072f8Seschrock 	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
32583d7072f8Seschrock 		newvd = vd->vdev_child[0];
32593d7072f8Seschrock 		oldvd = vd->vdev_child[1];
32603d7072f8Seschrock 
32613d7072f8Seschrock 		mutex_enter(&newvd->vdev_dtl_lock);
32623d7072f8Seschrock 		if (newvd->vdev_unspare &&
32633d7072f8Seschrock 		    newvd->vdev_dtl_map.sm_space == 0 &&
32643d7072f8Seschrock 		    newvd->vdev_dtl_scrub.sm_space == 0) {
32653d7072f8Seschrock 			newvd->vdev_unspare = 0;
32663d7072f8Seschrock 			mutex_exit(&newvd->vdev_dtl_lock);
32673d7072f8Seschrock 			return (oldvd);
32683d7072f8Seschrock 		}
32693d7072f8Seschrock 		mutex_exit(&newvd->vdev_dtl_lock);
32703d7072f8Seschrock 	}
32713d7072f8Seschrock 
3272ea8dc4b6Seschrock 	return (NULL);
3273fa9e4066Sahrens }
3274fa9e4066Sahrens 
/*
 * Detach every device whose replacement or spare resilver has completed.
 * The config lock is dropped around each spa_vdev_detach() call (detach
 * re-enters via spa_vdev_enter()) and reacquired before rescanning.
 *
 * NOTE(review): pguid is initialized once and never reset inside the
 * loop; on a later iteration where the spare condition does not hold, a
 * stale pguid from a previous pass would trigger a second detach attempt
 * whose failure aborts the loop early -- confirm this is benign.
 */
3275ea8dc4b6Seschrock static void
32763d7072f8Seschrock spa_vdev_resilver_done(spa_t *spa)
3277fa9e4066Sahrens {
3278ea8dc4b6Seschrock 	vdev_t *vd;
327999653d4eSeschrock 	vdev_t *pvd;
3280ea8dc4b6Seschrock 	uint64_t guid;
328199653d4eSeschrock 	uint64_t pguid = 0;
3282ea8dc4b6Seschrock 
3283ea8dc4b6Seschrock 	spa_config_enter(spa, RW_READER, FTAG);
3284ea8dc4b6Seschrock 
32853d7072f8Seschrock 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3286ea8dc4b6Seschrock 		guid = vd->vdev_guid;
328799653d4eSeschrock 		/*
328899653d4eSeschrock 		 * If we have just finished replacing a hot spared device, then
328999653d4eSeschrock 		 * we need to detach the parent's first child (the original hot
329099653d4eSeschrock 		 * spare) as well.
329199653d4eSeschrock 		 */
329299653d4eSeschrock 		pvd = vd->vdev_parent;
329399653d4eSeschrock 		if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
329499653d4eSeschrock 		    pvd->vdev_id == 0) {
329599653d4eSeschrock 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
329699653d4eSeschrock 			ASSERT(pvd->vdev_parent->vdev_children == 2);
329799653d4eSeschrock 			pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
329899653d4eSeschrock 		}
3299ea8dc4b6Seschrock 		spa_config_exit(spa, FTAG);
3300ea8dc4b6Seschrock 		if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
3301ea8dc4b6Seschrock 			return;
330299653d4eSeschrock 		if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
330399653d4eSeschrock 			return;
3304ea8dc4b6Seschrock 		spa_config_enter(spa, RW_READER, FTAG);
3305fa9e4066Sahrens 	}
3306fa9e4066Sahrens 
3307ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
3308fa9e4066Sahrens }
3309fa9e4066Sahrens 
3310c67d9675Seschrock /*
3311c67d9675Seschrock  * Update the stored path for this vdev.  Dirty the vdev configuration, relying
3312c67d9675Seschrock  * on spa_vdev_enter/exit() to synchronize the labels and cache.
 *
 * Returns 0 on success, ENOENT if 'guid' names neither an open vdev nor
 * an inactive hot spare, or ENOTSUP for a non-leaf vdev.
3313c67d9675Seschrock  */
3314c67d9675Seschrock int
3315c67d9675Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3316c67d9675Seschrock {
3317c5904d13Seschrock 	vdev_t *vd;
3318c67d9675Seschrock 	uint64_t txg;
3319c67d9675Seschrock 
3320c67d9675Seschrock 	txg = spa_vdev_enter(spa);
3321c67d9675Seschrock 
3322c5904d13Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) {
332399653d4eSeschrock 		/*
3324c5904d13Seschrock 		 * Determine if this is a reference to a hot spare device.  If
3325c5904d13Seschrock 		 * it is, update the path manually as there is no associated
3326c5904d13Seschrock 		 * vdev_t that can be synced to disk.
332799653d4eSeschrock 		 */
3328c5904d13Seschrock 		nvlist_t **spares;
3329c5904d13Seschrock 		uint_t i, nspares;
3330fa94a07fSbrendan 
3331fa94a07fSbrendan 		if (spa->spa_spares.sav_config != NULL) {
3332fa94a07fSbrendan 			VERIFY(nvlist_lookup_nvlist_array(
3333fa94a07fSbrendan 			    spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
3334fa94a07fSbrendan 			    &spares, &nspares) == 0);
333599653d4eSeschrock 			for (i = 0; i < nspares; i++) {
333699653d4eSeschrock 				uint64_t theguid;
333799653d4eSeschrock 				VERIFY(nvlist_lookup_uint64(spares[i],
333899653d4eSeschrock 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
3339fa94a07fSbrendan 				if (theguid == guid) {
					/* Overwrite the path in the config and re-read spares. */
3340fa94a07fSbrendan 					VERIFY(nvlist_add_string(spares[i],
3341fa94a07fSbrendan 					    ZPOOL_CONFIG_PATH, newpath) == 0);
3342fa94a07fSbrendan 					spa_load_spares(spa);
3343fa94a07fSbrendan 					spa->spa_spares.sav_sync = B_TRUE;
3344fa94a07fSbrendan 					return (spa_vdev_exit(spa, NULL, txg,
3345fa94a07fSbrendan 					    0));
3346fa94a07fSbrendan 				}
334799653d4eSeschrock 			}
3348fa94a07fSbrendan 		}
334999653d4eSeschrock 
3350fa94a07fSbrendan 		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
335199653d4eSeschrock 	}
3352c67d9675Seschrock 
33530e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
33540e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
33550e34b6a7Sbonwick 
	/* NOTE(review): assumes vd->vdev_path is non-NULL here -- confirm. */
3356c67d9675Seschrock 	spa_strfree(vd->vdev_path);
3357c67d9675Seschrock 	vd->vdev_path = spa_strdup(newpath);
3358c67d9675Seschrock 
3359c67d9675Seschrock 	vdev_config_dirty(vd->vdev_top);
3360c67d9675Seschrock 
3361c67d9675Seschrock 	return (spa_vdev_exit(spa, NULL, txg, 0));
3362c67d9675Seschrock }
3363c67d9675Seschrock 
3364fa9e4066Sahrens /*
3365fa9e4066Sahrens  * ==========================================================================
3366fa9e4066Sahrens  * SPA Scrubbing
3367fa9e4066Sahrens  * ==========================================================================
3368fa9e4066Sahrens  */
3369fa9e4066Sahrens 
/*
 * Completion callback for scrub/resilver reads issued by
 * spa_scrub_io_start(): frees the data buffer, records non-speculative
 * I/O errors against both the pool and the vdev, and wakes any issuer
 * waiting for the in-flight count to drop below spa_scrub_maxinflight.
 */
3370fa9e4066Sahrens static void
3371fa9e4066Sahrens spa_scrub_io_done(zio_t *zio)
3372fa9e4066Sahrens {
3373fa9e4066Sahrens 	spa_t *spa = zio->io_spa;
3374fa9e4066Sahrens 
33750e8c6158Smaybee 	arc_data_buf_free(zio->io_data, zio->io_size);
3376fa9e4066Sahrens 
3377fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
3378ea8dc4b6Seschrock 	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
		/* Errors with no specific vdev are charged to the root vdev. */
337944cd46caSbillm 		vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev;
3380ea8dc4b6Seschrock 		spa->spa_scrub_errors++;
3381fa9e4066Sahrens 		mutex_enter(&vd->vdev_stat_lock);
3382fa9e4066Sahrens 		vd->vdev_stat.vs_scrub_errors++;
3383fa9e4066Sahrens 		mutex_exit(&vd->vdev_stat_lock);
3384fa9e4066Sahrens 	}
338505b2b3b8Smishra 
338605b2b3b8Smishra 	if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight)
3387ea8dc4b6Seschrock 		cv_broadcast(&spa->spa_scrub_io_cv);
338805b2b3b8Smishra 
338905b2b3b8Smishra 	ASSERT(spa->spa_scrub_inflight >= 0);
339005b2b3b8Smishra 
3391ea8dc4b6Seschrock 	mutex_exit(&spa->spa_scrub_lock);
3392fa9e4066Sahrens }
3393fa9e4066Sahrens 
/*
 * Issue an asynchronous read of the block pointed to by 'bp' for
 * scrub/resilver purposes, throttled so that no more than
 * spa_scrub_maxinflight such I/Os are outstanding at once.  Completion
 * is handled by spa_scrub_io_done(), which also frees the buffer.
 */
3394fa9e4066Sahrens static void
3395ea8dc4b6Seschrock spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags,
3396ea8dc4b6Seschrock     zbookmark_t *zb)
3397fa9e4066Sahrens {
3398fa9e4066Sahrens 	size_t size = BP_GET_LSIZE(bp);
339905b2b3b8Smishra 	void *data;
3400fa9e4066Sahrens 
3401fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
340205b2b3b8Smishra 	/*
340305b2b3b8Smishra 	 * Do not give too much work to vdev(s).
340405b2b3b8Smishra 	 */
340505b2b3b8Smishra 	while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) {
340605b2b3b8Smishra 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
340705b2b3b8Smishra 	}
3408fa9e4066Sahrens 	spa->spa_scrub_inflight++;
3409fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
3410fa9e4066Sahrens 
34110e8c6158Smaybee 	data = arc_data_buf_alloc(size);
341205b2b3b8Smishra 
	/* Intent log blocks may legitimately be gone; don't count failures. */
3413ea8dc4b6Seschrock 	if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
3414ea8dc4b6Seschrock 		flags |= ZIO_FLAG_SPECULATIVE;	/* intent log block */
3415ea8dc4b6Seschrock 
3416d80c45e0Sbonwick 	flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
3417ea8dc4b6Seschrock 
3418fa9e4066Sahrens 	zio_nowait(zio_read(NULL, spa, bp, data, size,
3419ea8dc4b6Seschrock 	    spa_scrub_io_done, NULL, priority, flags, zb));
3420fa9e4066Sahrens }
3421fa9e4066Sahrens 
/*
 * Per-block traversal callback for the scrub thread: updates per-vdev
 * examined-byte statistics for every DVA of the block and issues a read
 * via spa_scrub_io_start() when scrubbing everything, or only when some
 * DVA falls in a DTL gap when resilvering.  Returns 0 to continue the
 * traversal, or ERESTART when the block itself could not be read.
 */
3422fa9e4066Sahrens /* ARGSUSED */
3423fa9e4066Sahrens static int
3424fa9e4066Sahrens spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a)
3425fa9e4066Sahrens {
3426fa9e4066Sahrens 	blkptr_t *bp = &bc->bc_blkptr;
342744cd46caSbillm 	vdev_t *vd = spa->spa_root_vdev;
342844cd46caSbillm 	dva_t *dva = bp->blk_dva;
342944cd46caSbillm 	int needs_resilver = B_FALSE;
343044cd46caSbillm 	int d;
3431fa9e4066Sahrens 
343244cd46caSbillm 	if (bc->bc_errno) {
3433fa9e4066Sahrens 		/*
3434fa9e4066Sahrens 		 * We can't scrub this block, but we can continue to scrub
3435fa9e4066Sahrens 		 * the rest of the pool.  Note the error and move along.
3436fa9e4066Sahrens 		 */
3437fa9e4066Sahrens 		mutex_enter(&spa->spa_scrub_lock);
3438fa9e4066Sahrens 		spa->spa_scrub_errors++;
3439fa9e4066Sahrens 		mutex_exit(&spa->spa_scrub_lock);
3440fa9e4066Sahrens 
		/* vd is still the root vdev here: charge the error pool-wide. */
344144cd46caSbillm 		mutex_enter(&vd->vdev_stat_lock);
344244cd46caSbillm 		vd->vdev_stat.vs_scrub_errors++;
344344cd46caSbillm 		mutex_exit(&vd->vdev_stat_lock);
3444fa9e4066Sahrens 
3445fa9e4066Sahrens 		return (ERESTART);
3446fa9e4066Sahrens 	}
3447fa9e4066Sahrens 
3448fa9e4066Sahrens 	ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg);
3449fa9e4066Sahrens 
345044cd46caSbillm 	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
345144cd46caSbillm 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]));
3452fa9e4066Sahrens 
345344cd46caSbillm 		ASSERT(vd != NULL);
345444cd46caSbillm 
345544cd46caSbillm 		/*
345644cd46caSbillm 		 * Keep track of how much data we've examined so that
345744cd46caSbillm 		 * zpool(1M) status can make useful progress reports.
345844cd46caSbillm 		 */
345944cd46caSbillm 		mutex_enter(&vd->vdev_stat_lock);
346044cd46caSbillm 		vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]);
346144cd46caSbillm 		mutex_exit(&vd->vdev_stat_lock);
346244cd46caSbillm 
346344cd46caSbillm 		if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) {
346444cd46caSbillm 			if (DVA_GET_GANG(&dva[d])) {
346544cd46caSbillm 				/*
346644cd46caSbillm 				 * Gang members may be spread across multiple
346744cd46caSbillm 				 * vdevs, so the best we can do is look at the
346844cd46caSbillm 				 * pool-wide DTL.
346944cd46caSbillm 				 * XXX -- it would be better to change our
347044cd46caSbillm 				 * allocation policy to ensure that this can't
347144cd46caSbillm 				 * happen.
347244cd46caSbillm 				 */
347344cd46caSbillm 				vd = spa->spa_root_vdev;
347444cd46caSbillm 			}
347544cd46caSbillm 			if (vdev_dtl_contains(&vd->vdev_dtl_map,
347644cd46caSbillm 			    bp->blk_birth, 1))
347744cd46caSbillm 				needs_resilver = B_TRUE;
3478fa9e4066Sahrens 		}
347944cd46caSbillm 	}
348044cd46caSbillm 
348144cd46caSbillm 	if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING)
3482fa9e4066Sahrens 		spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB,
3483ea8dc4b6Seschrock 		    ZIO_FLAG_SCRUB, &bc->bc_bookmark);
348444cd46caSbillm 	else if (needs_resilver)
348544cd46caSbillm 		spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER,
348644cd46caSbillm 		    ZIO_FLAG_RESILVER, &bc->bc_bookmark);
3487fa9e4066Sahrens 
3488fa9e4066Sahrens 	return (0);
3489fa9e4066Sahrens }
3490fa9e4066Sahrens 
/*
 * Body of the scrub/resilver thread (spa->spa_scrub_thread).
 *
 * Drives the pool traverse (spa->spa_scrub_th) a chunk at a time until it
 * completes, is stopped, or a restart is requested; drains all outstanding
 * scrub I/O; then reassesses DTLs, updates scrub stats, and hands follow-up
 * work (resilver-done processing, restarted scrub/resilver) to the async
 * thread before exiting.
 */
static void
spa_scrub_thread(spa_t *spa)
{
	callb_cpr_t cprinfo;
	traverse_handle_t *th = spa->spa_scrub_th;
	vdev_t *rvd = spa->spa_root_vdev;
	pool_scrub_type_t scrub_type = spa->spa_scrub_type;
	int error = 0;
	boolean_t complete;

	CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG);

	/*
	 * If we're restarting due to a snapshot create/delete,
	 * wait for that to complete.
	 */
	txg_wait_synced(spa_get_dsl(spa), 0);

	dprintf("start %s mintxg=%llu maxtxg=%llu\n",
	    scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
	    spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg);

	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_reopen(rvd);		/* purge all vdev caches */
	vdev_config_dirty(rvd);		/* rewrite all disk labels */
	vdev_scrub_stat_update(rvd, scrub_type, B_FALSE);
	spa_config_exit(spa, FTAG);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_errors = 0;
	spa->spa_scrub_active = 1;
	ASSERT(spa->spa_scrub_inflight == 0);

	/*
	 * Main loop: honor suspend requests, then advance the traverse.
	 * traverse_more() returns EAGAIN while there is more to do; any
	 * other value (including 0 for completion) ends the loop.
	 */
	while (!spa->spa_scrub_stop) {
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		/* Park here (marked inactive) while suspends are pending. */
		while (spa->spa_scrub_suspended) {
			spa->spa_scrub_active = 0;
			cv_broadcast(&spa->spa_scrub_cv);
			cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
			spa->spa_scrub_active = 1;
		}
		CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock);

		if (spa->spa_scrub_restart_txg != 0)
			break;

		/* Drop the lock while the traverse does real work. */
		mutex_exit(&spa->spa_scrub_lock);
		error = traverse_more(th);
		mutex_enter(&spa->spa_scrub_lock);
		if (error != EAGAIN)
			break;
	}

	/* Wait for all scrub I/Os issued by the callback to complete. */
	while (spa->spa_scrub_inflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);

	spa->spa_scrub_active = 0;
	cv_broadcast(&spa->spa_scrub_cv);

	mutex_exit(&spa->spa_scrub_lock);

	spa_config_enter(spa, RW_WRITER, FTAG);

	mutex_enter(&spa->spa_scrub_lock);

	/*
	 * Note: we check spa_scrub_restart_txg under both spa_scrub_lock
	 * AND the spa config lock to synchronize with any config changes
	 * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit().
	 */
	if (spa->spa_scrub_restart_txg != 0)
		error = ERESTART;

	if (spa->spa_scrub_stop)
		error = EINTR;

	/*
	 * Even if there were uncorrectable errors, we consider the scrub
	 * completed.  The downside is that if there is a transient error during
	 * a resilver, we won't resilver the data properly to the target.  But
	 * if the damage is permanent (more likely) we will resilver forever,
	 * which isn't really acceptable.  Since there is enough information for
	 * the user to know what has failed and why, this seems like a more
	 * tractable approach.
	 */
	complete = (error == 0);

	dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n",
	    scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
	    spa->spa_scrub_maxtxg, complete ? "done" : "FAILED",
	    error, spa->spa_scrub_errors, spa->spa_scrub_stop);

	mutex_exit(&spa->spa_scrub_lock);

	/*
	 * If the scrub/resilver completed, update all DTLs to reflect this.
	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
	 */
	vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1,
	    complete ? spa->spa_scrub_maxtxg : 0, B_TRUE);
	vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete);
	spa_errlog_rotate(spa);

	if (scrub_type == POOL_SCRUB_RESILVER && complete)
		spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH);

	spa_config_exit(spa, FTAG);

	mutex_enter(&spa->spa_scrub_lock);

	/*
	 * We may have finished replacing a device.
	 * Let the async thread assess this and handle the detach.
	 */
	spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);

	/*
	 * If we were told to restart, our final act is to start a new scrub.
	 */
	if (error == ERESTART)
		spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ?
		    SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB);

	spa->spa_scrub_type = POOL_SCRUB_NONE;
	spa->spa_scrub_active = 0;
	spa->spa_scrub_thread = NULL;
	cv_broadcast(&spa->spa_scrub_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops &spa->spa_scrub_lock */
	thread_exit();
}
3621fa9e4066Sahrens 
3622fa9e4066Sahrens void
3623fa9e4066Sahrens spa_scrub_suspend(spa_t *spa)
3624fa9e4066Sahrens {
3625fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
3626ea8dc4b6Seschrock 	spa->spa_scrub_suspended++;
3627fa9e4066Sahrens 	while (spa->spa_scrub_active) {
3628fa9e4066Sahrens 		cv_broadcast(&spa->spa_scrub_cv);
3629fa9e4066Sahrens 		cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
3630fa9e4066Sahrens 	}
3631fa9e4066Sahrens 	while (spa->spa_scrub_inflight)
3632fa9e4066Sahrens 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
3633fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
3634fa9e4066Sahrens }
3635fa9e4066Sahrens 
3636fa9e4066Sahrens void
3637fa9e4066Sahrens spa_scrub_resume(spa_t *spa)
3638fa9e4066Sahrens {
3639fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
3640ea8dc4b6Seschrock 	ASSERT(spa->spa_scrub_suspended != 0);
3641ea8dc4b6Seschrock 	if (--spa->spa_scrub_suspended == 0)
3642fa9e4066Sahrens 		cv_broadcast(&spa->spa_scrub_cv);
3643fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
3644fa9e4066Sahrens }
3645fa9e4066Sahrens 
3646fa9e4066Sahrens void
3647fa9e4066Sahrens spa_scrub_restart(spa_t *spa, uint64_t txg)
3648fa9e4066Sahrens {
3649fa9e4066Sahrens 	/*
3650fa9e4066Sahrens 	 * Something happened (e.g. snapshot create/delete) that means
3651fa9e4066Sahrens 	 * we must restart any in-progress scrubs.  The itinerary will
3652fa9e4066Sahrens 	 * fix this properly.
3653fa9e4066Sahrens 	 */
3654fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
3655fa9e4066Sahrens 	spa->spa_scrub_restart_txg = txg;
3656fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
3657fa9e4066Sahrens }
3658fa9e4066Sahrens 
/*
 * Start (or cancel) a scrub or resilver of the pool.
 *
 * Any scrub/resilver already running is stopped first -- a resilver only
 * when 'force' is set (otherwise EBUSY).  A plain scrub is upgraded to a
 * resilver when the pool-wide DTL is non-empty, and a resilver degenerates
 * to a no-op (with resilver-done processing requested) when it is empty.
 * For any resulting type other than POOL_SCRUB_NONE, a new traverse and
 * scrub thread are created.
 *
 * Caller must hold spa_namespace_lock and must not hold the config lock
 * as writer (asserted below).  Returns 0, EBUSY, or ENOTSUP.
 */
int
spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force)
{
	space_seg_t *ss;
	uint64_t mintxg, maxtxg;
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(!spa_config_held(spa, RW_WRITER));

	if ((uint_t)type >= POOL_SCRUB_TYPES)
		return (ENOTSUP);

	mutex_enter(&spa->spa_scrub_lock);

	/*
	 * If there's a scrub or resilver already in progress, stop it.
	 */
	while (spa->spa_scrub_thread != NULL) {
		/*
		 * Don't stop a resilver unless forced.
		 */
		if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) {
			mutex_exit(&spa->spa_scrub_lock);
			return (EBUSY);
		}
		spa->spa_scrub_stop = 1;
		cv_broadcast(&spa->spa_scrub_cv);
		cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
	}

	/*
	 * Terminate the previous traverse.
	 */
	if (spa->spa_scrub_th != NULL) {
		traverse_fini(spa->spa_scrub_th);
		spa->spa_scrub_th = NULL;
	}

	/* With no root vdev there is nothing to traverse. */
	if (rvd == NULL) {
		ASSERT(spa->spa_scrub_stop == 0);
		ASSERT(spa->spa_scrub_type == type);
		ASSERT(spa->spa_scrub_restart_txg == 0);
		mutex_exit(&spa->spa_scrub_lock);
		return (0);
	}

	mintxg = TXG_INITIAL - 1;
	maxtxg = spa_last_synced_txg(spa) + 1;

	mutex_enter(&rvd->vdev_dtl_lock);

	if (rvd->vdev_dtl_map.sm_space == 0) {
		/*
		 * The pool-wide DTL is empty.
		 * If this is a resilver, there's nothing to do except
		 * check whether any in-progress replacements have completed.
		 */
		if (type == POOL_SCRUB_RESILVER) {
			type = POOL_SCRUB_NONE;
			spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
		}
	} else {
		/*
		 * The pool-wide DTL is non-empty.
		 * If this is a normal scrub, upgrade to a resilver instead.
		 */
		if (type == POOL_SCRUB_EVERYTHING)
			type = POOL_SCRUB_RESILVER;
	}

	if (type == POOL_SCRUB_RESILVER) {
		/*
		 * Determine the resilvering boundaries.
		 *
		 * Note: (mintxg, maxtxg) is an open interval,
		 * i.e. mintxg and maxtxg themselves are not included.
		 *
		 * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1
		 * so we don't claim to resilver a txg that's still changing.
		 */
		ss = avl_first(&rvd->vdev_dtl_map.sm_root);
		mintxg = ss->ss_start - 1;
		ss = avl_last(&rvd->vdev_dtl_map.sm_root);
		maxtxg = MIN(ss->ss_end, maxtxg);

		spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
	}

	mutex_exit(&rvd->vdev_dtl_lock);

	spa->spa_scrub_stop = 0;
	spa->spa_scrub_type = type;
	spa->spa_scrub_restart_txg = 0;

	/* Kick off the traverse and its worker thread. */
	if (type != POOL_SCRUB_NONE) {
		spa->spa_scrub_mintxg = mintxg;
		spa->spa_scrub_maxtxg = maxtxg;
		spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL,
		    ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL,
		    ZIO_FLAG_CANFAIL);
		traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg);
		spa->spa_scrub_thread = thread_create(NULL, 0,
		    spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri);
	}

	mutex_exit(&spa->spa_scrub_lock);

	return (0);
}
3769fa9e4066Sahrens 
3770ea8dc4b6Seschrock /*
3771ea8dc4b6Seschrock  * ==========================================================================
3772ea8dc4b6Seschrock  * SPA async task processing
3773ea8dc4b6Seschrock  * ==========================================================================
3774ea8dc4b6Seschrock  */
3775ea8dc4b6Seschrock 
3776ea8dc4b6Seschrock static void
37773d7072f8Seschrock spa_async_remove(spa_t *spa, vdev_t *vd)
3778fa9e4066Sahrens {
3779ea8dc4b6Seschrock 	vdev_t *tvd;
3780ea8dc4b6Seschrock 	int c;
3781fa9e4066Sahrens 
37823d7072f8Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
37833d7072f8Seschrock 		tvd = vd->vdev_child[c];
37843d7072f8Seschrock 		if (tvd->vdev_remove_wanted) {
37853d7072f8Seschrock 			tvd->vdev_remove_wanted = 0;
37863d7072f8Seschrock 			vdev_set_state(tvd, B_FALSE, VDEV_STATE_REMOVED,
37873d7072f8Seschrock 			    VDEV_AUX_NONE);
37880a4e9518Sgw 			vdev_clear(spa, tvd, B_TRUE);
37893d7072f8Seschrock 			vdev_config_dirty(tvd->vdev_top);
3790ea8dc4b6Seschrock 		}
37913d7072f8Seschrock 		spa_async_remove(spa, tvd);
3792ea8dc4b6Seschrock 	}
3793ea8dc4b6Seschrock }
3794fa9e4066Sahrens 
/*
 * Worker for all SPA_ASYNC_* requests.  Atomically consumes the pending
 * task mask, then services each requested task in turn (config update,
 * device removal, resilver-done, scrub, resilver).  Tasks requested while
 * we run remain pending for a later dispatch.  Announces completion under
 * spa_async_lock so spa_async_suspend() waiters can proceed.
 */
static void
spa_async_thread(spa_t *spa)
{
	int tasks;
	uint64_t txg;

	ASSERT(spa->spa_sync_on);

	/* Snapshot and clear the pending task mask. */
	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks = 0;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if the config needs to be updated.
	 */
	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
		mutex_enter(&spa_namespace_lock);
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * See if any devices need to be marked REMOVED.
	 *
	 * XXX - We avoid doing this when we are in
	 * I/O failure state since spa_vdev_enter() grabs
	 * the namespace lock and would not be able to obtain
	 * the writer config lock.
	 */
	if (tasks & SPA_ASYNC_REMOVE &&
	    spa_state(spa) != POOL_STATE_IO_FAILURE) {
		txg = spa_vdev_enter(spa);
		spa_async_remove(spa, spa->spa_root_vdev);
		(void) spa_vdev_exit(spa, NULL, txg, 0);
	}

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_RESILVER_DONE)
		spa_vdev_resilver_done(spa);

	/*
	 * Kick off a scrub.  When starting a RESILVER scrub (or an EVERYTHING
	 * scrub which can become a resilver), we need to hold
	 * spa_namespace_lock() because the sysevent we post via
	 * spa_event_notify() needs to get the name of the pool.
	 */
	if (tasks & SPA_ASYNC_SCRUB) {
		mutex_enter(&spa_namespace_lock);
		VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER) {
		mutex_enter(&spa_namespace_lock);
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}
3868ea8dc4b6Seschrock 
3869ea8dc4b6Seschrock void
3870ea8dc4b6Seschrock spa_async_suspend(spa_t *spa)
3871ea8dc4b6Seschrock {
3872ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3873ea8dc4b6Seschrock 	spa->spa_async_suspended++;
3874ea8dc4b6Seschrock 	while (spa->spa_async_thread != NULL)
3875ea8dc4b6Seschrock 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
3876ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3877ea8dc4b6Seschrock }
3878ea8dc4b6Seschrock 
3879ea8dc4b6Seschrock void
3880ea8dc4b6Seschrock spa_async_resume(spa_t *spa)
3881ea8dc4b6Seschrock {
3882ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3883ea8dc4b6Seschrock 	ASSERT(spa->spa_async_suspended != 0);
3884ea8dc4b6Seschrock 	spa->spa_async_suspended--;
3885ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3886ea8dc4b6Seschrock }
3887ea8dc4b6Seschrock 
3888ea8dc4b6Seschrock static void
3889ea8dc4b6Seschrock spa_async_dispatch(spa_t *spa)
3890ea8dc4b6Seschrock {
3891ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3892ea8dc4b6Seschrock 	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
38930373e76bSbonwick 	    spa->spa_async_thread == NULL &&
38940373e76bSbonwick 	    rootdir != NULL && !vn_is_readonly(rootdir))
3895ea8dc4b6Seschrock 		spa->spa_async_thread = thread_create(NULL, 0,
3896ea8dc4b6Seschrock 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
3897ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3898ea8dc4b6Seschrock }
3899ea8dc4b6Seschrock 
3900ea8dc4b6Seschrock void
3901ea8dc4b6Seschrock spa_async_request(spa_t *spa, int task)
3902ea8dc4b6Seschrock {
3903ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
3904ea8dc4b6Seschrock 	spa->spa_async_tasks |= task;
3905ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
3906fa9e4066Sahrens }
3907fa9e4066Sahrens 
3908fa9e4066Sahrens /*
3909fa9e4066Sahrens  * ==========================================================================
3910fa9e4066Sahrens  * SPA syncing routines
3911fa9e4066Sahrens  * ==========================================================================
3912fa9e4066Sahrens  */
3913fa9e4066Sahrens 
/*
 * Free every block recorded on the pool's deferred-free bplist
 * (spa_sync_bplist) in txg 'txg', then vacate the list and pre-dirty
 * its first block so the subsequent sync converges quickly.
 */
static void
spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
{
	bplist_t *bpl = &spa->spa_sync_bplist;
	dmu_tx_t *tx;
	blkptr_t blk;
	uint64_t itor = 0;
	zio_t *zio;
	int error;
	uint8_t c = 1;	/* single byte written to dirty the bplist object */

	/* Issue a free for each block on the list, then wait for all. */
	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD);

	while (bplist_iterate(bpl, &itor, &blk) == 0)
		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL));

	error = zio_wait(zio);
	ASSERT3U(error, ==, 0);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	bplist_vacate(bpl, tx);

	/*
	 * Pre-dirty the first block so we sync to convergence faster.
	 * (Usually only the first block is needed.)
	 */
	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
	dmu_tx_commit(tx);
}
3943fa9e4066Sahrens 
3944fa9e4066Sahrens static void
394599653d4eSeschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3946fa9e4066Sahrens {
3947fa9e4066Sahrens 	char *packed = NULL;
3948fa9e4066Sahrens 	size_t nvsize = 0;
3949fa9e4066Sahrens 	dmu_buf_t *db;
3950fa9e4066Sahrens 
395199653d4eSeschrock 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3952fa9e4066Sahrens 
3953fa9e4066Sahrens 	packed = kmem_alloc(nvsize, KM_SLEEP);
3954fa9e4066Sahrens 
395599653d4eSeschrock 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3956ea8dc4b6Seschrock 	    KM_SLEEP) == 0);
3957fa9e4066Sahrens 
395899653d4eSeschrock 	dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx);
3959fa9e4066Sahrens 
3960fa9e4066Sahrens 	kmem_free(packed, nvsize);
3961fa9e4066Sahrens 
396299653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3963fa9e4066Sahrens 	dmu_buf_will_dirty(db, tx);
3964fa9e4066Sahrens 	*(uint64_t *)db->db_data = nvsize;
3965ea8dc4b6Seschrock 	dmu_buf_rele(db, FTAG);
3966fa9e4066Sahrens }
3967fa9e4066Sahrens 
396899653d4eSeschrock static void
3969fa94a07fSbrendan spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3970fa94a07fSbrendan     const char *config, const char *entry)
397199653d4eSeschrock {
397299653d4eSeschrock 	nvlist_t *nvroot;
3973fa94a07fSbrendan 	nvlist_t **list;
397499653d4eSeschrock 	int i;
397599653d4eSeschrock 
3976fa94a07fSbrendan 	if (!sav->sav_sync)
397799653d4eSeschrock 		return;
397899653d4eSeschrock 
397999653d4eSeschrock 	/*
3980fa94a07fSbrendan 	 * Update the MOS nvlist describing the list of available devices.
3981fa94a07fSbrendan 	 * spa_validate_aux() will have already made sure this nvlist is
39823d7072f8Seschrock 	 * valid and the vdevs are labeled appropriately.
398399653d4eSeschrock 	 */
3984fa94a07fSbrendan 	if (sav->sav_object == 0) {
3985fa94a07fSbrendan 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3986fa94a07fSbrendan 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3987fa94a07fSbrendan 		    sizeof (uint64_t), tx);
398899653d4eSeschrock 		VERIFY(zap_update(spa->spa_meta_objset,
3989fa94a07fSbrendan 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3990fa94a07fSbrendan 		    &sav->sav_object, tx) == 0);
399199653d4eSeschrock 	}
399299653d4eSeschrock 
399399653d4eSeschrock 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3994fa94a07fSbrendan 	if (sav->sav_count == 0) {
3995fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
399699653d4eSeschrock 	} else {
3997fa94a07fSbrendan 		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3998fa94a07fSbrendan 		for (i = 0; i < sav->sav_count; i++)
3999fa94a07fSbrendan 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
4000fa94a07fSbrendan 			    B_FALSE, B_FALSE, B_TRUE);
4001fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
4002fa94a07fSbrendan 		    sav->sav_count) == 0);
4003fa94a07fSbrendan 		for (i = 0; i < sav->sav_count; i++)
4004fa94a07fSbrendan 			nvlist_free(list[i]);
4005fa94a07fSbrendan 		kmem_free(list, sav->sav_count * sizeof (void *));
400699653d4eSeschrock 	}
400799653d4eSeschrock 
4008fa94a07fSbrendan 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
400906eeb2adSek 	nvlist_free(nvroot);
401099653d4eSeschrock 
4011fa94a07fSbrendan 	sav->sav_sync = B_FALSE;
401299653d4eSeschrock }
401399653d4eSeschrock 
401499653d4eSeschrock static void
401599653d4eSeschrock spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
401699653d4eSeschrock {
401799653d4eSeschrock 	nvlist_t *config;
401899653d4eSeschrock 
401999653d4eSeschrock 	if (list_is_empty(&spa->spa_dirty_list))
402099653d4eSeschrock 		return;
402199653d4eSeschrock 
402299653d4eSeschrock 	config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE);
402399653d4eSeschrock 
402499653d4eSeschrock 	if (spa->spa_config_syncing)
402599653d4eSeschrock 		nvlist_free(spa->spa_config_syncing);
402699653d4eSeschrock 	spa->spa_config_syncing = config;
402799653d4eSeschrock 
402899653d4eSeschrock 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
402999653d4eSeschrock }
403099653d4eSeschrock 
/*
 * Set zpool properties.
 *
 * Sync-task callback: applies each property in the nvlist 'arg2' to the
 * pool 'arg1' within transaction 'tx'.  Version, altroot, and cachefile
 * get special handling; all other properties are written to the
 * pool-props ZAP object in the MOS (created on first use) and mirrored
 * into in-core spa_t fields where one exists.  Each change is also
 * recorded in the pool's internal history.
 */
static void
spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	spa_t *spa = arg1;
	objset_t *mos = spa->spa_meta_objset;
	nvlist_t *nvp = arg2;
	nvpair_t *elem;
	uint64_t intval;
	char *strval;
	zpool_prop_t prop;
	const char *propname;
	zprop_type_t proptype;
	spa_config_dirent_t *dp;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPOOL_PROP_VERSION:
			/*
			 * Only set version for non-zpool-creation cases
			 * (set/import). spa_create() needs special care
			 * for version setting.
			 */
			if (tx->tx_txg != TXG_INITIAL) {
				VERIFY(nvpair_value_uint64(elem,
				    &intval) == 0);
				ASSERT(intval <= SPA_VERSION);
				ASSERT(intval >= spa_version(spa));
				spa->spa_uberblock.ub_version = intval;
				vdev_config_dirty(spa->spa_root_vdev);
			}
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property. It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'cachefile' is a non-persistent property, but note
			 * an async request that the config cache needs to be
			 * updated.
			 */
			VERIFY(nvpair_value_string(elem, &strval) == 0);

			dp = kmem_alloc(sizeof (spa_config_dirent_t),
			    KM_SLEEP);

			/* "" means the default path; "none" means no cache. */
			if (strval[0] == '\0')
				dp->scd_path = spa_strdup(spa_config_path);
			else if (strcmp(strval, "none") == 0)
				dp->scd_path = NULL;
			else
				dp->scd_path = spa_strdup(strval);

			list_insert_head(&spa->spa_config_list, dp);
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
			break;
		default:
			/*
			 * Set pool property values in the poolprops mos object.
			 */
			mutex_enter(&spa->spa_props_lock);
			if (spa->spa_pool_props_object == 0) {
				/* (shadows outer 'mos'; same objset) */
				objset_t *mos = spa->spa_meta_objset;

				VERIFY((spa->spa_pool_props_object =
				    zap_create(mos, DMU_OT_POOL_PROPS,
				    DMU_OT_NONE, 0, tx)) > 0);

				VERIFY(zap_update(mos,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    8, 1, &spa->spa_pool_props_object, tx)
				    == 0);
			}
			mutex_exit(&spa->spa_props_lock);

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				VERIFY(nvpair_value_string(elem, &strval) == 0);
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx) == 0);

			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				VERIFY(nvpair_value_uint64(elem, &intval) == 0);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY(zpool_prop_index_to_string(
					    prop, intval, &unused) == 0);
				}
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx) == 0);
			} else {
				ASSERT(0); /* not allowed */
			}

			/* Mirror selected properties into the in-core spa. */
			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			default:
				break;
			}
		}

		/*
		 * log internal history if this is not a zpool create
		 *
		 * NOTE(review): 'intval' is logged for every property,
		 * including ones whose value was a string (e.g. cachefile),
		 * in which case it may be stale or unset -- confirm intended.
		 */
		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
		    tx->tx_txg != TXG_INITIAL) {
			spa_history_internal_log(LOG_POOL_PROPSET,
			    spa, tx, cr, "%s %lld %s",
			    nvpair_name(elem), intval, spa->spa_name);
		}
	}
}
4165b1b8ab34Slling 
4166fa9e4066Sahrens /*
4167fa9e4066Sahrens  * Sync the specified transaction group.  New blocks may be dirtied as
4168fa9e4066Sahrens  * part of the process, so we iterate until it converges.
4169fa9e4066Sahrens  */
4170fa9e4066Sahrens void
4171fa9e4066Sahrens spa_sync(spa_t *spa, uint64_t txg)
4172fa9e4066Sahrens {
4173fa9e4066Sahrens 	dsl_pool_t *dp = spa->spa_dsl_pool;
4174fa9e4066Sahrens 	objset_t *mos = spa->spa_meta_objset;
4175fa9e4066Sahrens 	bplist_t *bpl = &spa->spa_sync_bplist;
41760373e76bSbonwick 	vdev_t *rvd = spa->spa_root_vdev;
4177fa9e4066Sahrens 	vdev_t *vd;
4178fa9e4066Sahrens 	dmu_tx_t *tx;
4179fa9e4066Sahrens 	int dirty_vdevs;
4180fa9e4066Sahrens 
4181fa9e4066Sahrens 	/*
4182fa9e4066Sahrens 	 * Lock out configuration changes.
4183fa9e4066Sahrens 	 */
4184ea8dc4b6Seschrock 	spa_config_enter(spa, RW_READER, FTAG);
4185fa9e4066Sahrens 
4186fa9e4066Sahrens 	spa->spa_syncing_txg = txg;
4187fa9e4066Sahrens 	spa->spa_sync_pass = 0;
4188fa9e4066Sahrens 
4189ea8dc4b6Seschrock 	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
4190fa9e4066Sahrens 
419199653d4eSeschrock 	tx = dmu_tx_create_assigned(dp, txg);
419299653d4eSeschrock 
419399653d4eSeschrock 	/*
4194e7437265Sahrens 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
419599653d4eSeschrock 	 * set spa_deflate if we have no raid-z vdevs.
419699653d4eSeschrock 	 */
4197e7437265Sahrens 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
4198e7437265Sahrens 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
419999653d4eSeschrock 		int i;
420099653d4eSeschrock 
420199653d4eSeschrock 		for (i = 0; i < rvd->vdev_children; i++) {
420299653d4eSeschrock 			vd = rvd->vdev_child[i];
420399653d4eSeschrock 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
420499653d4eSeschrock 				break;
420599653d4eSeschrock 		}
420699653d4eSeschrock 		if (i == rvd->vdev_children) {
420799653d4eSeschrock 			spa->spa_deflate = TRUE;
420899653d4eSeschrock 			VERIFY(0 == zap_add(spa->spa_meta_objset,
420999653d4eSeschrock 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
421099653d4eSeschrock 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
421199653d4eSeschrock 		}
421299653d4eSeschrock 	}
421399653d4eSeschrock 
4214fa9e4066Sahrens 	/*
4215fa9e4066Sahrens 	 * If anything has changed in this txg, push the deferred frees
4216fa9e4066Sahrens 	 * from the previous txg.  If not, leave them alone so that we
4217fa9e4066Sahrens 	 * don't generate work on an otherwise idle system.
4218fa9e4066Sahrens 	 */
4219fa9e4066Sahrens 	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
42201615a317Sek 	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
42211615a317Sek 	    !txg_list_empty(&dp->dp_sync_tasks, txg))
4222fa9e4066Sahrens 		spa_sync_deferred_frees(spa, txg);
4223fa9e4066Sahrens 
4224fa9e4066Sahrens 	/*
4225fa9e4066Sahrens 	 * Iterate to convergence.
4226fa9e4066Sahrens 	 */
4227fa9e4066Sahrens 	do {
4228fa9e4066Sahrens 		spa->spa_sync_pass++;
4229fa9e4066Sahrens 
4230fa9e4066Sahrens 		spa_sync_config_object(spa, tx);
4231fa94a07fSbrendan 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
4232fa94a07fSbrendan 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
4233fa94a07fSbrendan 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
4234fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
4235ea8dc4b6Seschrock 		spa_errlog_sync(spa, txg);
4236fa9e4066Sahrens 		dsl_pool_sync(dp, txg);
4237fa9e4066Sahrens 
4238fa9e4066Sahrens 		dirty_vdevs = 0;
4239fa9e4066Sahrens 		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
4240fa9e4066Sahrens 			vdev_sync(vd, txg);
4241fa9e4066Sahrens 			dirty_vdevs++;
4242fa9e4066Sahrens 		}
4243fa9e4066Sahrens 
4244fa9e4066Sahrens 		bplist_sync(bpl, tx);
4245fa9e4066Sahrens 	} while (dirty_vdevs);
4246fa9e4066Sahrens 
4247fa9e4066Sahrens 	bplist_close(bpl);
4248fa9e4066Sahrens 
4249fa9e4066Sahrens 	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
4250fa9e4066Sahrens 
4251fa9e4066Sahrens 	/*
4252fa9e4066Sahrens 	 * Rewrite the vdev configuration (which includes the uberblock)
4253fa9e4066Sahrens 	 * to commit the transaction group.
42540373e76bSbonwick 	 *
425517f17c2dSbonwick 	 * If there are no dirty vdevs, we sync the uberblock to a few
425617f17c2dSbonwick 	 * random top-level vdevs that are known to be visible in the
425717f17c2dSbonwick 	 * config cache (see spa_vdev_add() for details).  If there *are*
425817f17c2dSbonwick 	 * dirty vdevs -- or if the sync to our random subset fails --
425917f17c2dSbonwick 	 * then sync the uberblock to all vdevs.
42600373e76bSbonwick 	 */
426117f17c2dSbonwick 	if (list_is_empty(&spa->spa_dirty_list)) {
426221bf64a7Sgw 		vdev_t *svd[SPA_DVAS_PER_BP];
426321bf64a7Sgw 		int svdcount = 0;
42640373e76bSbonwick 		int children = rvd->vdev_children;
42650373e76bSbonwick 		int c0 = spa_get_random(children);
42660373e76bSbonwick 		int c;
42670373e76bSbonwick 
42680373e76bSbonwick 		for (c = 0; c < children; c++) {
42690373e76bSbonwick 			vd = rvd->vdev_child[(c0 + c) % children];
427017f17c2dSbonwick 			if (vd->vdev_ms_array == 0 || vd->vdev_islog)
42710373e76bSbonwick 				continue;
427217f17c2dSbonwick 			svd[svdcount++] = vd;
427317f17c2dSbonwick 			if (svdcount == SPA_DVAS_PER_BP)
42740373e76bSbonwick 				break;
42750373e76bSbonwick 		}
427621bf64a7Sgw 		vdev_config_sync(svd, svdcount, txg);
427721bf64a7Sgw 	} else {
427821bf64a7Sgw 		vdev_config_sync(rvd->vdev_child, rvd->vdev_children, txg);
42790373e76bSbonwick 	}
428099653d4eSeschrock 	dmu_tx_commit(tx);
428199653d4eSeschrock 
42820373e76bSbonwick 	/*
42830373e76bSbonwick 	 * Clear the dirty config list.
4284fa9e4066Sahrens 	 */
42850373e76bSbonwick 	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
42860373e76bSbonwick 		vdev_config_clean(vd);
42870373e76bSbonwick 
42880373e76bSbonwick 	/*
42890373e76bSbonwick 	 * Now that the new config has synced transactionally,
42900373e76bSbonwick 	 * let it become visible to the config cache.
42910373e76bSbonwick 	 */
42920373e76bSbonwick 	if (spa->spa_config_syncing != NULL) {
42930373e76bSbonwick 		spa_config_set(spa, spa->spa_config_syncing);
42940373e76bSbonwick 		spa->spa_config_txg = txg;
42950373e76bSbonwick 		spa->spa_config_syncing = NULL;
42960373e76bSbonwick 	}
4297fa9e4066Sahrens 
4298fa9e4066Sahrens 	/*
4299fa9e4066Sahrens 	 * Make a stable copy of the fully synced uberblock.
4300fa9e4066Sahrens 	 * We use this as the root for pool traversals.
4301fa9e4066Sahrens 	 */
4302fa9e4066Sahrens 	spa->spa_traverse_wanted = 1;	/* tells traverse_more() to stop */
4303fa9e4066Sahrens 
4304fa9e4066Sahrens 	spa_scrub_suspend(spa);		/* stop scrubbing and finish I/Os */
4305fa9e4066Sahrens 
4306fa9e4066Sahrens 	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
4307fa9e4066Sahrens 	spa->spa_traverse_wanted = 0;
4308fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
4309fa9e4066Sahrens 	rw_exit(&spa->spa_traverse_lock);
4310fa9e4066Sahrens 
4311fa9e4066Sahrens 	spa_scrub_resume(spa);		/* resume scrub with new ubsync */
4312fa9e4066Sahrens 
4313fa9e4066Sahrens 	/*
4314fa9e4066Sahrens 	 * Clean up the ZIL records for the synced txg.
4315fa9e4066Sahrens 	 */
4316fa9e4066Sahrens 	dsl_pool_zil_clean(dp);
4317fa9e4066Sahrens 
4318fa9e4066Sahrens 	/*
4319fa9e4066Sahrens 	 * Update usable space statistics.
4320fa9e4066Sahrens 	 */
4321fa9e4066Sahrens 	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
4322fa9e4066Sahrens 		vdev_sync_done(vd, txg);
4323fa9e4066Sahrens 
4324fa9e4066Sahrens 	/*
4325fa9e4066Sahrens 	 * It had better be the case that we didn't dirty anything
432699653d4eSeschrock 	 * since vdev_config_sync().
4327fa9e4066Sahrens 	 */
4328fa9e4066Sahrens 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4329fa9e4066Sahrens 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4330fa9e4066Sahrens 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4331fa9e4066Sahrens 	ASSERT(bpl->bpl_queue == NULL);
4332fa9e4066Sahrens 
4333ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
4334ea8dc4b6Seschrock 
4335ea8dc4b6Seschrock 	/*
4336ea8dc4b6Seschrock 	 * If any async tasks have been requested, kick them off.
4337ea8dc4b6Seschrock 	 */
4338ea8dc4b6Seschrock 	spa_async_dispatch(spa);
4339fa9e4066Sahrens }
4340fa9e4066Sahrens 
4341fa9e4066Sahrens /*
4342fa9e4066Sahrens  * Sync all pools.  We don't want to hold the namespace lock across these
4343fa9e4066Sahrens  * operations, so we take a reference on the spa_t and drop the lock during the
4344fa9e4066Sahrens  * sync.
4345fa9e4066Sahrens  */
4346fa9e4066Sahrens void
4347fa9e4066Sahrens spa_sync_allpools(void)
4348fa9e4066Sahrens {
4349fa9e4066Sahrens 	spa_t *spa = NULL;
4350fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
4351fa9e4066Sahrens 	while ((spa = spa_next(spa)) != NULL) {
4352fa9e4066Sahrens 		if (spa_state(spa) != POOL_STATE_ACTIVE)
4353fa9e4066Sahrens 			continue;
4354fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
4355fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
4356fa9e4066Sahrens 		txg_wait_synced(spa_get_dsl(spa), 0);
4357fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
4358fa9e4066Sahrens 		spa_close(spa, FTAG);
4359fa9e4066Sahrens 	}
4360fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
4361fa9e4066Sahrens }
4362fa9e4066Sahrens 
4363fa9e4066Sahrens /*
4364fa9e4066Sahrens  * ==========================================================================
4365fa9e4066Sahrens  * Miscellaneous routines
4366fa9e4066Sahrens  * ==========================================================================
4367fa9e4066Sahrens  */
4368fa9e4066Sahrens 
4369fa9e4066Sahrens /*
4370fa9e4066Sahrens  * Remove all pools in the system.
4371fa9e4066Sahrens  */
4372fa9e4066Sahrens void
4373fa9e4066Sahrens spa_evict_all(void)
4374fa9e4066Sahrens {
4375fa9e4066Sahrens 	spa_t *spa;
4376fa9e4066Sahrens 
4377fa9e4066Sahrens 	/*
4378fa9e4066Sahrens 	 * Remove all cached state.  All pools should be closed now,
4379fa9e4066Sahrens 	 * so every spa in the AVL tree should be unreferenced.
4380fa9e4066Sahrens 	 */
4381fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
4382fa9e4066Sahrens 	while ((spa = spa_next(NULL)) != NULL) {
4383fa9e4066Sahrens 		/*
4384ea8dc4b6Seschrock 		 * Stop async tasks.  The async thread may need to detach
4385ea8dc4b6Seschrock 		 * a device that's been replaced, which requires grabbing
4386ea8dc4b6Seschrock 		 * spa_namespace_lock, so we must drop it here.
4387fa9e4066Sahrens 		 */
4388fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
4389fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
4390ea8dc4b6Seschrock 		spa_async_suspend(spa);
4391fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
4392bb8b5132Sek 		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
4393fa9e4066Sahrens 		spa_close(spa, FTAG);
4394fa9e4066Sahrens 
4395fa9e4066Sahrens 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4396fa9e4066Sahrens 			spa_unload(spa);
4397fa9e4066Sahrens 			spa_deactivate(spa);
4398fa9e4066Sahrens 		}
4399fa9e4066Sahrens 		spa_remove(spa);
4400fa9e4066Sahrens 	}
4401fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
4402fa9e4066Sahrens }
4403ea8dc4b6Seschrock 
4404ea8dc4b6Seschrock vdev_t *
4405c5904d13Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
4406ea8dc4b6Seschrock {
4407c5904d13Seschrock 	vdev_t *vd;
4408c5904d13Seschrock 	int i;
4409c5904d13Seschrock 
4410c5904d13Seschrock 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4411c5904d13Seschrock 		return (vd);
4412c5904d13Seschrock 
4413c5904d13Seschrock 	if (l2cache) {
4414c5904d13Seschrock 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4415c5904d13Seschrock 			vd = spa->spa_l2cache.sav_vdevs[i];
4416c5904d13Seschrock 			if (vd->vdev_guid == guid)
4417c5904d13Seschrock 				return (vd);
4418c5904d13Seschrock 		}
4419c5904d13Seschrock 	}
4420c5904d13Seschrock 
4421c5904d13Seschrock 	return (NULL);
4422ea8dc4b6Seschrock }
4423eaca9bbdSeschrock 
4424eaca9bbdSeschrock void
4425990b4856Slling spa_upgrade(spa_t *spa, uint64_t version)
4426eaca9bbdSeschrock {
4427eaca9bbdSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
4428eaca9bbdSeschrock 
4429eaca9bbdSeschrock 	/*
4430eaca9bbdSeschrock 	 * This should only be called for a non-faulted pool, and since a
4431eaca9bbdSeschrock 	 * future version would result in an unopenable pool, this shouldn't be
4432eaca9bbdSeschrock 	 * possible.
4433eaca9bbdSeschrock 	 */
4434e7437265Sahrens 	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4435990b4856Slling 	ASSERT(version >= spa->spa_uberblock.ub_version);
4436eaca9bbdSeschrock 
4437990b4856Slling 	spa->spa_uberblock.ub_version = version;
4438eaca9bbdSeschrock 	vdev_config_dirty(spa->spa_root_vdev);
4439eaca9bbdSeschrock 
4440eaca9bbdSeschrock 	spa_config_exit(spa, FTAG);
444199653d4eSeschrock 
444299653d4eSeschrock 	txg_wait_synced(spa_get_dsl(spa), 0);
444399653d4eSeschrock }
444499653d4eSeschrock 
444599653d4eSeschrock boolean_t
444699653d4eSeschrock spa_has_spare(spa_t *spa, uint64_t guid)
444799653d4eSeschrock {
444899653d4eSeschrock 	int i;
444939c23413Seschrock 	uint64_t spareguid;
4450fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_spares;
445199653d4eSeschrock 
4452fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++)
4453fa94a07fSbrendan 		if (sav->sav_vdevs[i]->vdev_guid == guid)
445499653d4eSeschrock 			return (B_TRUE);
445599653d4eSeschrock 
4456fa94a07fSbrendan 	for (i = 0; i < sav->sav_npending; i++) {
4457fa94a07fSbrendan 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4458fa94a07fSbrendan 		    &spareguid) == 0 && spareguid == guid)
445939c23413Seschrock 			return (B_TRUE);
446039c23413Seschrock 	}
446139c23413Seschrock 
446299653d4eSeschrock 	return (B_FALSE);
4463eaca9bbdSeschrock }
4464b1b8ab34Slling 
44653d7072f8Seschrock /*
44663d7072f8Seschrock  * Post a sysevent corresponding to the given event.  The 'name' must be one of
44673d7072f8Seschrock  * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
44683d7072f8Seschrock  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
44693d7072f8Seschrock  * in the userland libzpool, as we don't want consumers to misinterpret ztest
44703d7072f8Seschrock  * or zdb as real changes.
44713d7072f8Seschrock  */
44723d7072f8Seschrock void
44733d7072f8Seschrock spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
44743d7072f8Seschrock {
44753d7072f8Seschrock #ifdef _KERNEL
44763d7072f8Seschrock 	sysevent_t		*ev;
44773d7072f8Seschrock 	sysevent_attr_list_t	*attr = NULL;
44783d7072f8Seschrock 	sysevent_value_t	value;
44793d7072f8Seschrock 	sysevent_id_t		eid;
44803d7072f8Seschrock 
44813d7072f8Seschrock 	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
44823d7072f8Seschrock 	    SE_SLEEP);
44833d7072f8Seschrock 
44843d7072f8Seschrock 	value.value_type = SE_DATA_TYPE_STRING;
44853d7072f8Seschrock 	value.value.sv_string = spa_name(spa);
44863d7072f8Seschrock 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
44873d7072f8Seschrock 		goto done;
44883d7072f8Seschrock 
44893d7072f8Seschrock 	value.value_type = SE_DATA_TYPE_UINT64;
44903d7072f8Seschrock 	value.value.sv_uint64 = spa_guid(spa);
44913d7072f8Seschrock 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
44923d7072f8Seschrock 		goto done;
44933d7072f8Seschrock 
44943d7072f8Seschrock 	if (vd) {
44953d7072f8Seschrock 		value.value_type = SE_DATA_TYPE_UINT64;
44963d7072f8Seschrock 		value.value.sv_uint64 = vd->vdev_guid;
44973d7072f8Seschrock 		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
44983d7072f8Seschrock 		    SE_SLEEP) != 0)
44993d7072f8Seschrock 			goto done;
45003d7072f8Seschrock 
45013d7072f8Seschrock 		if (vd->vdev_path) {
45023d7072f8Seschrock 			value.value_type = SE_DATA_TYPE_STRING;
45033d7072f8Seschrock 			value.value.sv_string = vd->vdev_path;
45043d7072f8Seschrock 			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
45053d7072f8Seschrock 			    &value, SE_SLEEP) != 0)
45063d7072f8Seschrock 				goto done;
45073d7072f8Seschrock 		}
45083d7072f8Seschrock 	}
45093d7072f8Seschrock 
4510b01c3b58Seschrock 	if (sysevent_attach_attributes(ev, attr) != 0)
4511b01c3b58Seschrock 		goto done;
4512b01c3b58Seschrock 	attr = NULL;
4513b01c3b58Seschrock 
45143d7072f8Seschrock 	(void) log_sysevent(ev, SE_SLEEP, &eid);
45153d7072f8Seschrock 
45163d7072f8Seschrock done:
45173d7072f8Seschrock 	if (attr)
45183d7072f8Seschrock 		sysevent_free_attr(attr);
45193d7072f8Seschrock 	sysevent_free(ev);
45203d7072f8Seschrock #endif
45213d7072f8Seschrock }
4522