xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa.c (revision d5b5bb25)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
2199653d4eSeschrock 
22fa9e4066Sahrens /*
23b01c3b58Seschrock  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24fa9e4066Sahrens  * Use is subject to license terms.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens /*
28fa9e4066Sahrens  * This file contains all the routines used when modifying on-disk SPA state.
29fa9e4066Sahrens  * This includes opening, importing, destroying, exporting a pool, and syncing a
30fa9e4066Sahrens  * pool.
31fa9e4066Sahrens  */
32fa9e4066Sahrens 
33fa9e4066Sahrens #include <sys/zfs_context.h>
34ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h>
35fa9e4066Sahrens #include <sys/spa_impl.h>
36fa9e4066Sahrens #include <sys/zio.h>
37fa9e4066Sahrens #include <sys/zio_checksum.h>
38fa9e4066Sahrens #include <sys/zio_compress.h>
39fa9e4066Sahrens #include <sys/dmu.h>
40fa9e4066Sahrens #include <sys/dmu_tx.h>
41fa9e4066Sahrens #include <sys/zap.h>
42fa9e4066Sahrens #include <sys/zil.h>
43fa9e4066Sahrens #include <sys/vdev_impl.h>
44fa9e4066Sahrens #include <sys/metaslab.h>
45fa9e4066Sahrens #include <sys/uberblock_impl.h>
46fa9e4066Sahrens #include <sys/txg.h>
47fa9e4066Sahrens #include <sys/avl.h>
48fa9e4066Sahrens #include <sys/dmu_traverse.h>
49b1b8ab34Slling #include <sys/dmu_objset.h>
50fa9e4066Sahrens #include <sys/unique.h>
51fa9e4066Sahrens #include <sys/dsl_pool.h>
52b1b8ab34Slling #include <sys/dsl_dataset.h>
53fa9e4066Sahrens #include <sys/dsl_dir.h>
54fa9e4066Sahrens #include <sys/dsl_prop.h>
55b1b8ab34Slling #include <sys/dsl_synctask.h>
56fa9e4066Sahrens #include <sys/fs/zfs.h>
57fa94a07fSbrendan #include <sys/arc.h>
58fa9e4066Sahrens #include <sys/callb.h>
5995173954Sek #include <sys/systeminfo.h>
6095173954Sek #include <sys/sunddi.h>
61e7cbe64fSgw #include <sys/spa_boot.h>
62fa9e4066Sahrens 
63990b4856Slling #include "zfs_prop.h"
64b7b97454Sperrin #include "zfs_comutil.h"
65990b4856Slling 
66416e0cd8Sek int zio_taskq_threads = 8;
67416e0cd8Sek 
68990b4856Slling static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
6989a89ebfSlling static boolean_t spa_has_active_shared_spare(spa_t *spa);
70990b4856Slling 
71990b4856Slling /*
72990b4856Slling  * ==========================================================================
73990b4856Slling  * SPA properties routines
74990b4856Slling  * ==========================================================================
75990b4856Slling  */
76990b4856Slling 
77990b4856Slling /*
78990b4856Slling  * Add a (source=src, propname=propval) list to an nvlist.
79990b4856Slling  */
809d82f4f6Slling static void
81990b4856Slling spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
82990b4856Slling     uint64_t intval, zprop_source_t src)
83990b4856Slling {
84990b4856Slling 	const char *propname = zpool_prop_to_name(prop);
85990b4856Slling 	nvlist_t *propval;
86990b4856Slling 
	/*
	 * Build a nested nvlist describing the property: its source plus
	 * either a string value (when strval != NULL) or an integer value.
	 * KM_SLEEP allocation cannot fail, hence the VERIFY()s.
	 */
879d82f4f6Slling 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
889d82f4f6Slling 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
89990b4856Slling 
909d82f4f6Slling 	if (strval != NULL)
919d82f4f6Slling 		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
929d82f4f6Slling 	else
939d82f4f6Slling 		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
94990b4856Slling 
	/*
	 * nvlist_add_nvlist() copies propval into nvl, so the local copy
	 * is freed before returning.
	 */
959d82f4f6Slling 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
96990b4856Slling 	nvlist_free(propval);
97990b4856Slling }
98990b4856Slling 
99990b4856Slling /*
100990b4856Slling  * Get property values from the spa configuration.
101990b4856Slling  */
1029d82f4f6Slling static void
103990b4856Slling spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
104990b4856Slling {
105990b4856Slling 	uint64_t size = spa_get_space(spa);
106990b4856Slling 	uint64_t used = spa_get_alloc(spa);
107990b4856Slling 	uint64_t cap, version;
108990b4856Slling 	zprop_source_t src = ZPROP_SRC_NONE;
109c5904d13Seschrock 	spa_config_dirent_t *dp;
110990b4856Slling 
111990b4856Slling 	/*
112990b4856Slling 	 * readonly properties
113990b4856Slling 	 */
1149d82f4f6Slling 	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa->spa_name, 0, src);
1159d82f4f6Slling 	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
1169d82f4f6Slling 	spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
1179d82f4f6Slling 	spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);
118990b4856Slling 
	/* Capacity as an integer percentage; guard against a zero-size pool. */
119990b4856Slling 	cap = (size == 0) ? 0 : (used * 100 / size);
1209d82f4f6Slling 	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
121990b4856Slling 
1229d82f4f6Slling 	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
1239d82f4f6Slling 	spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
1249d82f4f6Slling 	    spa->spa_root_vdev->vdev_state, src);
125990b4856Slling 
126990b4856Slling 	/*
127990b4856Slling 	 * settable properties that are not stored in the pool property object.
128990b4856Slling 	 */
129990b4856Slling 	version = spa_version(spa);
130990b4856Slling 	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
131990b4856Slling 		src = ZPROP_SRC_DEFAULT;
132990b4856Slling 	else
133990b4856Slling 		src = ZPROP_SRC_LOCAL;
1349d82f4f6Slling 	spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
135990b4856Slling 
1369d82f4f6Slling 	if (spa->spa_root != NULL)
1379d82f4f6Slling 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
1389d82f4f6Slling 		    0, ZPROP_SRC_LOCAL);
139990b4856Slling 
	/*
	 * Report the cachefile property only when it differs from the
	 * default spa_config_path; a NULL scd_path is reported as "none".
	 */
140c5904d13Seschrock 	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
141c5904d13Seschrock 		if (dp->scd_path == NULL) {
1429d82f4f6Slling 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
143c5904d13Seschrock 			    "none", 0, ZPROP_SRC_LOCAL);
144c5904d13Seschrock 		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
1459d82f4f6Slling 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
146c5904d13Seschrock 			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
1472f8aaab3Seschrock 		}
1482f8aaab3Seschrock 	}
149990b4856Slling }
150990b4856Slling 
151990b4856Slling /*
152990b4856Slling  * Get zpool property values.
153990b4856Slling  */
154990b4856Slling int
155990b4856Slling spa_prop_get(spa_t *spa, nvlist_t **nvp)
156990b4856Slling {
157990b4856Slling 	zap_cursor_t zc;
158990b4856Slling 	zap_attribute_t za;
159990b4856Slling 	objset_t *mos = spa->spa_meta_objset;
160990b4856Slling 	int err;
161990b4856Slling 
	/* Allocate the result nvlist; ownership passes to the caller on success. */
1629d82f4f6Slling 	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
163990b4856Slling 
164990b4856Slling 	/*
165990b4856Slling 	 * Get properties from the spa config.
166990b4856Slling 	 */
1679d82f4f6Slling 	spa_prop_get_config(spa, nvp);
168990b4856Slling 
169990b4856Slling 	mutex_enter(&spa->spa_props_lock);
170990b4856Slling 	/* If no pool property object, no more prop to get. */
171990b4856Slling 	if (spa->spa_pool_props_object == 0) {
172990b4856Slling 		mutex_exit(&spa->spa_props_lock);
173990b4856Slling 		return (0);
174990b4856Slling 	}
175990b4856Slling 
176990b4856Slling 	/*
177990b4856Slling 	 * Get properties from the MOS pool property object.
178990b4856Slling 	 */
179990b4856Slling 	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
180990b4856Slling 	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
181990b4856Slling 	    zap_cursor_advance(&zc)) {
182990b4856Slling 		uint64_t intval = 0;
183990b4856Slling 		char *strval = NULL;
184990b4856Slling 		zprop_source_t src = ZPROP_SRC_DEFAULT;
185990b4856Slling 		zpool_prop_t prop;
186990b4856Slling 
		/* Skip ZAP entries that do not name a known pool property. */
187990b4856Slling 		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
188990b4856Slling 			continue;
189990b4856Slling 
		/* za_integer_length is bytes per element: 8 = uint64, 1 = string. */
190990b4856Slling 		switch (za.za_integer_length) {
191990b4856Slling 		case 8:
192990b4856Slling 			/* integer property */
193990b4856Slling 			if (za.za_first_integer !=
194990b4856Slling 			    zpool_prop_default_numeric(prop))
195990b4856Slling 				src = ZPROP_SRC_LOCAL;
196990b4856Slling 
			/*
			 * bootfs is stored as a dataset object number; resolve
			 * it to the dataset name for presentation.
			 */
197990b4856Slling 			if (prop == ZPOOL_PROP_BOOTFS) {
198990b4856Slling 				dsl_pool_t *dp;
199990b4856Slling 				dsl_dataset_t *ds = NULL;
200990b4856Slling 
201990b4856Slling 				dp = spa_get_dsl(spa);
202990b4856Slling 				rw_enter(&dp->dp_config_rwlock, RW_READER);
203745cd3c5Smaybee 				if (err = dsl_dataset_hold_obj(dp,
204745cd3c5Smaybee 				    za.za_first_integer, FTAG, &ds)) {
205990b4856Slling 					rw_exit(&dp->dp_config_rwlock);
206990b4856Slling 					break;
207990b4856Slling 				}
208990b4856Slling 
209990b4856Slling 				strval = kmem_alloc(
210990b4856Slling 				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
211990b4856Slling 				    KM_SLEEP);
212990b4856Slling 				dsl_dataset_name(ds, strval);
213745cd3c5Smaybee 				dsl_dataset_rele(ds, FTAG);
214990b4856Slling 				rw_exit(&dp->dp_config_rwlock);
215990b4856Slling 			} else {
216990b4856Slling 				strval = NULL;
217990b4856Slling 				intval = za.za_first_integer;
218990b4856Slling 			}
219990b4856Slling 
2209d82f4f6Slling 			spa_prop_add_list(*nvp, prop, strval, intval, src);
221990b4856Slling 
222990b4856Slling 			if (strval != NULL)
223990b4856Slling 				kmem_free(strval,
224990b4856Slling 				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
225990b4856Slling 
226990b4856Slling 			break;
227990b4856Slling 
228990b4856Slling 		case 1:
229990b4856Slling 			/* string property */
230990b4856Slling 			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
231990b4856Slling 			err = zap_lookup(mos, spa->spa_pool_props_object,
232990b4856Slling 			    za.za_name, 1, za.za_num_integers, strval);
233990b4856Slling 			if (err) {
234990b4856Slling 				kmem_free(strval, za.za_num_integers);
235990b4856Slling 				break;
236990b4856Slling 			}
2379d82f4f6Slling 			spa_prop_add_list(*nvp, prop, strval, 0, src);
238990b4856Slling 			kmem_free(strval, za.za_num_integers);
239990b4856Slling 			break;
240990b4856Slling 
241990b4856Slling 		default:
242990b4856Slling 			break;
243990b4856Slling 		}
244990b4856Slling 	}
245990b4856Slling 	zap_cursor_fini(&zc);
246990b4856Slling 	mutex_exit(&spa->spa_props_lock);
	/*
	 * NOTE(review): no goto in the visible code targets 'out:'; the label
	 * appears to be a leftover.  'err' here is the terminating status of
	 * the last zap_cursor_retrieve() (ENOENT at normal end-of-cursor).
	 * An error set inside the loop body (hold_obj/zap_lookup) is
	 * overwritten by the next retrieve, so it does not reach this check —
	 * TODO confirm that swallowing per-entry errors is intended.
	 */
247990b4856Slling out:
248990b4856Slling 	if (err && err != ENOENT) {
249990b4856Slling 		nvlist_free(*nvp);
2509d82f4f6Slling 		*nvp = NULL;
251990b4856Slling 		return (err);
252990b4856Slling 	}
253990b4856Slling 
254990b4856Slling 	return (0);
255990b4856Slling }
256990b4856Slling 
257990b4856Slling /*
258990b4856Slling  * Validate the given pool properties nvlist and modify the list
259990b4856Slling  * for the property values to be set.
260990b4856Slling  */
261990b4856Slling static int
262990b4856Slling spa_prop_validate(spa_t *spa, nvlist_t *props)
263990b4856Slling {
264990b4856Slling 	nvpair_t *elem;
265990b4856Slling 	int error = 0, reset_bootfs = 0;
266990b4856Slling 	uint64_t objnum;
267990b4856Slling 
	/*
	 * Walk every pair in 'props'; the first validation failure stops the
	 * scan and its error code is returned.  An unknown property name is
	 * rejected outright with EINVAL.
	 */
268990b4856Slling 	elem = NULL;
269990b4856Slling 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
270990b4856Slling 		zpool_prop_t prop;
271990b4856Slling 		char *propname, *strval;
272990b4856Slling 		uint64_t intval;
273990b4856Slling 		objset_t *os;
2742f8aaab3Seschrock 		char *slash;
275990b4856Slling 
276990b4856Slling 		propname = nvpair_name(elem);
277990b4856Slling 
278990b4856Slling 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
279990b4856Slling 			return (EINVAL);
280990b4856Slling 
281990b4856Slling 		switch (prop) {
		/* version may only move forward, and not past SPA_VERSION. */
282990b4856Slling 		case ZPOOL_PROP_VERSION:
283990b4856Slling 			error = nvpair_value_uint64(elem, &intval);
284990b4856Slling 			if (!error &&
285990b4856Slling 			    (intval < spa_version(spa) || intval > SPA_VERSION))
286990b4856Slling 				error = EINVAL;
287990b4856Slling 			break;
288990b4856Slling 
		/* boolean properties: only 0 or 1 is acceptable. */
289990b4856Slling 		case ZPOOL_PROP_DELEGATION:
290990b4856Slling 		case ZPOOL_PROP_AUTOREPLACE:
291*d5b5bb25SRich Morris 		case ZPOOL_PROP_LISTSNAPS:
292990b4856Slling 			error = nvpair_value_uint64(elem, &intval);
293990b4856Slling 			if (!error && intval > 1)
294990b4856Slling 				error = EINVAL;
295990b4856Slling 			break;
296990b4856Slling 
297990b4856Slling 		case ZPOOL_PROP_BOOTFS:
298990b4856Slling 			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
299990b4856Slling 				error = ENOTSUP;
300990b4856Slling 				break;
301990b4856Slling 			}
302990b4856Slling 
303990b4856Slling 			/*
30415e6edf1Sgw 			 * Make sure the vdev config is bootable
305990b4856Slling 			 */
30615e6edf1Sgw 			if (!vdev_is_bootable(spa->spa_root_vdev)) {
307990b4856Slling 				error = ENOTSUP;
308990b4856Slling 				break;
309990b4856Slling 			}
310990b4856Slling 
			/*
			 * The caller supplies bootfs as a dataset name; after
			 * validation the pair is rewritten below (end of
			 * function) to carry the dataset object number instead.
			 */
311990b4856Slling 			reset_bootfs = 1;
312990b4856Slling 
313990b4856Slling 			error = nvpair_value_string(elem, &strval);
314990b4856Slling 
315990b4856Slling 			if (!error) {
31615e6edf1Sgw 				uint64_t compress;
31715e6edf1Sgw 
				/* An empty name clears bootfs to its default. */
318990b4856Slling 				if (strval == NULL || strval[0] == '\0') {
319990b4856Slling 					objnum = zpool_prop_default_numeric(
320990b4856Slling 					    ZPOOL_PROP_BOOTFS);
321990b4856Slling 					break;
322990b4856Slling 				}
323990b4856Slling 
				/* NOTE(review): assignment-in-condition is intentional here. */
324990b4856Slling 				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
325745cd3c5Smaybee 				    DS_MODE_USER | DS_MODE_READONLY, &os))
326990b4856Slling 					break;
32715e6edf1Sgw 
32815e6edf1Sgw 				/* We don't support gzip bootable datasets */
32915e6edf1Sgw 				if ((error = dsl_prop_get_integer(strval,
33015e6edf1Sgw 				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
33115e6edf1Sgw 				    &compress, NULL)) == 0 &&
33215e6edf1Sgw 				    !BOOTFS_COMPRESS_VALID(compress)) {
33315e6edf1Sgw 					error = ENOTSUP;
33415e6edf1Sgw 				} else {
33515e6edf1Sgw 					objnum = dmu_objset_id(os);
33615e6edf1Sgw 				}
337990b4856Slling 				dmu_objset_close(os);
338990b4856Slling 			}
339990b4856Slling 			break;
3400a4e9518Sgw 		case ZPOOL_PROP_FAILUREMODE:
3410a4e9518Sgw 			error = nvpair_value_uint64(elem, &intval);
3420a4e9518Sgw 			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
3430a4e9518Sgw 			    intval > ZIO_FAILURE_MODE_PANIC))
3440a4e9518Sgw 				error = EINVAL;
3450a4e9518Sgw 
3460a4e9518Sgw 			/*
3470a4e9518Sgw 			 * This is a special case which only occurs when
3480a4e9518Sgw 			 * the pool has completely failed. This allows
3490a4e9518Sgw 			 * the user to change the in-core failmode property
3500a4e9518Sgw 			 * without syncing it out to disk (I/Os might
3510a4e9518Sgw 			 * currently be blocked). We do this by returning
3520a4e9518Sgw 			 * EIO to the caller (spa_prop_set) to trick it
3530a4e9518Sgw 			 * into thinking we encountered a property validation
3540a4e9518Sgw 			 * error.
3550a4e9518Sgw 			 */
3560a4e9518Sgw 			if (!error && spa_state(spa) == POOL_STATE_IO_FAILURE) {
3570a4e9518Sgw 				spa->spa_failmode = intval;
3580a4e9518Sgw 				error = EIO;
3590a4e9518Sgw 			}
3600a4e9518Sgw 			break;
3612f8aaab3Seschrock 
		/*
		 * cachefile must be "", "none", or an absolute path whose
		 * final component is a plausible file name (not "." or "..").
		 */
3622f8aaab3Seschrock 		case ZPOOL_PROP_CACHEFILE:
3632f8aaab3Seschrock 			if ((error = nvpair_value_string(elem, &strval)) != 0)
3642f8aaab3Seschrock 				break;
3652f8aaab3Seschrock 
3662f8aaab3Seschrock 			if (strval[0] == '\0')
3672f8aaab3Seschrock 				break;
3682f8aaab3Seschrock 
3692f8aaab3Seschrock 			if (strcmp(strval, "none") == 0)
3702f8aaab3Seschrock 				break;
3712f8aaab3Seschrock 
3722f8aaab3Seschrock 			if (strval[0] != '/') {
3732f8aaab3Seschrock 				error = EINVAL;
3742f8aaab3Seschrock 				break;
3752f8aaab3Seschrock 			}
3762f8aaab3Seschrock 
3772f8aaab3Seschrock 			slash = strrchr(strval, '/');
3782f8aaab3Seschrock 			ASSERT(slash != NULL);
3792f8aaab3Seschrock 
3802f8aaab3Seschrock 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
3812f8aaab3Seschrock 			    strcmp(slash, "/..") == 0)
3822f8aaab3Seschrock 				error = EINVAL;
3832f8aaab3Seschrock 			break;
384990b4856Slling 		}
385990b4856Slling 
386990b4856Slling 		if (error)
387990b4856Slling 			break;
388990b4856Slling 	}
389990b4856Slling 
	/*
	 * Replace the validated bootfs name with the resolved object number
	 * so the sync task operates on a stable identifier.
	 */
390990b4856Slling 	if (!error && reset_bootfs) {
391990b4856Slling 		error = nvlist_remove(props,
392990b4856Slling 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
393990b4856Slling 
394990b4856Slling 		if (!error) {
395990b4856Slling 			error = nvlist_add_uint64(props,
396990b4856Slling 			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
397990b4856Slling 		}
398990b4856Slling 	}
399990b4856Slling 
400990b4856Slling 	return (error);
401990b4856Slling }
402990b4856Slling 
403990b4856Slling int
404990b4856Slling spa_prop_set(spa_t *spa, nvlist_t *nvp)
405990b4856Slling {
406990b4856Slling 	int error;
407990b4856Slling 
	/* Validate (and normalize, e.g. bootfs) before touching disk state. */
408990b4856Slling 	if ((error = spa_prop_validate(spa, nvp)) != 0)
409990b4856Slling 		return (error);
410990b4856Slling 
	/*
	 * The actual update runs in syncing context via spa_sync_props();
	 * the return value is the sync task's error code.
	 */
411990b4856Slling 	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
412990b4856Slling 	    spa, nvp, 3));
413990b4856Slling }
414990b4856Slling 
415990b4856Slling /*
416990b4856Slling  * If the bootfs property value is dsobj, clear it.
417990b4856Slling  */
418990b4856Slling void
419990b4856Slling spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
420990b4856Slling {
	/*
	 * Only act when 'dsobj' is the current bootfs and a pool property
	 * object exists; the ZAP removal is performed within 'tx'.
	 */
421990b4856Slling 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
422990b4856Slling 		VERIFY(zap_remove(spa->spa_meta_objset,
423990b4856Slling 		    spa->spa_pool_props_object,
424990b4856Slling 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
425990b4856Slling 		spa->spa_bootfs = 0;
426990b4856Slling 	}
427990b4856Slling }
428990b4856Slling 
429fa9e4066Sahrens /*
430fa9e4066Sahrens  * ==========================================================================
431fa9e4066Sahrens  * SPA state manipulation (open/create/destroy/import/export)
432fa9e4066Sahrens  * ==========================================================================
433fa9e4066Sahrens  */
434fa9e4066Sahrens 
435ea8dc4b6Seschrock static int
436ea8dc4b6Seschrock spa_error_entry_compare(const void *a, const void *b)
437ea8dc4b6Seschrock {
	/*
	 * AVL comparator for the spa error lists: orders entries by a raw
	 * byte comparison of their zbookmark_t.
	 *
	 * NOTE(review): this relies on bcmp() returning a memcmp-style
	 * signed result; POSIX only guarantees zero/nonzero.  Presumably
	 * true for this platform's bcmp — confirm if porting.
	 */
438ea8dc4b6Seschrock 	spa_error_entry_t *sa = (spa_error_entry_t *)a;
439ea8dc4b6Seschrock 	spa_error_entry_t *sb = (spa_error_entry_t *)b;
440ea8dc4b6Seschrock 	int ret;
441ea8dc4b6Seschrock 
442ea8dc4b6Seschrock 	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
443ea8dc4b6Seschrock 	    sizeof (zbookmark_t));
444ea8dc4b6Seschrock 
	/* AVL requires strictly -1/0/+1. */
445ea8dc4b6Seschrock 	if (ret < 0)
446ea8dc4b6Seschrock 		return (-1);
447ea8dc4b6Seschrock 	else if (ret > 0)
448ea8dc4b6Seschrock 		return (1);
449ea8dc4b6Seschrock 	else
450ea8dc4b6Seschrock 		return (0);
451ea8dc4b6Seschrock }
452ea8dc4b6Seschrock 
453ea8dc4b6Seschrock /*
454ea8dc4b6Seschrock  * Utility function which retrieves copies of the current logs and
455ea8dc4b6Seschrock  * re-initializes them in the process.
456ea8dc4b6Seschrock  */
457ea8dc4b6Seschrock void
458ea8dc4b6Seschrock spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
459ea8dc4b6Seschrock {
	/* Caller must hold spa_errlist_lock for the swap to be atomic. */
460ea8dc4b6Seschrock 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
461ea8dc4b6Seschrock 
	/*
	 * Hand the current trees (including their nodes) to the caller by
	 * structure copy, then re-create empty trees in the spa so error
	 * logging can continue uninterrupted.
	 */
462ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
463ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
464ea8dc4b6Seschrock 
465ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_scrub,
466ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
467ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
468ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_last,
469ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
470ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
471ea8dc4b6Seschrock }
472ea8dc4b6Seschrock 
473fa9e4066Sahrens /*
474fa9e4066Sahrens  * Activate an uninitialized pool.
475fa9e4066Sahrens  */
476fa9e4066Sahrens static void
477fa9e4066Sahrens spa_activate(spa_t *spa)
478fa9e4066Sahrens {
479fa9e4066Sahrens 	int t;
480fa9e4066Sahrens 
481fa9e4066Sahrens 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
482fa9e4066Sahrens 
483fa9e4066Sahrens 	spa->spa_state = POOL_STATE_ACTIVE;
484fa9e4066Sahrens 
	/* Separate metaslab classes for normal and intent-log allocations. */
485fa9e4066Sahrens 	spa->spa_normal_class = metaslab_class_create();
4868654d025Sperrin 	spa->spa_log_class = metaslab_class_create();
487fa9e4066Sahrens 
	/*
	 * One issue taskq and one interrupt taskq per zio type, each with
	 * zio_taskq_threads worker threads (tunable, default 8).
	 */
488fa9e4066Sahrens 	for (t = 0; t < ZIO_TYPES; t++) {
489fa9e4066Sahrens 		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
490416e0cd8Sek 		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
491fa9e4066Sahrens 		    TASKQ_PREPOPULATE);
492fa9e4066Sahrens 		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
493416e0cd8Sek 		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
494fa9e4066Sahrens 		    TASKQ_PREPOPULATE);
495fa9e4066Sahrens 	}
496fa9e4066Sahrens 
497fa9e4066Sahrens 	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
498fa9e4066Sahrens 	    offsetof(vdev_t, vdev_dirty_node));
4990a4e9518Sgw 	list_create(&spa->spa_zio_list, sizeof (zio_t),
5000a4e9518Sgw 	    offsetof(zio_t, zio_link_node));
501fa9e4066Sahrens 
502fa9e4066Sahrens 	txg_list_create(&spa->spa_vdev_txg_list,
503fa9e4066Sahrens 	    offsetof(struct vdev, vdev_txg_node));
504ea8dc4b6Seschrock 
	/* Fresh error-bookmark trees; see spa_error_entry_compare(). */
505ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_scrub,
506ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
507ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
508ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_last,
509ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
510ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
511fa9e4066Sahrens }
512fa9e4066Sahrens 
513fa9e4066Sahrens /*
514fa9e4066Sahrens  * Opposite of spa_activate().
515fa9e4066Sahrens  */
516fa9e4066Sahrens static void
517fa9e4066Sahrens spa_deactivate(spa_t *spa)
518fa9e4066Sahrens {
519fa9e4066Sahrens 	int t;
520fa9e4066Sahrens 
	/* Syncing must already be stopped and pool/vdev state torn down. */
521fa9e4066Sahrens 	ASSERT(spa->spa_sync_on == B_FALSE);
522fa9e4066Sahrens 	ASSERT(spa->spa_dsl_pool == NULL);
523fa9e4066Sahrens 	ASSERT(spa->spa_root_vdev == NULL);
524fa9e4066Sahrens 
525fa9e4066Sahrens 	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
526fa9e4066Sahrens 
527fa9e4066Sahrens 	txg_list_destroy(&spa->spa_vdev_txg_list);
528fa9e4066Sahrens 
529fa9e4066Sahrens 	list_destroy(&spa->spa_dirty_list);
5300a4e9518Sgw 	list_destroy(&spa->spa_zio_list);
531fa9e4066Sahrens 
	/* Tear down the per-zio-type taskqs created in spa_activate(). */
532fa9e4066Sahrens 	for (t = 0; t < ZIO_TYPES; t++) {
533fa9e4066Sahrens 		taskq_destroy(spa->spa_zio_issue_taskq[t]);
534fa9e4066Sahrens 		taskq_destroy(spa->spa_zio_intr_taskq[t]);
535fa9e4066Sahrens 		spa->spa_zio_issue_taskq[t] = NULL;
536fa9e4066Sahrens 		spa->spa_zio_intr_taskq[t] = NULL;
537fa9e4066Sahrens 	}
538fa9e4066Sahrens 
539fa9e4066Sahrens 	metaslab_class_destroy(spa->spa_normal_class);
540fa9e4066Sahrens 	spa->spa_normal_class = NULL;
541fa9e4066Sahrens 
5428654d025Sperrin 	metaslab_class_destroy(spa->spa_log_class);
5438654d025Sperrin 	spa->spa_log_class = NULL;
5448654d025Sperrin 
545ea8dc4b6Seschrock 	/*
546ea8dc4b6Seschrock 	 * If this was part of an import or the open otherwise failed, we may
547ea8dc4b6Seschrock 	 * still have errors left in the queues.  Empty them just in case.
548ea8dc4b6Seschrock 	 */
549ea8dc4b6Seschrock 	spa_errlog_drain(spa);
550ea8dc4b6Seschrock 
551ea8dc4b6Seschrock 	avl_destroy(&spa->spa_errlist_scrub);
552ea8dc4b6Seschrock 	avl_destroy(&spa->spa_errlist_last);
553ea8dc4b6Seschrock 
554fa9e4066Sahrens 	spa->spa_state = POOL_STATE_UNINITIALIZED;
555fa9e4066Sahrens }
556fa9e4066Sahrens 
557fa9e4066Sahrens /*
558fa9e4066Sahrens  * Verify a pool configuration, and construct the vdev tree appropriately.  This
559fa9e4066Sahrens  * will create all the necessary vdevs in the appropriate layout, with each vdev
560fa9e4066Sahrens  * in the CLOSED state.  This will prep the pool before open/creation/import.
561fa9e4066Sahrens  * All vdev validation is done by the vdev_alloc() routine.
562fa9e4066Sahrens  */
56399653d4eSeschrock static int
56499653d4eSeschrock spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
56599653d4eSeschrock     uint_t id, int atype)
566fa9e4066Sahrens {
567fa9e4066Sahrens 	nvlist_t **child;
568fa9e4066Sahrens 	uint_t c, children;
56999653d4eSeschrock 	int error;
570fa9e4066Sahrens 
	/* Allocate this vdev; *vdp is only valid when 0 is returned. */
57199653d4eSeschrock 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
57299653d4eSeschrock 		return (error);
573fa9e4066Sahrens 
	/* Leaf vdevs have no children to recurse into. */
57499653d4eSeschrock 	if ((*vdp)->vdev_ops->vdev_op_leaf)
57599653d4eSeschrock 		return (0);
576fa9e4066Sahrens 
	/* An interior vdev with no child array is a malformed config. */
577fa9e4066Sahrens 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
578fa9e4066Sahrens 	    &child, &children) != 0) {
57999653d4eSeschrock 		vdev_free(*vdp);
58099653d4eSeschrock 		*vdp = NULL;
58199653d4eSeschrock 		return (EINVAL);
582fa9e4066Sahrens 	}
583fa9e4066Sahrens 
	/*
	 * Recurse into each child; on any failure, free the partially built
	 * subtree (vdev_free() of the root frees attached children) and
	 * NULL out *vdp so the caller sees no dangling pointer.
	 */
584fa9e4066Sahrens 	for (c = 0; c < children; c++) {
58599653d4eSeschrock 		vdev_t *vd;
58699653d4eSeschrock 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
58799653d4eSeschrock 		    atype)) != 0) {
58899653d4eSeschrock 			vdev_free(*vdp);
58999653d4eSeschrock 			*vdp = NULL;
59099653d4eSeschrock 			return (error);
591fa9e4066Sahrens 		}
592fa9e4066Sahrens 	}
593fa9e4066Sahrens 
59499653d4eSeschrock 	ASSERT(*vdp != NULL);
59599653d4eSeschrock 
59699653d4eSeschrock 	return (0);
597fa9e4066Sahrens }
598fa9e4066Sahrens 
599fa9e4066Sahrens /*
600fa9e4066Sahrens  * Opposite of spa_load().
601fa9e4066Sahrens  */
602fa9e4066Sahrens static void
603fa9e4066Sahrens spa_unload(spa_t *spa)
604fa9e4066Sahrens {
60599653d4eSeschrock 	int i;
60699653d4eSeschrock 
607ea8dc4b6Seschrock 	/*
608ea8dc4b6Seschrock 	 * Stop async tasks.
609ea8dc4b6Seschrock 	 */
610ea8dc4b6Seschrock 	spa_async_suspend(spa);
611ea8dc4b6Seschrock 
612fa9e4066Sahrens 	/*
613fa9e4066Sahrens 	 * Stop syncing.
614fa9e4066Sahrens 	 */
615fa9e4066Sahrens 	if (spa->spa_sync_on) {
616fa9e4066Sahrens 		txg_sync_stop(spa->spa_dsl_pool);
617fa9e4066Sahrens 		spa->spa_sync_on = B_FALSE;
618fa9e4066Sahrens 	}
619fa9e4066Sahrens 
620fa9e4066Sahrens 	/*
621fa9e4066Sahrens 	 * Wait for any outstanding prefetch I/O to complete.
622fa9e4066Sahrens 	 */
	/* Taking and dropping the config lock as writer drains in-flight users. */
623ea8dc4b6Seschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
624ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
625fa9e4066Sahrens 
626fa94a07fSbrendan 	/*
627fa94a07fSbrendan 	 * Drop and purge level 2 cache
628fa94a07fSbrendan 	 */
629fa94a07fSbrendan 	spa_l2cache_drop(spa);
630fa94a07fSbrendan 
631fa9e4066Sahrens 	/*
632fa9e4066Sahrens 	 * Close the dsl pool.
633fa9e4066Sahrens 	 */
634fa9e4066Sahrens 	if (spa->spa_dsl_pool) {
635fa9e4066Sahrens 		dsl_pool_close(spa->spa_dsl_pool);
636fa9e4066Sahrens 		spa->spa_dsl_pool = NULL;
637fa9e4066Sahrens 	}
638fa9e4066Sahrens 
639fa9e4066Sahrens 	/*
640fa9e4066Sahrens 	 * Close all vdevs.
641fa9e4066Sahrens 	 */
6420e34b6a7Sbonwick 	if (spa->spa_root_vdev)
643fa9e4066Sahrens 		vdev_free(spa->spa_root_vdev);
	/* vdev_free() of the root is expected to clear spa_root_vdev. */
6440e34b6a7Sbonwick 	ASSERT(spa->spa_root_vdev == NULL);
645ea8dc4b6Seschrock 
	/*
	 * Free the spare vdevs, their pointer array, and the cached spare
	 * config, then reset the count.
	 */
646fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++)
647fa94a07fSbrendan 		vdev_free(spa->spa_spares.sav_vdevs[i]);
648fa94a07fSbrendan 	if (spa->spa_spares.sav_vdevs) {
649fa94a07fSbrendan 		kmem_free(spa->spa_spares.sav_vdevs,
650fa94a07fSbrendan 		    spa->spa_spares.sav_count * sizeof (void *));
651fa94a07fSbrendan 		spa->spa_spares.sav_vdevs = NULL;
65299653d4eSeschrock 	}
653fa94a07fSbrendan 	if (spa->spa_spares.sav_config) {
654fa94a07fSbrendan 		nvlist_free(spa->spa_spares.sav_config);
655fa94a07fSbrendan 		spa->spa_spares.sav_config = NULL;
656fa94a07fSbrendan 	}
6572ce8af81SEric Schrock 	spa->spa_spares.sav_count = 0;
658fa94a07fSbrendan 
	/* Same teardown for the level-2 cache device list. */
659fa94a07fSbrendan 	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
660fa94a07fSbrendan 		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
661fa94a07fSbrendan 	if (spa->spa_l2cache.sav_vdevs) {
662fa94a07fSbrendan 		kmem_free(spa->spa_l2cache.sav_vdevs,
663fa94a07fSbrendan 		    spa->spa_l2cache.sav_count * sizeof (void *));
664fa94a07fSbrendan 		spa->spa_l2cache.sav_vdevs = NULL;
665fa94a07fSbrendan 	}
666fa94a07fSbrendan 	if (spa->spa_l2cache.sav_config) {
667fa94a07fSbrendan 		nvlist_free(spa->spa_l2cache.sav_config);
668fa94a07fSbrendan 		spa->spa_l2cache.sav_config = NULL;
66999653d4eSeschrock 	}
6702ce8af81SEric Schrock 	spa->spa_l2cache.sav_count = 0;
67199653d4eSeschrock 
	/* Re-enable async tasks (undoes spa_async_suspend() above). */
672ea8dc4b6Seschrock 	spa->spa_async_suspended = 0;
673fa9e4066Sahrens }
674fa9e4066Sahrens 
67599653d4eSeschrock /*
67699653d4eSeschrock  * Load (or re-load) the current list of vdevs describing the active spares for
67799653d4eSeschrock  * this pool.  When this is called, we have some form of basic information in
678fa94a07fSbrendan  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
679fa94a07fSbrendan  * then re-generate a more complete list including status information.
68099653d4eSeschrock  */
68199653d4eSeschrock static void
68299653d4eSeschrock spa_load_spares(spa_t *spa)
68399653d4eSeschrock {
68499653d4eSeschrock 	nvlist_t **spares;
68599653d4eSeschrock 	uint_t nspares;
68699653d4eSeschrock 	int i;
68739c23413Seschrock 	vdev_t *vd, *tvd;
68899653d4eSeschrock 
68999653d4eSeschrock 	/*
69099653d4eSeschrock 	 * First, close and free any existing spare vdevs.
69199653d4eSeschrock 	 */
692fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
693fa94a07fSbrendan 		vd = spa->spa_spares.sav_vdevs[i];
69439c23413Seschrock 
69539c23413Seschrock 		/* Undo the call to spa_activate() below */
696c5904d13Seschrock 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
697c5904d13Seschrock 		    B_FALSE)) != NULL && tvd->vdev_isspare)
69839c23413Seschrock 			spa_spare_remove(tvd);
69939c23413Seschrock 		vdev_close(vd);
70039c23413Seschrock 		vdev_free(vd);
70199653d4eSeschrock 	}
70239c23413Seschrock 
703fa94a07fSbrendan 	if (spa->spa_spares.sav_vdevs)
704fa94a07fSbrendan 		kmem_free(spa->spa_spares.sav_vdevs,
705fa94a07fSbrendan 		    spa->spa_spares.sav_count * sizeof (void *));
70699653d4eSeschrock 
707fa94a07fSbrendan 	if (spa->spa_spares.sav_config == NULL)
70899653d4eSeschrock 		nspares = 0;
70999653d4eSeschrock 	else
710fa94a07fSbrendan 		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
71199653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
71299653d4eSeschrock 
713fa94a07fSbrendan 	spa->spa_spares.sav_count = (int)nspares;
714fa94a07fSbrendan 	spa->spa_spares.sav_vdevs = NULL;
71599653d4eSeschrock 
71699653d4eSeschrock 	if (nspares == 0)
71799653d4eSeschrock 		return;
71899653d4eSeschrock 
71999653d4eSeschrock 	/*
72099653d4eSeschrock 	 * Construct the array of vdevs, opening them to get status in the
72139c23413Seschrock 	 * process.   For each spare, there is potentially two different vdev_t
72239c23413Seschrock 	 * structures associated with it: one in the list of spares (used only
72339c23413Seschrock 	 * for basic validation purposes) and one in the active vdev
72439c23413Seschrock 	 * configuration (if it's spared in).  During this phase we open and
72539c23413Seschrock 	 * validate each vdev on the spare list.  If the vdev also exists in the
72639c23413Seschrock 	 * active configuration, then we also mark this vdev as an active spare.
72799653d4eSeschrock 	 */
728fa94a07fSbrendan 	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
729fa94a07fSbrendan 	    KM_SLEEP);
730fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
73199653d4eSeschrock 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
73299653d4eSeschrock 		    VDEV_ALLOC_SPARE) == 0);
73399653d4eSeschrock 		ASSERT(vd != NULL);
73499653d4eSeschrock 
735fa94a07fSbrendan 		spa->spa_spares.sav_vdevs[i] = vd;
73699653d4eSeschrock 
737c5904d13Seschrock 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
738c5904d13Seschrock 		    B_FALSE)) != NULL) {
73939c23413Seschrock 			if (!tvd->vdev_isspare)
74039c23413Seschrock 				spa_spare_add(tvd);
74139c23413Seschrock 
74239c23413Seschrock 			/*
74339c23413Seschrock 			 * We only mark the spare active if we were successfully
74439c23413Seschrock 			 * able to load the vdev.  Otherwise, importing a pool
74539c23413Seschrock 			 * with a bad active spare would result in strange
74639c23413Seschrock 			 * behavior, because multiple pool would think the spare
74739c23413Seschrock 			 * is actively in use.
74839c23413Seschrock 			 *
74939c23413Seschrock 			 * There is a vulnerability here to an equally bizarre
75039c23413Seschrock 			 * circumstance, where a dead active spare is later
75139c23413Seschrock 			 * brought back to life (onlined or otherwise).  Given
75239c23413Seschrock 			 * the rarity of this scenario, and the extra complexity
75339c23413Seschrock 			 * it adds, we ignore the possibility.
75439c23413Seschrock 			 */
75539c23413Seschrock 			if (!vdev_is_dead(tvd))
75639c23413Seschrock 				spa_spare_activate(tvd);
75739c23413Seschrock 		}
75839c23413Seschrock 
75999653d4eSeschrock 		if (vdev_open(vd) != 0)
76099653d4eSeschrock 			continue;
76199653d4eSeschrock 
76299653d4eSeschrock 		vd->vdev_top = vd;
763fa94a07fSbrendan 		if (vdev_validate_aux(vd) == 0)
764fa94a07fSbrendan 			spa_spare_add(vd);
76599653d4eSeschrock 	}
76699653d4eSeschrock 
76799653d4eSeschrock 	/*
76899653d4eSeschrock 	 * Recompute the stashed list of spares, with status information
76999653d4eSeschrock 	 * this time.
77099653d4eSeschrock 	 */
771fa94a07fSbrendan 	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
77299653d4eSeschrock 	    DATA_TYPE_NVLIST_ARRAY) == 0);
77399653d4eSeschrock 
774fa94a07fSbrendan 	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
775fa94a07fSbrendan 	    KM_SLEEP);
776fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++)
777fa94a07fSbrendan 		spares[i] = vdev_config_generate(spa,
778fa94a07fSbrendan 		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
779fa94a07fSbrendan 	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
780fa94a07fSbrendan 	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
781fa94a07fSbrendan 	for (i = 0; i < spa->spa_spares.sav_count; i++)
78299653d4eSeschrock 		nvlist_free(spares[i]);
783fa94a07fSbrendan 	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
784fa94a07fSbrendan }
785fa94a07fSbrendan 
/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 *
 * Caller must hold the SPA config lock as writer (see the spa_config_enter()
 * calls surrounding the invocations in spa_load()).
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		/*
		 * No cache devices configured.  newvdevs and l2cache remain
		 * unallocated; they are never dereferenced below because
		 * nl2cache == 0 and we goto out before installing them.
		 */
		nl2cache = 0;
	}

	/*
	 * Detach the current vdev array so it can be rebuilt, matching old
	 * entries to new ones by guid where possible.
	 */
	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				/* Claimed: exempt from the purge loop below. */
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			/* Aux vdevs act as their own top-level vdev. */
			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			/*
			 * Only hand the device to the L2ARC if it opened
			 * successfully; the usable region skips the label.
			 */
			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			/*
			 * Remove from the L2ARC only if the pool is writable
			 * and the device is still known to the L2ARC.
			 */
			if (spa_mode & FWRITE &&
			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL &&
			    l2arc_vdev_present(vd)) {
				l2arc_remove_vdev(vd);
			}
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	/*
	 * If we jumped straight here, sav_count is still 0, so neither loop
	 * body nor the kmem_free() executes and the (possibly unallocated)
	 * l2cache pointer is never read.
	 */
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
91899653d4eSeschrock 
91999653d4eSeschrock static int
92099653d4eSeschrock load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
92199653d4eSeschrock {
92299653d4eSeschrock 	dmu_buf_t *db;
92399653d4eSeschrock 	char *packed = NULL;
92499653d4eSeschrock 	size_t nvsize = 0;
92599653d4eSeschrock 	int error;
92699653d4eSeschrock 	*value = NULL;
92799653d4eSeschrock 
92899653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
92999653d4eSeschrock 	nvsize = *(uint64_t *)db->db_data;
93099653d4eSeschrock 	dmu_buf_rele(db, FTAG);
93199653d4eSeschrock 
93299653d4eSeschrock 	packed = kmem_alloc(nvsize, KM_SLEEP);
93399653d4eSeschrock 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
93499653d4eSeschrock 	if (error == 0)
93599653d4eSeschrock 		error = nvlist_unpack(packed, nvsize, value, 0);
93699653d4eSeschrock 	kmem_free(packed, nvsize);
93799653d4eSeschrock 
93899653d4eSeschrock 	return (error);
93999653d4eSeschrock }
94099653d4eSeschrock 
9413d7072f8Seschrock /*
9423d7072f8Seschrock  * Checks to see if the given vdev could not be opened, in which case we post a
9433d7072f8Seschrock  * sysevent to notify the autoreplace code that the device has been removed.
9443d7072f8Seschrock  */
9453d7072f8Seschrock static void
9463d7072f8Seschrock spa_check_removed(vdev_t *vd)
9473d7072f8Seschrock {
9483d7072f8Seschrock 	int c;
9493d7072f8Seschrock 
9503d7072f8Seschrock 	for (c = 0; c < vd->vdev_children; c++)
9513d7072f8Seschrock 		spa_check_removed(vd->vdev_child[c]);
9523d7072f8Seschrock 
9533d7072f8Seschrock 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
9543d7072f8Seschrock 		zfs_post_autoreplace(vd->vdev_spa, vd);
9553d7072f8Seschrock 		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
9563d7072f8Seschrock 	}
9573d7072f8Seschrock }
9583d7072f8Seschrock 
959b87f3af3Sperrin /*
960b87f3af3Sperrin  * Check for missing log devices
961b87f3af3Sperrin  */
962b87f3af3Sperrin int
963b87f3af3Sperrin spa_check_logs(spa_t *spa)
964b87f3af3Sperrin {
965b87f3af3Sperrin 	switch (spa->spa_log_state) {
966b87f3af3Sperrin 	case SPA_LOG_MISSING:
967b87f3af3Sperrin 		/* need to recheck in case slog has been restored */
968b87f3af3Sperrin 	case SPA_LOG_UNKNOWN:
969b87f3af3Sperrin 		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
970b87f3af3Sperrin 		    DS_FIND_CHILDREN)) {
971b87f3af3Sperrin 			spa->spa_log_state = SPA_LOG_MISSING;
972b87f3af3Sperrin 			return (1);
973b87f3af3Sperrin 		}
974b87f3af3Sperrin 		break;
975b87f3af3Sperrin 
976b87f3af3Sperrin 	case SPA_LOG_CLEAR:
977b87f3af3Sperrin 		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
978b87f3af3Sperrin 		    DS_FIND_CHILDREN);
979b87f3af3Sperrin 		break;
980b87f3af3Sperrin 	}
981b87f3af3Sperrin 	spa->spa_log_state = SPA_LOG_GOOD;
982b87f3af3Sperrin 	return (0);
983b87f3af3Sperrin }
984b87f3af3Sperrin 
/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 *
 *   spa       - pool being loaded; must already be activated
 *   config    - configuration nvlist describing the pool (label or cache copy)
 *   state     - kind of load (open/import/tryimport/recover); affects which
 *               checks run and whether side effects (log claim, cache update)
 *               are performed
 *   mosconfig - B_TRUE if 'config' is the authoritative copy from the MOS;
 *               if B_FALSE, the trusted copy is read from the MOS and
 *               spa_load() recurses on it
 *
 * Returns 0 on success or an errno-style error; on failure the root vdev's
 * state is set to reflect the cause and an FMA ereport is posted (except for
 * EBADF, the hostid-mismatch case, which is not a device fault).
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;
	uint64_t autoreplace = 0;
	char *ereport = FM_EREPORT_ZFS_POOL;

	spa->spa_load_state = state;

	/* A config without a vdev tree or pool guid is unusable. */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	/* Refuse to import a pool guid that already exists in the namespace. */
	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	error = vdev_open(rvd);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.  Only enforced once we're reading the
	 * trusted (MOS) config, since an untrusted label copy may be stale.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	/* Locate the object holding the trusted on-disk config. */
	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		/*
		 * We loaded from an untrusted config; fetch the authoritative
		 * copy from the MOS and restart the load with it.
		 */
		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		/*
		 * Refuse to load a pool last written by a different host,
		 * to guard against simultaneous import from two systems.
		 */
		if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID,
		    &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx).  "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa->spa_name, hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		/* Recurse with the trusted config; mosconfig is now B_TRUE. */
		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_l2cache(spa);
		spa_config_exit(spa, FTAG);
	}

	/* Verify the intent-log chains; use a log-specific ereport on failure. */
	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}


	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	/*
	 * Pool properties are optional (older pools lack the object), and
	 * individual property lookups below are best-effort.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asychronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	/* EBADF (hostid mismatch) is not a device fault; don't post an ereport. */
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}
1414fa9e4066Sahrens 
1415fa9e4066Sahrens /*
1416fa9e4066Sahrens  * Pool Open/Import
1417fa9e4066Sahrens  *
1418fa9e4066Sahrens  * The import case is identical to an open except that the configuration is sent
1419fa9e4066Sahrens  * down from userland, instead of grabbed from the configuration cache.  For the
1420fa9e4066Sahrens  * case of an open, the pool configuration will exist in the
14213d7072f8Seschrock  * POOL_STATE_UNINITIALIZED state.
1422fa9e4066Sahrens  *
1423fa9e4066Sahrens  * The stats information (gen/count/ustats) is used to gather vdev statistics at
1424fa9e4066Sahrens  * the same time open the pool, without having to keep around the spa_t in some
1425fa9e4066Sahrens  * ambiguous state.
1426fa9e4066Sahrens  */
/*
 * Open a pool by name, loading it from its cached config if it is still in
 * the POOL_STATE_UNINITIALIZED state.  On success, takes a reference on
 * behalf of 'tag' and returns the spa_t in *spapp; if 'config' is non-NULL,
 * a freshly generated config nvlist is returned as well.  Returns an errno
 * on failure, with *spapp set to NULL.
 */
1427fa9e4066Sahrens static int
1428fa9e4066Sahrens spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
1429fa9e4066Sahrens {
1430fa9e4066Sahrens 	spa_t *spa;
1431fa9e4066Sahrens 	int error;
1432fa9e4066Sahrens 	int locked = B_FALSE;
1433fa9e4066Sahrens 
1434fa9e4066Sahrens 	*spapp = NULL;
1435fa9e4066Sahrens 
1436fa9e4066Sahrens 	/*
1437fa9e4066Sahrens 	 * As disgusting as this is, we need to support recursive calls to this
1438fa9e4066Sahrens 	 * function because dsl_dir_open() is called during spa_load(), and ends
1439fa9e4066Sahrens 	 * up calling spa_open() again.  The real fix is to figure out how to
1440fa9e4066Sahrens 	 * avoid dsl_dir_open() calling this in the first place.
1441fa9e4066Sahrens 	 */
	/* Only take spa_namespace_lock if this thread doesn't already hold it. */
1442fa9e4066Sahrens 	if (mutex_owner(&spa_namespace_lock) != curthread) {
1443fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
1444fa9e4066Sahrens 		locked = B_TRUE;
1445fa9e4066Sahrens 	}
1446fa9e4066Sahrens 
1447fa9e4066Sahrens 	if ((spa = spa_lookup(pool)) == NULL) {
1448fa9e4066Sahrens 		if (locked)
1449fa9e4066Sahrens 			mutex_exit(&spa_namespace_lock);
1450fa9e4066Sahrens 		return (ENOENT);
1451fa9e4066Sahrens 	}
1452fa9e4066Sahrens 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
1453fa9e4066Sahrens 
1454fa9e4066Sahrens 		spa_activate(spa);
1455fa9e4066Sahrens 
14560373e76bSbonwick 		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);
1457fa9e4066Sahrens 
1458fa9e4066Sahrens 		if (error == EBADF) {
1459fa9e4066Sahrens 			/*
1460560e6e96Seschrock 			 * If vdev_validate() returns failure (indicated by
1461560e6e96Seschrock 			 * EBADF), it indicates that one of the vdevs indicates
1462560e6e96Seschrock 			 * that the pool has been exported or destroyed.  If
1463560e6e96Seschrock 			 * this is the case, the config cache is out of sync and
1464560e6e96Seschrock 			 * we should remove the pool from the namespace.
1465fa9e4066Sahrens 			 */
1466fa9e4066Sahrens 			spa_unload(spa);
1467fa9e4066Sahrens 			spa_deactivate(spa);
			/* NOTE(review): presumably rewrites the cache file without this pool -- verify */
1468c5904d13Seschrock 			spa_config_sync(spa, B_TRUE, B_TRUE);
1469fa9e4066Sahrens 			spa_remove(spa);
1470fa9e4066Sahrens 			if (locked)
1471fa9e4066Sahrens 				mutex_exit(&spa_namespace_lock);
1472fa9e4066Sahrens 			return (ENOENT);
1473ea8dc4b6Seschrock 		}
1474ea8dc4b6Seschrock 
1475ea8dc4b6Seschrock 		if (error) {
1476fa9e4066Sahrens 			/*
1477fa9e4066Sahrens 			 * We can't open the pool, but we still have useful
1478fa9e4066Sahrens 			 * information: the state of each vdev after the
1479fa9e4066Sahrens 			 * attempted vdev_open().  Return this to the user.
1480fa9e4066Sahrens 			 */
14810373e76bSbonwick 			if (config != NULL && spa->spa_root_vdev != NULL) {
14820373e76bSbonwick 				spa_config_enter(spa, RW_READER, FTAG);
1483fa9e4066Sahrens 				*config = spa_config_generate(spa, NULL, -1ULL,
1484fa9e4066Sahrens 				    B_TRUE);
14850373e76bSbonwick 				spa_config_exit(spa, FTAG);
14860373e76bSbonwick 			}
1487fa9e4066Sahrens 			spa_unload(spa);
1488fa9e4066Sahrens 			spa_deactivate(spa);
1489ea8dc4b6Seschrock 			spa->spa_last_open_failed = B_TRUE;
1490fa9e4066Sahrens 			if (locked)
1491fa9e4066Sahrens 				mutex_exit(&spa_namespace_lock);
1492fa9e4066Sahrens 			*spapp = NULL;
1493fa9e4066Sahrens 			return (error);
1494ea8dc4b6Seschrock 		} else {
1495ea8dc4b6Seschrock 			spa->spa_last_open_failed = B_FALSE;
1496fa9e4066Sahrens 		}
1497fa9e4066Sahrens 	}
1498fa9e4066Sahrens 
1499fa9e4066Sahrens 	spa_open_ref(spa, tag);
15003d7072f8Seschrock 
1501fa9e4066Sahrens 	if (locked)
1502fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1503fa9e4066Sahrens 
1504fa9e4066Sahrens 	*spapp = spa;
1505fa9e4066Sahrens 
1506fa9e4066Sahrens 	if (config != NULL) {
1507ea8dc4b6Seschrock 		spa_config_enter(spa, RW_READER, FTAG);
1508fa9e4066Sahrens 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
1509ea8dc4b6Seschrock 		spa_config_exit(spa, FTAG);
1510fa9e4066Sahrens 	}
1511fa9e4066Sahrens 
1512fa9e4066Sahrens 	return (0);
1513fa9e4066Sahrens }
1514fa9e4066Sahrens 
/*
 * Open a pool by name without returning its configuration; see
 * spa_open_common() for the full semantics.
 */
1515fa9e4066Sahrens int
1516fa9e4066Sahrens spa_open(const char *name, spa_t **spapp, void *tag)
1517fa9e4066Sahrens {
1518fa9e4066Sahrens 	return (spa_open_common(name, spapp, tag, NULL));
1519fa9e4066Sahrens }
1520fa9e4066Sahrens 
1521ea8dc4b6Seschrock /*
1522ea8dc4b6Seschrock  * Lookup the given spa_t, incrementing the inject count in the process,
1523ea8dc4b6Seschrock  * preventing it from being exported or destroyed.
1524ea8dc4b6Seschrock  */
1525ea8dc4b6Seschrock spa_t *
1526ea8dc4b6Seschrock spa_inject_addref(char *name)
1527ea8dc4b6Seschrock {
1528ea8dc4b6Seschrock 	spa_t *spa;
1529ea8dc4b6Seschrock 
1530ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1531ea8dc4b6Seschrock 	if ((spa = spa_lookup(name)) == NULL) {
1532ea8dc4b6Seschrock 		mutex_exit(&spa_namespace_lock);
		/* No pool with that name exists. */
1533ea8dc4b6Seschrock 		return (NULL);
1534ea8dc4b6Seschrock 	}
	/* spa_inject_ref is manipulated only under spa_namespace_lock. */
1535ea8dc4b6Seschrock 	spa->spa_inject_ref++;
1536ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1537ea8dc4b6Seschrock 
1538ea8dc4b6Seschrock 	return (spa);
1539ea8dc4b6Seschrock }
1540ea8dc4b6Seschrock 
/*
 * Drop a reference taken by spa_inject_addref().
 */
1541ea8dc4b6Seschrock void
1542ea8dc4b6Seschrock spa_inject_delref(spa_t *spa)
1543ea8dc4b6Seschrock {
1544ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1545ea8dc4b6Seschrock 	spa->spa_inject_ref--;
1546ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1547ea8dc4b6Seschrock }
1548ea8dc4b6Seschrock 
1549fa94a07fSbrendan /*
1550fa94a07fSbrendan  * Add spares device information to the nvlist.
1551fa94a07fSbrendan  */
155299653d4eSeschrock static void
155399653d4eSeschrock spa_add_spares(spa_t *spa, nvlist_t *config)
155499653d4eSeschrock {
155599653d4eSeschrock 	nvlist_t **spares;
155699653d4eSeschrock 	uint_t i, nspares;
155799653d4eSeschrock 	nvlist_t *nvroot;
155899653d4eSeschrock 	uint64_t guid;
155999653d4eSeschrock 	vdev_stat_t *vs;
156099653d4eSeschrock 	uint_t vsc;
156139c23413Seschrock 	uint64_t pool;
156299653d4eSeschrock 
	/* Nothing to add if this pool has no spares configured. */
1563fa94a07fSbrendan 	if (spa->spa_spares.sav_count == 0)
156499653d4eSeschrock 		return;
156599653d4eSeschrock 
156699653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist(config,
156799653d4eSeschrock 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1568fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
156999653d4eSeschrock 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
157099653d4eSeschrock 	if (nspares != 0) {
		/*
		 * Copy the spare array into the caller's config, then re-look
		 * it up so we mutate the copy owned by 'config', not the
		 * pool's own sav_config.
		 */
157199653d4eSeschrock 		VERIFY(nvlist_add_nvlist_array(nvroot,
157299653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
157399653d4eSeschrock 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
157499653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
157599653d4eSeschrock 
157699653d4eSeschrock 		/*
157799653d4eSeschrock 		 * Go through and find any spares which have since been
157899653d4eSeschrock 		 * repurposed as an active spare.  If this is the case, update
157999653d4eSeschrock 		 * their status appropriately.
158099653d4eSeschrock 		 */
158199653d4eSeschrock 		for (i = 0; i < nspares; i++) {
158299653d4eSeschrock 			VERIFY(nvlist_lookup_uint64(spares[i],
158399653d4eSeschrock 			    ZPOOL_CONFIG_GUID, &guid) == 0);
			/* Nonzero 'pool' presumably means in use by some pool -- verify spa_spare_exists() contract. */
158489a89ebfSlling 			if (spa_spare_exists(guid, &pool, NULL) &&
158589a89ebfSlling 			    pool != 0ULL) {
158699653d4eSeschrock 				VERIFY(nvlist_lookup_uint64_array(
158799653d4eSeschrock 				    spares[i], ZPOOL_CONFIG_STATS,
158899653d4eSeschrock 				    (uint64_t **)&vs, &vsc) == 0);
158999653d4eSeschrock 				vs->vs_state = VDEV_STATE_CANT_OPEN;
159099653d4eSeschrock 				vs->vs_aux = VDEV_AUX_SPARED;
159199653d4eSeschrock 			}
159299653d4eSeschrock 		}
159399653d4eSeschrock 	}
159499653d4eSeschrock }
159599653d4eSeschrock 
1596fa94a07fSbrendan /*
1597fa94a07fSbrendan  * Add l2cache device information to the nvlist, including vdev stats.
1598fa94a07fSbrendan  */
1599fa94a07fSbrendan static void
1600fa94a07fSbrendan spa_add_l2cache(spa_t *spa, nvlist_t *config)
1601fa94a07fSbrendan {
1602fa94a07fSbrendan 	nvlist_t **l2cache;
1603fa94a07fSbrendan 	uint_t i, j, nl2cache;
1604fa94a07fSbrendan 	nvlist_t *nvroot;
1605fa94a07fSbrendan 	uint64_t guid;
1606fa94a07fSbrendan 	vdev_t *vd;
1607fa94a07fSbrendan 	vdev_stat_t *vs;
1608fa94a07fSbrendan 	uint_t vsc;
1609fa94a07fSbrendan 
	/* Nothing to add if this pool has no cache devices configured. */
1610fa94a07fSbrendan 	if (spa->spa_l2cache.sav_count == 0)
1611fa94a07fSbrendan 		return;
1612fa94a07fSbrendan 
	/* NOTE(review): reader config lock held across vdev_get_stats() -- presumably required for stable stats. */
1613fa94a07fSbrendan 	spa_config_enter(spa, RW_READER, FTAG);
1614fa94a07fSbrendan 
1615fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist(config,
1616fa94a07fSbrendan 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1617fa94a07fSbrendan 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
1618fa94a07fSbrendan 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1619fa94a07fSbrendan 	if (nl2cache != 0) {
		/*
		 * Copy the l2cache array into the caller's config, then
		 * re-look it up so we mutate that copy rather than the
		 * pool's own sav_config.
		 */
1620fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot,
1621fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1622fa94a07fSbrendan 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
1623fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1624fa94a07fSbrendan 
1625fa94a07fSbrendan 		/*
1626fa94a07fSbrendan 		 * Update level 2 cache device stats.
1627fa94a07fSbrendan 		 */
1628fa94a07fSbrendan 
1629fa94a07fSbrendan 		for (i = 0; i < nl2cache; i++) {
1630fa94a07fSbrendan 			VERIFY(nvlist_lookup_uint64(l2cache[i],
1631fa94a07fSbrendan 			    ZPOOL_CONFIG_GUID, &guid) == 0);
1632fa94a07fSbrendan 
			/* Find the in-core vdev matching this GUID. */
1633fa94a07fSbrendan 			vd = NULL;
1634fa94a07fSbrendan 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
1635fa94a07fSbrendan 				if (guid ==
1636fa94a07fSbrendan 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
1637fa94a07fSbrendan 					vd = spa->spa_l2cache.sav_vdevs[j];
1638fa94a07fSbrendan 					break;
1639fa94a07fSbrendan 				}
1640fa94a07fSbrendan 			}
1641fa94a07fSbrendan 			ASSERT(vd != NULL);
1642fa94a07fSbrendan 
1643fa94a07fSbrendan 			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
1644fa94a07fSbrendan 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
1645fa94a07fSbrendan 			vdev_get_stats(vd, vs);
1646fa94a07fSbrendan 		}
1647fa94a07fSbrendan 	}
1648fa94a07fSbrendan 
1649fa94a07fSbrendan 	spa_config_exit(spa, FTAG);
1650fa94a07fSbrendan }
1651fa94a07fSbrendan 
/*
 * Retrieve the named pool's configuration, error count, and aux device
 * (spare/l2cache) status in *config, and optionally its alternate root in
 * 'altroot'.  The altroot is fetched even for pools that fail to open, by
 * calling spa_lookup() directly under spa_namespace_lock.  Returns the
 * error from spa_open_common().
 */
1652fa9e4066Sahrens int
1653ea8dc4b6Seschrock spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
1654fa9e4066Sahrens {
1655fa9e4066Sahrens 	int error;
1656fa9e4066Sahrens 	spa_t *spa;
1657fa9e4066Sahrens 
1658fa9e4066Sahrens 	*config = NULL;
1659fa9e4066Sahrens 	error = spa_open_common(name, &spa, FTAG, config);
1660fa9e4066Sahrens 
	/* Even a failed open may have produced a config (see spa_open_common). */
166199653d4eSeschrock 	if (spa && *config != NULL) {
1662ea8dc4b6Seschrock 		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
1663ea8dc4b6Seschrock 		    spa_get_errlog_size(spa)) == 0);
1664ea8dc4b6Seschrock 
166599653d4eSeschrock 		spa_add_spares(spa, *config);
1666fa94a07fSbrendan 		spa_add_l2cache(spa, *config);
166799653d4eSeschrock 	}
166899653d4eSeschrock 
1669ea8dc4b6Seschrock 	/*
1670ea8dc4b6Seschrock 	 * We want to get the alternate root even for faulted pools, so we cheat
1671ea8dc4b6Seschrock 	 * and call spa_lookup() directly.
1672ea8dc4b6Seschrock 	 */
1673ea8dc4b6Seschrock 	if (altroot) {
1674ea8dc4b6Seschrock 		if (spa == NULL) {
1675ea8dc4b6Seschrock 			mutex_enter(&spa_namespace_lock);
1676ea8dc4b6Seschrock 			spa = spa_lookup(name);
1677ea8dc4b6Seschrock 			if (spa)
1678ea8dc4b6Seschrock 				spa_altroot(spa, altroot, buflen);
1679ea8dc4b6Seschrock 			else
1680ea8dc4b6Seschrock 				altroot[0] = '\0';
1681ea8dc4b6Seschrock 			spa = NULL;
1682ea8dc4b6Seschrock 			mutex_exit(&spa_namespace_lock);
1683ea8dc4b6Seschrock 		} else {
1684ea8dc4b6Seschrock 			spa_altroot(spa, altroot, buflen);
1685ea8dc4b6Seschrock 		}
1686ea8dc4b6Seschrock 	}
1687ea8dc4b6Seschrock 
1688fa9e4066Sahrens 	if (spa != NULL)
1689fa9e4066Sahrens 		spa_close(spa, FTAG);
1690fa9e4066Sahrens 
1691fa9e4066Sahrens 	return (error);
1692fa9e4066Sahrens }
1693fa9e4066Sahrens 
169499653d4eSeschrock /*
1695fa94a07fSbrendan  * Validate that the auxiliary device array is well formed.  We must have an
1696fa94a07fSbrendan  * array of nvlists, each which describes a valid leaf vdev.  If this is an
1697fa94a07fSbrendan  * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
1698fa94a07fSbrendan  * specified, as long as they are well-formed.
169999653d4eSeschrock  */
170099653d4eSeschrock static int
1701fa94a07fSbrendan spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
1702fa94a07fSbrendan     spa_aux_vdev_t *sav, const char *config, uint64_t version,
1703fa94a07fSbrendan     vdev_labeltype_t label)
170499653d4eSeschrock {
1705fa94a07fSbrendan 	nvlist_t **dev;
1706fa94a07fSbrendan 	uint_t i, ndev;
170799653d4eSeschrock 	vdev_t *vd;
170899653d4eSeschrock 	int error;
170999653d4eSeschrock 
171099653d4eSeschrock 	/*
1711fa94a07fSbrendan 	 * It's acceptable to have no devs specified.
171299653d4eSeschrock 	 */
1713fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
171499653d4eSeschrock 		return (0);
171599653d4eSeschrock 
1716fa94a07fSbrendan 	if (ndev == 0)
171799653d4eSeschrock 		return (EINVAL);
171899653d4eSeschrock 
171999653d4eSeschrock 	/*
1720fa94a07fSbrendan 	 * Make sure the pool is formatted with a version that supports this
1721fa94a07fSbrendan 	 * device type.
172299653d4eSeschrock 	 */
1723fa94a07fSbrendan 	if (spa_version(spa) < version)
172499653d4eSeschrock 		return (ENOTSUP);
172599653d4eSeschrock 
172639c23413Seschrock 	/*
1727fa94a07fSbrendan 	 * Set the pending device list so we correctly handle device in-use
172839c23413Seschrock 	 * checking.
172939c23413Seschrock 	 */
1730fa94a07fSbrendan 	sav->sav_pending = dev;
1731fa94a07fSbrendan 	sav->sav_npending = ndev;
173239c23413Seschrock 
1733fa94a07fSbrendan 	for (i = 0; i < ndev; i++) {
1734fa94a07fSbrendan 		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
173599653d4eSeschrock 		    mode)) != 0)
173639c23413Seschrock 			goto out;
173799653d4eSeschrock 
173899653d4eSeschrock 		if (!vd->vdev_ops->vdev_op_leaf) {
173999653d4eSeschrock 			vdev_free(vd);
174039c23413Seschrock 			error = EINVAL;
174139c23413Seschrock 			goto out;
174299653d4eSeschrock 		}
174399653d4eSeschrock 
1744fa94a07fSbrendan 		/*
1745fa94a07fSbrendan 		 * The L2ARC currently only supports disk devices.
1746fa94a07fSbrendan 		 */
1747fa94a07fSbrendan 		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
1748fa94a07fSbrendan 		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
1749fa94a07fSbrendan 			error = ENOTBLK;
1750fa94a07fSbrendan 			goto out;
1751fa94a07fSbrendan 		}
1752fa94a07fSbrendan 
175399653d4eSeschrock 		vd->vdev_top = vd;
175499653d4eSeschrock 
175539c23413Seschrock 		if ((error = vdev_open(vd)) == 0 &&
1756fa94a07fSbrendan 		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
1757fa94a07fSbrendan 			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
175839c23413Seschrock 			    vd->vdev_guid) == 0);
175939c23413Seschrock 		}
176099653d4eSeschrock 
176199653d4eSeschrock 		vdev_free(vd);
176239c23413Seschrock 
1763fa94a07fSbrendan 		if (error &&
1764fa94a07fSbrendan 		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
176539c23413Seschrock 			goto out;
176639c23413Seschrock 		else
176739c23413Seschrock 			error = 0;
176899653d4eSeschrock 	}
176999653d4eSeschrock 
177039c23413Seschrock out:
1771fa94a07fSbrendan 	sav->sav_pending = NULL;
1772fa94a07fSbrendan 	sav->sav_npending = 0;
177339c23413Seschrock 	return (error);
177499653d4eSeschrock }
177599653d4eSeschrock 
/*
 * Validate both auxiliary device lists (spares, then l2cache) in 'nvroot'.
 * Returns the first validation error encountered, or 0.
 */
1776fa94a07fSbrendan static int
1777fa94a07fSbrendan spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1778fa94a07fSbrendan {
1779fa94a07fSbrendan 	int error;
1780fa94a07fSbrendan 
1781fa94a07fSbrendan 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1782fa94a07fSbrendan 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
1783fa94a07fSbrendan 	    VDEV_LABEL_SPARE)) != 0) {
1784fa94a07fSbrendan 		return (error);
1785fa94a07fSbrendan 	}
1786fa94a07fSbrendan 
1787fa94a07fSbrendan 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1788fa94a07fSbrendan 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
1789fa94a07fSbrendan 	    VDEV_LABEL_L2CACHE));
1790fa94a07fSbrendan }
1791fa94a07fSbrendan 
/*
 * Install the aux device list 'devs' (spares or l2cache, named by 'config')
 * into 'sav', concatenating with any devices already present in
 * sav->sav_config.  All entries are duplicated, so the caller retains
 * ownership of 'devs'.
 */
1792fa94a07fSbrendan static void
1793fa94a07fSbrendan spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
1794fa94a07fSbrendan     const char *config)
1795fa94a07fSbrendan {
1796fa94a07fSbrendan 	int i;
1797fa94a07fSbrendan 
1798fa94a07fSbrendan 	if (sav->sav_config != NULL) {
1799fa94a07fSbrendan 		nvlist_t **olddevs;
1800fa94a07fSbrendan 		uint_t oldndevs;
1801fa94a07fSbrendan 		nvlist_t **newdevs;
1802fa94a07fSbrendan 
1803fa94a07fSbrendan 		/*
1804fa94a07fSbrendan 		 * Generate new dev list by concatentating with the
1805fa94a07fSbrendan 		 * current dev list.
1806fa94a07fSbrendan 		 */
1807fa94a07fSbrendan 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
1808fa94a07fSbrendan 		    &olddevs, &oldndevs) == 0);
1809fa94a07fSbrendan 
1810fa94a07fSbrendan 		newdevs = kmem_alloc(sizeof (void *) *
1811fa94a07fSbrendan 		    (ndevs + oldndevs), KM_SLEEP);
		/* Old entries first, then the new ones. */
1812fa94a07fSbrendan 		for (i = 0; i < oldndevs; i++)
1813fa94a07fSbrendan 			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
1814fa94a07fSbrendan 			    KM_SLEEP) == 0);
1815fa94a07fSbrendan 		for (i = 0; i < ndevs; i++)
1816fa94a07fSbrendan 			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
1817fa94a07fSbrendan 			    KM_SLEEP) == 0);
1818fa94a07fSbrendan 
		/* Replace the old array; nvlist_add copies, so free our dups below. */
1819fa94a07fSbrendan 		VERIFY(nvlist_remove(sav->sav_config, config,
1820fa94a07fSbrendan 		    DATA_TYPE_NVLIST_ARRAY) == 0);
1821fa94a07fSbrendan 
1822fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1823fa94a07fSbrendan 		    config, newdevs, ndevs + oldndevs) == 0);
1824fa94a07fSbrendan 		for (i = 0; i < oldndevs + ndevs; i++)
1825fa94a07fSbrendan 			nvlist_free(newdevs[i]);
1826fa94a07fSbrendan 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
1827fa94a07fSbrendan 	} else {
1828fa94a07fSbrendan 		/*
1829fa94a07fSbrendan 		 * Generate a new dev list.
1830fa94a07fSbrendan 		 */
1831fa94a07fSbrendan 		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
1832fa94a07fSbrendan 		    KM_SLEEP) == 0);
1833fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
1834fa94a07fSbrendan 		    devs, ndevs) == 0);
1835fa94a07fSbrendan 	}
1836fa94a07fSbrendan }
1837fa94a07fSbrendan 
1838fa94a07fSbrendan /*
1839fa94a07fSbrendan  * Stop and drop level 2 ARC devices
1840fa94a07fSbrendan  */
1841fa94a07fSbrendan void
1842fa94a07fSbrendan spa_l2cache_drop(spa_t *spa)
1843fa94a07fSbrendan {
1844fa94a07fSbrendan 	vdev_t *vd;
1845fa94a07fSbrendan 	int i;
1846fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1847fa94a07fSbrendan 
1848fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++) {
1849fa94a07fSbrendan 		uint64_t pool;
1850fa94a07fSbrendan 
1851fa94a07fSbrendan 		vd = sav->sav_vdevs[i];
1852fa94a07fSbrendan 		ASSERT(vd != NULL);
1853fa94a07fSbrendan 
		/* NOTE(review): ARC teardown presumably only needed when the pool is writable -- verify */
1854fa94a07fSbrendan 		if (spa_mode & FWRITE &&
1855c5904d13Seschrock 		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
1856c5904d13Seschrock 		    l2arc_vdev_present(vd)) {
1857fa94a07fSbrendan 			l2arc_remove_vdev(vd);
1858fa94a07fSbrendan 		}
1859fa94a07fSbrendan 		if (vd->vdev_isl2cache)
1860fa94a07fSbrendan 			spa_l2cache_remove(vd);
1861fa94a07fSbrendan 		vdev_clear_stats(vd);
1862fa94a07fSbrendan 		(void) vdev_close(vd);
1863fa94a07fSbrendan 	}
1864fa94a07fSbrendan }
1865fa94a07fSbrendan 
1866fa9e4066Sahrens /*
1867fa9e4066Sahrens  * Pool Creation
1868fa9e4066Sahrens  */
/*
 * Create a new storage pool named 'pool' from the root vdev description in
 * 'nvroot', applying pool properties 'props' and root-dataset ZPL
 * properties 'zplprops'.  On success the pool is left active (sync thread
 * started, first txg synced, cache file written) and 0 is returned;
 * otherwise the partially created spa_t is torn down and an errno is
 * returned.  'history_str' is the user command logged to pool history.
 */
1869fa9e4066Sahrens int
1870990b4856Slling spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
18710a48a24eStimh     const char *history_str, nvlist_t *zplprops)
1872fa9e4066Sahrens {
1873fa9e4066Sahrens 	spa_t *spa;
1874990b4856Slling 	char *altroot = NULL;
18750373e76bSbonwick 	vdev_t *rvd;
1876fa9e4066Sahrens 	dsl_pool_t *dp;
1877fa9e4066Sahrens 	dmu_tx_t *tx;
187899653d4eSeschrock 	int c, error = 0;
1879fa9e4066Sahrens 	uint64_t txg = TXG_INITIAL;
1880fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
1881fa94a07fSbrendan 	uint_t nspares, nl2cache;
1882990b4856Slling 	uint64_t version;
1883fa9e4066Sahrens 
1884fa9e4066Sahrens 	/*
1885fa9e4066Sahrens 	 * If this pool already exists, return failure.
1886fa9e4066Sahrens 	 */
1887fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
1888fa9e4066Sahrens 	if (spa_lookup(pool) != NULL) {
1889fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1890fa9e4066Sahrens 		return (EEXIST);
1891fa9e4066Sahrens 	}
1892fa9e4066Sahrens 
1893fa9e4066Sahrens 	/*
1894fa9e4066Sahrens 	 * Allocate a new spa_t structure.
1895fa9e4066Sahrens 	 */
1896990b4856Slling 	(void) nvlist_lookup_string(props,
1897990b4856Slling 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
18980373e76bSbonwick 	spa = spa_add(pool, altroot);
1899fa9e4066Sahrens 	spa_activate(spa);
1900fa9e4066Sahrens 
1901fa9e4066Sahrens 	spa->spa_uberblock.ub_txg = txg - 1;
1902990b4856Slling 
1903990b4856Slling 	if (props && (error = spa_prop_validate(spa, props))) {
1904990b4856Slling 		spa_unload(spa);
1905990b4856Slling 		spa_deactivate(spa);
1906990b4856Slling 		spa_remove(spa);
1907c5904d13Seschrock 		mutex_exit(&spa_namespace_lock);
1908990b4856Slling 		return (error);
1909990b4856Slling 	}
1910990b4856Slling 
	/* Default to the newest on-disk version if the caller didn't pick one. */
1911990b4856Slling 	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
1912990b4856Slling 	    &version) != 0)
1913990b4856Slling 		version = SPA_VERSION;
1914990b4856Slling 	ASSERT(version <= SPA_VERSION);
1915990b4856Slling 	spa->spa_uberblock.ub_version = version;
1916fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
1917fa9e4066Sahrens 
19180373e76bSbonwick 	/*
19190373e76bSbonwick 	 * Create the root vdev.
19200373e76bSbonwick 	 */
19210373e76bSbonwick 	spa_config_enter(spa, RW_WRITER, FTAG);
19220373e76bSbonwick 
192399653d4eSeschrock 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
19230373e76bSbonwick 
192499653d4eSeschrock 	ASSERT(error != 0 || rvd != NULL);
192599653d4eSeschrock 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
19260373e76bSbonwick 
	/* A pool with no allocatable top-level vdevs is malformed. */
1928b7b97454Sperrin 	if (error == 0 && !zfs_allocatable_devs(nvroot))
19290373e76bSbonwick 		error = EINVAL;
193099653d4eSeschrock 
193199653d4eSeschrock 	if (error == 0 &&
193299653d4eSeschrock 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
1933fa94a07fSbrendan 	    (error = spa_validate_aux(spa, nvroot, txg,
193499653d4eSeschrock 	    VDEV_ALLOC_ADD)) == 0) {
193599653d4eSeschrock 		for (c = 0; c < rvd->vdev_children; c++)
193699653d4eSeschrock 			vdev_init(rvd->vdev_child[c], txg);
193799653d4eSeschrock 		vdev_config_dirty(rvd);
19380373e76bSbonwick 	}
19390373e76bSbonwick 
19400373e76bSbonwick 	spa_config_exit(spa, FTAG);
1941fa9e4066Sahrens 
	/* Tear down the half-built pool on any vdev creation/validation error. */
194299653d4eSeschrock 	if (error != 0) {
1943fa9e4066Sahrens 		spa_unload(spa);
1944fa9e4066Sahrens 		spa_deactivate(spa);
1945fa9e4066Sahrens 		spa_remove(spa);
1946fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1947fa9e4066Sahrens 		return (error);
1948fa9e4066Sahrens 	}
1949fa9e4066Sahrens 
195099653d4eSeschrock 	/*
195199653d4eSeschrock 	 * Get the list of spares, if specified.
195299653d4eSeschrock 	 */
195399653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
195499653d4eSeschrock 	    &spares, &nspares) == 0) {
1955fa94a07fSbrendan 		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
195699653d4eSeschrock 		    KM_SLEEP) == 0);
1957fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
195899653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
195999653d4eSeschrock 		spa_config_enter(spa, RW_WRITER, FTAG);
196099653d4eSeschrock 		spa_load_spares(spa);
196199653d4eSeschrock 		spa_config_exit(spa, FTAG);
1962fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
1963fa94a07fSbrendan 	}
1964fa94a07fSbrendan 
1965fa94a07fSbrendan 	/*
1966fa94a07fSbrendan 	 * Get the list of level 2 cache devices, if specified.
1967fa94a07fSbrendan 	 */
1968fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1969fa94a07fSbrendan 	    &l2cache, &nl2cache) == 0) {
1970fa94a07fSbrendan 		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
1971fa94a07fSbrendan 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
1972fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
1973fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1974fa94a07fSbrendan 		spa_config_enter(spa, RW_WRITER, FTAG);
1975fa94a07fSbrendan 		spa_load_l2cache(spa);
1976fa94a07fSbrendan 		spa_config_exit(spa, FTAG);
1977fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
197899653d4eSeschrock 	}
197999653d4eSeschrock 
19800a48a24eStimh 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
1981fa9e4066Sahrens 	spa->spa_meta_objset = dp->dp_meta_objset;
1982fa9e4066Sahrens 
1983fa9e4066Sahrens 	tx = dmu_tx_create_assigned(dp, txg);
1984fa9e4066Sahrens 
1985fa9e4066Sahrens 	/*
1986fa9e4066Sahrens 	 * Create the pool config object.
1987fa9e4066Sahrens 	 */
1988fa9e4066Sahrens 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
1989f7991ba4STim Haley 	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
1990fa9e4066Sahrens 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
1991fa9e4066Sahrens 
1992ea8dc4b6Seschrock 	if (zap_add(spa->spa_meta_objset,
1993fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
1994ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
1995ea8dc4b6Seschrock 		cmn_err(CE_PANIC, "failed to add pool config");
1996ea8dc4b6Seschrock 	}
1997fa9e4066Sahrens 
1998990b4856Slling 	/* Newly created pools with the right version are always deflated. */
1999990b4856Slling 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
2000990b4856Slling 		spa->spa_deflate = TRUE;
2001990b4856Slling 		if (zap_add(spa->spa_meta_objset,
2002990b4856Slling 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
2003990b4856Slling 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
2004990b4856Slling 			cmn_err(CE_PANIC, "failed to add deflate");
2005990b4856Slling 		}
200699653d4eSeschrock 	}
200799653d4eSeschrock 
2008fa9e4066Sahrens 	/*
2009fa9e4066Sahrens 	 * Create the deferred-free bplist object.  Turn off compression
2010fa9e4066Sahrens 	 * because sync-to-convergence takes longer if the blocksize
2011fa9e4066Sahrens 	 * keeps changing.
2012fa9e4066Sahrens 	 */
2013fa9e4066Sahrens 	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
2014fa9e4066Sahrens 	    1 << 14, tx);
2015fa9e4066Sahrens 	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
2016fa9e4066Sahrens 	    ZIO_COMPRESS_OFF, tx);
2017fa9e4066Sahrens 
2018ea8dc4b6Seschrock 	if (zap_add(spa->spa_meta_objset,
2019fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
2020ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
2021ea8dc4b6Seschrock 		cmn_err(CE_PANIC, "failed to add bplist");
2022ea8dc4b6Seschrock 	}
2023fa9e4066Sahrens 
202406eeb2adSek 	/*
202506eeb2adSek 	 * Create the pool's history object.
202606eeb2adSek 	 */
2027990b4856Slling 	if (version >= SPA_VERSION_ZPOOL_HISTORY)
2028990b4856Slling 		spa_history_create_obj(spa, tx);
2029990b4856Slling 
2030990b4856Slling 	/*
2031990b4856Slling 	 * Set pool properties.
2032990b4856Slling 	 */
2033990b4856Slling 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
2034990b4856Slling 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
20350a4e9518Sgw 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
2036990b4856Slling 	if (props)
2037990b4856Slling 		spa_sync_props(spa, props, CRED(), tx);
203806eeb2adSek 
2039fa9e4066Sahrens 	dmu_tx_commit(tx);
2040fa9e4066Sahrens 
2041fa9e4066Sahrens 	spa->spa_sync_on = B_TRUE;
2042fa9e4066Sahrens 	txg_sync_start(spa->spa_dsl_pool);
2043fa9e4066Sahrens 
2044fa9e4066Sahrens 	/*
2045fa9e4066Sahrens 	 * We explicitly wait for the first transaction to complete so that our
2046fa9e4066Sahrens 	 * bean counters are appropriately updated.
2047fa9e4066Sahrens 	 */
2048fa9e4066Sahrens 	txg_wait_synced(spa->spa_dsl_pool, txg);
2049fa9e4066Sahrens 
2050c5904d13Seschrock 	spa_config_sync(spa, B_FALSE, B_TRUE);
2051fa9e4066Sahrens 
2052990b4856Slling 	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
2053228975ccSek 		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
2054228975ccSek 
2055fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
2056fa9e4066Sahrens 
	/* NOTE(review): presumably the baseline refcount below which the pool may be evicted -- verify */
2057088f3894Sahrens 	spa->spa_minref = refcount_count(&spa->spa_refcount);
2058088f3894Sahrens 
2059fa9e4066Sahrens 	return (0);
2060fa9e4066Sahrens }
2061fa9e4066Sahrens 
2062fa9e4066Sahrens /*
2063fa9e4066Sahrens  * Import the given pool into the system.  We set up the necessary spa_t and
2064fa9e4066Sahrens  * then call spa_load() to do the dirty work.
2065fa9e4066Sahrens  */
2066e7cbe64fSgw static int
2067e7cbe64fSgw spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
2068c5904d13Seschrock     boolean_t isroot, boolean_t allowfaulted)
2069fa9e4066Sahrens {
2070fa9e4066Sahrens 	spa_t *spa;
2071990b4856Slling 	char *altroot = NULL;
2072c5904d13Seschrock 	int error, loaderr;
207399653d4eSeschrock 	nvlist_t *nvroot;
2074fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
2075fa94a07fSbrendan 	uint_t nspares, nl2cache;
2076fa9e4066Sahrens 
2077fa9e4066Sahrens 	/*
2078fa9e4066Sahrens 	 * If a pool with this name exists, return failure.
2079fa9e4066Sahrens 	 */
2080fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
2081fa9e4066Sahrens 	if (spa_lookup(pool) != NULL) {
2082fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2083fa9e4066Sahrens 		return (EEXIST);
2084fa9e4066Sahrens 	}
2085fa9e4066Sahrens 
2086fa9e4066Sahrens 	/*
20870373e76bSbonwick 	 * Create and initialize the spa structure.
2088fa9e4066Sahrens 	 */
2089990b4856Slling 	(void) nvlist_lookup_string(props,
2090990b4856Slling 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
20910373e76bSbonwick 	spa = spa_add(pool, altroot);
2092fa9e4066Sahrens 	spa_activate(spa);
2093fa9e4066Sahrens 
2094c5904d13Seschrock 	if (allowfaulted)
2095c5904d13Seschrock 		spa->spa_import_faulted = B_TRUE;
2096bf82a41bSeschrock 	spa->spa_is_root = isroot;
2097c5904d13Seschrock 
20985dabedeeSbonwick 	/*
20990373e76bSbonwick 	 * Pass off the heavy lifting to spa_load().
2100088f3894Sahrens 	 * Pass TRUE for mosconfig (unless this is a root pool) because
2101088f3894Sahrens 	 * the user-supplied config is actually the one to trust when
2102088f3894Sahrens 	 * doing an import.
21035dabedeeSbonwick 	 */
2104088f3894Sahrens 	loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot);
2105fa9e4066Sahrens 
210699653d4eSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
210799653d4eSeschrock 	/*
210899653d4eSeschrock 	 * Toss any existing sparelist, as it doesn't have any validity anymore,
210999653d4eSeschrock 	 * and conflicts with spa_has_spare().
211099653d4eSeschrock 	 */
2111e7cbe64fSgw 	if (!isroot && spa->spa_spares.sav_config) {
2112fa94a07fSbrendan 		nvlist_free(spa->spa_spares.sav_config);
2113fa94a07fSbrendan 		spa->spa_spares.sav_config = NULL;
211499653d4eSeschrock 		spa_load_spares(spa);
211599653d4eSeschrock 	}
2116e7cbe64fSgw 	if (!isroot && spa->spa_l2cache.sav_config) {
2117fa94a07fSbrendan 		nvlist_free(spa->spa_l2cache.sav_config);
2118fa94a07fSbrendan 		spa->spa_l2cache.sav_config = NULL;
2119fa94a07fSbrendan 		spa_load_l2cache(spa);
2120fa94a07fSbrendan 	}
212199653d4eSeschrock 
212299653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
212399653d4eSeschrock 	    &nvroot) == 0);
2124fa94a07fSbrendan 	if (error == 0)
2125fa94a07fSbrendan 		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
2126fa94a07fSbrendan 	if (error == 0)
2127fa94a07fSbrendan 		error = spa_validate_aux(spa, nvroot, -1ULL,
2128fa94a07fSbrendan 		    VDEV_ALLOC_L2CACHE);
212999653d4eSeschrock 	spa_config_exit(spa, FTAG);
213099653d4eSeschrock 
2131990b4856Slling 	if (error != 0 || (props && (error = spa_prop_set(spa, props)))) {
2132c5904d13Seschrock 		if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
2133c5904d13Seschrock 			/*
2134c5904d13Seschrock 			 * If we failed to load the pool, but 'allowfaulted' is
2135c5904d13Seschrock 			 * set, then manually set the config as if the config
2136c5904d13Seschrock 			 * passed in was specified in the cache file.
2137c5904d13Seschrock 			 */
2138c5904d13Seschrock 			error = 0;
2139c5904d13Seschrock 			spa->spa_import_faulted = B_FALSE;
2140c5904d13Seschrock 			if (spa->spa_config == NULL) {
2141c5904d13Seschrock 				spa_config_enter(spa, RW_READER, FTAG);
2142c5904d13Seschrock 				spa->spa_config = spa_config_generate(spa,
2143c5904d13Seschrock 				    NULL, -1ULL, B_TRUE);
2144c5904d13Seschrock 				spa_config_exit(spa, FTAG);
2145c5904d13Seschrock 			}
2146c5904d13Seschrock 			spa_unload(spa);
2147c5904d13Seschrock 			spa_deactivate(spa);
2148c5904d13Seschrock 			spa_config_sync(spa, B_FALSE, B_TRUE);
2149c5904d13Seschrock 		} else {
2150c5904d13Seschrock 			spa_unload(spa);
2151c5904d13Seschrock 			spa_deactivate(spa);
2152c5904d13Seschrock 			spa_remove(spa);
2153c5904d13Seschrock 		}
2154fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2155fa9e4066Sahrens 		return (error);
2156fa9e4066Sahrens 	}
2157fa9e4066Sahrens 
215899653d4eSeschrock 	/*
2159fa94a07fSbrendan 	 * Override any spares and level 2 cache devices as specified by
2160fa94a07fSbrendan 	 * the user, as these may have correct device names/devids, etc.
216199653d4eSeschrock 	 */
216299653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
216399653d4eSeschrock 	    &spares, &nspares) == 0) {
2164fa94a07fSbrendan 		if (spa->spa_spares.sav_config)
2165fa94a07fSbrendan 			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
216699653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
216799653d4eSeschrock 		else
2168fa94a07fSbrendan 			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
216999653d4eSeschrock 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2170fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
217199653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
217299653d4eSeschrock 		spa_config_enter(spa, RW_WRITER, FTAG);
217399653d4eSeschrock 		spa_load_spares(spa);
217499653d4eSeschrock 		spa_config_exit(spa, FTAG);
2175fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
2176fa94a07fSbrendan 	}
2177fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2178fa94a07fSbrendan 	    &l2cache, &nl2cache) == 0) {
2179fa94a07fSbrendan 		if (spa->spa_l2cache.sav_config)
2180fa94a07fSbrendan 			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
2181fa94a07fSbrendan 			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
2182fa94a07fSbrendan 		else
2183fa94a07fSbrendan 			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2184fa94a07fSbrendan 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2185fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2186fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2187fa94a07fSbrendan 		spa_config_enter(spa, RW_WRITER, FTAG);
2188fa94a07fSbrendan 		spa_load_l2cache(spa);
2189fa94a07fSbrendan 		spa_config_exit(spa, FTAG);
2190fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
219199653d4eSeschrock 	}
219299653d4eSeschrock 
2193c5904d13Seschrock 	if (spa_mode & FWRITE) {
2194c5904d13Seschrock 		/*
2195c5904d13Seschrock 		 * Update the config cache to include the newly-imported pool.
2196c5904d13Seschrock 		 */
2197e7cbe64fSgw 		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot);
2198c5904d13Seschrock 	}
2199fa9e4066Sahrens 
2200c5904d13Seschrock 	spa->spa_import_faulted = B_FALSE;
22013d7072f8Seschrock 	mutex_exit(&spa_namespace_lock);
22023d7072f8Seschrock 
2203fa9e4066Sahrens 	return (0);
2204fa9e4066Sahrens }
2205fa9e4066Sahrens 
2206e7cbe64fSgw #ifdef _KERNEL
2207e7cbe64fSgw /*
2208e7cbe64fSgw  * Build a "root" vdev for a top level vdev read in from a rootpool
2209e7cbe64fSgw  * device label.
2210e7cbe64fSgw  */
static void
spa_build_rootpool_config(nvlist_t *config)
{
	nvlist_t *nvtop, *nvroot;
	uint64_t pgid;	/* pool GUID, reused as the new root vdev's GUID */

	/*
	 * Pull the existing top-level vdev and the pool GUID out of the
	 * label config; VERIFY because both are mandatory in a valid label.
	 */
	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
	    == 0);
	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
	    == 0);

	/*
	 * Put this pool's top-level vdevs into a root vdev: a fresh
	 * "root"-type nvlist with id 0, the pool's GUID, and the old
	 * top-level vdev as its only child.
	 */
	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
	    == 0);
	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &nvtop, 1) == 0);

	/*
	 * Replace the existing vdev_tree with the new root vdev in
	 * this pool's configuration (remove the old, add the new).
	 * nvlist_add_nvlist() copies nvroot, so free our local copy.
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);
}
2243e7cbe64fSgw 
2244e7cbe64fSgw /*
2245e7cbe64fSgw  * Get the root pool information from the root disk, then import the root pool
2246e7cbe64fSgw  * during the system boot up time.
2247e7cbe64fSgw  */
2248051aabe6Staylor extern nvlist_t *vdev_disk_read_rootlabel(char *, char *);
2249e7cbe64fSgw 
2250051aabe6Staylor int
2251051aabe6Staylor spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
2252e7cbe64fSgw     uint64_t *besttxg)
2253e7cbe64fSgw {
2254e7cbe64fSgw 	nvlist_t *config;
2255e7cbe64fSgw 	uint64_t txg;
2256e7cbe64fSgw 
2257051aabe6Staylor 	if ((config = vdev_disk_read_rootlabel(devpath, devid)) == NULL)
2258051aabe6Staylor 		return (-1);
2259e7cbe64fSgw 
2260e7cbe64fSgw 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2261e7cbe64fSgw 
2262051aabe6Staylor 	if (bestconf != NULL)
2263e7cbe64fSgw 		*bestconf = config;
2264051aabe6Staylor 	*besttxg = txg;
2265051aabe6Staylor 	return (0);
2266e7cbe64fSgw }
2267e7cbe64fSgw 
2268e7cbe64fSgw boolean_t
2269e7cbe64fSgw spa_rootdev_validate(nvlist_t *nv)
2270e7cbe64fSgw {
2271e7cbe64fSgw 	uint64_t ival;
2272e7cbe64fSgw 
2273e7cbe64fSgw 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2274e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2275e7cbe64fSgw 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2276e7cbe64fSgw 		return (B_FALSE);
2277e7cbe64fSgw 
2278e7cbe64fSgw 	return (B_TRUE);
2279e7cbe64fSgw }
2280e7cbe64fSgw 
2281051aabe6Staylor 
2282051aabe6Staylor /*
2283051aabe6Staylor  * Given the boot device's physical path or devid, check if the device
2284051aabe6Staylor  * is in a valid state.  If so, return the configuration from the vdev
2285051aabe6Staylor  * label.
2286051aabe6Staylor  */
2287051aabe6Staylor int
2288051aabe6Staylor spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
2289051aabe6Staylor {
2290051aabe6Staylor 	nvlist_t *conf = NULL;
2291051aabe6Staylor 	uint64_t txg = 0;
2292051aabe6Staylor 	nvlist_t *nvtop, **child;
2293051aabe6Staylor 	char *type;
2294051aabe6Staylor 	char *bootpath = NULL;
2295051aabe6Staylor 	uint_t children, c;
2296051aabe6Staylor 	char *tmp;
2297051aabe6Staylor 
2298051aabe6Staylor 	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
2299051aabe6Staylor 		*tmp = '\0';
2300051aabe6Staylor 	if (spa_check_rootconf(devpath, devid, &conf, &txg) < 0) {
2301051aabe6Staylor 		cmn_err(CE_NOTE, "error reading device label");
2302051aabe6Staylor 		nvlist_free(conf);
2303051aabe6Staylor 		return (EINVAL);
2304051aabe6Staylor 	}
2305051aabe6Staylor 	if (txg == 0) {
2306051aabe6Staylor 		cmn_err(CE_NOTE, "this device is detached");
2307051aabe6Staylor 		nvlist_free(conf);
2308051aabe6Staylor 		return (EINVAL);
2309051aabe6Staylor 	}
2310051aabe6Staylor 
2311051aabe6Staylor 	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
2312051aabe6Staylor 	    &nvtop) == 0);
2313051aabe6Staylor 	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);
2314051aabe6Staylor 
2315051aabe6Staylor 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2316051aabe6Staylor 		if (spa_rootdev_validate(nvtop)) {
2317051aabe6Staylor 			goto out;
2318051aabe6Staylor 		} else {
2319051aabe6Staylor 			nvlist_free(conf);
2320051aabe6Staylor 			return (EINVAL);
2321051aabe6Staylor 		}
2322051aabe6Staylor 	}
2323051aabe6Staylor 
2324051aabe6Staylor 	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);
2325051aabe6Staylor 
2326051aabe6Staylor 	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
2327051aabe6Staylor 	    &child, &children) == 0);
2328051aabe6Staylor 
2329051aabe6Staylor 	/*
2330051aabe6Staylor 	 * Go thru vdevs in the mirror to see if the given device
2331051aabe6Staylor 	 * has the most recent txg. Only the device with the most
2332051aabe6Staylor 	 * recent txg has valid information and should be booted.
2333051aabe6Staylor 	 */
2334051aabe6Staylor 	for (c = 0; c < children; c++) {
2335051aabe6Staylor 		char *cdevid, *cpath;
2336051aabe6Staylor 		uint64_t tmptxg;
2337051aabe6Staylor 
2338051aabe6Staylor 		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
2339051aabe6Staylor 		    &cpath) != 0)
2340051aabe6Staylor 			return (EINVAL);
2341051aabe6Staylor 		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID,
2342051aabe6Staylor 		    &cdevid) != 0)
2343051aabe6Staylor 			return (EINVAL);
2344051aabe6Staylor 		if ((spa_check_rootconf(cpath, cdevid, NULL,
2345051aabe6Staylor 		    &tmptxg) == 0) && (tmptxg > txg)) {
2346051aabe6Staylor 			txg = tmptxg;
2347051aabe6Staylor 			VERIFY(nvlist_lookup_string(child[c],
2348051aabe6Staylor 			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
2349051aabe6Staylor 		}
2350051aabe6Staylor 	}
2351051aabe6Staylor 
2352051aabe6Staylor 	/* Does the best device match the one we've booted from? */
2353051aabe6Staylor 	if (bootpath) {
2354051aabe6Staylor 		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
2355051aabe6Staylor 		return (EINVAL);
2356051aabe6Staylor 	}
2357051aabe6Staylor out:
2358051aabe6Staylor 	*bestconf = conf;
2359051aabe6Staylor 	return (0);
2360051aabe6Staylor }
2361051aabe6Staylor 
2362e7cbe64fSgw /*
2363e7cbe64fSgw  * Import a root pool.
2364e7cbe64fSgw  *
2365051aabe6Staylor  * For x86. devpath_list will consist of devid and/or physpath name of
2366051aabe6Staylor  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
2367051aabe6Staylor  * The GRUB "findroot" command will return the vdev we should boot.
2368e7cbe64fSgw  *
2369e7cbe64fSgw  * For Sparc, devpath_list consists the physpath name of the booting device
2370e7cbe64fSgw  * no matter the rootpool is a single device pool or a mirrored pool.
2371e7cbe64fSgw  * e.g.
2372e7cbe64fSgw  *	"/pci@1f,0/ide@d/disk@0,0:a"
2373e7cbe64fSgw  */
2374e7cbe64fSgw int
2375051aabe6Staylor spa_import_rootpool(char *devpath, char *devid)
2376e7cbe64fSgw {
2377e7cbe64fSgw 	nvlist_t *conf = NULL;
2378e7cbe64fSgw 	char *pname;
2379e7cbe64fSgw 	int error;
2380e7cbe64fSgw 
2381e7cbe64fSgw 	/*
2382e7cbe64fSgw 	 * Get the vdev pathname and configuation from the most
2383e7cbe64fSgw 	 * recently updated vdev (highest txg).
2384e7cbe64fSgw 	 */
2385051aabe6Staylor 	if (error = spa_get_rootconf(devpath, devid, &conf))
2386e7cbe64fSgw 		goto msg_out;
2387e7cbe64fSgw 
2388e7cbe64fSgw 	/*
2389e7cbe64fSgw 	 * Add type "root" vdev to the config.
2390e7cbe64fSgw 	 */
2391e7cbe64fSgw 	spa_build_rootpool_config(conf);
2392e7cbe64fSgw 
2393e7cbe64fSgw 	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2394e7cbe64fSgw 
2395bf82a41bSeschrock 	/*
2396bf82a41bSeschrock 	 * We specify 'allowfaulted' for this to be treated like spa_open()
2397bf82a41bSeschrock 	 * instead of spa_import().  This prevents us from marking vdevs as
2398bf82a41bSeschrock 	 * persistently unavailable, and generates FMA ereports as if it were a
2399bf82a41bSeschrock 	 * pool open, not import.
2400bf82a41bSeschrock 	 */
2401bf82a41bSeschrock 	error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
2402e7cbe64fSgw 	if (error == EEXIST)
2403e7cbe64fSgw 		error = 0;
2404e7cbe64fSgw 
2405e7cbe64fSgw 	nvlist_free(conf);
2406e7cbe64fSgw 	return (error);
2407e7cbe64fSgw 
2408e7cbe64fSgw msg_out:
2409051aabe6Staylor 	cmn_err(CE_NOTE, "\n"
2410e7cbe64fSgw 	    "  ***************************************************  \n"
2411e7cbe64fSgw 	    "  *  This device is not bootable!                   *  \n"
2412e7cbe64fSgw 	    "  *  It is either offlined or detached or faulted.  *  \n"
2413e7cbe64fSgw 	    "  *  Please try to boot from a different device.    *  \n"
2414051aabe6Staylor 	    "  ***************************************************  ");
2415e7cbe64fSgw 
2416e7cbe64fSgw 	return (error);
2417e7cbe64fSgw }
2418e7cbe64fSgw #endif
2419e7cbe64fSgw 
/*
 * Import a non-root pool into the system.
 *
 * 'pool' is the name to import under, 'config' is the pool configuration
 * supplied by the caller, and 'props' is an optional nvlist of pool
 * properties to set.  Returns 0 or an errno value (e.g. EEXIST if a pool
 * with this name already exists).
 */
int
spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
{
	return (spa_import_common(pool, config, props, B_FALSE, B_FALSE));
}
2428e7cbe64fSgw 
/*
 * Import a pool with 'allowfaulted' set, so a pool whose load fails can
 * still be imported (spa_import_common() then records the supplied
 * config as if it had come from the cache file).
 */
int
spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props)
{
	return (spa_import_common(pool, config, props, B_FALSE, B_TRUE));
}
2434c5904d13Seschrock 
2435c5904d13Seschrock 
2436fa9e4066Sahrens /*
2437fa9e4066Sahrens  * This (illegal) pool name is used when temporarily importing a spa_t in order
2438fa9e4066Sahrens  * to get the vdev stats associated with the imported devices.
2439fa9e4066Sahrens  */
2440fa9e4066Sahrens #define	TRYIMPORT_NAME	"$import"
2441fa9e4066Sahrens 
/*
 * Attempt a trial import of the pool described by 'tryconfig' and return
 * the resulting configuration nvlist (caller frees), or NULL if the
 * config was not even parsable.  The pool is loaded under the reserved
 * name TRYIMPORT_NAME and fully torn down again before returning.
 */
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = NULL;
	char *poolname;
	spa_t *spa;
	uint64_t state;

	/* The caller's config must at least name the pool and its state. */
	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (NULL);

	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (NULL);

	/*
	 * Create and initialize the spa structure.
	 */
	mutex_enter(&spa_namespace_lock);
	spa = spa_add(TRYIMPORT_NAME, NULL);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 * The return value is deliberately ignored: even a failed load
	 * leaves spa_root_vdev set if 'tryconfig' was parsable.
	 */
	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);

	/*
	 * If 'tryconfig' was at least parsable, return the current config.
	 */
	if (spa->spa_root_vdev != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
		/*
		 * Report the caller's pool name and state rather than the
		 * temporary ones used for the trial import.
		 */
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
		    spa->spa_uberblock.ub_timestamp) == 0);

		/*
		 * If the bootfs property exists on this pool then we
		 * copy it out so that external consumers can tell which
		 * pools are bootable.
		 */
		if (spa->spa_bootfs) {
			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

			/*
			 * We have to play games with the name since the
			 * pool was opened as TRYIMPORT_NAME.
			 */
			if (dsl_dsobj_to_dsname(spa->spa_name,
			    spa->spa_bootfs, tmpname) == 0) {
				char *cp;
				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

				/*
				 * Substitute the real pool name for the
				 * TRYIMPORT_NAME prefix, keeping any
				 * dataset path after the first '/'.
				 */
				cp = strchr(tmpname, '/');
				if (cp == NULL) {
					(void) strlcpy(dsname, tmpname,
					    MAXPATHLEN);
				} else {
					(void) snprintf(dsname, MAXPATHLEN,
					    "%s/%s", poolname, ++cp);
				}
				VERIFY(nvlist_add_string(config,
				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
				kmem_free(dsname, MAXPATHLEN);
			}
			kmem_free(tmpname, MAXPATHLEN);
		}

		/*
		 * Add the list of hot spares and level 2 cache devices.
		 */
		spa_add_spares(spa, config);
		spa_add_l2cache(spa, config);
	}

	/* Tear down the temporary spa; the returned nvlist is all we keep. */
	spa_unload(spa);
	spa_deactivate(spa);
	spa_remove(spa);
	mutex_exit(&spa_namespace_lock);

	return (config);
}
2530fa9e4066Sahrens 
2531fa9e4066Sahrens /*
2532fa9e4066Sahrens  * Pool export/destroy
2533fa9e4066Sahrens  *
2534fa9e4066Sahrens  * The act of destroying or exporting a pool is very simple.  We make sure there
2535fa9e4066Sahrens  * is no more pending I/O and any references to the pool are gone.  Then, we
2536fa9e4066Sahrens  * update the pool state and sync all the labels to disk, removing the
2537fa9e4066Sahrens  * configuration from the cache afterwards.
2538fa9e4066Sahrens  */
/*
 * Common routine backing spa_export(), spa_destroy(), and spa_reset().
 * 'new_state' selects the behavior: POOL_STATE_EXPORTED / _DESTROYED
 * update every label and remove the pool from the namespace, while
 * POOL_STATE_UNINITIALIZED (reset) only unloads it.  If 'oldconfig' is
 * non-NULL, a copy of the pool's final config is returned through it
 * (caller frees).  'force' permits exporting despite an active shared
 * spare.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
    boolean_t force)
{
	spa_t *spa;

	if (oldconfig)
		*oldconfig = NULL;

	/* All of these operations modify on-disk state. */
	if (!(spa_mode & FWRITE))
		return (EROFS);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}

	/*
	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
	 * reacquire the namespace lock, and see if we can export.
	 */
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
	spa_async_suspend(spa);
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);

	/*
	 * The pool will be in core if it's openable,
	 * in which case we can modify its state.
	 */
	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
		/*
		 * Objsets may be open only because they're dirty, so we
		 * have to force it to sync before checking spa_refcnt.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * A pool cannot be exported or destroyed if there are active
		 * references.  If we are resetting a pool, allow references by
		 * fault injection handlers.
		 */
		if (!spa_refcount_zero(spa) ||
		    (spa->spa_inject_ref != 0 &&
		    new_state != POOL_STATE_UNINITIALIZED)) {
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EBUSY);
		}

		/*
		 * A pool cannot be exported if it has an active shared spare.
		 * This is to prevent other pools stealing the active spare
		 * from an exported pool. At user's own will, such pool can
		 * be forcedly exported.
		 */
		if (!force && new_state == POOL_STATE_EXPORTED &&
		    spa_has_active_shared_spare(spa)) {
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (EXDEV);
		}

		/*
		 * We want this to be reflected on every label,
		 * so mark them all dirty.  spa_unload() will do the
		 * final sync that pushes these changes out.
		 */
		if (new_state != POOL_STATE_UNINITIALIZED) {
			spa_config_enter(spa, RW_WRITER, FTAG);
			spa->spa_state = new_state;
			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
			vdev_config_dirty(spa->spa_root_vdev);
			spa_config_exit(spa, FTAG);
		}
	}

	/*
	 * NOTE(review): the ESC_ZFS_POOL_DESTROY event is posted on every
	 * path through here, including export and reset -- confirm this
	 * is intended.
	 */
	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);

	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
		spa_unload(spa);
		spa_deactivate(spa);
	}

	if (oldconfig && spa->spa_config)
		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

	/* For export/destroy, sync the cache file and drop the spa_t. */
	if (new_state != POOL_STATE_UNINITIALIZED) {
		spa_config_sync(spa, B_TRUE, B_TRUE);
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);

	return (0);
}
2636fa9e4066Sahrens 
2637fa9e4066Sahrens /*
2638fa9e4066Sahrens  * Destroy a storage pool.
2639fa9e4066Sahrens  */
2640fa9e4066Sahrens int
2641fa9e4066Sahrens spa_destroy(char *pool)
2642fa9e4066Sahrens {
264389a89ebfSlling 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, B_FALSE));
2644fa9e4066Sahrens }
2645fa9e4066Sahrens 
2646fa9e4066Sahrens /*
2647fa9e4066Sahrens  * Export a storage pool.
2648fa9e4066Sahrens  */
2649fa9e4066Sahrens int
265089a89ebfSlling spa_export(char *pool, nvlist_t **oldconfig, boolean_t force)
2651fa9e4066Sahrens {
265289a89ebfSlling 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, force));
2653fa9e4066Sahrens }
2654fa9e4066Sahrens 
2655ea8dc4b6Seschrock /*
2656ea8dc4b6Seschrock  * Similar to spa_export(), this unloads the spa_t without actually removing it
2657ea8dc4b6Seschrock  * from the namespace in any way.
2658ea8dc4b6Seschrock  */
2659ea8dc4b6Seschrock int
2660ea8dc4b6Seschrock spa_reset(char *pool)
2661ea8dc4b6Seschrock {
266289a89ebfSlling 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
266389a89ebfSlling 	    B_FALSE));
2664ea8dc4b6Seschrock }
2665ea8dc4b6Seschrock 
2666fa9e4066Sahrens /*
2667fa9e4066Sahrens  * ==========================================================================
2668fa9e4066Sahrens  * Device manipulation
2669fa9e4066Sahrens  * ==========================================================================
2670fa9e4066Sahrens  */
2671fa9e4066Sahrens 
2672fa9e4066Sahrens /*
26738654d025Sperrin  * Add a device to a storage pool.
2674fa9e4066Sahrens  */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
	uint64_t txg;
	int c, error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *tvd;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	txg = spa_vdev_enter(spa);

	/* Parse the caller's nvlist into an (unattached) vdev tree. */
	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, error));

	/*
	 * Publish the in-flight tree so validation (vdev_inuse) can see
	 * it -- presumably; cleared on every exit path below.
	 */
	spa->spa_pending_vdev = vd;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
	    &nspares) != 0)
		nspares = 0;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
	    &nl2cache) != 0)
		nl2cache = 0;

	/* The request must add at least one vdev, spare, or cache device. */
	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) {
		spa->spa_pending_vdev = NULL;
		return (spa_vdev_exit(spa, vd, txg, EINVAL));
	}

	if (vd->vdev_children != 0) {
		if ((error = vdev_create(vd, txg, B_FALSE)) != 0) {
			spa->spa_pending_vdev = NULL;
			return (spa_vdev_exit(spa, vd, txg, error));
		}
	}

	/*
	 * We must validate the spares and l2cache devices after checking the
	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
	 */
	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) {
		spa->spa_pending_vdev = NULL;
		return (spa_vdev_exit(spa, vd, txg, error));
	}

	spa->spa_pending_vdev = NULL;

	/*
	 * Transfer each new top-level vdev from vd to rvd.
	 */
	for (c = 0; c < vd->vdev_children; c++) {
		tvd = vd->vdev_child[c];
		vdev_remove_child(vd, tvd);
		tvd->vdev_id = rvd->vdev_children;
		vdev_add_child(rvd, tvd);
		vdev_config_dirty(tvd);
	}

	/* Install any new spares and mark them for sync. */
	if (nspares != 0) {
		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
		    ZPOOL_CONFIG_SPARES);
		spa_load_spares(spa);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/* Likewise for any new level 2 cache devices. */
	if (nl2cache != 0) {
		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
		    ZPOOL_CONFIG_L2CACHE);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	/*
	 * We have to be careful when adding new vdevs to an existing pool.
	 * If other threads start allocating from these vdevs before we
	 * sync the config cache, and we lose power, then upon reboot we may
	 * fail to open the pool because there are DVAs that the config cache
	 * can't translate.  Therefore, we first add the vdevs without
	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
	 * and then let spa_config_update() initialize the new metaslabs.
	 *
	 * spa_load() checks for added-but-not-initialized vdevs, so that
	 * if we lose power at any point in this sequence, the remaining
	 * steps will be completed the next time we load the pool.
	 */
	(void) spa_vdev_exit(spa, vd, txg, 0);

	mutex_enter(&spa_namespace_lock);
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
	mutex_exit(&spa_namespace_lock);

	return (0);
}
2770fa9e4066Sahrens 
2771fa9e4066Sahrens /*
2772fa9e4066Sahrens  * Attach a device to a mirror.  The arguments are the path to any device
2773fa9e4066Sahrens  * in the mirror, and the nvroot for the new device.  If the path specifies
2774fa9e4066Sahrens  * a device that is not mirrored, we automatically insert the mirror vdev.
2775fa9e4066Sahrens  *
2776fa9e4066Sahrens  * If 'replacing' is specified, the new device is intended to replace the
2777fa9e4066Sahrens  * existing device; in this case the two devices are made into their own
27783d7072f8Seschrock  * mirror using the 'replacing' vdev, which is functionally identical to
2779fa9e4066Sahrens  * the mirror vdev (it actually reuses all the same ops) but has a few
2780fa9e4066Sahrens  * extra rules: you can't attach to it after it's been created, and upon
2781fa9e4066Sahrens  * completion of resilvering, the first disk (the one being replaced)
2782fa9e4066Sahrens  * is automatically detached.
2783fa9e4066Sahrens  */
/*
 * Attach the single leaf vdev described by 'nvroot' to the existing leaf
 * identified by 'guid'.  If 'replacing' is set, the new device is placed
 * under a 'replacing' (or 'spare') vdev and will eventually supplant the
 * old one; otherwise the two are mirrored.  Returns 0 or an errno; every
 * path exits through spa_vdev_exit(), which releases the locks taken by
 * spa_vdev_enter() and syncs the configuration.
 */
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
	uint64_t txg, open_txg;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
	vdev_ops_t *pvops;
	dmu_tx_t *tx;
	char *oldvdpath, *newvdpath;
	int newvd_isspare;
	int error;

	txg = spa_vdev_enter(spa);

	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (oldvd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	/* Only leaf vdevs (actual devices) may be attached to. */
	if (!oldvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = oldvd->vdev_parent;

	/* Parse the caller's nvlist into a temporary vdev tree. */
	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, EINVAL));

	/* Exactly one new device may be attached per call. */
	if (newrootvd->vdev_children != 1)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	newvd = newrootvd->vdev_child[0];

	if (!newvd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));

	/* Initialize (label) the new vdev tree. */
	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
		return (spa_vdev_exit(spa, newrootvd, txg, error));

	/*
	 * Spares can't replace logs
	 */
	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

	if (!replacing) {
		/*
		 * For attach, the only allowable parent is a mirror or the root
		 * vdev.
		 */
		if (pvd->vdev_ops != &vdev_mirror_ops &&
		    pvd->vdev_ops != &vdev_root_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		pvops = &vdev_mirror_ops;
	} else {
		/*
		 * Active hot spares can only be replaced by inactive hot
		 * spares.
		 */
		if (pvd->vdev_ops == &vdev_spare_ops &&
		    pvd->vdev_child[1] == oldvd &&
		    !spa_has_spare(spa, newvd->vdev_guid))
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));

		/*
		 * If the source is a hot spare, and the parent isn't already a
		 * spare, then we want to create a new hot spare.  Otherwise, we
		 * want to create a replacing vdev.  The user is not allowed to
		 * attach to a spared vdev child unless the 'isspare' state is
		 * the same (spare replaces spare, non-spare replaces
		 * non-spare).
		 */
		if (pvd->vdev_ops == &vdev_replacing_ops)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops == &vdev_spare_ops &&
		    newvd->vdev_isspare != oldvd->vdev_isspare)
			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
		else if (pvd->vdev_ops != &vdev_spare_ops &&
		    newvd->vdev_isspare)
			pvops = &vdev_spare_ops;
		else
			pvops = &vdev_replacing_ops;
	}

	/*
	 * Compare the new device size with the replaceable/attachable
	 * device size.
	 */
	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));

	/*
	 * The new device cannot have a higher alignment requirement
	 * than the top-level vdev.
	 */
	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));

	/*
	 * If this is an in-place replacement, update oldvd's path and devid
	 * to make it distinguishable from newvd, and unopenable from now on.
	 */
	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
		spa_strfree(oldvd->vdev_path);
		/* "+ 5" covers the appended "/old" plus the NUL terminator. */
		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
		    KM_SLEEP);
		(void) sprintf(oldvd->vdev_path, "%s/%s",
		    newvd->vdev_path, "old");
		if (oldvd->vdev_devid != NULL) {
			spa_strfree(oldvd->vdev_devid);
			oldvd->vdev_devid = NULL;
		}
	}

	/*
	 * If the parent is not a mirror, or if we're replacing, insert the new
	 * mirror/replacing/spare vdev above oldvd.
	 */
	if (pvd->vdev_ops != pvops)
		pvd = vdev_add_parent(oldvd, pvops);

	ASSERT(pvd->vdev_top->vdev_parent == rvd);
	ASSERT(pvd->vdev_ops == pvops);
	ASSERT(oldvd->vdev_parent == pvd);

	/*
	 * Extract the new device from its root and add it to pvd.
	 */
	vdev_remove_child(newrootvd, newvd);
	newvd->vdev_id = pvd->vdev_children;
	vdev_add_child(pvd, newvd);

	/*
	 * If newvd is smaller than oldvd, but larger than its rsize,
	 * the addition of newvd may have decreased our parent's asize.
	 */
	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);

	tvd = newvd->vdev_top;
	ASSERT(pvd->vdev_top == tvd);
	ASSERT(tvd->vdev_parent == rvd);

	vdev_config_dirty(tvd);

	/*
	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
	 */
	open_txg = txg + TXG_CONCURRENT_STATES - 1;

	mutex_enter(&newvd->vdev_dtl_lock);
	space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
	    open_txg - TXG_INITIAL + 1);
	mutex_exit(&newvd->vdev_dtl_lock);

	if (newvd->vdev_isspare)
		spa_spare_activate(newvd);

	/*
	 * Capture the descriptions and spare state now; they are used for
	 * history logging after spa_vdev_exit() below.
	 */
	oldvdpath = spa_strdup(vdev_description(oldvd));
	newvdpath = spa_strdup(vdev_description(newvd));
	newvd_isspare = newvd->vdev_isspare;

	/*
	 * Mark newvd's DTL dirty in this txg.
	 */
	vdev_dirty(tvd, VDD_DTL, newvd, txg);

	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);

	/*
	 * Record the attach/replace in the pool history.  This is best
	 * effort: if the tx can't be assigned we simply skip the log entry.
	 */
	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
		spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
		    CRED(),  "%s vdev=%s %s vdev=%s",
		    replacing && newvd_isspare ? "spare in" :
		    replacing ? "replace" : "attach", newvdpath,
		    replacing ? "for" : "to", oldvdpath);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	spa_strfree(oldvdpath);
	spa_strfree(newvdpath);

	/*
	 * Kick off a resilver to update newvd.
	 */
	VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);

	return (0);
}
2975fa9e4066Sahrens 
2976fa9e4066Sahrens /*
2977fa9e4066Sahrens  * Detach a device from a mirror or replacing vdev.
2978fa9e4066Sahrens  * If 'replace_done' is specified, only detach if the parent
2979fa9e4066Sahrens  * is a replacing vdev.
2980fa9e4066Sahrens  */
int
spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
{
	uint64_t txg;
	int c, t, error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *pvd, *cvd, *tvd;
	boolean_t unspare = B_FALSE;
	uint64_t unspare_guid;
	size_t len;

	txg = spa_vdev_enter(spa);

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (vd == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	/* Only leaf vdevs (actual devices) can be detached. */
	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	pvd = vd->vdev_parent;

	/*
	 * If replace_done is specified, only remove this device if it's
	 * the first child of a replacing vdev.  For the 'spare' vdev, either
	 * disk can be removed.
	 */
	if (replace_done) {
		if (pvd->vdev_ops == &vdev_replacing_ops) {
			if (vd->vdev_id != 0)
				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		} else if (pvd->vdev_ops != &vdev_spare_ops) {
			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
		}
	}

	/* A spare parent implies the pool version already supports spares. */
	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
	    spa_version(spa) >= SPA_VERSION_SPARES);

	/*
	 * Only mirror, replacing, and spare vdevs support detach.
	 */
	if (pvd->vdev_ops != &vdev_replacing_ops &&
	    pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_spare_ops)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	/*
	 * If there's only one replica, you can't detach it.
	 */
	if (pvd->vdev_children <= 1)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	/*
	 * If all siblings have non-empty DTLs, this device may have the only
	 * valid copy of the data, which means we cannot safely detach it.
	 *
	 * XXX -- as in the vdev_offline() case, we really want a more
	 * precise DTL check.
	 */
	for (c = 0; c < pvd->vdev_children; c++) {
		uint64_t dirty;

		cvd = pvd->vdev_child[c];
		if (cvd == vd)
			continue;
		if (vdev_is_dead(cvd))
			continue;
		mutex_enter(&cvd->vdev_dtl_lock);
		dirty = cvd->vdev_dtl_map.sm_space |
		    cvd->vdev_dtl_scrub.sm_space;
		mutex_exit(&cvd->vdev_dtl_lock);
		/* A live sibling with an empty DTL makes detaching vd safe. */
		if (!dirty)
			break;
	}

	/*
	 * If we are a replacing or spare vdev, then we can always detach the
	 * latter child, as that is how one cancels the operation.
	 */
	if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) &&
	    c == pvd->vdev_children)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	/*
	 * If we are detaching the second disk from a replacing vdev, then
	 * check to see if we changed the original vdev's path to have "/old"
	 * at the end in spa_vdev_attach().  If so, undo that change now.
	 */
	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
	    pvd->vdev_child[0]->vdev_path != NULL &&
	    pvd->vdev_child[1]->vdev_path != NULL) {
		ASSERT(pvd->vdev_child[1] == vd);
		cvd = pvd->vdev_child[0];
		len = strlen(vd->vdev_path);
		if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
		    strcmp(cvd->vdev_path + len, "/old") == 0) {
			spa_strfree(cvd->vdev_path);
			cvd->vdev_path = spa_strdup(vd->vdev_path);
		}
	}

	/*
	 * If we are detaching the original disk from a spare, then it implies
	 * that the spare should become a real disk, and be removed from the
	 * active spare list for the pool.
	 */
	if (pvd->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_id == 0)
		unspare = B_TRUE;

	/*
	 * Erase the disk labels so the disk can be used for other things.
	 * This must be done after all other error cases are handled,
	 * but before we disembowel vd (so we can still do I/O to it).
	 * But if we can't do it, don't treat the error as fatal --
	 * it may be that the unwritability of the disk is the reason
	 * it's being detached!
	 */
	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	/*
	 * Remove vd from its parent and compact the parent's children.
	 */
	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	/*
	 * Remember one of the remaining children so we can get tvd below.
	 */
	cvd = pvd->vdev_child[0];

	/*
	 * If we need to remove the remaining child from the list of hot spares,
	 * do it now, marking the vdev as no longer a spare in the process.  We
	 * must do this before vdev_remove_parent(), because that can change the
	 * GUID if it creates a new toplevel GUID.
	 */
	if (unspare) {
		ASSERT(cvd->vdev_isspare);
		spa_spare_remove(cvd);
		unspare_guid = cvd->vdev_guid;
	}

	/*
	 * If the parent mirror/replacing vdev only has one child,
	 * the parent is no longer needed.  Remove it from the tree.
	 */
	if (pvd->vdev_children == 1)
		vdev_remove_parent(cvd);

	/*
	 * We don't set tvd until now because the parent we just removed
	 * may have been the previous top-level vdev.
	 */
	tvd = cvd->vdev_top;
	ASSERT(tvd->vdev_parent == rvd);

	/*
	 * Reevaluate the parent vdev state.
	 */
	vdev_propagate_state(cvd);

	/*
	 * If the device we just detached was smaller than the others, it may be
	 * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
	 * can't fail because the existing metaslabs are already in core, so
	 * there's nothing to read from disk.
	 */
	VERIFY(vdev_metaslab_init(tvd, txg) == 0);

	vdev_config_dirty(tvd);

	/*
	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
	 * But first make sure we're not on any *other* txg's DTL list, to
	 * prevent vd from being accessed after it's freed.
	 */
	for (t = 0; t < TXG_SIZE; t++)
		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
	vd->vdev_detached = B_TRUE;
	vdev_dirty(tvd, VDD_DTL, vd, txg);

	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);

	error = spa_vdev_exit(spa, vd, txg, 0);

	/*
	 * If this was the removal of the original device in a hot spare vdev,
	 * then we want to go through and remove the device from the hot spare
	 * list of every other pool.
	 */
	if (unspare) {
		spa = NULL;
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			if (spa->spa_state != POOL_STATE_ACTIVE)
				continue;

			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
		}
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
318999653d4eSeschrock 
319099653d4eSeschrock /*
3191fa94a07fSbrendan  * Remove a spares vdev from the nvlist config.
319299653d4eSeschrock  */
3193fa94a07fSbrendan static int
3194fa94a07fSbrendan spa_remove_spares(spa_aux_vdev_t *sav, uint64_t guid, boolean_t unspare,
3195fa94a07fSbrendan     nvlist_t **spares, int nspares, vdev_t *vd)
319699653d4eSeschrock {
3197fa94a07fSbrendan 	nvlist_t *nv, **newspares;
3198fa94a07fSbrendan 	int i, j;
319999653d4eSeschrock 
320099653d4eSeschrock 	nv = NULL;
3201fa94a07fSbrendan 	for (i = 0; i < nspares; i++) {
3202fa94a07fSbrendan 		uint64_t theguid;
320399653d4eSeschrock 
3204fa94a07fSbrendan 		VERIFY(nvlist_lookup_uint64(spares[i],
3205fa94a07fSbrendan 		    ZPOOL_CONFIG_GUID, &theguid) == 0);
3206fa94a07fSbrendan 		if (theguid == guid) {
3207fa94a07fSbrendan 			nv = spares[i];
3208fa94a07fSbrendan 			break;
320999653d4eSeschrock 		}
321099653d4eSeschrock 	}
321199653d4eSeschrock 
321299653d4eSeschrock 	/*
3213fa94a07fSbrendan 	 * Only remove the hot spare if it's not currently in use in this pool.
321499653d4eSeschrock 	 */
3215fa94a07fSbrendan 	if (nv == NULL && vd == NULL)
3216fa94a07fSbrendan 		return (ENOENT);
321799653d4eSeschrock 
3218fa94a07fSbrendan 	if (nv == NULL && vd != NULL)
3219fa94a07fSbrendan 		return (ENOTSUP);
322099653d4eSeschrock 
3221fa94a07fSbrendan 	if (!unspare && nv != NULL && vd != NULL)
3222fa94a07fSbrendan 		return (EBUSY);
322399653d4eSeschrock 
322499653d4eSeschrock 	if (nspares == 1) {
322599653d4eSeschrock 		newspares = NULL;
322699653d4eSeschrock 	} else {
322799653d4eSeschrock 		newspares = kmem_alloc((nspares - 1) * sizeof (void *),
322899653d4eSeschrock 		    KM_SLEEP);
322999653d4eSeschrock 		for (i = 0, j = 0; i < nspares; i++) {
323099653d4eSeschrock 			if (spares[i] != nv)
323199653d4eSeschrock 				VERIFY(nvlist_dup(spares[i],
323299653d4eSeschrock 				    &newspares[j++], KM_SLEEP) == 0);
323399653d4eSeschrock 		}
323499653d4eSeschrock 	}
323599653d4eSeschrock 
3236fa94a07fSbrendan 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_SPARES,
323799653d4eSeschrock 	    DATA_TYPE_NVLIST_ARRAY) == 0);
3238fa94a07fSbrendan 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3239fa94a07fSbrendan 	    ZPOOL_CONFIG_SPARES, newspares, nspares - 1) == 0);
324099653d4eSeschrock 	for (i = 0; i < nspares - 1; i++)
324199653d4eSeschrock 		nvlist_free(newspares[i]);
324299653d4eSeschrock 	kmem_free(newspares, (nspares - 1) * sizeof (void *));
3243fa94a07fSbrendan 
3244fa94a07fSbrendan 	return (0);
3245fa94a07fSbrendan }
3246fa94a07fSbrendan 
3247fa94a07fSbrendan /*
3248fa94a07fSbrendan  * Remove an l2cache vdev from the nvlist config.
3249fa94a07fSbrendan  */
3250fa94a07fSbrendan static int
3251fa94a07fSbrendan spa_remove_l2cache(spa_aux_vdev_t *sav, uint64_t guid, nvlist_t **l2cache,
3252fa94a07fSbrendan     int nl2cache, vdev_t *vd)
3253fa94a07fSbrendan {
3254fa94a07fSbrendan 	nvlist_t *nv, **newl2cache;
3255fa94a07fSbrendan 	int i, j;
3256fa94a07fSbrendan 
3257fa94a07fSbrendan 	nv = NULL;
3258fa94a07fSbrendan 	for (i = 0; i < nl2cache; i++) {
3259fa94a07fSbrendan 		uint64_t theguid;
3260fa94a07fSbrendan 
3261fa94a07fSbrendan 		VERIFY(nvlist_lookup_uint64(l2cache[i],
3262fa94a07fSbrendan 		    ZPOOL_CONFIG_GUID, &theguid) == 0);
3263fa94a07fSbrendan 		if (theguid == guid) {
3264fa94a07fSbrendan 			nv = l2cache[i];
3265fa94a07fSbrendan 			break;
3266fa94a07fSbrendan 		}
3267fa94a07fSbrendan 	}
3268fa94a07fSbrendan 
3269fa94a07fSbrendan 	if (vd == NULL) {
3270fa94a07fSbrendan 		for (i = 0; i < nl2cache; i++) {
3271fa94a07fSbrendan 			if (sav->sav_vdevs[i]->vdev_guid == guid) {
3272fa94a07fSbrendan 				vd = sav->sav_vdevs[i];
3273fa94a07fSbrendan 				break;
3274fa94a07fSbrendan 			}
3275fa94a07fSbrendan 		}
3276fa94a07fSbrendan 	}
3277fa94a07fSbrendan 
3278fa94a07fSbrendan 	if (nv == NULL && vd == NULL)
3279fa94a07fSbrendan 		return (ENOENT);
3280fa94a07fSbrendan 
3281fa94a07fSbrendan 	if (nv == NULL && vd != NULL)
3282fa94a07fSbrendan 		return (ENOTSUP);
3283fa94a07fSbrendan 
3284fa94a07fSbrendan 	if (nl2cache == 1) {
3285fa94a07fSbrendan 		newl2cache = NULL;
3286fa94a07fSbrendan 	} else {
3287fa94a07fSbrendan 		newl2cache = kmem_alloc((nl2cache - 1) * sizeof (void *),
3288fa94a07fSbrendan 		    KM_SLEEP);
3289fa94a07fSbrendan 		for (i = 0, j = 0; i < nl2cache; i++) {
3290fa94a07fSbrendan 			if (l2cache[i] != nv)
3291fa94a07fSbrendan 				VERIFY(nvlist_dup(l2cache[i],
3292fa94a07fSbrendan 				    &newl2cache[j++], KM_SLEEP) == 0);
3293fa94a07fSbrendan 		}
3294fa94a07fSbrendan 	}
3295fa94a07fSbrendan 
3296fa94a07fSbrendan 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
3297fa94a07fSbrendan 	    DATA_TYPE_NVLIST_ARRAY) == 0);
3298fa94a07fSbrendan 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3299fa94a07fSbrendan 	    ZPOOL_CONFIG_L2CACHE, newl2cache, nl2cache - 1) == 0);
3300fa94a07fSbrendan 	for (i = 0; i < nl2cache - 1; i++)
3301fa94a07fSbrendan 		nvlist_free(newl2cache[i]);
3302fa94a07fSbrendan 	kmem_free(newl2cache, (nl2cache - 1) * sizeof (void *));
3303fa94a07fSbrendan 
3304fa94a07fSbrendan 	return (0);
3305fa94a07fSbrendan }
3306fa94a07fSbrendan 
3307fa94a07fSbrendan /*
3308fa94a07fSbrendan  * Remove a device from the pool.  Currently, this supports removing only hot
3309fa94a07fSbrendan  * spares and level 2 ARC devices.
3310fa94a07fSbrendan  */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	int error = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);

	/* May be NULL if the guid exists only in the aux configs. */
	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (spa->spa_spares.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		/*
		 * If removal as a spare fails, fall through and try the
		 * l2cache config; a later success there overwrites 'error'.
		 */
		if ((error = spa_remove_spares(&spa->spa_spares, guid, unspare,
		    spares, nspares, vd)) != 0)
			goto cache;
		spa_load_spares(spa);
		spa->spa_spares.sav_sync = B_TRUE;
		goto out;
	}

cache:
	if (spa->spa_l2cache.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) {
		if ((error = spa_remove_l2cache(&spa->spa_l2cache, guid,
		    l2cache, nl2cache, vd)) != 0)
			goto out;
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

out:
	spa_config_exit(spa, FTAG);
	return (error);
}
3349fa9e4066Sahrens 
3350fa9e4066Sahrens /*
 * Find any device that's done replacing, or a vdev marked 'unspare' that's
 * currently spared, so we can detach it.
3353fa9e4066Sahrens  */
3354ea8dc4b6Seschrock static vdev_t *
33553d7072f8Seschrock spa_vdev_resilver_done_hunt(vdev_t *vd)
3356fa9e4066Sahrens {
3357ea8dc4b6Seschrock 	vdev_t *newvd, *oldvd;
3358fa9e4066Sahrens 	int c;
3359fa9e4066Sahrens 
3360ea8dc4b6Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
33613d7072f8Seschrock 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3362ea8dc4b6Seschrock 		if (oldvd != NULL)
3363ea8dc4b6Seschrock 			return (oldvd);
3364ea8dc4b6Seschrock 	}
3365fa9e4066Sahrens 
33663d7072f8Seschrock 	/*
33673d7072f8Seschrock 	 * Check for a completed replacement.
33683d7072f8Seschrock 	 */
3369fa9e4066Sahrens 	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3370ea8dc4b6Seschrock 		oldvd = vd->vdev_child[0];
3371ea8dc4b6Seschrock 		newvd = vd->vdev_child[1];
3372ea8dc4b6Seschrock 
3373ea8dc4b6Seschrock 		mutex_enter(&newvd->vdev_dtl_lock);
3374ea8dc4b6Seschrock 		if (newvd->vdev_dtl_map.sm_space == 0 &&
3375ea8dc4b6Seschrock 		    newvd->vdev_dtl_scrub.sm_space == 0) {
3376ea8dc4b6Seschrock 			mutex_exit(&newvd->vdev_dtl_lock);
3377ea8dc4b6Seschrock 			return (oldvd);
3378fa9e4066Sahrens 		}
3379ea8dc4b6Seschrock 		mutex_exit(&newvd->vdev_dtl_lock);
3380fa9e4066Sahrens 	}
3381ea8dc4b6Seschrock 
33823d7072f8Seschrock 	/*
33833d7072f8Seschrock 	 * Check for a completed resilver with the 'unspare' flag set.
33843d7072f8Seschrock 	 */
33853d7072f8Seschrock 	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
33863d7072f8Seschrock 		newvd = vd->vdev_child[0];
33873d7072f8Seschrock 		oldvd = vd->vdev_child[1];
33883d7072f8Seschrock 
33893d7072f8Seschrock 		mutex_enter(&newvd->vdev_dtl_lock);
33903d7072f8Seschrock 		if (newvd->vdev_unspare &&
33913d7072f8Seschrock 		    newvd->vdev_dtl_map.sm_space == 0 &&
33923d7072f8Seschrock 		    newvd->vdev_dtl_scrub.sm_space == 0) {
33933d7072f8Seschrock 			newvd->vdev_unspare = 0;
33943d7072f8Seschrock 			mutex_exit(&newvd->vdev_dtl_lock);
33953d7072f8Seschrock 			return (oldvd);
33963d7072f8Seschrock 		}
33973d7072f8Seschrock 		mutex_exit(&newvd->vdev_dtl_lock);
33983d7072f8Seschrock 	}
33993d7072f8Seschrock 
3400ea8dc4b6Seschrock 	return (NULL);
3401fa9e4066Sahrens }
3402fa9e4066Sahrens 
/*
 * Detach every vdev that spa_vdev_resilver_done_hunt() reports as done:
 * either a completed replacement, or a spare whose original disk has
 * finished resilvering with the 'unspare' flag set.
 */
static void
spa_vdev_resilver_done(spa_t *spa)
{
	vdev_t *vd;
	vdev_t *pvd;
	uint64_t guid;
	uint64_t pguid = 0;

	spa_config_enter(spa, RW_READER, FTAG);

	/*
	 * Each detach below drops and re-acquires the config lock, so the
	 * vdev tree may change between iterations; re-hunt from the root
	 * every time.
	 */
	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
		guid = vd->vdev_guid;
		/*
		 * If we have just finished replacing a hot spared device, then
		 * we need to detach the parent's first child (the original hot
		 * spare) as well.
		 */
		pvd = vd->vdev_parent;
		if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
		    pvd->vdev_id == 0) {
			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
			ASSERT(pvd->vdev_parent->vdev_children == 2);
			pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
		}
		/* spa_vdev_detach() takes its own locks; release ours first. */
		spa_config_exit(spa, FTAG);
		if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
			return;
		if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
			return;
		spa_config_enter(spa, RW_READER, FTAG);
	}

	spa_config_exit(spa, FTAG);
}
3437fa9e4066Sahrens 
3438c67d9675Seschrock /*
3439c67d9675Seschrock  * Update the stored path for this vdev.  Dirty the vdev configuration, relying
3440c67d9675Seschrock  * on spa_vdev_enter/exit() to synchronize the labels and cache.
3441c67d9675Seschrock  */
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
	vdev_t *vd;
	uint64_t txg;

	txg = spa_vdev_enter(spa);

	/* B_TRUE: presumably also searches aux vdevs — confirm in lookup. */
	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) {
		/*
		 * Determine if this is a reference to a hot spare device.  If
		 * it is, update the path manually as there is no associated
		 * vdev_t that can be synced to disk.
		 */
		nvlist_t **spares;
		uint_t i, nspares;

		if (spa->spa_spares.sav_config != NULL) {
			VERIFY(nvlist_lookup_nvlist_array(
			    spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
			    &spares, &nspares) == 0);
			for (i = 0; i < nspares; i++) {
				uint64_t theguid;
				VERIFY(nvlist_lookup_uint64(spares[i],
				    ZPOOL_CONFIG_GUID, &theguid) == 0);
				if (theguid == guid) {
					/* Found the spare: patch its config
					 * entry and reload the spares. */
					VERIFY(nvlist_add_string(spares[i],
					    ZPOOL_CONFIG_PATH, newpath) == 0);
					spa_load_spares(spa);
					spa->spa_spares.sav_sync = B_TRUE;
					return (spa_vdev_exit(spa, NULL, txg,
					    0));
				}
			}
		}

		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
	}

	/* Paths may only be set on leaf vdevs (actual devices). */
	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	spa_strfree(vd->vdev_path);
	vd->vdev_path = spa_strdup(newpath);

	vdev_config_dirty(vd->vdev_top);

	return (spa_vdev_exit(spa, NULL, txg, 0));
}
3491c67d9675Seschrock 
3492fa9e4066Sahrens /*
3493fa9e4066Sahrens  * ==========================================================================
3494fa9e4066Sahrens  * SPA Scrubbing
3495fa9e4066Sahrens  * ==========================================================================
3496fa9e4066Sahrens  */
3497fa9e4066Sahrens 
3498ea8dc4b6Seschrock int
3499088f3894Sahrens spa_scrub(spa_t *spa, pool_scrub_type_t type)
3500fa9e4066Sahrens {
3501bb8b5132Sek 	ASSERT(!spa_config_held(spa, RW_WRITER));
3502bb8b5132Sek 
3503fa9e4066Sahrens 	if ((uint_t)type >= POOL_SCRUB_TYPES)
3504fa9e4066Sahrens 		return (ENOTSUP);
3505fa9e4066Sahrens 
3506fa9e4066Sahrens 	/*
3507088f3894Sahrens 	 * If a resilver was requested, but there is no DTL on a
3508088f3894Sahrens 	 * writeable leaf device, we have nothing to do.
3509fa9e4066Sahrens 	 */
3510088f3894Sahrens 	if (type == POOL_SCRUB_RESILVER &&
3511088f3894Sahrens 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3512088f3894Sahrens 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3513ea8dc4b6Seschrock 		return (0);
3514ea8dc4b6Seschrock 	}
3515fa9e4066Sahrens 
3516088f3894Sahrens 	if (type == POOL_SCRUB_EVERYTHING &&
3517088f3894Sahrens 	    spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3518088f3894Sahrens 	    spa->spa_dsl_pool->dp_scrub_isresilver)
3519088f3894Sahrens 		return (EBUSY);
3520fa9e4066Sahrens 
3521088f3894Sahrens 	if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3522088f3894Sahrens 		return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3523088f3894Sahrens 	} else if (type == POOL_SCRUB_NONE) {
3524088f3894Sahrens 		return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
3525ea8dc4b6Seschrock 	} else {
3526088f3894Sahrens 		return (EINVAL);
3527fa9e4066Sahrens 	}
3528fa9e4066Sahrens }
3529fa9e4066Sahrens 
3530ea8dc4b6Seschrock /*
3531ea8dc4b6Seschrock  * ==========================================================================
3532ea8dc4b6Seschrock  * SPA async task processing
3533ea8dc4b6Seschrock  * ==========================================================================
3534ea8dc4b6Seschrock  */
3535ea8dc4b6Seschrock 
/*
 * Walk the subtree rooted at 'vd' and transition any vdev whose
 * asynchronous removal was requested (vdev_remove_wanted) into the
 * REMOVED state, dirtying its top-level vdev config so the change is
 * persisted.  Called from spa_async_thread() under spa_vdev_enter().
 */
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
	int c;

	if (vd->vdev_remove_wanted) {
		vd->vdev_remove_wanted = 0;
		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
		/* NOTE(review): B_TRUE flag to vdev_clear() -- confirm semantics */
		vdev_clear(spa, vd, B_TRUE);
		vdev_config_dirty(vd->vdev_top);
	}

	/* Recurse depth-first over all children. */
	for (c = 0; c < vd->vdev_children; c++)
		spa_async_remove(spa, vd->vdev_child[c]);
}
3551fa9e4066Sahrens 
/*
 * Async worker thread: atomically consume the pending SPA_ASYNC_* task
 * bits and perform each requested operation.  Only one async thread
 * runs at a time (see spa_async_dispatch()); on exit it clears
 * spa_async_thread and wakes any spa_async_suspend() waiters.
 */
static void
spa_async_thread(spa_t *spa)
{
	int tasks, i;
	uint64_t txg;

	ASSERT(spa->spa_sync_on);

	/* Snapshot and clear the task bits under the async lock. */
	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks = 0;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if the config needs to be updated.
	 */
	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
		mutex_enter(&spa_namespace_lock);
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * See if any devices need to be marked REMOVED.
	 *
	 * XXX - We avoid doing this when we are in
	 * I/O failure state since spa_vdev_enter() grabs
	 * the namespace lock and would not be able to obtain
	 * the writer config lock.
	 */
	if (tasks & SPA_ASYNC_REMOVE &&
	    spa_state(spa) != POOL_STATE_IO_FAILURE) {
		txg = spa_vdev_enter(spa);
		/* Main tree first, then l2cache and spare aux vdevs. */
		spa_async_remove(spa, spa->spa_root_vdev);
		for (i = 0; i < spa->spa_l2cache.sav_count; i++)
			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
		for (i = 0; i < spa->spa_spares.sav_count; i++)
			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
		(void) spa_vdev_exit(spa, NULL, txg, 0);
	}

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_RESILVER_DONE)
		spa_vdev_resilver_done(spa);

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER)
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}
3614ea8dc4b6Seschrock 
/*
 * Block async task processing and wait for any in-flight async thread
 * to finish.  The suspend count nests, so multiple suspenders are
 * supported; each must be paired with a spa_async_resume().
 */
void
spa_async_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_suspended++;
	/* spa_async_thread() clears this and broadcasts when it exits */
	while (spa->spa_async_thread != NULL)
		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
	mutex_exit(&spa->spa_async_lock);
}
3624ea8dc4b6Seschrock 
/*
 * Drop one level of async suspension taken by spa_async_suspend().
 * This only decrements the count; a new worker is not kicked off
 * here -- that happens in spa_async_dispatch().
 */
void
spa_async_resume(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	ASSERT(spa->spa_async_suspended != 0);
	spa->spa_async_suspended--;
	mutex_exit(&spa->spa_async_lock);
}
3633ea8dc4b6Seschrock 
/*
 * Spawn the async worker thread if there are pending tasks and nothing
 * prevents it: no suspension in effect and no worker already running.
 * The rootdir check skips dispatch while the root fs is read-only --
 * presumably so config-cache writes can succeed; confirm.
 */
static void
spa_async_dispatch(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
	    spa->spa_async_thread == NULL &&
	    rootdir != NULL && !vn_is_readonly(rootdir))
		spa->spa_async_thread = thread_create(NULL, 0,
		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&spa->spa_async_lock);
}
3645ea8dc4b6Seschrock 
/*
 * Record one or more async task bits (SPA_ASYNC_*) for later
 * processing; the work itself is performed by spa_async_thread()
 * once spa_async_dispatch() starts a worker.
 */
void
spa_async_request(spa_t *spa, int task)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_tasks |= task;
	mutex_exit(&spa->spa_async_lock);
}
3653fa9e4066Sahrens 
3654fa9e4066Sahrens /*
3655fa9e4066Sahrens  * ==========================================================================
3656fa9e4066Sahrens  * SPA syncing routines
3657fa9e4066Sahrens  * ==========================================================================
3658fa9e4066Sahrens  */
3659fa9e4066Sahrens 
/*
 * Free every block on the pool's deferred-free list (spa_sync_bplist)
 * in the given txg, then vacate the list and pre-dirty its first block
 * so the upcoming sync converges quickly.
 */
static void
spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
{
	bplist_t *bpl = &spa->spa_sync_bplist;
	dmu_tx_t *tx;
	blkptr_t blk;
	uint64_t itor = 0;	/* bplist iteration cursor */
	zio_t *zio;
	int error;
	uint8_t c = 1;		/* dummy byte used to pre-dirty block 0 */

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD);

	/* Issue a free for each block pointer on the list... */
	while (bplist_iterate(bpl, &itor, &blk) == 0)
		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL));

	/* ...and wait for all of them to complete. */
	error = zio_wait(zio);
	ASSERT3U(error, ==, 0);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	bplist_vacate(bpl, tx);

	/*
	 * Pre-dirty the first block so we sync to convergence faster.
	 * (Usually only the first block is needed.)
	 */
	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
	dmu_tx_commit(tx);
}
3689fa9e4066Sahrens 
3690fa9e4066Sahrens static void
369199653d4eSeschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3692fa9e4066Sahrens {
3693fa9e4066Sahrens 	char *packed = NULL;
3694f7991ba4STim Haley 	size_t bufsize;
3695fa9e4066Sahrens 	size_t nvsize = 0;
3696fa9e4066Sahrens 	dmu_buf_t *db;
3697fa9e4066Sahrens 
369899653d4eSeschrock 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3699fa9e4066Sahrens 
3700f7991ba4STim Haley 	/*
3701f7991ba4STim Haley 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
3702f7991ba4STim Haley 	 * information.  This avoids the dbuf_will_dirty() path and
3703f7991ba4STim Haley 	 * saves us a pre-read to get data we don't actually care about.
3704f7991ba4STim Haley 	 */
3705f7991ba4STim Haley 	bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
3706f7991ba4STim Haley 	packed = kmem_alloc(bufsize, KM_SLEEP);
3707fa9e4066Sahrens 
370899653d4eSeschrock 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3709ea8dc4b6Seschrock 	    KM_SLEEP) == 0);
3710f7991ba4STim Haley 	bzero(packed + nvsize, bufsize - nvsize);
3711fa9e4066Sahrens 
3712f7991ba4STim Haley 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
3713fa9e4066Sahrens 
3714f7991ba4STim Haley 	kmem_free(packed, bufsize);
3715fa9e4066Sahrens 
371699653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3717fa9e4066Sahrens 	dmu_buf_will_dirty(db, tx);
3718fa9e4066Sahrens 	*(uint64_t *)db->db_data = nvsize;
3719ea8dc4b6Seschrock 	dmu_buf_rele(db, FTAG);
3720fa9e4066Sahrens }
3721fa9e4066Sahrens 
/*
 * Sync an auxiliary vdev array (hot spares or l2cache devices) out to
 * the MOS.  'config' is the nvlist key (e.g. ZPOOL_CONFIG_SPARES) and
 * 'entry' the pool-directory ZAP name (e.g. DMU_POOL_SPARES) for this
 * device class.  No-op unless sav_sync was set by a device change.
 */
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
    const char *config, const char *entry)
{
	nvlist_t *nvroot;
	nvlist_t **list;
	int i;

	if (!sav->sav_sync)
		return;

	/*
	 * Update the MOS nvlist describing the list of available devices.
	 * spa_validate_aux() will have already made sure this nvlist is
	 * valid and the vdevs are labeled appropriately.
	 */
	if (sav->sav_object == 0) {
		/* First sync for this class: allocate the MOS object. */
		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
		    &sav->sav_object, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	if (sav->sav_count == 0) {
		/* Empty class: record a zero-length array. */
		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
	} else {
		/* Generate a config nvlist for each aux vdev. */
		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
		for (i = 0; i < sav->sav_count; i++)
			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
			    B_FALSE, B_FALSE, B_TRUE);
		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
		    sav->sav_count) == 0);
		for (i = 0; i < sav->sav_count; i++)
			nvlist_free(list[i]);
		kmem_free(list, sav->sav_count * sizeof (void *));
	}

	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
	nvlist_free(nvroot);

	sav->sav_sync = B_FALSE;
}
376799653d4eSeschrock 
376899653d4eSeschrock static void
376999653d4eSeschrock spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
377099653d4eSeschrock {
377199653d4eSeschrock 	nvlist_t *config;
377299653d4eSeschrock 
377399653d4eSeschrock 	if (list_is_empty(&spa->spa_dirty_list))
377499653d4eSeschrock 		return;
377599653d4eSeschrock 
377699653d4eSeschrock 	config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE);
377799653d4eSeschrock 
377899653d4eSeschrock 	if (spa->spa_config_syncing)
377999653d4eSeschrock 		nvlist_free(spa->spa_config_syncing);
378099653d4eSeschrock 	spa->spa_config_syncing = config;
378199653d4eSeschrock 
378299653d4eSeschrock 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
378399653d4eSeschrock }
378499653d4eSeschrock 
3785990b4856Slling /*
3786990b4856Slling  * Set zpool properties.
3787990b4856Slling  */
3788b1b8ab34Slling static void
3789ecd6cf80Smarks spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3790b1b8ab34Slling {
3791b1b8ab34Slling 	spa_t *spa = arg1;
3792b1b8ab34Slling 	objset_t *mos = spa->spa_meta_objset;
3793990b4856Slling 	nvlist_t *nvp = arg2;
3794990b4856Slling 	nvpair_t *elem;
37953d7072f8Seschrock 	uint64_t intval;
3796c5904d13Seschrock 	char *strval;
3797990b4856Slling 	zpool_prop_t prop;
3798990b4856Slling 	const char *propname;
3799990b4856Slling 	zprop_type_t proptype;
3800c5904d13Seschrock 	spa_config_dirent_t *dp;
3801b1b8ab34Slling 
3802990b4856Slling 	elem = NULL;
3803990b4856Slling 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
3804990b4856Slling 		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
3805990b4856Slling 		case ZPOOL_PROP_VERSION:
3806990b4856Slling 			/*
3807990b4856Slling 			 * Only set version for non-zpool-creation cases
3808990b4856Slling 			 * (set/import). spa_create() needs special care
3809990b4856Slling 			 * for version setting.
3810990b4856Slling 			 */
3811990b4856Slling 			if (tx->tx_txg != TXG_INITIAL) {
3812990b4856Slling 				VERIFY(nvpair_value_uint64(elem,
3813990b4856Slling 				    &intval) == 0);
3814990b4856Slling 				ASSERT(intval <= SPA_VERSION);
3815990b4856Slling 				ASSERT(intval >= spa_version(spa));
3816990b4856Slling 				spa->spa_uberblock.ub_version = intval;
3817990b4856Slling 				vdev_config_dirty(spa->spa_root_vdev);
3818990b4856Slling 			}
3819ecd6cf80Smarks 			break;
3820990b4856Slling 
3821990b4856Slling 		case ZPOOL_PROP_ALTROOT:
3822990b4856Slling 			/*
3823990b4856Slling 			 * 'altroot' is a non-persistent property. It should
3824990b4856Slling 			 * have been set temporarily at creation or import time.
3825990b4856Slling 			 */
3826990b4856Slling 			ASSERT(spa->spa_root != NULL);
3827b1b8ab34Slling 			break;
38283d7072f8Seschrock 
38292f8aaab3Seschrock 		case ZPOOL_PROP_CACHEFILE:
3830990b4856Slling 			/*
38312f8aaab3Seschrock 			 * 'cachefile' is a non-persistent property, but note
38322f8aaab3Seschrock 			 * an async request that the config cache needs to be
38332f8aaab3Seschrock 			 * udpated.
3834990b4856Slling 			 */
38352f8aaab3Seschrock 			VERIFY(nvpair_value_string(elem, &strval) == 0);
3836c5904d13Seschrock 
3837c5904d13Seschrock 			dp = kmem_alloc(sizeof (spa_config_dirent_t),
3838c5904d13Seschrock 			    KM_SLEEP);
3839c5904d13Seschrock 
3840c5904d13Seschrock 			if (strval[0] == '\0')
3841c5904d13Seschrock 				dp->scd_path = spa_strdup(spa_config_path);
3842c5904d13Seschrock 			else if (strcmp(strval, "none") == 0)
3843c5904d13Seschrock 				dp->scd_path = NULL;
3844c5904d13Seschrock 			else
3845c5904d13Seschrock 				dp->scd_path = spa_strdup(strval);
3846c5904d13Seschrock 
3847c5904d13Seschrock 			list_insert_head(&spa->spa_config_list, dp);
38482f8aaab3Seschrock 			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
38493d7072f8Seschrock 			break;
3850990b4856Slling 		default:
3851990b4856Slling 			/*
3852990b4856Slling 			 * Set pool property values in the poolprops mos object.
3853990b4856Slling 			 */
3854990b4856Slling 			mutex_enter(&spa->spa_props_lock);
3855990b4856Slling 			if (spa->spa_pool_props_object == 0) {
3856990b4856Slling 				objset_t *mos = spa->spa_meta_objset;
3857990b4856Slling 
3858990b4856Slling 				VERIFY((spa->spa_pool_props_object =
3859990b4856Slling 				    zap_create(mos, DMU_OT_POOL_PROPS,
3860990b4856Slling 				    DMU_OT_NONE, 0, tx)) > 0);
3861990b4856Slling 
3862990b4856Slling 				VERIFY(zap_update(mos,
3863990b4856Slling 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
3864990b4856Slling 				    8, 1, &spa->spa_pool_props_object, tx)
3865990b4856Slling 				    == 0);
3866990b4856Slling 			}
3867990b4856Slling 			mutex_exit(&spa->spa_props_lock);
3868990b4856Slling 
3869990b4856Slling 			/* normalize the property name */
3870990b4856Slling 			propname = zpool_prop_to_name(prop);
3871990b4856Slling 			proptype = zpool_prop_get_type(prop);
3872990b4856Slling 
3873990b4856Slling 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
3874990b4856Slling 				ASSERT(proptype == PROP_TYPE_STRING);
3875990b4856Slling 				VERIFY(nvpair_value_string(elem, &strval) == 0);
3876990b4856Slling 				VERIFY(zap_update(mos,
3877990b4856Slling 				    spa->spa_pool_props_object, propname,
3878990b4856Slling 				    1, strlen(strval) + 1, strval, tx) == 0);
3879990b4856Slling 
3880990b4856Slling 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
3881990b4856Slling 				VERIFY(nvpair_value_uint64(elem, &intval) == 0);
3882990b4856Slling 
3883990b4856Slling 				if (proptype == PROP_TYPE_INDEX) {
3884990b4856Slling 					const char *unused;
3885990b4856Slling 					VERIFY(zpool_prop_index_to_string(
3886990b4856Slling 					    prop, intval, &unused) == 0);
3887990b4856Slling 				}
3888990b4856Slling 				VERIFY(zap_update(mos,
3889990b4856Slling 				    spa->spa_pool_props_object, propname,
3890990b4856Slling 				    8, 1, &intval, tx) == 0);
3891990b4856Slling 			} else {
3892990b4856Slling 				ASSERT(0); /* not allowed */
3893990b4856Slling 			}
3894990b4856Slling 
38950a4e9518Sgw 			switch (prop) {
38960a4e9518Sgw 			case ZPOOL_PROP_DELEGATION:
3897990b4856Slling 				spa->spa_delegation = intval;
38980a4e9518Sgw 				break;
38990a4e9518Sgw 			case ZPOOL_PROP_BOOTFS:
3900990b4856Slling 				spa->spa_bootfs = intval;
39010a4e9518Sgw 				break;
39020a4e9518Sgw 			case ZPOOL_PROP_FAILUREMODE:
39030a4e9518Sgw 				spa->spa_failmode = intval;
39040a4e9518Sgw 				break;
39050a4e9518Sgw 			default:
39060a4e9518Sgw 				break;
39070a4e9518Sgw 			}
3908990b4856Slling 		}
3909990b4856Slling 
3910990b4856Slling 		/* log internal history if this is not a zpool create */
3911990b4856Slling 		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
3912990b4856Slling 		    tx->tx_txg != TXG_INITIAL) {
3913990b4856Slling 			spa_history_internal_log(LOG_POOL_PROPSET,
3914990b4856Slling 			    spa, tx, cr, "%s %lld %s",
3915990b4856Slling 			    nvpair_name(elem), intval, spa->spa_name);
3916b1b8ab34Slling 		}
3917b1b8ab34Slling 	}
3918b1b8ab34Slling }
3919b1b8ab34Slling 
3920fa9e4066Sahrens /*
3921fa9e4066Sahrens  * Sync the specified transaction group.  New blocks may be dirtied as
3922fa9e4066Sahrens  * part of the process, so we iterate until it converges.
3923fa9e4066Sahrens  */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bplist_t *bpl = &spa->spa_sync_bplist;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int dirty_vdevs;

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, RW_READER, FTAG);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		int i;

		/* Scan for any top-level vdev with a non-default ratio. */
		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	/* One-time upgrade step when crossing SPA_VERSION_ORIGIN. */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	/* One-time upgrade step when crossing SPA_VERSION_NEXT_CLONES. */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	/*
	 * If anything has changed in this txg, push the deferred frees
	 * from the previous txg.  If not, leave them alone so that we
	 * don't generate work on an otherwise idle system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg))
		spa_sync_deferred_frees(spa, txg);

	/*
	 * Iterate to convergence.
	 */
	do {
		spa->spa_sync_pass++;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		/* Sync every vdev dirtied in this txg; count them so the
		 * loop repeats until a pass dirties nothing new. */
		dirty_vdevs = 0;
		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
			vdev_sync(vd, txg);
			dirty_vdevs++;
		}

		bplist_sync(bpl, tx);
	} while (dirty_vdevs);

	bplist_close(bpl);

	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for details).  If there *are*
	 * dirty vdevs -- or if the sync to our random subset fails --
	 * then sync the uberblock to all vdevs.
	 */
	if (list_is_empty(&spa->spa_dirty_list)) {
		vdev_t *svd[SPA_DVAS_PER_BP];
		int svdcount = 0;
		int children = rvd->vdev_children;
		int c0 = spa_get_random(children);
		int c;

		/* Pick up to SPA_DVAS_PER_BP suitable top-level vdevs,
		 * starting from a random child to spread the load. */
		for (c = 0; c < children; c++) {
			vd = rvd->vdev_child[(c0 + c) % children];
			if (vd->vdev_ms_array == 0 || vd->vdev_islog)
				continue;
			svd[svdcount++] = vd;
			if (svdcount == SPA_DVAS_PER_BP)
				break;
		}
		vdev_config_sync(svd, svdcount, txg);
	} else {
		vdev_config_sync(rvd->vdev_child, rvd->vdev_children, txg);
	}
	dmu_tx_commit(tx);

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	/* Publish the just-synced uberblock under the traverse lock. */
	spa->spa_traverse_wanted = B_TRUE;
	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
	spa->spa_traverse_wanted = B_FALSE;
	spa->spa_ubsync = spa->spa_uberblock;
	rw_exit(&spa->spa_traverse_lock);

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}
4098fa9e4066Sahrens 
4099fa9e4066Sahrens /*
4100fa9e4066Sahrens  * Sync all pools.  We don't want to hold the namespace lock across these
4101fa9e4066Sahrens  * operations, so we take a reference on the spa_t and drop the lock during the
4102fa9e4066Sahrens  * sync.
4103fa9e4066Sahrens  */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE)
			continue;
		/*
		 * Hold a reference so the spa_t cannot go away while the
		 * namespace lock is dropped around the synchronous wait.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}
4120fa9e4066Sahrens 
4121fa9e4066Sahrens /*
4122fa9e4066Sahrens  * ==========================================================================
4123fa9e4066Sahrens  * Miscellaneous routines
4124fa9e4066Sahrens  * ==========================================================================
4125fa9e4066Sahrens  */
4126fa9e4066Sahrens 
4127fa9e4066Sahrens /*
4128fa9e4066Sahrens  * Remove all pools in the system.
4129fa9e4066Sahrens  */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		/* Tear down any remaining in-core state before removal. */
		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}
4160ea8dc4b6Seschrock 
4161ea8dc4b6Seschrock vdev_t *
4162c5904d13Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
4163ea8dc4b6Seschrock {
4164c5904d13Seschrock 	vdev_t *vd;
4165c5904d13Seschrock 	int i;
4166c5904d13Seschrock 
4167c5904d13Seschrock 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4168c5904d13Seschrock 		return (vd);
4169c5904d13Seschrock 
4170c5904d13Seschrock 	if (l2cache) {
4171c5904d13Seschrock 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4172c5904d13Seschrock 			vd = spa->spa_l2cache.sav_vdevs[i];
4173c5904d13Seschrock 			if (vd->vdev_guid == guid)
4174c5904d13Seschrock 				return (vd);
4175c5904d13Seschrock 		}
4176c5904d13Seschrock 	}
4177c5904d13Seschrock 
4178c5904d13Seschrock 	return (NULL);
4179ea8dc4b6Seschrock }
4180eaca9bbdSeschrock 
4181eaca9bbdSeschrock void
4182990b4856Slling spa_upgrade(spa_t *spa, uint64_t version)
4183eaca9bbdSeschrock {
4184eaca9bbdSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
4185eaca9bbdSeschrock 
4186eaca9bbdSeschrock 	/*
4187eaca9bbdSeschrock 	 * This should only be called for a non-faulted pool, and since a
4188eaca9bbdSeschrock 	 * future version would result in an unopenable pool, this shouldn't be
4189eaca9bbdSeschrock 	 * possible.
4190eaca9bbdSeschrock 	 */
4191e7437265Sahrens 	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4192990b4856Slling 	ASSERT(version >= spa->spa_uberblock.ub_version);
4193eaca9bbdSeschrock 
4194990b4856Slling 	spa->spa_uberblock.ub_version = version;
4195eaca9bbdSeschrock 	vdev_config_dirty(spa->spa_root_vdev);
4196eaca9bbdSeschrock 
4197eaca9bbdSeschrock 	spa_config_exit(spa, FTAG);
419899653d4eSeschrock 
419999653d4eSeschrock 	txg_wait_synced(spa_get_dsl(spa), 0);
420099653d4eSeschrock }
420199653d4eSeschrock 
420299653d4eSeschrock boolean_t
420399653d4eSeschrock spa_has_spare(spa_t *spa, uint64_t guid)
420499653d4eSeschrock {
420599653d4eSeschrock 	int i;
420639c23413Seschrock 	uint64_t spareguid;
4207fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_spares;
420899653d4eSeschrock 
4209fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++)
4210fa94a07fSbrendan 		if (sav->sav_vdevs[i]->vdev_guid == guid)
421199653d4eSeschrock 			return (B_TRUE);
421299653d4eSeschrock 
4213fa94a07fSbrendan 	for (i = 0; i < sav->sav_npending; i++) {
4214fa94a07fSbrendan 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4215fa94a07fSbrendan 		    &spareguid) == 0 && spareguid == guid)
421639c23413Seschrock 			return (B_TRUE);
421739c23413Seschrock 	}
421839c23413Seschrock 
421999653d4eSeschrock 	return (B_FALSE);
4220eaca9bbdSeschrock }
4221b1b8ab34Slling 
422289a89ebfSlling /*
422389a89ebfSlling  * Check if a pool has an active shared spare device.
422489a89ebfSlling  * Note: reference count of an active spare is 2, as a spare and as a replace
422589a89ebfSlling  */
422689a89ebfSlling static boolean_t
422789a89ebfSlling spa_has_active_shared_spare(spa_t *spa)
422889a89ebfSlling {
422989a89ebfSlling 	int i, refcnt;
423089a89ebfSlling 	uint64_t pool;
423189a89ebfSlling 	spa_aux_vdev_t *sav = &spa->spa_spares;
423289a89ebfSlling 
423389a89ebfSlling 	for (i = 0; i < sav->sav_count; i++) {
423489a89ebfSlling 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
423589a89ebfSlling 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
423689a89ebfSlling 		    refcnt > 2)
423789a89ebfSlling 			return (B_TRUE);
423889a89ebfSlling 	}
423989a89ebfSlling 
424089a89ebfSlling 	return (B_FALSE);
424189a89ebfSlling }
424289a89ebfSlling 
42433d7072f8Seschrock /*
42443d7072f8Seschrock  * Post a sysevent corresponding to the given event.  The 'name' must be one of
42453d7072f8Seschrock  * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
42463d7072f8Seschrock  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
42473d7072f8Seschrock  * in the userland libzpool, as we don't want consumers to misinterpret ztest
42483d7072f8Seschrock  * or zdb as real changes.
42493d7072f8Seschrock  */
42503d7072f8Seschrock void
42513d7072f8Seschrock spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
42523d7072f8Seschrock {
42533d7072f8Seschrock #ifdef _KERNEL
42543d7072f8Seschrock 	sysevent_t		*ev;
42553d7072f8Seschrock 	sysevent_attr_list_t	*attr = NULL;
42563d7072f8Seschrock 	sysevent_value_t	value;
42573d7072f8Seschrock 	sysevent_id_t		eid;
42583d7072f8Seschrock 
42593d7072f8Seschrock 	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
42603d7072f8Seschrock 	    SE_SLEEP);
42613d7072f8Seschrock 
42623d7072f8Seschrock 	value.value_type = SE_DATA_TYPE_STRING;
42633d7072f8Seschrock 	value.value.sv_string = spa_name(spa);
42643d7072f8Seschrock 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
42653d7072f8Seschrock 		goto done;
42663d7072f8Seschrock 
42673d7072f8Seschrock 	value.value_type = SE_DATA_TYPE_UINT64;
42683d7072f8Seschrock 	value.value.sv_uint64 = spa_guid(spa);
42693d7072f8Seschrock 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
42703d7072f8Seschrock 		goto done;
42713d7072f8Seschrock 
42723d7072f8Seschrock 	if (vd) {
42733d7072f8Seschrock 		value.value_type = SE_DATA_TYPE_UINT64;
42743d7072f8Seschrock 		value.value.sv_uint64 = vd->vdev_guid;
42753d7072f8Seschrock 		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
42763d7072f8Seschrock 		    SE_SLEEP) != 0)
42773d7072f8Seschrock 			goto done;
42783d7072f8Seschrock 
42793d7072f8Seschrock 		if (vd->vdev_path) {
42803d7072f8Seschrock 			value.value_type = SE_DATA_TYPE_STRING;
42813d7072f8Seschrock 			value.value.sv_string = vd->vdev_path;
42823d7072f8Seschrock 			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
42833d7072f8Seschrock 			    &value, SE_SLEEP) != 0)
42843d7072f8Seschrock 				goto done;
42853d7072f8Seschrock 		}
42863d7072f8Seschrock 	}
42873d7072f8Seschrock 
4288b01c3b58Seschrock 	if (sysevent_attach_attributes(ev, attr) != 0)
4289b01c3b58Seschrock 		goto done;
4290b01c3b58Seschrock 	attr = NULL;
4291b01c3b58Seschrock 
42923d7072f8Seschrock 	(void) log_sysevent(ev, SE_SLEEP, &eid);
42933d7072f8Seschrock 
42943d7072f8Seschrock done:
42953d7072f8Seschrock 	if (attr)
42963d7072f8Seschrock 		sysevent_free_attr(attr);
42973d7072f8Seschrock 	sysevent_free(ev);
42983d7072f8Seschrock #endif
42993d7072f8Seschrock }
4300