xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa.c (revision b7b97454)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
2199653d4eSeschrock 
22fa9e4066Sahrens /*
23b01c3b58Seschrock  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24fa9e4066Sahrens  * Use is subject to license terms.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
28fa9e4066Sahrens 
29fa9e4066Sahrens /*
30fa9e4066Sahrens  * This file contains all the routines used when modifying on-disk SPA state.
31fa9e4066Sahrens  * This includes opening, importing, destroying, exporting a pool, and syncing a
32fa9e4066Sahrens  * pool.
33fa9e4066Sahrens  */
34fa9e4066Sahrens 
35fa9e4066Sahrens #include <sys/zfs_context.h>
36ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h>
37fa9e4066Sahrens #include <sys/spa_impl.h>
38fa9e4066Sahrens #include <sys/zio.h>
39fa9e4066Sahrens #include <sys/zio_checksum.h>
40fa9e4066Sahrens #include <sys/zio_compress.h>
41fa9e4066Sahrens #include <sys/dmu.h>
42fa9e4066Sahrens #include <sys/dmu_tx.h>
43fa9e4066Sahrens #include <sys/zap.h>
44fa9e4066Sahrens #include <sys/zil.h>
45fa9e4066Sahrens #include <sys/vdev_impl.h>
46fa9e4066Sahrens #include <sys/metaslab.h>
47fa9e4066Sahrens #include <sys/uberblock_impl.h>
48fa9e4066Sahrens #include <sys/txg.h>
49fa9e4066Sahrens #include <sys/avl.h>
50fa9e4066Sahrens #include <sys/dmu_traverse.h>
51b1b8ab34Slling #include <sys/dmu_objset.h>
52fa9e4066Sahrens #include <sys/unique.h>
53fa9e4066Sahrens #include <sys/dsl_pool.h>
54b1b8ab34Slling #include <sys/dsl_dataset.h>
55fa9e4066Sahrens #include <sys/dsl_dir.h>
56fa9e4066Sahrens #include <sys/dsl_prop.h>
57b1b8ab34Slling #include <sys/dsl_synctask.h>
58fa9e4066Sahrens #include <sys/fs/zfs.h>
59fa94a07fSbrendan #include <sys/arc.h>
60fa9e4066Sahrens #include <sys/callb.h>
6195173954Sek #include <sys/systeminfo.h>
6295173954Sek #include <sys/sunddi.h>
63fa9e4066Sahrens 
64990b4856Slling #include "zfs_prop.h"
65*b7b97454Sperrin #include "zfs_comutil.h"
66990b4856Slling 
67416e0cd8Sek int zio_taskq_threads = 8;
68416e0cd8Sek 
69990b4856Slling static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
70990b4856Slling 
71990b4856Slling /*
72990b4856Slling  * ==========================================================================
73990b4856Slling  * SPA properties routines
74990b4856Slling  * ==========================================================================
75990b4856Slling  */
76990b4856Slling 
77990b4856Slling /*
78990b4856Slling  * Add a (source=src, propname=propval) list to an nvlist.
79990b4856Slling  */
80990b4856Slling static int
81990b4856Slling spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
82990b4856Slling     uint64_t intval, zprop_source_t src)
83990b4856Slling {
84990b4856Slling 	const char *propname = zpool_prop_to_name(prop);
85990b4856Slling 	nvlist_t *propval;
86990b4856Slling 	int err = 0;
87990b4856Slling 
88990b4856Slling 	if (err = nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP))
89990b4856Slling 		return (err);
90990b4856Slling 
91990b4856Slling 	if (err = nvlist_add_uint64(propval, ZPROP_SOURCE, src))
92990b4856Slling 		goto out;
93990b4856Slling 
94990b4856Slling 	if (strval != NULL) {
95990b4856Slling 		if (err = nvlist_add_string(propval, ZPROP_VALUE, strval))
96990b4856Slling 			goto out;
97990b4856Slling 	} else {
98990b4856Slling 		if (err = nvlist_add_uint64(propval, ZPROP_VALUE, intval))
99990b4856Slling 			goto out;
100990b4856Slling 	}
101990b4856Slling 
102990b4856Slling 	err = nvlist_add_nvlist(nvl, propname, propval);
103990b4856Slling out:
104990b4856Slling 	nvlist_free(propval);
105990b4856Slling 	return (err);
106990b4856Slling }
107990b4856Slling 
108990b4856Slling /*
109990b4856Slling  * Get property values from the spa configuration.
110990b4856Slling  */
111990b4856Slling static int
112990b4856Slling spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
113990b4856Slling {
114990b4856Slling 	uint64_t size = spa_get_space(spa);
115990b4856Slling 	uint64_t used = spa_get_alloc(spa);
116990b4856Slling 	uint64_t cap, version;
117990b4856Slling 	zprop_source_t src = ZPROP_SRC_NONE;
118990b4856Slling 	int err;
1192f8aaab3Seschrock 	char *cachefile;
1202f8aaab3Seschrock 	size_t len;
121990b4856Slling 
122990b4856Slling 	/*
123990b4856Slling 	 * readonly properties
124990b4856Slling 	 */
125990b4856Slling 	if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa->spa_name,
126990b4856Slling 	    0, src))
127990b4856Slling 		return (err);
128990b4856Slling 
129990b4856Slling 	if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src))
130990b4856Slling 		return (err);
131990b4856Slling 
132990b4856Slling 	if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src))
133990b4856Slling 		return (err);
134990b4856Slling 
135990b4856Slling 	if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
136990b4856Slling 	    size - used, src))
137990b4856Slling 		return (err);
138990b4856Slling 
139990b4856Slling 	cap = (size == 0) ? 0 : (used * 100 / size);
140990b4856Slling 	if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src))
141990b4856Slling 		return (err);
142990b4856Slling 
143990b4856Slling 	if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL,
144990b4856Slling 	    spa_guid(spa), src))
145990b4856Slling 		return (err);
146990b4856Slling 
147990b4856Slling 	if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
148990b4856Slling 	    spa->spa_root_vdev->vdev_state, src))
149990b4856Slling 		return (err);
150990b4856Slling 
151990b4856Slling 	/*
152990b4856Slling 	 * settable properties that are not stored in the pool property object.
153990b4856Slling 	 */
154990b4856Slling 	version = spa_version(spa);
155990b4856Slling 	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
156990b4856Slling 		src = ZPROP_SRC_DEFAULT;
157990b4856Slling 	else
158990b4856Slling 		src = ZPROP_SRC_LOCAL;
159990b4856Slling 	if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
160990b4856Slling 	    version, src))
161990b4856Slling 		return (err);
162990b4856Slling 
163990b4856Slling 	if (spa->spa_root != NULL) {
164990b4856Slling 		src = ZPROP_SRC_LOCAL;
165990b4856Slling 		if (err = spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT,
166990b4856Slling 		    spa->spa_root, 0, src))
167990b4856Slling 			return (err);
168990b4856Slling 	}
169990b4856Slling 
1702f8aaab3Seschrock 	if (spa->spa_config_dir != NULL) {
1712f8aaab3Seschrock 		if (strcmp(spa->spa_config_dir, "none") == 0) {
1722f8aaab3Seschrock 			err = spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
1732f8aaab3Seschrock 			    spa->spa_config_dir, 0, ZPROP_SRC_LOCAL);
1742f8aaab3Seschrock 		} else {
1752f8aaab3Seschrock 			len = strlen(spa->spa_config_dir) +
1762f8aaab3Seschrock 			    strlen(spa->spa_config_file) + 2;
1772f8aaab3Seschrock 			cachefile = kmem_alloc(len, KM_SLEEP);
1782f8aaab3Seschrock 			(void) snprintf(cachefile, len, "%s/%s",
1792f8aaab3Seschrock 			    spa->spa_config_dir, spa->spa_config_file);
1802f8aaab3Seschrock 			err = spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
1812f8aaab3Seschrock 			    cachefile, 0, ZPROP_SRC_LOCAL);
1822f8aaab3Seschrock 			kmem_free(cachefile, len);
1832f8aaab3Seschrock 		}
1842f8aaab3Seschrock 
1852f8aaab3Seschrock 		if (err)
1862f8aaab3Seschrock 			return (err);
1872f8aaab3Seschrock 	}
188990b4856Slling 
189990b4856Slling 	return (0);
190990b4856Slling }
191990b4856Slling 
/*
 * Get zpool property values.
 *
 * Allocates a new nvlist in *nvp and fills it with every pool property:
 * first the in-core config properties (spa_prop_get_config()), then the
 * persistent ones stored in the MOS pool property object, if that object
 * exists.  On failure the nvlist is freed and the error returned; having
 * no property object is not an error.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	if (err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP))
		return (err);

	/*
	 * Get properties from the spa config.
	 */
	if (err = spa_prop_get_config(spa, nvp))
		goto out;

	mutex_enter(&spa->spa_props_lock);
	/* If no pool property object, no more prop to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		/* Skip ZAP entries that do not name a known pool property. */
		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				/*
				 * bootfs is stored as a dataset object
				 * number; translate it back to a name
				 * under the DSL config lock.
				 */
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_open_obj(dp,
				    za.za_first_integer, NULL, DS_MODE_NONE,
				    FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			err = spa_prop_add_list(*nvp, prop, strval,
			    intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			err = spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	/* ENOENT from the cursor just means we walked off the end. */
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		return (err);
	}

	return (0);
}
300990b4856Slling 
301990b4856Slling /*
302990b4856Slling  * Validate the given pool properties nvlist and modify the list
303990b4856Slling  * for the property values to be set.
304990b4856Slling  */
305990b4856Slling static int
306990b4856Slling spa_prop_validate(spa_t *spa, nvlist_t *props)
307990b4856Slling {
308990b4856Slling 	nvpair_t *elem;
309990b4856Slling 	int error = 0, reset_bootfs = 0;
310990b4856Slling 	uint64_t objnum;
311990b4856Slling 
312990b4856Slling 	elem = NULL;
313990b4856Slling 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
314990b4856Slling 		zpool_prop_t prop;
315990b4856Slling 		char *propname, *strval;
316990b4856Slling 		uint64_t intval;
317990b4856Slling 		vdev_t *rvdev;
318990b4856Slling 		char *vdev_type;
319990b4856Slling 		objset_t *os;
3202f8aaab3Seschrock 		char *slash;
321990b4856Slling 
322990b4856Slling 		propname = nvpair_name(elem);
323990b4856Slling 
324990b4856Slling 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
325990b4856Slling 			return (EINVAL);
326990b4856Slling 
327990b4856Slling 		switch (prop) {
328990b4856Slling 		case ZPOOL_PROP_VERSION:
329990b4856Slling 			error = nvpair_value_uint64(elem, &intval);
330990b4856Slling 			if (!error &&
331990b4856Slling 			    (intval < spa_version(spa) || intval > SPA_VERSION))
332990b4856Slling 				error = EINVAL;
333990b4856Slling 			break;
334990b4856Slling 
335990b4856Slling 		case ZPOOL_PROP_DELEGATION:
336990b4856Slling 		case ZPOOL_PROP_AUTOREPLACE:
337990b4856Slling 			error = nvpair_value_uint64(elem, &intval);
338990b4856Slling 			if (!error && intval > 1)
339990b4856Slling 				error = EINVAL;
340990b4856Slling 			break;
341990b4856Slling 
342990b4856Slling 		case ZPOOL_PROP_BOOTFS:
343990b4856Slling 			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
344990b4856Slling 				error = ENOTSUP;
345990b4856Slling 				break;
346990b4856Slling 			}
347990b4856Slling 
348990b4856Slling 			/*
349990b4856Slling 			 * A bootable filesystem can not be on a RAIDZ pool
350990b4856Slling 			 * nor a striped pool with more than 1 device.
351990b4856Slling 			 */
352990b4856Slling 			rvdev = spa->spa_root_vdev;
353990b4856Slling 			vdev_type =
354990b4856Slling 			    rvdev->vdev_child[0]->vdev_ops->vdev_op_type;
355990b4856Slling 			if (rvdev->vdev_children > 1 ||
356990b4856Slling 			    strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
357990b4856Slling 			    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
358990b4856Slling 				error = ENOTSUP;
359990b4856Slling 				break;
360990b4856Slling 			}
361990b4856Slling 
362990b4856Slling 			reset_bootfs = 1;
363990b4856Slling 
364990b4856Slling 			error = nvpair_value_string(elem, &strval);
365990b4856Slling 
366990b4856Slling 			if (!error) {
367990b4856Slling 				if (strval == NULL || strval[0] == '\0') {
368990b4856Slling 					objnum = zpool_prop_default_numeric(
369990b4856Slling 					    ZPOOL_PROP_BOOTFS);
370990b4856Slling 					break;
371990b4856Slling 				}
372990b4856Slling 
373990b4856Slling 				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
374990b4856Slling 				    DS_MODE_STANDARD | DS_MODE_READONLY, &os))
375990b4856Slling 					break;
376990b4856Slling 				objnum = dmu_objset_id(os);
377990b4856Slling 				dmu_objset_close(os);
378990b4856Slling 			}
379990b4856Slling 			break;
3800a4e9518Sgw 		case ZPOOL_PROP_FAILUREMODE:
3810a4e9518Sgw 			error = nvpair_value_uint64(elem, &intval);
3820a4e9518Sgw 			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
3830a4e9518Sgw 			    intval > ZIO_FAILURE_MODE_PANIC))
3840a4e9518Sgw 				error = EINVAL;
3850a4e9518Sgw 
3860a4e9518Sgw 			/*
3870a4e9518Sgw 			 * This is a special case which only occurs when
3880a4e9518Sgw 			 * the pool has completely failed. This allows
3890a4e9518Sgw 			 * the user to change the in-core failmode property
3900a4e9518Sgw 			 * without syncing it out to disk (I/Os might
3910a4e9518Sgw 			 * currently be blocked). We do this by returning
3920a4e9518Sgw 			 * EIO to the caller (spa_prop_set) to trick it
3930a4e9518Sgw 			 * into thinking we encountered a property validation
3940a4e9518Sgw 			 * error.
3950a4e9518Sgw 			 */
3960a4e9518Sgw 			if (!error && spa_state(spa) == POOL_STATE_IO_FAILURE) {
3970a4e9518Sgw 				spa->spa_failmode = intval;
3980a4e9518Sgw 				error = EIO;
3990a4e9518Sgw 			}
4000a4e9518Sgw 			break;
4012f8aaab3Seschrock 
4022f8aaab3Seschrock 		case ZPOOL_PROP_CACHEFILE:
4032f8aaab3Seschrock 			if ((error = nvpair_value_string(elem, &strval)) != 0)
4042f8aaab3Seschrock 				break;
4052f8aaab3Seschrock 
4062f8aaab3Seschrock 			if (strval[0] == '\0')
4072f8aaab3Seschrock 				break;
4082f8aaab3Seschrock 
4092f8aaab3Seschrock 			if (strcmp(strval, "none") == 0)
4102f8aaab3Seschrock 				break;
4112f8aaab3Seschrock 
4122f8aaab3Seschrock 			if (strval[0] != '/') {
4132f8aaab3Seschrock 				error = EINVAL;
4142f8aaab3Seschrock 				break;
4152f8aaab3Seschrock 			}
4162f8aaab3Seschrock 
4172f8aaab3Seschrock 			slash = strrchr(strval, '/');
4182f8aaab3Seschrock 			ASSERT(slash != NULL);
4192f8aaab3Seschrock 
4202f8aaab3Seschrock 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
4212f8aaab3Seschrock 			    strcmp(slash, "/..") == 0)
4222f8aaab3Seschrock 				error = EINVAL;
4232f8aaab3Seschrock 			break;
424990b4856Slling 		}
425990b4856Slling 
426990b4856Slling 		if (error)
427990b4856Slling 			break;
428990b4856Slling 	}
429990b4856Slling 
430990b4856Slling 	if (!error && reset_bootfs) {
431990b4856Slling 		error = nvlist_remove(props,
432990b4856Slling 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
433990b4856Slling 
434990b4856Slling 		if (!error) {
435990b4856Slling 			error = nvlist_add_uint64(props,
436990b4856Slling 			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
437990b4856Slling 		}
438990b4856Slling 	}
439990b4856Slling 
440990b4856Slling 	return (error);
441990b4856Slling }
442990b4856Slling 
443990b4856Slling int
444990b4856Slling spa_prop_set(spa_t *spa, nvlist_t *nvp)
445990b4856Slling {
446990b4856Slling 	int error;
447990b4856Slling 
448990b4856Slling 	if ((error = spa_prop_validate(spa, nvp)) != 0)
449990b4856Slling 		return (error);
450990b4856Slling 
451990b4856Slling 	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
452990b4856Slling 	    spa, nvp, 3));
453990b4856Slling }
454990b4856Slling 
455990b4856Slling /*
456990b4856Slling  * If the bootfs property value is dsobj, clear it.
457990b4856Slling  */
458990b4856Slling void
459990b4856Slling spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
460990b4856Slling {
461990b4856Slling 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
462990b4856Slling 		VERIFY(zap_remove(spa->spa_meta_objset,
463990b4856Slling 		    spa->spa_pool_props_object,
464990b4856Slling 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
465990b4856Slling 		spa->spa_bootfs = 0;
466990b4856Slling 	}
467990b4856Slling }
468990b4856Slling 
469fa9e4066Sahrens /*
470fa9e4066Sahrens  * ==========================================================================
471fa9e4066Sahrens  * SPA state manipulation (open/create/destroy/import/export)
472fa9e4066Sahrens  * ==========================================================================
473fa9e4066Sahrens  */
474fa9e4066Sahrens 
475ea8dc4b6Seschrock static int
476ea8dc4b6Seschrock spa_error_entry_compare(const void *a, const void *b)
477ea8dc4b6Seschrock {
478ea8dc4b6Seschrock 	spa_error_entry_t *sa = (spa_error_entry_t *)a;
479ea8dc4b6Seschrock 	spa_error_entry_t *sb = (spa_error_entry_t *)b;
480ea8dc4b6Seschrock 	int ret;
481ea8dc4b6Seschrock 
482ea8dc4b6Seschrock 	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
483ea8dc4b6Seschrock 	    sizeof (zbookmark_t));
484ea8dc4b6Seschrock 
485ea8dc4b6Seschrock 	if (ret < 0)
486ea8dc4b6Seschrock 		return (-1);
487ea8dc4b6Seschrock 	else if (ret > 0)
488ea8dc4b6Seschrock 		return (1);
489ea8dc4b6Seschrock 	else
490ea8dc4b6Seschrock 		return (0);
491ea8dc4b6Seschrock }
492ea8dc4b6Seschrock 
493ea8dc4b6Seschrock /*
494ea8dc4b6Seschrock  * Utility function which retrieves copies of the current logs and
495ea8dc4b6Seschrock  * re-initializes them in the process.
496ea8dc4b6Seschrock  */
497ea8dc4b6Seschrock void
498ea8dc4b6Seschrock spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
499ea8dc4b6Seschrock {
500ea8dc4b6Seschrock 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
501ea8dc4b6Seschrock 
502ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
503ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
504ea8dc4b6Seschrock 
505ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_scrub,
506ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
507ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
508ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_last,
509ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
510ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
511ea8dc4b6Seschrock }
512ea8dc4b6Seschrock 
/*
 * Activate an uninitialized pool.
 *
 * Transitions the spa from POOL_STATE_UNINITIALIZED to POOL_STATE_ACTIVE:
 * creates the metaslab classes, the per-zio-type issue/interrupt task
 * queues, the dirty-vdev and zio lists, the per-txg vdev list, and the
 * two error-bookmark AVL trees.  Undone by spa_deactivate().
 */
static void
spa_activate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	/* Separate allocation classes for normal data and the intent log. */
	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (t = 0; t < ZIO_TYPES; t++) {
		/* One issue taskq and one interrupt taskq per zio type. */
		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
	}

	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_dirty_node));
	list_create(&spa->spa_zio_list, sizeof (zio_t),
	    offsetof(zio_t, zio_link_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	/* Error lists are ordered by bookmark; see spa_error_entry_compare. */
	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
552fa9e4066Sahrens 
/*
 * Opposite of spa_activate().
 *
 * May only be called once syncing has stopped and the DSL pool and root
 * vdev have been torn down; destroys everything spa_activate() created
 * and returns the spa to POOL_STATE_UNINITIALIZED.
 */
static void
spa_deactivate(spa_t *spa)
{
	int t;

	/* The pool must already be quiesced and unloaded. */
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_dirty_list);
	list_destroy(&spa->spa_zio_list);

	for (t = 0; t < ZIO_TYPES; t++) {
		taskq_destroy(spa->spa_zio_issue_taskq[t]);
		taskq_destroy(spa->spa_zio_intr_taskq[t]);
		spa->spa_zio_issue_taskq[t] = NULL;
		spa->spa_zio_intr_taskq[t] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}
596fa9e4066Sahrens 
/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 *
 * Recurses over the ZPOOL_CONFIG_CHILDREN array of 'nv'.  On any
 * failure the partially built subtree rooted at *vdp is freed and *vdp
 * is set to NULL before the error is returned.  'atype' is the vdev
 * allocation type passed through to vdev_alloc().
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	/* Leaf vdevs have no children; nothing more to parse. */
	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	/* An interior vdev without a children array is malformed. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		/* Recurse; vdev_alloc() links vd under *vdp for us. */
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
638fa9e4066Sahrens 
/*
 * Opposite of spa_load().
 *
 * Tears down the in-core pool state: stops async tasks and txg syncing,
 * drains in-flight I/O via an exclusive config-lock pass, drops the
 * level 2 cache, closes the DSL pool, frees the vdev tree, and releases
 * the spare and l2cache auxiliary vdev arrays and their saved configs.
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 * Taking and dropping the config lock as writer drains readers.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/* Free the spare vdevs, their pointer array, and saved config. */
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}

	/* Likewise for the level 2 cache devices. */
	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}

	/* Allow async tasks to run again on the next activation. */
	spa->spa_async_suspended = 0;
}
712fa9e4066Sahrens 
/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 *
 * Callers hold the spa config lock as writer (RW_WRITER) around this call,
 * since it tears down and rebuilds spa_spares.sav_vdevs in place.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;	/* nvlist array extracted from sav_config */
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL &&
		    tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	/* A missing sav_config simply means the pool has no spares. */
	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		/*
		 * Spares that fail to open are kept in sav_vdevs (so they
		 * still appear in the config) but are not registered below.
		 */
		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
822fa94a07fSbrendan 
/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;	/* nvlist array; only valid if sav_config set */
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		/*
		 * No cache devices configured.  l2cache/newvdevs stay
		 * unused: nl2cache is 0 so the loop below never runs, and
		 * the 'goto out' path frees nothing since sav_count is 0.
		 */
		nl2cache = 0;
	}

	/* Detach the previous device list so we can reconcile against it. */
	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 * NULL the old slot so the purge loop below
				 * doesn't tear it down.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			if (vdev_open(vd) != 0)
				continue;

			vd->vdev_top = vd;
			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				uint64_t size;
				size = vdev_get_rsize(vd);
				ASSERT3U(size, >, 0);
				/* Only writers hand the device to the ARC. */
				if (spa_mode & FWRITE) {
					l2arc_add_vdev(spa, vd,
					    VDEV_LABEL_START_SIZE,
					    size - VDEV_LABEL_START_SIZE);
				}
				spa_l2cache_activate(vd);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_mode & FWRITE &&
			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL) {
				l2arc_remove_vdev(vd);
			}
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	/* On the goto path sav_count is 0, so this frees nothing. */
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
95599653d4eSeschrock 
95699653d4eSeschrock static int
95799653d4eSeschrock load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
95899653d4eSeschrock {
95999653d4eSeschrock 	dmu_buf_t *db;
96099653d4eSeschrock 	char *packed = NULL;
96199653d4eSeschrock 	size_t nvsize = 0;
96299653d4eSeschrock 	int error;
96399653d4eSeschrock 	*value = NULL;
96499653d4eSeschrock 
96599653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
96699653d4eSeschrock 	nvsize = *(uint64_t *)db->db_data;
96799653d4eSeschrock 	dmu_buf_rele(db, FTAG);
96899653d4eSeschrock 
96999653d4eSeschrock 	packed = kmem_alloc(nvsize, KM_SLEEP);
97099653d4eSeschrock 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
97199653d4eSeschrock 	if (error == 0)
97299653d4eSeschrock 		error = nvlist_unpack(packed, nvsize, value, 0);
97399653d4eSeschrock 	kmem_free(packed, nvsize);
97499653d4eSeschrock 
97599653d4eSeschrock 	return (error);
97699653d4eSeschrock }
97799653d4eSeschrock 
9783d7072f8Seschrock /*
9793d7072f8Seschrock  * Checks to see if the given vdev could not be opened, in which case we post a
9803d7072f8Seschrock  * sysevent to notify the autoreplace code that the device has been removed.
9813d7072f8Seschrock  */
9823d7072f8Seschrock static void
9833d7072f8Seschrock spa_check_removed(vdev_t *vd)
9843d7072f8Seschrock {
9853d7072f8Seschrock 	int c;
9863d7072f8Seschrock 
9873d7072f8Seschrock 	for (c = 0; c < vd->vdev_children; c++)
9883d7072f8Seschrock 		spa_check_removed(vd->vdev_child[c]);
9893d7072f8Seschrock 
9903d7072f8Seschrock 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
9913d7072f8Seschrock 		zfs_post_autoreplace(vd->vdev_spa, vd);
9923d7072f8Seschrock 		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
9933d7072f8Seschrock 	}
9943d7072f8Seschrock }
9953d7072f8Seschrock 
/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 *
 * 'state' distinguishes open vs. import vs. tryimport, and 'mosconfig'
 * says whether 'config' is already the trusted copy from the MOS.  When
 * mosconfig is false, the trusted config is read from the MOS and
 * spa_load() re-enters itself once with mosconfig set.  Returns 0 on
 * success or an errno; on any failure other than EBADF an FMA ereport
 * is posted before returning.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;
	uint64_t autoreplace = 0;

	spa->spa_load_state = state;

	/* A config without a vdev tree or pool guid is unusable. */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	/* Refuse to import a pool that is already active on this system. */
	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	error = vdev_open(rvd);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.  Only enforced once we're working
	 * from the trusted MOS config.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		/* Fetch the authoritative config stored in the MOS. */
		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		/*
		 * Reject the pool if it was last written by a different
		 * host (both hostids known and unequal) to avoid
		 * simultaneous imports from two systems.
		 */
		if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID,
		    &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
			if (hostid != 0 && myhostid != 0 &&
			    (unsigned long)hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx).  "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa->spa_name, hostname,
				    (unsigned long)hostid);
				error = EBADF;
				goto out;
			}
		}

		/*
		 * Restart the load from scratch using the trusted config;
		 * the recursive call runs with mosconfig = B_TRUE.
		 */
		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		/* spa_load_spares() requires the config lock as writer. */
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_l2cache(spa);
		spa_config_exit(spa, FTAG);
	}

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	/*
	 * Load pool properties; older pools won't have a props object,
	 * so ENOENT is fine and each property keeps its default.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices.  We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}
1414fa9e4066Sahrens 
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache.  For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time open the pool, without having to keep around the spa_t in some
 * ambiguous state.
 */

/*
 * Common pool-open routine shared by spa_open() and spa_get_stats().
 *
 * On success, places a reference (held with 'tag', released via spa_close())
 * in *spapp and, if 'config' is non-NULL, a freshly generated config nvlist
 * in *config.  Returns 0 on success, ENOENT if the pool does not exist (or
 * proves to have been exported/destroyed), or the errno from spa_load().
 * On failure with 'config' non-NULL, *config may still receive the per-vdev
 * open state so userland can report what went wrong.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int loaded = B_FALSE;	/* set if this call performed the spa_load() */
	int locked = B_FALSE;	/* set if this call took spa_namespace_lock */

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it indicates that one of the vdevs indicates
			 * that the pool has been exported or destroyed.  If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			zfs_post_ok(spa, NULL);
			spa_unload(spa);
			spa_deactivate(spa);
			spa_remove(spa);
			/* Rewrite the cache file without this pool. */
			spa_config_sync();
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL) {
				spa_config_enter(spa, RW_READER, FTAG);
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
				spa_config_exit(spa, FTAG);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			zfs_post_ok(spa, NULL);
			spa->spa_last_open_failed = B_FALSE;
		}

		loaded = B_TRUE;
	}

	spa_open_ref(spa, tag);

	/*
	 * If we just loaded the pool, resilver anything that's out of date.
	 */
	if (loaded && (spa_mode & FWRITE))
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
	}

	return (0);
}
1525fa9e4066Sahrens 
1526fa9e4066Sahrens int
1527fa9e4066Sahrens spa_open(const char *name, spa_t **spapp, void *tag)
1528fa9e4066Sahrens {
1529fa9e4066Sahrens 	return (spa_open_common(name, spapp, tag, NULL));
1530fa9e4066Sahrens }
1531fa9e4066Sahrens 
1532ea8dc4b6Seschrock /*
1533ea8dc4b6Seschrock  * Lookup the given spa_t, incrementing the inject count in the process,
1534ea8dc4b6Seschrock  * preventing it from being exported or destroyed.
1535ea8dc4b6Seschrock  */
1536ea8dc4b6Seschrock spa_t *
1537ea8dc4b6Seschrock spa_inject_addref(char *name)
1538ea8dc4b6Seschrock {
1539ea8dc4b6Seschrock 	spa_t *spa;
1540ea8dc4b6Seschrock 
1541ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1542ea8dc4b6Seschrock 	if ((spa = spa_lookup(name)) == NULL) {
1543ea8dc4b6Seschrock 		mutex_exit(&spa_namespace_lock);
1544ea8dc4b6Seschrock 		return (NULL);
1545ea8dc4b6Seschrock 	}
1546ea8dc4b6Seschrock 	spa->spa_inject_ref++;
1547ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1548ea8dc4b6Seschrock 
1549ea8dc4b6Seschrock 	return (spa);
1550ea8dc4b6Seschrock }
1551ea8dc4b6Seschrock 
1552ea8dc4b6Seschrock void
1553ea8dc4b6Seschrock spa_inject_delref(spa_t *spa)
1554ea8dc4b6Seschrock {
1555ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1556ea8dc4b6Seschrock 	spa->spa_inject_ref--;
1557ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1558ea8dc4b6Seschrock }
1559ea8dc4b6Seschrock 
/*
 * Add spares device information to the nvlist.
 *
 * Copies the pool's spare array from spa_spares.sav_config into the
 * ZPOOL_CONFIG_VDEV_TREE of 'config', then marks any spare that is
 * currently in use by another pool as CANT_OPEN/SPARED so userland
 * reports it correctly.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	/* Nothing to do for a pool with no spares. */
	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		/*
		 * Re-lookup the array so 'spares' aliases the copies now
		 * owned by 'nvroot'; the edits below must land in the
		 * caller's config, not in sav_config.
		 */
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			/* pool != 0 means the spare is active elsewhere. */
			if (spa_spare_exists(guid, &pool) && pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}
160599653d4eSeschrock 
/*
 * Add l2cache device information to the nvlist, including vdev stats.
 *
 * Copies the pool's l2cache array from spa_l2cache.sav_config into the
 * ZPOOL_CONFIG_VDEV_TREE of 'config' and refreshes each entry's
 * ZPOOL_CONFIG_STATS from the live vdev.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	/* Nothing to do for a pool with no cache devices. */
	if (spa->spa_l2cache.sav_count == 0)
		return;

	/* Hold the config lock so sav_vdevs is stable while we walk it. */
	spa_config_enter(spa, RW_READER, FTAG);

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		/*
		 * Re-lookup so 'l2cache' aliases the copies owned by
		 * 'nvroot'; the stats updates below must land in the
		 * caller's config.
		 */
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */

		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			/*
			 * NOTE(review): ASSERT compiles away in non-DEBUG
			 * builds, so the dereference below relies on every
			 * guid in sav_config having a matching sav_vdevs
			 * entry -- confirm spa_load_l2cache() guarantees this.
			 */
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}

	spa_config_exit(spa, FTAG);
}
1661fa94a07fSbrendan 
1662fa9e4066Sahrens int
1663ea8dc4b6Seschrock spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
1664fa9e4066Sahrens {
1665fa9e4066Sahrens 	int error;
1666fa9e4066Sahrens 	spa_t *spa;
1667fa9e4066Sahrens 
1668fa9e4066Sahrens 	*config = NULL;
1669fa9e4066Sahrens 	error = spa_open_common(name, &spa, FTAG, config);
1670fa9e4066Sahrens 
167199653d4eSeschrock 	if (spa && *config != NULL) {
1672ea8dc4b6Seschrock 		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
1673ea8dc4b6Seschrock 		    spa_get_errlog_size(spa)) == 0);
1674ea8dc4b6Seschrock 
167599653d4eSeschrock 		spa_add_spares(spa, *config);
1676fa94a07fSbrendan 		spa_add_l2cache(spa, *config);
167799653d4eSeschrock 	}
167899653d4eSeschrock 
1679ea8dc4b6Seschrock 	/*
1680ea8dc4b6Seschrock 	 * We want to get the alternate root even for faulted pools, so we cheat
1681ea8dc4b6Seschrock 	 * and call spa_lookup() directly.
1682ea8dc4b6Seschrock 	 */
1683ea8dc4b6Seschrock 	if (altroot) {
1684ea8dc4b6Seschrock 		if (spa == NULL) {
1685ea8dc4b6Seschrock 			mutex_enter(&spa_namespace_lock);
1686ea8dc4b6Seschrock 			spa = spa_lookup(name);
1687ea8dc4b6Seschrock 			if (spa)
1688ea8dc4b6Seschrock 				spa_altroot(spa, altroot, buflen);
1689ea8dc4b6Seschrock 			else
1690ea8dc4b6Seschrock 				altroot[0] = '\0';
1691ea8dc4b6Seschrock 			spa = NULL;
1692ea8dc4b6Seschrock 			mutex_exit(&spa_namespace_lock);
1693ea8dc4b6Seschrock 		} else {
1694ea8dc4b6Seschrock 			spa_altroot(spa, altroot, buflen);
1695ea8dc4b6Seschrock 		}
1696ea8dc4b6Seschrock 	}
1697ea8dc4b6Seschrock 
1698fa9e4066Sahrens 	if (spa != NULL)
1699fa9e4066Sahrens 		spa_close(spa, FTAG);
1700fa9e4066Sahrens 
1701fa9e4066Sahrens 	return (error);
1702fa9e4066Sahrens }
1703fa9e4066Sahrens 
/*
 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each which describes a valid leaf vdev.  If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 *
 * 'config' names the nvlist array to validate (ZPOOL_CONFIG_SPARES or
 * ZPOOL_CONFIG_L2CACHE), 'version' is the minimum pool version that
 * supports the device type, and 'label' the label type to initialize.
 * Returns 0 on success or EINVAL/ENOTSUP/ENOTBLK/errno on failure.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	/* An explicitly empty array, however, is malformed. */
	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		/* Auxiliary devices must be leaves (no mirrors/raidz). */
		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices.
		 */
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}

		vd->vdev_top = vd;

		/*
		 * Open the device and write its label; on success, record
		 * the resulting guid back into the caller's nvlist.
		 */
		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		/*
		 * Open/label failures are tolerated (reset to 0) when
		 * importing, since spares/caches may be damaged but still
		 * well-formed.
		 */
		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	/* Always clear the pending list, even on error. */
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}
178599653d4eSeschrock 
1786fa94a07fSbrendan static int
1787fa94a07fSbrendan spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1788fa94a07fSbrendan {
1789fa94a07fSbrendan 	int error;
1790fa94a07fSbrendan 
1791fa94a07fSbrendan 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1792fa94a07fSbrendan 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
1793fa94a07fSbrendan 	    VDEV_LABEL_SPARE)) != 0) {
1794fa94a07fSbrendan 		return (error);
1795fa94a07fSbrendan 	}
1796fa94a07fSbrendan 
1797fa94a07fSbrendan 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1798fa94a07fSbrendan 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
1799fa94a07fSbrendan 	    VDEV_LABEL_L2CACHE));
1800fa94a07fSbrendan }
1801fa94a07fSbrendan 
/*
 * Install 'devs' as the 'config' array (ZPOOL_CONFIG_SPARES or
 * ZPOOL_CONFIG_L2CACHE) of sav->sav_config, concatenating with any
 * devices already present.  The entries in 'devs' are duplicated; the
 * caller retains ownership of its nvlists.
 */
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		/* Old devices first, then the caller's new ones. */
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		/*
		 * Remove the old array (freeing olddevs) before adding the
		 * combined one under the same name.
		 */
		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		/* nvlist_add copied the entries; free our temporaries. */
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}
1847fa94a07fSbrendan 
1848fa94a07fSbrendan /*
1849fa94a07fSbrendan  * Stop and drop level 2 ARC devices
1850fa94a07fSbrendan  */
1851fa94a07fSbrendan void
1852fa94a07fSbrendan spa_l2cache_drop(spa_t *spa)
1853fa94a07fSbrendan {
1854fa94a07fSbrendan 	vdev_t *vd;
1855fa94a07fSbrendan 	int i;
1856fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1857fa94a07fSbrendan 
1858fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++) {
1859fa94a07fSbrendan 		uint64_t pool;
1860fa94a07fSbrendan 
1861fa94a07fSbrendan 		vd = sav->sav_vdevs[i];
1862fa94a07fSbrendan 		ASSERT(vd != NULL);
1863fa94a07fSbrendan 
1864fa94a07fSbrendan 		if (spa_mode & FWRITE &&
1865fa94a07fSbrendan 		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL) {
1866fa94a07fSbrendan 			l2arc_remove_vdev(vd);
1867fa94a07fSbrendan 		}
1868fa94a07fSbrendan 		if (vd->vdev_isl2cache)
1869fa94a07fSbrendan 			spa_l2cache_remove(vd);
1870fa94a07fSbrendan 		vdev_clear_stats(vd);
1871fa94a07fSbrendan 		(void) vdev_close(vd);
1872fa94a07fSbrendan 	}
1873fa94a07fSbrendan }
1874fa94a07fSbrendan 
1875fa9e4066Sahrens /*
1876fa9e4066Sahrens  * Pool Creation
1877fa9e4066Sahrens  */
1878fa9e4066Sahrens int
1879990b4856Slling spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
1880228975ccSek     const char *history_str)
1881fa9e4066Sahrens {
1882fa9e4066Sahrens 	spa_t *spa;
1883990b4856Slling 	char *altroot = NULL;
18840373e76bSbonwick 	vdev_t *rvd;
1885fa9e4066Sahrens 	dsl_pool_t *dp;
1886fa9e4066Sahrens 	dmu_tx_t *tx;
188799653d4eSeschrock 	int c, error = 0;
1888fa9e4066Sahrens 	uint64_t txg = TXG_INITIAL;
1889fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
1890fa94a07fSbrendan 	uint_t nspares, nl2cache;
1891990b4856Slling 	uint64_t version;
1892fa9e4066Sahrens 
1893fa9e4066Sahrens 	/*
1894fa9e4066Sahrens 	 * If this pool already exists, return failure.
1895fa9e4066Sahrens 	 */
1896fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
1897fa9e4066Sahrens 	if (spa_lookup(pool) != NULL) {
1898fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1899fa9e4066Sahrens 		return (EEXIST);
1900fa9e4066Sahrens 	}
1901fa9e4066Sahrens 
1902fa9e4066Sahrens 	/*
1903fa9e4066Sahrens 	 * Allocate a new spa_t structure.
1904fa9e4066Sahrens 	 */
1905990b4856Slling 	(void) nvlist_lookup_string(props,
1906990b4856Slling 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
19070373e76bSbonwick 	spa = spa_add(pool, altroot);
1908fa9e4066Sahrens 	spa_activate(spa);
1909fa9e4066Sahrens 
1910fa9e4066Sahrens 	spa->spa_uberblock.ub_txg = txg - 1;
1911990b4856Slling 
1912990b4856Slling 	if (props && (error = spa_prop_validate(spa, props))) {
1913990b4856Slling 		spa_unload(spa);
1914990b4856Slling 		spa_deactivate(spa);
1915990b4856Slling 		spa_remove(spa);
1916990b4856Slling 		return (error);
1917990b4856Slling 	}
1918990b4856Slling 
1919990b4856Slling 	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
1920990b4856Slling 	    &version) != 0)
1921990b4856Slling 		version = SPA_VERSION;
1922990b4856Slling 	ASSERT(version <= SPA_VERSION);
1923990b4856Slling 	spa->spa_uberblock.ub_version = version;
1924fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
1925fa9e4066Sahrens 
19260373e76bSbonwick 	/*
19270373e76bSbonwick 	 * Create the root vdev.
19280373e76bSbonwick 	 */
19290373e76bSbonwick 	spa_config_enter(spa, RW_WRITER, FTAG);
19300373e76bSbonwick 
193199653d4eSeschrock 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
19320373e76bSbonwick 
193399653d4eSeschrock 	ASSERT(error != 0 || rvd != NULL);
193499653d4eSeschrock 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
19350373e76bSbonwick 
1936*b7b97454Sperrin 	if (error == 0 && !zfs_allocatable_devs(nvroot))
19370373e76bSbonwick 		error = EINVAL;
193899653d4eSeschrock 
193999653d4eSeschrock 	if (error == 0 &&
194099653d4eSeschrock 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
1941fa94a07fSbrendan 	    (error = spa_validate_aux(spa, nvroot, txg,
194299653d4eSeschrock 	    VDEV_ALLOC_ADD)) == 0) {
194399653d4eSeschrock 		for (c = 0; c < rvd->vdev_children; c++)
194499653d4eSeschrock 			vdev_init(rvd->vdev_child[c], txg);
194599653d4eSeschrock 		vdev_config_dirty(rvd);
19460373e76bSbonwick 	}
19470373e76bSbonwick 
19480373e76bSbonwick 	spa_config_exit(spa, FTAG);
1949fa9e4066Sahrens 
195099653d4eSeschrock 	if (error != 0) {
1951fa9e4066Sahrens 		spa_unload(spa);
1952fa9e4066Sahrens 		spa_deactivate(spa);
1953fa9e4066Sahrens 		spa_remove(spa);
1954fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1955fa9e4066Sahrens 		return (error);
1956fa9e4066Sahrens 	}
1957fa9e4066Sahrens 
195899653d4eSeschrock 	/*
195999653d4eSeschrock 	 * Get the list of spares, if specified.
196099653d4eSeschrock 	 */
196199653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
196299653d4eSeschrock 	    &spares, &nspares) == 0) {
1963fa94a07fSbrendan 		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
196499653d4eSeschrock 		    KM_SLEEP) == 0);
1965fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
196699653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
196799653d4eSeschrock 		spa_config_enter(spa, RW_WRITER, FTAG);
196899653d4eSeschrock 		spa_load_spares(spa);
196999653d4eSeschrock 		spa_config_exit(spa, FTAG);
1970fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
1971fa94a07fSbrendan 	}
1972fa94a07fSbrendan 
1973fa94a07fSbrendan 	/*
1974fa94a07fSbrendan 	 * Get the list of level 2 cache devices, if specified.
1975fa94a07fSbrendan 	 */
1976fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1977fa94a07fSbrendan 	    &l2cache, &nl2cache) == 0) {
1978fa94a07fSbrendan 		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
1979fa94a07fSbrendan 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
1980fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
1981fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1982fa94a07fSbrendan 		spa_config_enter(spa, RW_WRITER, FTAG);
1983fa94a07fSbrendan 		spa_load_l2cache(spa);
1984fa94a07fSbrendan 		spa_config_exit(spa, FTAG);
1985fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
198699653d4eSeschrock 	}
198799653d4eSeschrock 
1988fa9e4066Sahrens 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
1989fa9e4066Sahrens 	spa->spa_meta_objset = dp->dp_meta_objset;
1990fa9e4066Sahrens 
1991fa9e4066Sahrens 	tx = dmu_tx_create_assigned(dp, txg);
1992fa9e4066Sahrens 
1993fa9e4066Sahrens 	/*
1994fa9e4066Sahrens 	 * Create the pool config object.
1995fa9e4066Sahrens 	 */
1996fa9e4066Sahrens 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
1997fa9e4066Sahrens 	    DMU_OT_PACKED_NVLIST, 1 << 14,
1998fa9e4066Sahrens 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
1999fa9e4066Sahrens 
2000ea8dc4b6Seschrock 	if (zap_add(spa->spa_meta_objset,
2001fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
2002ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
2003ea8dc4b6Seschrock 		cmn_err(CE_PANIC, "failed to add pool config");
2004ea8dc4b6Seschrock 	}
2005fa9e4066Sahrens 
2006990b4856Slling 	/* Newly created pools with the right version are always deflated. */
2007990b4856Slling 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
2008990b4856Slling 		spa->spa_deflate = TRUE;
2009990b4856Slling 		if (zap_add(spa->spa_meta_objset,
2010990b4856Slling 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
2011990b4856Slling 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
2012990b4856Slling 			cmn_err(CE_PANIC, "failed to add deflate");
2013990b4856Slling 		}
201499653d4eSeschrock 	}
201599653d4eSeschrock 
2016fa9e4066Sahrens 	/*
2017fa9e4066Sahrens 	 * Create the deferred-free bplist object.  Turn off compression
2018fa9e4066Sahrens 	 * because sync-to-convergence takes longer if the blocksize
2019fa9e4066Sahrens 	 * keeps changing.
2020fa9e4066Sahrens 	 */
2021fa9e4066Sahrens 	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
2022fa9e4066Sahrens 	    1 << 14, tx);
2023fa9e4066Sahrens 	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
2024fa9e4066Sahrens 	    ZIO_COMPRESS_OFF, tx);
2025fa9e4066Sahrens 
2026ea8dc4b6Seschrock 	if (zap_add(spa->spa_meta_objset,
2027fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
2028ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
2029ea8dc4b6Seschrock 		cmn_err(CE_PANIC, "failed to add bplist");
2030ea8dc4b6Seschrock 	}
2031fa9e4066Sahrens 
203206eeb2adSek 	/*
203306eeb2adSek 	 * Create the pool's history object.
203406eeb2adSek 	 */
2035990b4856Slling 	if (version >= SPA_VERSION_ZPOOL_HISTORY)
2036990b4856Slling 		spa_history_create_obj(spa, tx);
2037990b4856Slling 
2038990b4856Slling 	/*
2039990b4856Slling 	 * Set pool properties.
2040990b4856Slling 	 */
2041990b4856Slling 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
2042990b4856Slling 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
20430a4e9518Sgw 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
2044990b4856Slling 	if (props)
2045990b4856Slling 		spa_sync_props(spa, props, CRED(), tx);
204606eeb2adSek 
2047fa9e4066Sahrens 	dmu_tx_commit(tx);
2048fa9e4066Sahrens 
2049fa9e4066Sahrens 	spa->spa_sync_on = B_TRUE;
2050fa9e4066Sahrens 	txg_sync_start(spa->spa_dsl_pool);
2051fa9e4066Sahrens 
2052fa9e4066Sahrens 	/*
2053fa9e4066Sahrens 	 * We explicitly wait for the first transaction to complete so that our
2054fa9e4066Sahrens 	 * bean counters are appropriately updated.
2055fa9e4066Sahrens 	 */
2056fa9e4066Sahrens 	txg_wait_synced(spa->spa_dsl_pool, txg);
2057fa9e4066Sahrens 
2058fa9e4066Sahrens 	spa_config_sync();
2059fa9e4066Sahrens 
2060990b4856Slling 	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
2061228975ccSek 		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
2062228975ccSek 
2063fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
2064fa9e4066Sahrens 
2065fa9e4066Sahrens 	return (0);
2066fa9e4066Sahrens }
2067fa9e4066Sahrens 
2068fa9e4066Sahrens /*
2069fa9e4066Sahrens  * Import the given pool into the system.  We set up the necessary spa_t and
2070fa9e4066Sahrens  * then call spa_load() to do the dirty work.
2071fa9e4066Sahrens  */
2072fa9e4066Sahrens int
2073990b4856Slling spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2074fa9e4066Sahrens {
2075fa9e4066Sahrens 	spa_t *spa;
2076990b4856Slling 	char *altroot = NULL;
2077fa9e4066Sahrens 	int error;
207899653d4eSeschrock 	nvlist_t *nvroot;
2079fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
2080fa94a07fSbrendan 	uint_t nspares, nl2cache;
2081fa9e4066Sahrens 
2082fa9e4066Sahrens 	/*
2083fa9e4066Sahrens 	 * If a pool with this name exists, return failure.
2084fa9e4066Sahrens 	 */
2085fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
2086fa9e4066Sahrens 	if (spa_lookup(pool) != NULL) {
2087fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2088fa9e4066Sahrens 		return (EEXIST);
2089fa9e4066Sahrens 	}
2090fa9e4066Sahrens 
2091fa9e4066Sahrens 	/*
20920373e76bSbonwick 	 * Create and initialize the spa structure.
2093fa9e4066Sahrens 	 */
2094990b4856Slling 	(void) nvlist_lookup_string(props,
2095990b4856Slling 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
20960373e76bSbonwick 	spa = spa_add(pool, altroot);
2097fa9e4066Sahrens 	spa_activate(spa);
2098fa9e4066Sahrens 
20995dabedeeSbonwick 	/*
21000373e76bSbonwick 	 * Pass off the heavy lifting to spa_load().
2101ecc2d604Sbonwick 	 * Pass TRUE for mosconfig because the user-supplied config
2102ecc2d604Sbonwick 	 * is actually the one to trust when doing an import.
21035dabedeeSbonwick 	 */
2104ecc2d604Sbonwick 	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);
2105fa9e4066Sahrens 
210699653d4eSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
210799653d4eSeschrock 	/*
210899653d4eSeschrock 	 * Toss any existing sparelist, as it doesn't have any validity anymore,
210999653d4eSeschrock 	 * and conflicts with spa_has_spare().
211099653d4eSeschrock 	 */
2111fa94a07fSbrendan 	if (spa->spa_spares.sav_config) {
2112fa94a07fSbrendan 		nvlist_free(spa->spa_spares.sav_config);
2113fa94a07fSbrendan 		spa->spa_spares.sav_config = NULL;
211499653d4eSeschrock 		spa_load_spares(spa);
211599653d4eSeschrock 	}
2116fa94a07fSbrendan 	if (spa->spa_l2cache.sav_config) {
2117fa94a07fSbrendan 		nvlist_free(spa->spa_l2cache.sav_config);
2118fa94a07fSbrendan 		spa->spa_l2cache.sav_config = NULL;
2119fa94a07fSbrendan 		spa_load_l2cache(spa);
2120fa94a07fSbrendan 	}
212199653d4eSeschrock 
212299653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
212399653d4eSeschrock 	    &nvroot) == 0);
2124fa94a07fSbrendan 	if (error == 0)
2125fa94a07fSbrendan 		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
2126fa94a07fSbrendan 	if (error == 0)
2127fa94a07fSbrendan 		error = spa_validate_aux(spa, nvroot, -1ULL,
2128fa94a07fSbrendan 		    VDEV_ALLOC_L2CACHE);
212999653d4eSeschrock 	spa_config_exit(spa, FTAG);
213099653d4eSeschrock 
2131990b4856Slling 	if (error != 0 || (props && (error = spa_prop_set(spa, props)))) {
2132fa9e4066Sahrens 		spa_unload(spa);
2133fa9e4066Sahrens 		spa_deactivate(spa);
2134fa9e4066Sahrens 		spa_remove(spa);
2135fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2136fa9e4066Sahrens 		return (error);
2137fa9e4066Sahrens 	}
2138fa9e4066Sahrens 
213999653d4eSeschrock 	/*
2140fa94a07fSbrendan 	 * Override any spares and level 2 cache devices as specified by
2141fa94a07fSbrendan 	 * the user, as these may have correct device names/devids, etc.
214299653d4eSeschrock 	 */
214399653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
214499653d4eSeschrock 	    &spares, &nspares) == 0) {
2145fa94a07fSbrendan 		if (spa->spa_spares.sav_config)
2146fa94a07fSbrendan 			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
214799653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
214899653d4eSeschrock 		else
2149fa94a07fSbrendan 			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
215099653d4eSeschrock 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2151fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
215299653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
215399653d4eSeschrock 		spa_config_enter(spa, RW_WRITER, FTAG);
215499653d4eSeschrock 		spa_load_spares(spa);
215599653d4eSeschrock 		spa_config_exit(spa, FTAG);
2156fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
2157fa94a07fSbrendan 	}
2158fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2159fa94a07fSbrendan 	    &l2cache, &nl2cache) == 0) {
2160fa94a07fSbrendan 		if (spa->spa_l2cache.sav_config)
2161fa94a07fSbrendan 			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
2162fa94a07fSbrendan 			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
2163fa94a07fSbrendan 		else
2164fa94a07fSbrendan 			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2165fa94a07fSbrendan 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2166fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2167fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2168fa94a07fSbrendan 		spa_config_enter(spa, RW_WRITER, FTAG);
2169fa94a07fSbrendan 		spa_load_l2cache(spa);
2170fa94a07fSbrendan 		spa_config_exit(spa, FTAG);
2171fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
217299653d4eSeschrock 	}
217399653d4eSeschrock 
21740373e76bSbonwick 	/*
21750373e76bSbonwick 	 * Update the config cache to include the newly-imported pool.
21760373e76bSbonwick 	 */
2177de6628f0Sck 	if (spa_mode & FWRITE)
2178de6628f0Sck 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
21790373e76bSbonwick 
2180fa9e4066Sahrens 	/*
2181fa9e4066Sahrens 	 * Resilver anything that's out of date.
2182fa9e4066Sahrens 	 */
2183fa9e4066Sahrens 	if (spa_mode & FWRITE)
2184fa9e4066Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
2185fa9e4066Sahrens 
21863d7072f8Seschrock 	mutex_exit(&spa_namespace_lock);
21873d7072f8Seschrock 
2188fa9e4066Sahrens 	return (0);
2189fa9e4066Sahrens }
2190fa9e4066Sahrens 
2191fa9e4066Sahrens /*
2192fa9e4066Sahrens  * This (illegal) pool name is used when temporarily importing a spa_t in order
2193fa9e4066Sahrens  * to get the vdev stats associated with the imported devices.
2194fa9e4066Sahrens  */
2195fa9e4066Sahrens #define	TRYIMPORT_NAME	"$import"
2196fa9e4066Sahrens 
2197fa9e4066Sahrens nvlist_t *
2198fa9e4066Sahrens spa_tryimport(nvlist_t *tryconfig)
2199fa9e4066Sahrens {
2200fa9e4066Sahrens 	nvlist_t *config = NULL;
2201fa9e4066Sahrens 	char *poolname;
2202fa9e4066Sahrens 	spa_t *spa;
2203fa9e4066Sahrens 	uint64_t state;
2204fa9e4066Sahrens 
2205fa9e4066Sahrens 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
2206fa9e4066Sahrens 		return (NULL);
2207fa9e4066Sahrens 
2208fa9e4066Sahrens 	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
2209fa9e4066Sahrens 		return (NULL);
2210fa9e4066Sahrens 
2211fa9e4066Sahrens 	/*
22120373e76bSbonwick 	 * Create and initialize the spa structure.
2213fa9e4066Sahrens 	 */
22140373e76bSbonwick 	mutex_enter(&spa_namespace_lock);
22150373e76bSbonwick 	spa = spa_add(TRYIMPORT_NAME, NULL);
2216fa9e4066Sahrens 	spa_activate(spa);
2217fa9e4066Sahrens 
2218fa9e4066Sahrens 	/*
22190373e76bSbonwick 	 * Pass off the heavy lifting to spa_load().
2220ecc2d604Sbonwick 	 * Pass TRUE for mosconfig because the user-supplied config
2221ecc2d604Sbonwick 	 * is actually the one to trust when doing an import.
2222fa9e4066Sahrens 	 */
2223ecc2d604Sbonwick 	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
2224fa9e4066Sahrens 
2225fa9e4066Sahrens 	/*
2226fa9e4066Sahrens 	 * If 'tryconfig' was at least parsable, return the current config.
2227fa9e4066Sahrens 	 */
2228fa9e4066Sahrens 	if (spa->spa_root_vdev != NULL) {
22290373e76bSbonwick 		spa_config_enter(spa, RW_READER, FTAG);
2230fa9e4066Sahrens 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
22310373e76bSbonwick 		spa_config_exit(spa, FTAG);
2232fa9e4066Sahrens 		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
2233fa9e4066Sahrens 		    poolname) == 0);
2234fa9e4066Sahrens 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2235fa9e4066Sahrens 		    state) == 0);
223695173954Sek 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
223795173954Sek 		    spa->spa_uberblock.ub_timestamp) == 0);
223899653d4eSeschrock 
223999653d4eSeschrock 		/*
2240fa94a07fSbrendan 		 * Add the list of hot spares and level 2 cache devices.
224199653d4eSeschrock 		 */
224299653d4eSeschrock 		spa_add_spares(spa, config);
2243fa94a07fSbrendan 		spa_add_l2cache(spa, config);
2244fa9e4066Sahrens 	}
2245fa9e4066Sahrens 
2246fa9e4066Sahrens 	spa_unload(spa);
2247fa9e4066Sahrens 	spa_deactivate(spa);
2248fa9e4066Sahrens 	spa_remove(spa);
2249fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
2250fa9e4066Sahrens 
2251fa9e4066Sahrens 	return (config);
2252fa9e4066Sahrens }
2253fa9e4066Sahrens 
2254fa9e4066Sahrens /*
2255fa9e4066Sahrens  * Pool export/destroy
2256fa9e4066Sahrens  *
2257fa9e4066Sahrens  * The act of destroying or exporting a pool is very simple.  We make sure there
2258fa9e4066Sahrens  * is no more pending I/O and any references to the pool are gone.  Then, we
2259fa9e4066Sahrens  * update the pool state and sync all the labels to disk, removing the
2260fa9e4066Sahrens  * configuration from the cache afterwards.
2261fa9e4066Sahrens  */
2262fa9e4066Sahrens static int
226344cd46caSbillm spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
2264fa9e4066Sahrens {
2265fa9e4066Sahrens 	spa_t *spa;
2266fa9e4066Sahrens 
226744cd46caSbillm 	if (oldconfig)
226844cd46caSbillm 		*oldconfig = NULL;
226944cd46caSbillm 
2270fa9e4066Sahrens 	if (!(spa_mode & FWRITE))
2271fa9e4066Sahrens 		return (EROFS);
2272fa9e4066Sahrens 
2273fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
2274fa9e4066Sahrens 	if ((spa = spa_lookup(pool)) == NULL) {
2275fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2276fa9e4066Sahrens 		return (ENOENT);
2277fa9e4066Sahrens 	}
2278fa9e4066Sahrens 
2279ea8dc4b6Seschrock 	/*
2280ea8dc4b6Seschrock 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
2281ea8dc4b6Seschrock 	 * reacquire the namespace lock, and see if we can export.
2282ea8dc4b6Seschrock 	 */
2283ea8dc4b6Seschrock 	spa_open_ref(spa, FTAG);
2284ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
2285ea8dc4b6Seschrock 	spa_async_suspend(spa);
2286ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
2287ea8dc4b6Seschrock 	spa_close(spa, FTAG);
2288ea8dc4b6Seschrock 
2289fa9e4066Sahrens 	/*
2290fa9e4066Sahrens 	 * The pool will be in core if it's openable,
2291fa9e4066Sahrens 	 * in which case we can modify its state.
2292fa9e4066Sahrens 	 */
2293fa9e4066Sahrens 	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
2294fa9e4066Sahrens 		/*
2295fa9e4066Sahrens 		 * Objsets may be open only because they're dirty, so we
2296fa9e4066Sahrens 		 * have to force it to sync before checking spa_refcnt.
2297fa9e4066Sahrens 		 */
2298fa9e4066Sahrens 		spa_scrub_suspend(spa);
2299fa9e4066Sahrens 		txg_wait_synced(spa->spa_dsl_pool, 0);
2300fa9e4066Sahrens 
2301ea8dc4b6Seschrock 		/*
2302ea8dc4b6Seschrock 		 * A pool cannot be exported or destroyed if there are active
2303ea8dc4b6Seschrock 		 * references.  If we are resetting a pool, allow references by
2304ea8dc4b6Seschrock 		 * fault injection handlers.
2305ea8dc4b6Seschrock 		 */
2306ea8dc4b6Seschrock 		if (!spa_refcount_zero(spa) ||
2307ea8dc4b6Seschrock 		    (spa->spa_inject_ref != 0 &&
2308ea8dc4b6Seschrock 		    new_state != POOL_STATE_UNINITIALIZED)) {
2309fa9e4066Sahrens 			spa_scrub_resume(spa);
2310ea8dc4b6Seschrock 			spa_async_resume(spa);
2311fa9e4066Sahrens 			mutex_exit(&spa_namespace_lock);
2312fa9e4066Sahrens 			return (EBUSY);
2313fa9e4066Sahrens 		}
2314fa9e4066Sahrens 
2315fa9e4066Sahrens 		spa_scrub_resume(spa);
2316fa9e4066Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
2317fa9e4066Sahrens 
2318fa9e4066Sahrens 		/*
2319fa9e4066Sahrens 		 * We want this to be reflected on every label,
2320fa9e4066Sahrens 		 * so mark them all dirty.  spa_unload() will do the
2321fa9e4066Sahrens 		 * final sync that pushes these changes out.
2322fa9e4066Sahrens 		 */
2323ea8dc4b6Seschrock 		if (new_state != POOL_STATE_UNINITIALIZED) {
23245dabedeeSbonwick 			spa_config_enter(spa, RW_WRITER, FTAG);
2325ea8dc4b6Seschrock 			spa->spa_state = new_state;
23260373e76bSbonwick 			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
2327ea8dc4b6Seschrock 			vdev_config_dirty(spa->spa_root_vdev);
23285dabedeeSbonwick 			spa_config_exit(spa, FTAG);
2329ea8dc4b6Seschrock 		}
2330fa9e4066Sahrens 	}
2331fa9e4066Sahrens 
23323d7072f8Seschrock 	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
23333d7072f8Seschrock 
2334fa9e4066Sahrens 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
2335fa9e4066Sahrens 		spa_unload(spa);
2336fa9e4066Sahrens 		spa_deactivate(spa);
2337fa9e4066Sahrens 	}
2338fa9e4066Sahrens 
233944cd46caSbillm 	if (oldconfig && spa->spa_config)
234044cd46caSbillm 		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
234144cd46caSbillm 
2342ea8dc4b6Seschrock 	if (new_state != POOL_STATE_UNINITIALIZED) {
23432f8aaab3Seschrock 		spa_config_check(spa->spa_config_dir,
23442f8aaab3Seschrock 		    spa->spa_config_file);
2345ea8dc4b6Seschrock 		spa_remove(spa);
2346ea8dc4b6Seschrock 		spa_config_sync();
2347ea8dc4b6Seschrock 	}
2348fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
2349fa9e4066Sahrens 
2350fa9e4066Sahrens 	return (0);
2351fa9e4066Sahrens }
2352fa9e4066Sahrens 
2353fa9e4066Sahrens /*
2354fa9e4066Sahrens  * Destroy a storage pool.
2355fa9e4066Sahrens  */
2356fa9e4066Sahrens int
2357fa9e4066Sahrens spa_destroy(char *pool)
2358fa9e4066Sahrens {
235944cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
2360fa9e4066Sahrens }
2361fa9e4066Sahrens 
2362fa9e4066Sahrens /*
2363fa9e4066Sahrens  * Export a storage pool.
2364fa9e4066Sahrens  */
2365fa9e4066Sahrens int
236644cd46caSbillm spa_export(char *pool, nvlist_t **oldconfig)
2367fa9e4066Sahrens {
236844cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
2369fa9e4066Sahrens }
2370fa9e4066Sahrens 
2371ea8dc4b6Seschrock /*
2372ea8dc4b6Seschrock  * Similar to spa_export(), this unloads the spa_t without actually removing it
2373ea8dc4b6Seschrock  * from the namespace in any way.
2374ea8dc4b6Seschrock  */
2375ea8dc4b6Seschrock int
2376ea8dc4b6Seschrock spa_reset(char *pool)
2377ea8dc4b6Seschrock {
237844cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
2379ea8dc4b6Seschrock }
2380ea8dc4b6Seschrock 
2381ea8dc4b6Seschrock 
2382fa9e4066Sahrens /*
2383fa9e4066Sahrens  * ==========================================================================
2384fa9e4066Sahrens  * Device manipulation
2385fa9e4066Sahrens  * ==========================================================================
2386fa9e4066Sahrens  */
2387fa9e4066Sahrens 
2388fa9e4066Sahrens /*
23898654d025Sperrin  * Add a device to a storage pool.
2390fa9e4066Sahrens  */
2391fa9e4066Sahrens int
2392fa9e4066Sahrens spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
2393fa9e4066Sahrens {
2394fa9e4066Sahrens 	uint64_t txg;
23950373e76bSbonwick 	int c, error;
2396fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
23970e34b6a7Sbonwick 	vdev_t *vd, *tvd;
2398fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
2399fa94a07fSbrendan 	uint_t nspares, nl2cache;
2400fa9e4066Sahrens 
2401fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
2402fa9e4066Sahrens 
240399653d4eSeschrock 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
240499653d4eSeschrock 	    VDEV_ALLOC_ADD)) != 0)
240599653d4eSeschrock 		return (spa_vdev_exit(spa, NULL, txg, error));
2406fa9e4066Sahrens 
240739c23413Seschrock 	spa->spa_pending_vdev = vd;
240899653d4eSeschrock 
2409fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
2410fa94a07fSbrendan 	    &nspares) != 0)
241199653d4eSeschrock 		nspares = 0;
241299653d4eSeschrock 
2413fa94a07fSbrendan 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
2414fa94a07fSbrendan 	    &nl2cache) != 0)
2415fa94a07fSbrendan 		nl2cache = 0;
2416fa94a07fSbrendan 
2417fa94a07fSbrendan 	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) {
241839c23413Seschrock 		spa->spa_pending_vdev = NULL;
2419fa9e4066Sahrens 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
242039c23413Seschrock 	}
2421fa9e4066Sahrens 
242299653d4eSeschrock 	if (vd->vdev_children != 0) {
242339c23413Seschrock 		if ((error = vdev_create(vd, txg, B_FALSE)) != 0) {
242439c23413Seschrock 			spa->spa_pending_vdev = NULL;
242599653d4eSeschrock 			return (spa_vdev_exit(spa, vd, txg, error));
242699653d4eSeschrock 		}
242799653d4eSeschrock 	}
242899653d4eSeschrock 
242939c23413Seschrock 	/*
2430fa94a07fSbrendan 	 * We must validate the spares and l2cache devices after checking the
2431fa94a07fSbrendan 	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
243239c23413Seschrock 	 */
2433fa94a07fSbrendan 	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) {
243439c23413Seschrock 		spa->spa_pending_vdev = NULL;
243539c23413Seschrock 		return (spa_vdev_exit(spa, vd, txg, error));
243639c23413Seschrock 	}
243739c23413Seschrock 
243839c23413Seschrock 	spa->spa_pending_vdev = NULL;
243939c23413Seschrock 
244039c23413Seschrock 	/*
244139c23413Seschrock 	 * Transfer each new top-level vdev from vd to rvd.
244239c23413Seschrock 	 */
244339c23413Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
244439c23413Seschrock 		tvd = vd->vdev_child[c];
244539c23413Seschrock 		vdev_remove_child(vd, tvd);
244639c23413Seschrock 		tvd->vdev_id = rvd->vdev_children;
244739c23413Seschrock 		vdev_add_child(rvd, tvd);
244839c23413Seschrock 		vdev_config_dirty(tvd);
244939c23413Seschrock 	}
245039c23413Seschrock 
245199653d4eSeschrock 	if (nspares != 0) {
2452fa94a07fSbrendan 		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
2453fa94a07fSbrendan 		    ZPOOL_CONFIG_SPARES);
245499653d4eSeschrock 		spa_load_spares(spa);
2455fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
2456fa94a07fSbrendan 	}
2457fa94a07fSbrendan 
2458fa94a07fSbrendan 	if (nl2cache != 0) {
2459fa94a07fSbrendan 		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
2460fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE);
2461fa94a07fSbrendan 		spa_load_l2cache(spa);
2462fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
2463fa9e4066Sahrens 	}
2464fa9e4066Sahrens 
2465fa9e4066Sahrens 	/*
24660e34b6a7Sbonwick 	 * We have to be careful when adding new vdevs to an existing pool.
24670e34b6a7Sbonwick 	 * If other threads start allocating from these vdevs before we
24680e34b6a7Sbonwick 	 * sync the config cache, and we lose power, then upon reboot we may
24690e34b6a7Sbonwick 	 * fail to open the pool because there are DVAs that the config cache
24700e34b6a7Sbonwick 	 * can't translate.  Therefore, we first add the vdevs without
24710e34b6a7Sbonwick 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
24720373e76bSbonwick 	 * and then let spa_config_update() initialize the new metaslabs.
24730e34b6a7Sbonwick 	 *
24740e34b6a7Sbonwick 	 * spa_load() checks for added-but-not-initialized vdevs, so that
24750e34b6a7Sbonwick 	 * if we lose power at any point in this sequence, the remaining
24760e34b6a7Sbonwick 	 * steps will be completed the next time we load the pool.
24770e34b6a7Sbonwick 	 */
24780373e76bSbonwick 	(void) spa_vdev_exit(spa, vd, txg, 0);
24790e34b6a7Sbonwick 
24800373e76bSbonwick 	mutex_enter(&spa_namespace_lock);
24810373e76bSbonwick 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
24820373e76bSbonwick 	mutex_exit(&spa_namespace_lock);
2483fa9e4066Sahrens 
24840373e76bSbonwick 	return (0);
2485fa9e4066Sahrens }
2486fa9e4066Sahrens 
2487fa9e4066Sahrens /*
2488fa9e4066Sahrens  * Attach a device to a mirror.  The arguments are the path to any device
2489fa9e4066Sahrens  * in the mirror, and the nvroot for the new device.  If the path specifies
2490fa9e4066Sahrens  * a device that is not mirrored, we automatically insert the mirror vdev.
2491fa9e4066Sahrens  *
2492fa9e4066Sahrens  * If 'replacing' is specified, the new device is intended to replace the
2493fa9e4066Sahrens  * existing device; in this case the two devices are made into their own
24943d7072f8Seschrock  * mirror using the 'replacing' vdev, which is functionally identical to
2495fa9e4066Sahrens  * the mirror vdev (it actually reuses all the same ops) but has a few
2496fa9e4066Sahrens  * extra rules: you can't attach to it after it's been created, and upon
2497fa9e4066Sahrens  * completion of resilvering, the first disk (the one being replaced)
2498fa9e4066Sahrens  * is automatically detached.
2499fa9e4066Sahrens  */
2500fa9e4066Sahrens int
2501ea8dc4b6Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
2502fa9e4066Sahrens {
2503fa9e4066Sahrens 	uint64_t txg, open_txg;
2504fa9e4066Sahrens 	int error;
2505fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2506fa9e4066Sahrens 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
250799653d4eSeschrock 	vdev_ops_t *pvops;
25088654d025Sperrin 	int is_log;
2509fa9e4066Sahrens 
2510fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
2511fa9e4066Sahrens 
2512ea8dc4b6Seschrock 	oldvd = vdev_lookup_by_guid(rvd, guid);
2513fa9e4066Sahrens 
2514fa9e4066Sahrens 	if (oldvd == NULL)
2515fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
2516fa9e4066Sahrens 
25170e34b6a7Sbonwick 	if (!oldvd->vdev_ops->vdev_op_leaf)
25180e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
25190e34b6a7Sbonwick 
2520fa9e4066Sahrens 	pvd = oldvd->vdev_parent;
2521fa9e4066Sahrens 
252299653d4eSeschrock 	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
25233d7072f8Seschrock 	    VDEV_ALLOC_ADD)) != 0)
25243d7072f8Seschrock 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
25253d7072f8Seschrock 
25263d7072f8Seschrock 	if (newrootvd->vdev_children != 1)
2527fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2528fa9e4066Sahrens 
2529fa9e4066Sahrens 	newvd = newrootvd->vdev_child[0];
2530fa9e4066Sahrens 
2531fa9e4066Sahrens 	if (!newvd->vdev_ops->vdev_op_leaf)
2532fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2533fa9e4066Sahrens 
253499653d4eSeschrock 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
2535fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, error));
2536fa9e4066Sahrens 
25378654d025Sperrin 	/*
25388654d025Sperrin 	 * Spares can't replace logs
25398654d025Sperrin 	 */
25408654d025Sperrin 	is_log = oldvd->vdev_islog;
25418654d025Sperrin 	if (is_log && newvd->vdev_isspare)
25428654d025Sperrin 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
25438654d025Sperrin 
254499653d4eSeschrock 	if (!replacing) {
254599653d4eSeschrock 		/*
254699653d4eSeschrock 		 * For attach, the only allowable parent is a mirror or the root
254799653d4eSeschrock 		 * vdev.
254899653d4eSeschrock 		 */
254999653d4eSeschrock 		if (pvd->vdev_ops != &vdev_mirror_ops &&
255099653d4eSeschrock 		    pvd->vdev_ops != &vdev_root_ops)
255199653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
255299653d4eSeschrock 
255399653d4eSeschrock 		pvops = &vdev_mirror_ops;
255499653d4eSeschrock 	} else {
255599653d4eSeschrock 		/*
255699653d4eSeschrock 		 * Active hot spares can only be replaced by inactive hot
255799653d4eSeschrock 		 * spares.
255899653d4eSeschrock 		 */
255999653d4eSeschrock 		if (pvd->vdev_ops == &vdev_spare_ops &&
256099653d4eSeschrock 		    pvd->vdev_child[1] == oldvd &&
256199653d4eSeschrock 		    !spa_has_spare(spa, newvd->vdev_guid))
256299653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
256399653d4eSeschrock 
256499653d4eSeschrock 		/*
256599653d4eSeschrock 		 * If the source is a hot spare, and the parent isn't already a
256699653d4eSeschrock 		 * spare, then we want to create a new hot spare.  Otherwise, we
256739c23413Seschrock 		 * want to create a replacing vdev.  The user is not allowed to
256839c23413Seschrock 		 * attach to a spared vdev child unless the 'isspare' state is
256939c23413Seschrock 		 * the same (spare replaces spare, non-spare replaces
257039c23413Seschrock 		 * non-spare).
257199653d4eSeschrock 		 */
257299653d4eSeschrock 		if (pvd->vdev_ops == &vdev_replacing_ops)
257399653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
257439c23413Seschrock 		else if (pvd->vdev_ops == &vdev_spare_ops &&
257539c23413Seschrock 		    newvd->vdev_isspare != oldvd->vdev_isspare)
257639c23413Seschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
257799653d4eSeschrock 		else if (pvd->vdev_ops != &vdev_spare_ops &&
257899653d4eSeschrock 		    newvd->vdev_isspare)
257999653d4eSeschrock 			pvops = &vdev_spare_ops;
258099653d4eSeschrock 		else
258199653d4eSeschrock 			pvops = &vdev_replacing_ops;
258299653d4eSeschrock 	}
258399653d4eSeschrock 
25842a79c5feSlling 	/*
25852a79c5feSlling 	 * Compare the new device size with the replaceable/attachable
25862a79c5feSlling 	 * device size.
25872a79c5feSlling 	 */
25882a79c5feSlling 	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
2589fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
2590fa9e4066Sahrens 
2591ecc2d604Sbonwick 	/*
2592ecc2d604Sbonwick 	 * The new device cannot have a higher alignment requirement
2593ecc2d604Sbonwick 	 * than the top-level vdev.
2594ecc2d604Sbonwick 	 */
2595ecc2d604Sbonwick 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
2596fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
2597fa9e4066Sahrens 
2598fa9e4066Sahrens 	/*
2599fa9e4066Sahrens 	 * If this is an in-place replacement, update oldvd's path and devid
2600fa9e4066Sahrens 	 * to make it distinguishable from newvd, and unopenable from now on.
2601fa9e4066Sahrens 	 */
2602fa9e4066Sahrens 	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
2603fa9e4066Sahrens 		spa_strfree(oldvd->vdev_path);
2604fa9e4066Sahrens 		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
2605fa9e4066Sahrens 		    KM_SLEEP);
2606fa9e4066Sahrens 		(void) sprintf(oldvd->vdev_path, "%s/%s",
2607fa9e4066Sahrens 		    newvd->vdev_path, "old");
2608fa9e4066Sahrens 		if (oldvd->vdev_devid != NULL) {
2609fa9e4066Sahrens 			spa_strfree(oldvd->vdev_devid);
2610fa9e4066Sahrens 			oldvd->vdev_devid = NULL;
2611fa9e4066Sahrens 		}
2612fa9e4066Sahrens 	}
2613fa9e4066Sahrens 
2614fa9e4066Sahrens 	/*
261599653d4eSeschrock 	 * If the parent is not a mirror, or if we're replacing, insert the new
261699653d4eSeschrock 	 * mirror/replacing/spare vdev above oldvd.
2617fa9e4066Sahrens 	 */
2618fa9e4066Sahrens 	if (pvd->vdev_ops != pvops)
2619fa9e4066Sahrens 		pvd = vdev_add_parent(oldvd, pvops);
2620fa9e4066Sahrens 
2621fa9e4066Sahrens 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
2622fa9e4066Sahrens 	ASSERT(pvd->vdev_ops == pvops);
2623fa9e4066Sahrens 	ASSERT(oldvd->vdev_parent == pvd);
2624fa9e4066Sahrens 
2625fa9e4066Sahrens 	/*
2626fa9e4066Sahrens 	 * Extract the new device from its root and add it to pvd.
2627fa9e4066Sahrens 	 */
2628fa9e4066Sahrens 	vdev_remove_child(newrootvd, newvd);
2629fa9e4066Sahrens 	newvd->vdev_id = pvd->vdev_children;
2630fa9e4066Sahrens 	vdev_add_child(pvd, newvd);
2631fa9e4066Sahrens 
2632ea8dc4b6Seschrock 	/*
2633ea8dc4b6Seschrock 	 * If newvd is smaller than oldvd, but larger than its rsize,
2634ea8dc4b6Seschrock 	 * the addition of newvd may have decreased our parent's asize.
2635ea8dc4b6Seschrock 	 */
2636ea8dc4b6Seschrock 	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
2637ea8dc4b6Seschrock 
2638fa9e4066Sahrens 	tvd = newvd->vdev_top;
2639fa9e4066Sahrens 	ASSERT(pvd->vdev_top == tvd);
2640fa9e4066Sahrens 	ASSERT(tvd->vdev_parent == rvd);
2641fa9e4066Sahrens 
2642fa9e4066Sahrens 	vdev_config_dirty(tvd);
2643fa9e4066Sahrens 
2644fa9e4066Sahrens 	/*
2645fa9e4066Sahrens 	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
2646fa9e4066Sahrens 	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
2647fa9e4066Sahrens 	 */
2648fa9e4066Sahrens 	open_txg = txg + TXG_CONCURRENT_STATES - 1;
2649fa9e4066Sahrens 
2650fa9e4066Sahrens 	mutex_enter(&newvd->vdev_dtl_lock);
2651fa9e4066Sahrens 	space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
2652fa9e4066Sahrens 	    open_txg - TXG_INITIAL + 1);
2653fa9e4066Sahrens 	mutex_exit(&newvd->vdev_dtl_lock);
2654fa9e4066Sahrens 
265539c23413Seschrock 	if (newvd->vdev_isspare)
265639c23413Seschrock 		spa_spare_activate(newvd);
2657ea8dc4b6Seschrock 
2658fa9e4066Sahrens 	/*
2659fa9e4066Sahrens 	 * Mark newvd's DTL dirty in this txg.
2660fa9e4066Sahrens 	 */
2661ecc2d604Sbonwick 	vdev_dirty(tvd, VDD_DTL, newvd, txg);
2662fa9e4066Sahrens 
2663fa9e4066Sahrens 	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
2664fa9e4066Sahrens 
2665fa9e4066Sahrens 	/*
26663d7072f8Seschrock 	 * Kick off a resilver to update newvd.  We need to grab the namespace
26673d7072f8Seschrock 	 * lock because spa_scrub() needs to post a sysevent with the pool name.
2668fa9e4066Sahrens 	 */
26693d7072f8Seschrock 	mutex_enter(&spa_namespace_lock);
2670fa9e4066Sahrens 	VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
26713d7072f8Seschrock 	mutex_exit(&spa_namespace_lock);
2672fa9e4066Sahrens 
2673fa9e4066Sahrens 	return (0);
2674fa9e4066Sahrens }
2675fa9e4066Sahrens 
2676fa9e4066Sahrens /*
2677fa9e4066Sahrens  * Detach a device from a mirror or replacing vdev.
2678fa9e4066Sahrens  * If 'replace_done' is specified, only detach if the parent
2679fa9e4066Sahrens  * is a replacing vdev.
2680fa9e4066Sahrens  */
2681fa9e4066Sahrens int
2682ea8dc4b6Seschrock spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
2683fa9e4066Sahrens {
2684fa9e4066Sahrens 	uint64_t txg;
2685fa9e4066Sahrens 	int c, t, error;
2686fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2687fa9e4066Sahrens 	vdev_t *vd, *pvd, *cvd, *tvd;
268899653d4eSeschrock 	boolean_t unspare = B_FALSE;
268999653d4eSeschrock 	uint64_t unspare_guid;
2690fa9e4066Sahrens 
2691fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
2692fa9e4066Sahrens 
2693ea8dc4b6Seschrock 	vd = vdev_lookup_by_guid(rvd, guid);
2694fa9e4066Sahrens 
2695fa9e4066Sahrens 	if (vd == NULL)
2696fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
2697fa9e4066Sahrens 
26980e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
26990e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
27000e34b6a7Sbonwick 
2701fa9e4066Sahrens 	pvd = vd->vdev_parent;
2702fa9e4066Sahrens 
2703fa9e4066Sahrens 	/*
2704fa9e4066Sahrens 	 * If replace_done is specified, only remove this device if it's
270599653d4eSeschrock 	 * the first child of a replacing vdev.  For the 'spare' vdev, either
270699653d4eSeschrock 	 * disk can be removed.
270799653d4eSeschrock 	 */
270899653d4eSeschrock 	if (replace_done) {
270999653d4eSeschrock 		if (pvd->vdev_ops == &vdev_replacing_ops) {
271099653d4eSeschrock 			if (vd->vdev_id != 0)
271199653d4eSeschrock 				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
271299653d4eSeschrock 		} else if (pvd->vdev_ops != &vdev_spare_ops) {
271399653d4eSeschrock 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
271499653d4eSeschrock 		}
271599653d4eSeschrock 	}
271699653d4eSeschrock 
271799653d4eSeschrock 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
2718e7437265Sahrens 	    spa_version(spa) >= SPA_VERSION_SPARES);
2719fa9e4066Sahrens 
2720fa9e4066Sahrens 	/*
272199653d4eSeschrock 	 * Only mirror, replacing, and spare vdevs support detach.
2722fa9e4066Sahrens 	 */
2723fa9e4066Sahrens 	if (pvd->vdev_ops != &vdev_replacing_ops &&
272499653d4eSeschrock 	    pvd->vdev_ops != &vdev_mirror_ops &&
272599653d4eSeschrock 	    pvd->vdev_ops != &vdev_spare_ops)
2726fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
2727fa9e4066Sahrens 
2728fa9e4066Sahrens 	/*
2729fa9e4066Sahrens 	 * If there's only one replica, you can't detach it.
2730fa9e4066Sahrens 	 */
2731fa9e4066Sahrens 	if (pvd->vdev_children <= 1)
2732fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
2733fa9e4066Sahrens 
2734fa9e4066Sahrens 	/*
2735fa9e4066Sahrens 	 * If all siblings have non-empty DTLs, this device may have the only
2736fa9e4066Sahrens 	 * valid copy of the data, which means we cannot safely detach it.
2737fa9e4066Sahrens 	 *
2738fa9e4066Sahrens 	 * XXX -- as in the vdev_offline() case, we really want a more
2739fa9e4066Sahrens 	 * precise DTL check.
2740fa9e4066Sahrens 	 */
2741fa9e4066Sahrens 	for (c = 0; c < pvd->vdev_children; c++) {
2742fa9e4066Sahrens 		uint64_t dirty;
2743fa9e4066Sahrens 
2744fa9e4066Sahrens 		cvd = pvd->vdev_child[c];
2745fa9e4066Sahrens 		if (cvd == vd)
2746fa9e4066Sahrens 			continue;
2747fa9e4066Sahrens 		if (vdev_is_dead(cvd))
2748fa9e4066Sahrens 			continue;
2749fa9e4066Sahrens 		mutex_enter(&cvd->vdev_dtl_lock);
2750fa9e4066Sahrens 		dirty = cvd->vdev_dtl_map.sm_space |
2751fa9e4066Sahrens 		    cvd->vdev_dtl_scrub.sm_space;
2752fa9e4066Sahrens 		mutex_exit(&cvd->vdev_dtl_lock);
2753fa9e4066Sahrens 		if (!dirty)
2754fa9e4066Sahrens 			break;
2755fa9e4066Sahrens 	}
275699653d4eSeschrock 
275799653d4eSeschrock 	/*
275899653d4eSeschrock 	 * If we are a replacing or spare vdev, then we can always detach the
275999653d4eSeschrock 	 * latter child, as that is how one cancels the operation.
276099653d4eSeschrock 	 */
276199653d4eSeschrock 	if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) &&
276299653d4eSeschrock 	    c == pvd->vdev_children)
2763fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
2764fa9e4066Sahrens 
276599653d4eSeschrock 	/*
276699653d4eSeschrock 	 * If we are detaching the original disk from a spare, then it implies
276799653d4eSeschrock 	 * that the spare should become a real disk, and be removed from the
276899653d4eSeschrock 	 * active spare list for the pool.
276999653d4eSeschrock 	 */
277099653d4eSeschrock 	if (pvd->vdev_ops == &vdev_spare_ops &&
277199653d4eSeschrock 	    vd->vdev_id == 0)
277299653d4eSeschrock 		unspare = B_TRUE;
277399653d4eSeschrock 
2774fa9e4066Sahrens 	/*
2775fa9e4066Sahrens 	 * Erase the disk labels so the disk can be used for other things.
2776fa9e4066Sahrens 	 * This must be done after all other error cases are handled,
2777fa9e4066Sahrens 	 * but before we disembowel vd (so we can still do I/O to it).
2778fa9e4066Sahrens 	 * But if we can't do it, don't treat the error as fatal --
2779fa9e4066Sahrens 	 * it may be that the unwritability of the disk is the reason
2780fa9e4066Sahrens 	 * it's being detached!
2781fa9e4066Sahrens 	 */
278239c23413Seschrock 	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
2783fa9e4066Sahrens 
2784fa9e4066Sahrens 	/*
2785fa9e4066Sahrens 	 * Remove vd from its parent and compact the parent's children.
2786fa9e4066Sahrens 	 */
2787fa9e4066Sahrens 	vdev_remove_child(pvd, vd);
2788fa9e4066Sahrens 	vdev_compact_children(pvd);
2789fa9e4066Sahrens 
2790fa9e4066Sahrens 	/*
2791fa9e4066Sahrens 	 * Remember one of the remaining children so we can get tvd below.
2792fa9e4066Sahrens 	 */
2793fa9e4066Sahrens 	cvd = pvd->vdev_child[0];
2794fa9e4066Sahrens 
279599653d4eSeschrock 	/*
279699653d4eSeschrock 	 * If we need to remove the remaining child from the list of hot spares,
279799653d4eSeschrock 	 * do it now, marking the vdev as no longer a spare in the process.  We
279899653d4eSeschrock 	 * must do this before vdev_remove_parent(), because that can change the
279999653d4eSeschrock 	 * GUID if it creates a new toplevel GUID.
280099653d4eSeschrock 	 */
280199653d4eSeschrock 	if (unspare) {
280299653d4eSeschrock 		ASSERT(cvd->vdev_isspare);
280339c23413Seschrock 		spa_spare_remove(cvd);
280499653d4eSeschrock 		unspare_guid = cvd->vdev_guid;
280599653d4eSeschrock 	}
280699653d4eSeschrock 
2807fa9e4066Sahrens 	/*
2808fa9e4066Sahrens 	 * If the parent mirror/replacing vdev only has one child,
2809fa9e4066Sahrens 	 * the parent is no longer needed.  Remove it from the tree.
2810fa9e4066Sahrens 	 */
2811fa9e4066Sahrens 	if (pvd->vdev_children == 1)
2812fa9e4066Sahrens 		vdev_remove_parent(cvd);
2813fa9e4066Sahrens 
2814fa9e4066Sahrens 	/*
2815fa9e4066Sahrens 	 * We don't set tvd until now because the parent we just removed
2816fa9e4066Sahrens 	 * may have been the previous top-level vdev.
2817fa9e4066Sahrens 	 */
2818fa9e4066Sahrens 	tvd = cvd->vdev_top;
2819fa9e4066Sahrens 	ASSERT(tvd->vdev_parent == rvd);
2820fa9e4066Sahrens 
2821fa9e4066Sahrens 	/*
282239c23413Seschrock 	 * Reevaluate the parent vdev state.
2823fa9e4066Sahrens 	 */
28243d7072f8Seschrock 	vdev_propagate_state(cvd);
2825fa9e4066Sahrens 
2826fa9e4066Sahrens 	/*
282739c23413Seschrock 	 * If the device we just detached was smaller than the others, it may be
282839c23413Seschrock 	 * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
282939c23413Seschrock 	 * can't fail because the existing metaslabs are already in core, so
283039c23413Seschrock 	 * there's nothing to read from disk.
2831fa9e4066Sahrens 	 */
2832ecc2d604Sbonwick 	VERIFY(vdev_metaslab_init(tvd, txg) == 0);
2833fa9e4066Sahrens 
2834fa9e4066Sahrens 	vdev_config_dirty(tvd);
2835fa9e4066Sahrens 
2836fa9e4066Sahrens 	/*
283739c23413Seschrock 	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
283839c23413Seschrock 	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
283939c23413Seschrock 	 * But first make sure we're not on any *other* txg's DTL list, to
284039c23413Seschrock 	 * prevent vd from being accessed after it's freed.
2841fa9e4066Sahrens 	 */
2842fa9e4066Sahrens 	for (t = 0; t < TXG_SIZE; t++)
2843fa9e4066Sahrens 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
2844ecc2d604Sbonwick 	vd->vdev_detached = B_TRUE;
2845ecc2d604Sbonwick 	vdev_dirty(tvd, VDD_DTL, vd, txg);
2846fa9e4066Sahrens 
28473d7072f8Seschrock 	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
28483d7072f8Seschrock 
284999653d4eSeschrock 	error = spa_vdev_exit(spa, vd, txg, 0);
285099653d4eSeschrock 
285199653d4eSeschrock 	/*
285239c23413Seschrock 	 * If this was the removal of the original device in a hot spare vdev,
285339c23413Seschrock 	 * then we want to go through and remove the device from the hot spare
285439c23413Seschrock 	 * list of every other pool.
285599653d4eSeschrock 	 */
285699653d4eSeschrock 	if (unspare) {
285799653d4eSeschrock 		spa = NULL;
285899653d4eSeschrock 		mutex_enter(&spa_namespace_lock);
285999653d4eSeschrock 		while ((spa = spa_next(spa)) != NULL) {
286099653d4eSeschrock 			if (spa->spa_state != POOL_STATE_ACTIVE)
286199653d4eSeschrock 				continue;
286299653d4eSeschrock 
286399653d4eSeschrock 			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
286499653d4eSeschrock 		}
286599653d4eSeschrock 		mutex_exit(&spa_namespace_lock);
286699653d4eSeschrock 	}
286799653d4eSeschrock 
286899653d4eSeschrock 	return (error);
286999653d4eSeschrock }
287099653d4eSeschrock 
287199653d4eSeschrock /*
2872fa94a07fSbrendan  * Remove a spare vdev from the nvlist config.
287399653d4eSeschrock  */
/*
 * Remove the hot spare with the given guid from the ZPOOL_CONFIG_SPARES
 * nvlist array stored in sav->sav_config.  'spares'/'nspares' is that array
 * as already looked up by the caller; 'vd' is the open vdev for the guid in
 * this pool, or NULL if the device is not currently open here.
 *
 * Returns 0 on success, ENOENT if the guid is neither configured nor open,
 * ENOTSUP if the device is open here but absent from this pool's spare
 * config (NOTE(review): presumably a spare owned by another pool -- confirm),
 * and EBUSY if the spare is in use and 'unspare' was not requested.
 */
2874fa94a07fSbrendan static int
2875fa94a07fSbrendan spa_remove_spares(spa_aux_vdev_t *sav, uint64_t guid, boolean_t unspare,
2876fa94a07fSbrendan     nvlist_t **spares, int nspares, vdev_t *vd)
287799653d4eSeschrock {
2878fa94a07fSbrendan 	nvlist_t *nv, **newspares;
2879fa94a07fSbrendan 	int i, j;
288099653d4eSeschrock 
	/* Find the config entry whose guid matches; nv stays NULL if none. */
288199653d4eSeschrock 	nv = NULL;
2882fa94a07fSbrendan 	for (i = 0; i < nspares; i++) {
2883fa94a07fSbrendan 		uint64_t theguid;
288499653d4eSeschrock 
2885fa94a07fSbrendan 		VERIFY(nvlist_lookup_uint64(spares[i],
2886fa94a07fSbrendan 		    ZPOOL_CONFIG_GUID, &theguid) == 0);
2887fa94a07fSbrendan 		if (theguid == guid) {
2888fa94a07fSbrendan 			nv = spares[i];
2889fa94a07fSbrendan 			break;
289099653d4eSeschrock 		}
289199653d4eSeschrock 	}
289299653d4eSeschrock 
289399653d4eSeschrock 	/*
2894fa94a07fSbrendan 	 * Only remove the hot spare if it's not currently in use in this pool.
289599653d4eSeschrock 	 */
2896fa94a07fSbrendan 	if (nv == NULL && vd == NULL)
2897fa94a07fSbrendan 		return (ENOENT);
289899653d4eSeschrock 
2899fa94a07fSbrendan 	if (nv == NULL && vd != NULL)
2900fa94a07fSbrendan 		return (ENOTSUP);
290199653d4eSeschrock 
2902fa94a07fSbrendan 	if (!unspare && nv != NULL && vd != NULL)
2903fa94a07fSbrendan 		return (EBUSY);
290499653d4eSeschrock 
	/*
	 * Build the replacement (one-shorter) array of spares; NULL when the
	 * last remaining spare is being removed.
	 */
290599653d4eSeschrock 	if (nspares == 1) {
290699653d4eSeschrock 		newspares = NULL;
290799653d4eSeschrock 	} else {
290899653d4eSeschrock 		newspares = kmem_alloc((nspares - 1) * sizeof (void *),
290999653d4eSeschrock 		    KM_SLEEP);
291099653d4eSeschrock 		for (i = 0, j = 0; i < nspares; i++) {
291199653d4eSeschrock 			if (spares[i] != nv)
291299653d4eSeschrock 				VERIFY(nvlist_dup(spares[i],
291399653d4eSeschrock 				    &newspares[j++], KM_SLEEP) == 0);
291499653d4eSeschrock 		}
291599653d4eSeschrock 	}
291699653d4eSeschrock 
	/*
	 * Swap the new array into the config in place of the old one.
	 * NOTE(review): when nspares == 1, newspares is NULL with count 0 --
	 * confirm nvlist_add_nvlist_array() and kmem_free() accept that.
	 */
2917fa94a07fSbrendan 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_SPARES,
291899653d4eSeschrock 	    DATA_TYPE_NVLIST_ARRAY) == 0);
2919fa94a07fSbrendan 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
2920fa94a07fSbrendan 	    ZPOOL_CONFIG_SPARES, newspares, nspares - 1) == 0);
292199653d4eSeschrock 	for (i = 0; i < nspares - 1; i++)
292299653d4eSeschrock 		nvlist_free(newspares[i]);
292399653d4eSeschrock 	kmem_free(newspares, (nspares - 1) * sizeof (void *));
2924fa94a07fSbrendan 
2925fa94a07fSbrendan 	return (0);
2926fa94a07fSbrendan }
2927fa94a07fSbrendan 
2928fa94a07fSbrendan /*
2929fa94a07fSbrendan  * Remove an l2cache vdev from the nvlist config.
2930fa94a07fSbrendan  */
/*
 * Remove the l2cache device with the given guid from the
 * ZPOOL_CONFIG_L2CACHE nvlist array in sav->sav_config.  Unlike
 * spa_remove_spares() there is no EBUSY case: a cache device may be
 * removed even while open.  If 'vd' was not supplied by the caller it is
 * looked up in sav->sav_vdevs by guid.
 *
 * Returns 0 on success, ENOENT if the guid is neither configured nor open,
 * or ENOTSUP if the device is open but absent from this pool's config.
 */
2931fa94a07fSbrendan static int
2932fa94a07fSbrendan spa_remove_l2cache(spa_aux_vdev_t *sav, uint64_t guid, nvlist_t **l2cache,
2933fa94a07fSbrendan     int nl2cache, vdev_t *vd)
2934fa94a07fSbrendan {
2935fa94a07fSbrendan 	nvlist_t *nv, **newl2cache;
2936fa94a07fSbrendan 	int i, j;
2937fa94a07fSbrendan 
	/* Find the config entry whose guid matches; nv stays NULL if none. */
2938fa94a07fSbrendan 	nv = NULL;
2939fa94a07fSbrendan 	for (i = 0; i < nl2cache; i++) {
2940fa94a07fSbrendan 		uint64_t theguid;
2941fa94a07fSbrendan 
2942fa94a07fSbrendan 		VERIFY(nvlist_lookup_uint64(l2cache[i],
2943fa94a07fSbrendan 		    ZPOOL_CONFIG_GUID, &theguid) == 0);
2944fa94a07fSbrendan 		if (theguid == guid) {
2945fa94a07fSbrendan 			nv = l2cache[i];
2946fa94a07fSbrendan 			break;
2947fa94a07fSbrendan 		}
2948fa94a07fSbrendan 	}
2949fa94a07fSbrendan 
	/* Fall back to finding the open vdev ourselves if not passed in. */
2950fa94a07fSbrendan 	if (vd == NULL) {
2951fa94a07fSbrendan 		for (i = 0; i < nl2cache; i++) {
2952fa94a07fSbrendan 			if (sav->sav_vdevs[i]->vdev_guid == guid) {
2953fa94a07fSbrendan 				vd = sav->sav_vdevs[i];
2954fa94a07fSbrendan 				break;
2955fa94a07fSbrendan 			}
2956fa94a07fSbrendan 		}
2957fa94a07fSbrendan 	}
2958fa94a07fSbrendan 
2959fa94a07fSbrendan 	if (nv == NULL && vd == NULL)
2960fa94a07fSbrendan 		return (ENOENT);
2961fa94a07fSbrendan 
2962fa94a07fSbrendan 	if (nv == NULL && vd != NULL)
2963fa94a07fSbrendan 		return (ENOTSUP);
2964fa94a07fSbrendan 
	/*
	 * Build the replacement (one-shorter) array; NULL when the last
	 * remaining cache device is being removed.
	 */
2965fa94a07fSbrendan 	if (nl2cache == 1) {
2966fa94a07fSbrendan 		newl2cache = NULL;
2967fa94a07fSbrendan 	} else {
2968fa94a07fSbrendan 		newl2cache = kmem_alloc((nl2cache - 1) * sizeof (void *),
2969fa94a07fSbrendan 		    KM_SLEEP);
2970fa94a07fSbrendan 		for (i = 0, j = 0; i < nl2cache; i++) {
2971fa94a07fSbrendan 			if (l2cache[i] != nv)
2972fa94a07fSbrendan 				VERIFY(nvlist_dup(l2cache[i],
2973fa94a07fSbrendan 				    &newl2cache[j++], KM_SLEEP) == 0);
2974fa94a07fSbrendan 		}
2975fa94a07fSbrendan 	}
2976fa94a07fSbrendan 
	/*
	 * Swap the new array into the config in place of the old one.
	 * NOTE(review): when nl2cache == 1, newl2cache is NULL with count 0 --
	 * confirm nvlist_add_nvlist_array() and kmem_free() accept that.
	 */
2977fa94a07fSbrendan 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
2978fa94a07fSbrendan 	    DATA_TYPE_NVLIST_ARRAY) == 0);
2979fa94a07fSbrendan 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
2980fa94a07fSbrendan 	    ZPOOL_CONFIG_L2CACHE, newl2cache, nl2cache - 1) == 0);
2981fa94a07fSbrendan 	for (i = 0; i < nl2cache - 1; i++)
2982fa94a07fSbrendan 		nvlist_free(newl2cache[i]);
2983fa94a07fSbrendan 	kmem_free(newl2cache, (nl2cache - 1) * sizeof (void *));
2984fa94a07fSbrendan 
2985fa94a07fSbrendan 	return (0);
2986fa94a07fSbrendan }
2987fa94a07fSbrendan 
2988fa94a07fSbrendan /*
2989fa94a07fSbrendan  * Remove a device from the pool.  Currently, this supports removing only hot
2990fa94a07fSbrendan  * spares and level 2 ARC devices.
2991fa94a07fSbrendan  */
/*
 * Remove the device with the given guid from the pool, holding the config
 * lock as writer for the duration.  Tries the hot-spare list first, then
 * the l2cache list; on success the corresponding aux config is reloaded
 * and marked for sync.
 *
 * NOTE(review): a guid that matches neither aux list falls through both
 * branches and returns 0 (no error) -- confirm this is intended.
 */
2992fa94a07fSbrendan int
2993fa94a07fSbrendan spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
2994fa94a07fSbrendan {
2995fa94a07fSbrendan 	vdev_t *vd;
2996fa94a07fSbrendan 	nvlist_t **spares, **l2cache;
2997fa94a07fSbrendan 	uint_t nspares, nl2cache;
2998fa94a07fSbrendan 	int error = 0;
2999fa94a07fSbrendan 
3000fa94a07fSbrendan 	spa_config_enter(spa, RW_WRITER, FTAG);
3001fa94a07fSbrendan 
	/* May be NULL if the device is not currently open in this pool. */
3002fa94a07fSbrendan 	vd = spa_lookup_by_guid(spa, guid);
3003fa94a07fSbrendan 
3004fa94a07fSbrendan 	if (spa->spa_spares.sav_vdevs != NULL &&
3005fa94a07fSbrendan 	    spa_spare_exists(guid, NULL) &&
3006fa94a07fSbrendan 	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3007fa94a07fSbrendan 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
3008fa94a07fSbrendan 		if ((error = spa_remove_spares(&spa->spa_spares, guid, unspare,
3009fa94a07fSbrendan 		    spares, nspares, vd)) != 0)
3010fa94a07fSbrendan 			goto out;
3011fa94a07fSbrendan 		spa_load_spares(spa);
3012fa94a07fSbrendan 		spa->spa_spares.sav_sync = B_TRUE;
3013fa94a07fSbrendan 		goto out;
3014fa94a07fSbrendan 	}
3015fa94a07fSbrendan 
3016fa94a07fSbrendan 	if (spa->spa_l2cache.sav_vdevs != NULL &&
3017fa94a07fSbrendan 	    spa_l2cache_exists(guid, NULL) &&
3018fa94a07fSbrendan 	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3019fa94a07fSbrendan 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) {
3020fa94a07fSbrendan 		if ((error = spa_remove_l2cache(&spa->spa_l2cache, guid,
3021fa94a07fSbrendan 		    l2cache, nl2cache, vd)) != 0)
3022fa94a07fSbrendan 			goto out;
3023fa94a07fSbrendan 		spa_load_l2cache(spa);
3024fa94a07fSbrendan 		spa->spa_l2cache.sav_sync = B_TRUE;
3025fa94a07fSbrendan 	}
3026fa94a07fSbrendan 
302699653d4eSeschrock 
302799653d4eSeschrock out:
302899653d4eSeschrock 	spa_config_exit(spa, FTAG);
3029fa94a07fSbrendan 	return (error);
3030fa9e4066Sahrens }
3031fa9e4066Sahrens 
3032fa9e4066Sahrens /*
30333d7072f8Seschrock  * Find any device that's done replacing, or a vdev marked 'unspare' that's
30343d7072f8Seschrock  * currently spared, so we can detach it.
3035fa9e4066Sahrens  */
/*
 * Recursively walk the vdev tree below 'vd' looking for a detach candidate:
 * either (a) a replacing vdev whose new child has finished resilvering
 * (both DTL spacemaps empty), or (b) a spare vdev whose resilver completed
 * with the 'unspare' flag set.  Returns the child vdev that should now be
 * detached, or NULL if there is nothing to do.
 */
3036ea8dc4b6Seschrock static vdev_t *
30373d7072f8Seschrock spa_vdev_resilver_done_hunt(vdev_t *vd)
3038fa9e4066Sahrens {
3039ea8dc4b6Seschrock 	vdev_t *newvd, *oldvd;
3040fa9e4066Sahrens 	int c;
3041fa9e4066Sahrens 
	/* Depth-first: return the first candidate found in any subtree. */
3042ea8dc4b6Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
30433d7072f8Seschrock 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3044ea8dc4b6Seschrock 		if (oldvd != NULL)
3045ea8dc4b6Seschrock 			return (oldvd);
3046ea8dc4b6Seschrock 	}
3047fa9e4066Sahrens 
30483d7072f8Seschrock 	/*
30493d7072f8Seschrock 	 * Check for a completed replacement.
30503d7072f8Seschrock 	 */
3051fa9e4066Sahrens 	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3052ea8dc4b6Seschrock 		oldvd = vd->vdev_child[0];
3053ea8dc4b6Seschrock 		newvd = vd->vdev_child[1];
3054ea8dc4b6Seschrock 
		/* Empty DTL and scrub maps mean the new child is fully synced. */
3055ea8dc4b6Seschrock 		mutex_enter(&newvd->vdev_dtl_lock);
3056ea8dc4b6Seschrock 		if (newvd->vdev_dtl_map.sm_space == 0 &&
3057ea8dc4b6Seschrock 		    newvd->vdev_dtl_scrub.sm_space == 0) {
3058ea8dc4b6Seschrock 			mutex_exit(&newvd->vdev_dtl_lock);
3059ea8dc4b6Seschrock 			return (oldvd);
3060fa9e4066Sahrens 		}
3061ea8dc4b6Seschrock 		mutex_exit(&newvd->vdev_dtl_lock);
3062fa9e4066Sahrens 	}
3063ea8dc4b6Seschrock 
30643d7072f8Seschrock 	/*
30653d7072f8Seschrock 	 * Check for a completed resilver with the 'unspare' flag set.
30663d7072f8Seschrock 	 */
30673d7072f8Seschrock 	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
30683d7072f8Seschrock 		newvd = vd->vdev_child[0];
30693d7072f8Seschrock 		oldvd = vd->vdev_child[1];
30703d7072f8Seschrock 
		/* Clear vdev_unspare under the DTL lock before returning. */
30713d7072f8Seschrock 		mutex_enter(&newvd->vdev_dtl_lock);
30723d7072f8Seschrock 		if (newvd->vdev_unspare &&
30733d7072f8Seschrock 		    newvd->vdev_dtl_map.sm_space == 0 &&
30743d7072f8Seschrock 		    newvd->vdev_dtl_scrub.sm_space == 0) {
30753d7072f8Seschrock 			newvd->vdev_unspare = 0;
30763d7072f8Seschrock 			mutex_exit(&newvd->vdev_dtl_lock);
30773d7072f8Seschrock 			return (oldvd);
30783d7072f8Seschrock 		}
30793d7072f8Seschrock 		mutex_exit(&newvd->vdev_dtl_lock);
30803d7072f8Seschrock 	}
30813d7072f8Seschrock 
3082ea8dc4b6Seschrock 	return (NULL);
3083fa9e4066Sahrens }
3084fa9e4066Sahrens 
/*
 * Run after a resilver finishes: repeatedly find a completed replacement
 * or unspared device (spa_vdev_resilver_done_hunt()) and detach it.  The
 * config lock is held as reader only while hunting; it is dropped around
 * each spa_vdev_detach() call and reacquired afterwards.
 */
3085ea8dc4b6Seschrock static void
30863d7072f8Seschrock spa_vdev_resilver_done(spa_t *spa)
3087fa9e4066Sahrens {
3088ea8dc4b6Seschrock 	vdev_t *vd;
308999653d4eSeschrock 	vdev_t *pvd;
3090ea8dc4b6Seschrock 	uint64_t guid;
309199653d4eSeschrock 	uint64_t pguid = 0;
3092ea8dc4b6Seschrock 
3093ea8dc4b6Seschrock 	spa_config_enter(spa, RW_READER, FTAG);
3094ea8dc4b6Seschrock 
30953d7072f8Seschrock 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3096ea8dc4b6Seschrock 		guid = vd->vdev_guid;
309799653d4eSeschrock 		/*
309899653d4eSeschrock 		 * If we have just finished replacing a hot spared device, then
309999653d4eSeschrock 		 * we need to detach the parent's first child (the original hot
310099653d4eSeschrock 		 * spare) as well.
310199653d4eSeschrock 		 */
310299653d4eSeschrock 		pvd = vd->vdev_parent;
310399653d4eSeschrock 		if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
310499653d4eSeschrock 		    pvd->vdev_id == 0) {
310599653d4eSeschrock 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
310699653d4eSeschrock 			ASSERT(pvd->vdev_parent->vdev_children == 2);
310799653d4eSeschrock 			pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
310899653d4eSeschrock 		}
		/* spa_vdev_detach() needs the config lock dropped here. */
3109ea8dc4b6Seschrock 		spa_config_exit(spa, FTAG);
3110ea8dc4b6Seschrock 		if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
3111ea8dc4b6Seschrock 			return;
311299653d4eSeschrock 		if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
311399653d4eSeschrock 			return;
3114ea8dc4b6Seschrock 		spa_config_enter(spa, RW_READER, FTAG);
3115fa9e4066Sahrens 	}
3116fa9e4066Sahrens 
3117ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
3118fa9e4066Sahrens }
3119fa9e4066Sahrens 
3120c67d9675Seschrock /*
3121c67d9675Seschrock  * Update the stored path for this vdev.  Dirty the vdev configuration, relying
3122c67d9675Seschrock  * on spa_vdev_enter/exit() to synchronize the labels and cache.
3123c67d9675Seschrock  */
3124c67d9675Seschrock int
3125c67d9675Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3126c67d9675Seschrock {
3127c67d9675Seschrock 	vdev_t *rvd, *vd;
3128c67d9675Seschrock 	uint64_t txg;
3129c67d9675Seschrock 
3130c67d9675Seschrock 	rvd = spa->spa_root_vdev;
3131c67d9675Seschrock 
3132c67d9675Seschrock 	txg = spa_vdev_enter(spa);
3133c67d9675Seschrock 
313399653d4eSeschrock 	if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
313499653d4eSeschrock 		/*
3135fa94a07fSbrendan 		 * Determine if this is a reference to a hot spare or l2cache
3136fa94a07fSbrendan 		 * device.  If it is, update the path as stored in their
3137fa94a07fSbrendan 		 * device list.
313899653d4eSeschrock 		 */
3139fa94a07fSbrendan 		nvlist_t **spares, **l2cache;
3140fa94a07fSbrendan 		uint_t i, nspares, nl2cache;
3141fa94a07fSbrendan 
		/* Scan the spare config for the guid and patch its path. */
3142fa94a07fSbrendan 		if (spa->spa_spares.sav_config != NULL) {
3143fa94a07fSbrendan 			VERIFY(nvlist_lookup_nvlist_array(
3144fa94a07fSbrendan 			    spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
3145fa94a07fSbrendan 			    &spares, &nspares) == 0);
314699653d4eSeschrock 			for (i = 0; i < nspares; i++) {
314799653d4eSeschrock 				uint64_t theguid;
314899653d4eSeschrock 				VERIFY(nvlist_lookup_uint64(spares[i],
314999653d4eSeschrock 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
3150fa94a07fSbrendan 				if (theguid == guid) {
3151fa94a07fSbrendan 					VERIFY(nvlist_add_string(spares[i],
3152fa94a07fSbrendan 					    ZPOOL_CONFIG_PATH, newpath) == 0);
3153fa94a07fSbrendan 					spa_load_spares(spa);
3154fa94a07fSbrendan 					spa->spa_spares.sav_sync = B_TRUE;
3155fa94a07fSbrendan 					return (spa_vdev_exit(spa, NULL, txg,
3156fa94a07fSbrendan 					    0));
3157fa94a07fSbrendan 				}
315899653d4eSeschrock 			}
3159fa94a07fSbrendan 		}
316099653d4eSeschrock 
		/* Likewise for the l2cache config. */
3161fa94a07fSbrendan 		if (spa->spa_l2cache.sav_config != NULL) {
3162fa94a07fSbrendan 			VERIFY(nvlist_lookup_nvlist_array(
3163fa94a07fSbrendan 			    spa->spa_l2cache.sav_config, ZPOOL_CONFIG_L2CACHE,
3164fa94a07fSbrendan 			    &l2cache, &nl2cache) == 0);
3165fa94a07fSbrendan 			for (i = 0; i < nl2cache; i++) {
3166fa94a07fSbrendan 				uint64_t theguid;
3167fa94a07fSbrendan 				VERIFY(nvlist_lookup_uint64(l2cache[i],
3168fa94a07fSbrendan 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
3169fa94a07fSbrendan 				if (theguid == guid) {
3170fa94a07fSbrendan 					VERIFY(nvlist_add_string(l2cache[i],
3171fa94a07fSbrendan 					    ZPOOL_CONFIG_PATH, newpath) == 0);
3172fa94a07fSbrendan 					spa_load_l2cache(spa);
3173fa94a07fSbrendan 					spa->spa_l2cache.sav_sync = B_TRUE;
3174fa94a07fSbrendan 					return (spa_vdev_exit(spa, NULL, txg,
3175fa94a07fSbrendan 					    0));
3176fa94a07fSbrendan 				}
3177fa94a07fSbrendan 			}
317899653d4eSeschrock 		}
3179fa94a07fSbrendan 
		/* Not a pool vdev, spare, or cache device. */
3180fa94a07fSbrendan 		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
318199653d4eSeschrock 	}
3182c67d9675Seschrock 
	/* Paths only make sense for leaf (disk/file) vdevs. */
31830e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
31840e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
31850e34b6a7Sbonwick 
3186c67d9675Seschrock 	spa_strfree(vd->vdev_path);
3187c67d9675Seschrock 	vd->vdev_path = spa_strdup(newpath);
3188c67d9675Seschrock 
3189c67d9675Seschrock 	vdev_config_dirty(vd->vdev_top);
3190c67d9675Seschrock 
3191c67d9675Seschrock 	return (spa_vdev_exit(spa, NULL, txg, 0));
3192c67d9675Seschrock }
3194c67d9675Seschrock 
3195fa9e4066Sahrens /*
3196fa9e4066Sahrens  * ==========================================================================
3197fa9e4066Sahrens  * SPA Scrubbing
3198fa9e4066Sahrens  * ==========================================================================
3199fa9e4066Sahrens  */
3200fa9e4066Sahrens 
/*
 * Completion callback for scrub/resilver reads issued by
 * spa_scrub_io_start(): free the data buffer, count any hard
 * (non-speculative) error against both the pool and the vdev, and wake
 * the issuing thread once the in-flight count drops below the cap.
 */
3201fa9e4066Sahrens static void
3202fa9e4066Sahrens spa_scrub_io_done(zio_t *zio)
3203fa9e4066Sahrens {
3204fa9e4066Sahrens 	spa_t *spa = zio->io_spa;
3205fa9e4066Sahrens 
32060e8c6158Smaybee 	arc_data_buf_free(zio->io_data, zio->io_size);
3207fa9e4066Sahrens 
3208fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
3209ea8dc4b6Seschrock 	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
		/* Attribute the error to the faulting vdev, or the root. */
321044cd46caSbillm 		vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev;
3211ea8dc4b6Seschrock 		spa->spa_scrub_errors++;
3212fa9e4066Sahrens 		mutex_enter(&vd->vdev_stat_lock);
3213fa9e4066Sahrens 		vd->vdev_stat.vs_scrub_errors++;
3214fa9e4066Sahrens 		mutex_exit(&vd->vdev_stat_lock);
3215fa9e4066Sahrens 	}
321605b2b3b8Smishra 
321705b2b3b8Smishra 	if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight)
3218ea8dc4b6Seschrock 		cv_broadcast(&spa->spa_scrub_io_cv);
321905b2b3b8Smishra 
322005b2b3b8Smishra 	ASSERT(spa->spa_scrub_inflight >= 0);
322105b2b3b8Smishra 
3222ea8dc4b6Seschrock 	mutex_exit(&spa->spa_scrub_lock);
3223fa9e4066Sahrens }
3224fa9e4066Sahrens 
/*
 * Issue an asynchronous scrub/resilver read of 'bp' at the given priority.
 * Blocks while the number of in-flight scrub I/Os is at
 * spa_scrub_maxinflight so the vdevs are not swamped; completion (and the
 * wakeup) is handled by spa_scrub_io_done().
 */
3225fa9e4066Sahrens static void
3226ea8dc4b6Seschrock spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags,
3227ea8dc4b6Seschrock     zbookmark_t *zb)
3228fa9e4066Sahrens {
3229fa9e4066Sahrens 	size_t size = BP_GET_LSIZE(bp);
323005b2b3b8Smishra 	void *data;
3231fa9e4066Sahrens 
3232fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
323305b2b3b8Smishra 	/*
323405b2b3b8Smishra 	 * Do not give too much work to vdev(s).
323505b2b3b8Smishra 	 */
323605b2b3b8Smishra 	while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) {
323705b2b3b8Smishra 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
323805b2b3b8Smishra 	}
3239fa9e4066Sahrens 	spa->spa_scrub_inflight++;
3240fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
3241fa9e4066Sahrens 
32420e8c6158Smaybee 	data = arc_data_buf_alloc(size);
324305b2b3b8Smishra 
	/* zb_level == -1 on a non-objset block identifies an intent log block. */
3244ea8dc4b6Seschrock 	if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
3245ea8dc4b6Seschrock 		flags |= ZIO_FLAG_SPECULATIVE;	/* intent log block */
3246ea8dc4b6Seschrock 
3247d80c45e0Sbonwick 	flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
3248ea8dc4b6Seschrock 
3249fa9e4066Sahrens 	zio_nowait(zio_read(NULL, spa, bp, data, size,
3250ea8dc4b6Seschrock 	    spa_scrub_io_done, NULL, priority, flags, zb));
3251fa9e4066Sahrens }
3252fa9e4066Sahrens 
/*
 * Traversal callback invoked for each block cache entry visited by the
 * scrub thread.  If the traversal itself hit an error, record it and
 * return ERESTART.  Otherwise account the examined bytes against each DVA's
 * vdev and, depending on spa_scrub_type, issue either a scrub read of
 * every block (POOL_SCRUB_EVERYTHING) or a resilver read of blocks whose
 * birth txg falls in a vdev's DTL (POOL_SCRUB_RESILVER).
 */
3253fa9e4066Sahrens /* ARGSUSED */
3254fa9e4066Sahrens static int
3255fa9e4066Sahrens spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a)
3256fa9e4066Sahrens {
3257fa9e4066Sahrens 	blkptr_t *bp = &bc->bc_blkptr;
325844cd46caSbillm 	vdev_t *vd = spa->spa_root_vdev;
325944cd46caSbillm 	dva_t *dva = bp->blk_dva;
326044cd46caSbillm 	int needs_resilver = B_FALSE;
326144cd46caSbillm 	int d;
3262fa9e4066Sahrens 
326344cd46caSbillm 	if (bc->bc_errno) {
3264fa9e4066Sahrens 		/*
3265fa9e4066Sahrens 		 * We can't scrub this block, but we can continue to scrub
3266fa9e4066Sahrens 		 * the rest of the pool.  Note the error and move along.
3267fa9e4066Sahrens 		 */
3268fa9e4066Sahrens 		mutex_enter(&spa->spa_scrub_lock);
3269fa9e4066Sahrens 		spa->spa_scrub_errors++;
3270fa9e4066Sahrens 		mutex_exit(&spa->spa_scrub_lock);
3271fa9e4066Sahrens 
327244cd46caSbillm 		mutex_enter(&vd->vdev_stat_lock);
327344cd46caSbillm 		vd->vdev_stat.vs_scrub_errors++;
327444cd46caSbillm 		mutex_exit(&vd->vdev_stat_lock);
3275fa9e4066Sahrens 
3276fa9e4066Sahrens 		return (ERESTART);
3277fa9e4066Sahrens 	}
3278fa9e4066Sahrens 
3279fa9e4066Sahrens 	ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg);
3280fa9e4066Sahrens 
328144cd46caSbillm 	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
328244cd46caSbillm 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]));
3283fa9e4066Sahrens 
328444cd46caSbillm 		ASSERT(vd != NULL);
328544cd46caSbillm 
328644cd46caSbillm 		/*
328744cd46caSbillm 		 * Keep track of how much data we've examined so that
328844cd46caSbillm 		 * zpool(1M) status can make useful progress reports.
328944cd46caSbillm 		 */
329044cd46caSbillm 		mutex_enter(&vd->vdev_stat_lock);
329144cd46caSbillm 		vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]);
329244cd46caSbillm 		mutex_exit(&vd->vdev_stat_lock);
329344cd46caSbillm 
329444cd46caSbillm 		if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) {
329544cd46caSbillm 			if (DVA_GET_GANG(&dva[d])) {
329644cd46caSbillm 				/*
329744cd46caSbillm 				 * Gang members may be spread across multiple
329844cd46caSbillm 				 * vdevs, so the best we can do is look at the
329944cd46caSbillm 				 * pool-wide DTL.
330044cd46caSbillm 				 * XXX -- it would be better to change our
330144cd46caSbillm 				 * allocation policy to ensure that this can't
330244cd46caSbillm 				 * happen.
330344cd46caSbillm 				 */
330444cd46caSbillm 				vd = spa->spa_root_vdev;
330544cd46caSbillm 			}
330644cd46caSbillm 			if (vdev_dtl_contains(&vd->vdev_dtl_map,
330744cd46caSbillm 			    bp->blk_birth, 1))
330844cd46caSbillm 				needs_resilver = B_TRUE;
3309fa9e4066Sahrens 		}
331044cd46caSbillm 	}
331144cd46caSbillm 
	/* One read per block, at scrub or resilver priority as appropriate. */
331244cd46caSbillm 	if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING)
3313fa9e4066Sahrens 		spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB,
3314ea8dc4b6Seschrock 		    ZIO_FLAG_SCRUB, &bc->bc_bookmark);
331544cd46caSbillm 	else if (needs_resilver)
331644cd46caSbillm 		spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER,
331744cd46caSbillm 		    ZIO_FLAG_RESILVER, &bc->bc_bookmark);
3318fa9e4066Sahrens 
3319fa9e4066Sahrens 	return (0);
3320fa9e4066Sahrens }
3321fa9e4066Sahrens 
/*
 * Body of the pool scrub/resilver thread.  Repeatedly drives the block
 * traversal (traverse_more() on spa_scrub_th) until it completes, is
 * stopped (spa_scrub_stop), or must restart (spa_scrub_restart_txg),
 * honoring suspend requests between passes.  On the way out it drains
 * outstanding scrub I/O, reassesses DTLs, rotates the error log, and
 * requests follow-up async work (detach of replaced devices, or a fresh
 * scrub/resilver if a restart was requested).
 */
3322fa9e4066Sahrens static void
3323fa9e4066Sahrens spa_scrub_thread(spa_t *spa)
3324fa9e4066Sahrens {
3325fa9e4066Sahrens 	callb_cpr_t cprinfo;
3326fa9e4066Sahrens 	traverse_handle_t *th = spa->spa_scrub_th;
3327fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
3328fa9e4066Sahrens 	pool_scrub_type_t scrub_type = spa->spa_scrub_type;
3329fa9e4066Sahrens 	int error = 0;
3330fa9e4066Sahrens 	boolean_t complete;
3331fa9e4066Sahrens 
3332fa9e4066Sahrens 	CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG);
3333fa9e4066Sahrens 
3334f0aa80d4Sbonwick 	/*
3335f0aa80d4Sbonwick 	 * If we're restarting due to a snapshot create/delete,
3336f0aa80d4Sbonwick 	 * wait for that to complete.
3337f0aa80d4Sbonwick 	 */
3338f0aa80d4Sbonwick 	txg_wait_synced(spa_get_dsl(spa), 0);
3339f0aa80d4Sbonwick 
3340ea8dc4b6Seschrock 	dprintf("start %s mintxg=%llu maxtxg=%llu\n",
3341ea8dc4b6Seschrock 	    scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
3342ea8dc4b6Seschrock 	    spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg);
3343ea8dc4b6Seschrock 
3344ea8dc4b6Seschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
3345ea8dc4b6Seschrock 	vdev_reopen(rvd);		/* purge all vdev caches */
3346fa9e4066Sahrens 	vdev_config_dirty(rvd);		/* rewrite all disk labels */
3347fa9e4066Sahrens 	vdev_scrub_stat_update(rvd, scrub_type, B_FALSE);
3348ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
3349fa9e4066Sahrens 
3350fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
3351fa9e4066Sahrens 	spa->spa_scrub_errors = 0;
3352fa9e4066Sahrens 	spa->spa_scrub_active = 1;
3353ea8dc4b6Seschrock 	ASSERT(spa->spa_scrub_inflight == 0);
3354fa9e4066Sahrens 
	/*
	 * Main loop: go idle while suspended, bail on a requested restart,
	 * otherwise run one traversal increment per iteration (with the
	 * scrub lock dropped) until traverse_more() stops returning EAGAIN.
	 */
3355fa9e4066Sahrens 	while (!spa->spa_scrub_stop) {
3356fa9e4066Sahrens 		CALLB_CPR_SAFE_BEGIN(&cprinfo);
3357ea8dc4b6Seschrock 		while (spa->spa_scrub_suspended) {
3358fa9e4066Sahrens 			spa->spa_scrub_active = 0;
3359fa9e4066Sahrens 			cv_broadcast(&spa->spa_scrub_cv);
3360fa9e4066Sahrens 			cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
3361fa9e4066Sahrens 			spa->spa_scrub_active = 1;
3362fa9e4066Sahrens 		}
3363fa9e4066Sahrens 		CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock);
3364fa9e4066Sahrens 
3365fa9e4066Sahrens 		if (spa->spa_scrub_restart_txg != 0)
3366fa9e4066Sahrens 			break;
3367fa9e4066Sahrens 
3368fa9e4066Sahrens 		mutex_exit(&spa->spa_scrub_lock);
3369fa9e4066Sahrens 		error = traverse_more(th);
3370fa9e4066Sahrens 		mutex_enter(&spa->spa_scrub_lock);
3371fa9e4066Sahrens 		if (error != EAGAIN)
3372fa9e4066Sahrens 			break;
3373fa9e4066Sahrens 	}
3374fa9e4066Sahrens 
	/* Drain all scrub I/O issued by spa_scrub_io_start(). */
3375fa9e4066Sahrens 	while (spa->spa_scrub_inflight)
3376fa9e4066Sahrens 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
3377fa9e4066Sahrens 
33785dabedeeSbonwick 	spa->spa_scrub_active = 0;
33795dabedeeSbonwick 	cv_broadcast(&spa->spa_scrub_cv);
33805dabedeeSbonwick 
33815dabedeeSbonwick 	mutex_exit(&spa->spa_scrub_lock);
33825dabedeeSbonwick 
33835dabedeeSbonwick 	spa_config_enter(spa, RW_WRITER, FTAG);
33845dabedeeSbonwick 
33855dabedeeSbonwick 	mutex_enter(&spa->spa_scrub_lock);
33865dabedeeSbonwick 
33875dabedeeSbonwick 	/*
33885dabedeeSbonwick 	 * Note: we check spa_scrub_restart_txg under both spa_scrub_lock
33895dabedeeSbonwick 	 * AND the spa config lock to synchronize with any config changes
33905dabedeeSbonwick 	 * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit().
33915dabedeeSbonwick 	 */
3392fa9e4066Sahrens 	if (spa->spa_scrub_restart_txg != 0)
3393fa9e4066Sahrens 		error = ERESTART;
3394fa9e4066Sahrens 
3395ea8dc4b6Seschrock 	if (spa->spa_scrub_stop)
3396ea8dc4b6Seschrock 		error = EINTR;
3397ea8dc4b6Seschrock 
3398fa9e4066Sahrens 	/*
3399ea8dc4b6Seschrock 	 * Even if there were uncorrectable errors, we consider the scrub
3400ea8dc4b6Seschrock 	 * completed.  The downside is that if there is a transient error during
3401ea8dc4b6Seschrock 	 * a resilver, we won't resilver the data properly to the target.  But
3402ea8dc4b6Seschrock 	 * if the damage is permanent (more likely) we will resilver forever,
3403ea8dc4b6Seschrock 	 * which isn't really acceptable.  Since there is enough information for
3404ea8dc4b6Seschrock 	 * the user to know what has failed and why, this seems like a more
3405ea8dc4b6Seschrock 	 * tractable approach.
3406fa9e4066Sahrens 	 */
3407ea8dc4b6Seschrock 	complete = (error == 0);
3408fa9e4066Sahrens 
3409ea8dc4b6Seschrock 	dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n",
3410ea8dc4b6Seschrock 	    scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
3411fa9e4066Sahrens 	    spa->spa_scrub_maxtxg, complete ? "done" : "FAILED",
3412fa9e4066Sahrens 	    error, spa->spa_scrub_errors, spa->spa_scrub_stop);
3413fa9e4066Sahrens 
3414fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
3415fa9e4066Sahrens 
3416fa9e4066Sahrens 	/*
3417fa9e4066Sahrens 	 * If the scrub/resilver completed, update all DTLs to reflect this.
3418fa9e4066Sahrens 	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
3419fa9e4066Sahrens 	 */
3420fa9e4066Sahrens 	vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1,
3421fa9e4066Sahrens 	    complete ? spa->spa_scrub_maxtxg : 0, B_TRUE);
3422fa9e4066Sahrens 	vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete);
3423ea8dc4b6Seschrock 	spa_errlog_rotate(spa);
34245dabedeeSbonwick 
34253d7072f8Seschrock 	if (scrub_type == POOL_SCRUB_RESILVER && complete)
34263d7072f8Seschrock 		spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH);
34273d7072f8Seschrock 
3428ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
3429fa9e4066Sahrens 
3430fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
3431fa9e4066Sahrens 
3432ea8dc4b6Seschrock 	/*
3433ea8dc4b6Seschrock 	 * We may have finished replacing a device.
3434ea8dc4b6Seschrock 	 * Let the async thread assess this and handle the detach.
3435ea8dc4b6Seschrock 	 */
34363d7072f8Seschrock 	spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3437fa9e4066Sahrens 
3438fa9e4066Sahrens 	/*
3439fa9e4066Sahrens 	 * If we were told to restart, our final act is to start a new scrub.
3440fa9e4066Sahrens 	 */
3441fa9e4066Sahrens 	if (error == ERESTART)
3442ea8dc4b6Seschrock 		spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ?
3443ea8dc4b6Seschrock 		    SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB);
3444fa9e4066Sahrens 
3445ea8dc4b6Seschrock 	spa->spa_scrub_type = POOL_SCRUB_NONE;
3446ea8dc4b6Seschrock 	spa->spa_scrub_active = 0;
3447ea8dc4b6Seschrock 	spa->spa_scrub_thread = NULL;
3448ea8dc4b6Seschrock 	cv_broadcast(&spa->spa_scrub_cv);
3449fa9e4066Sahrens 	CALLB_CPR_EXIT(&cprinfo);	/* drops &spa->spa_scrub_lock */
3450fa9e4066Sahrens 	thread_exit();
3451fa9e4066Sahrens }
3452fa9e4066Sahrens 
/*
 * Suspend any in-progress scrub/resilver and wait for it to quiesce.
 * Suspension is counted (calls may nest); each call must be balanced
 * by a spa_scrub_resume().  On return the scrub thread is inactive
 * and all outstanding scrub I/Os have drained.
 */
void
spa_scrub_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_suspended++;
	/*
	 * Wake the scrub thread in case it is blocked on the cv, and
	 * wait for it to notice the suspension and go inactive.
	 */
	while (spa->spa_scrub_active) {
		cv_broadcast(&spa->spa_scrub_cv);
		cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
	}
	/* Wait for all scrub I/Os already issued to complete. */
	while (spa->spa_scrub_inflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	mutex_exit(&spa->spa_scrub_lock);
}
3466fa9e4066Sahrens 
3467fa9e4066Sahrens void
3468fa9e4066Sahrens spa_scrub_resume(spa_t *spa)
3469fa9e4066Sahrens {
3470fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
3471ea8dc4b6Seschrock 	ASSERT(spa->spa_scrub_suspended != 0);
3472ea8dc4b6Seschrock 	if (--spa->spa_scrub_suspended == 0)
3473fa9e4066Sahrens 		cv_broadcast(&spa->spa_scrub_cv);
3474fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
3475fa9e4066Sahrens }
3476fa9e4066Sahrens 
/*
 * Record that any in-progress scrub must restart from 'txg'.  The
 * scrub thread itself notices spa_scrub_restart_txg and reacts; no
 * waiting is done here.
 */
void
spa_scrub_restart(spa_t *spa, uint64_t txg)
{
	/*
	 * Something happened (e.g. snapshot create/delete) that means
	 * we must restart any in-progress scrubs.  The itinerary will
	 * fix this properly.
	 */
	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_restart_txg = txg;
	mutex_exit(&spa->spa_scrub_lock);
}
3489fa9e4066Sahrens 
/*
 * Start (or stop) a scrub of the given type.
 *
 * Callers must hold spa_namespace_lock and must not hold the config
 * lock as writer.  Any scrub already running is stopped first (a
 * resilver is only stopped when 'force' is set — otherwise EBUSY).
 * The requested type may be adjusted based on the pool-wide DTL:
 * a plain scrub upgrades to a resilver when the DTL is non-empty,
 * and a resilver degenerates to POOL_SCRUB_NONE when there is
 * nothing to resilver.  Returns 0 on success, ENOTSUP for an
 * unknown type, or EBUSY as above.
 */
int
spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force)
{
	space_seg_t *ss;
	uint64_t mintxg, maxtxg;
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(!spa_config_held(spa, RW_WRITER));

	if ((uint_t)type >= POOL_SCRUB_TYPES)
		return (ENOTSUP);

	mutex_enter(&spa->spa_scrub_lock);

	/*
	 * If there's a scrub or resilver already in progress, stop it.
	 */
	while (spa->spa_scrub_thread != NULL) {
		/*
		 * Don't stop a resilver unless forced.
		 */
		if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) {
			mutex_exit(&spa->spa_scrub_lock);
			return (EBUSY);
		}
		spa->spa_scrub_stop = 1;
		cv_broadcast(&spa->spa_scrub_cv);
		cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
	}

	/*
	 * Terminate the previous traverse.
	 */
	if (spa->spa_scrub_th != NULL) {
		traverse_fini(spa->spa_scrub_th);
		spa->spa_scrub_th = NULL;
	}

	/*
	 * No root vdev means no pool state to scrub; with no prior
	 * scrub thread the bookkeeping must already be quiescent.
	 */
	if (rvd == NULL) {
		ASSERT(spa->spa_scrub_stop == 0);
		ASSERT(spa->spa_scrub_type == type);
		ASSERT(spa->spa_scrub_restart_txg == 0);
		mutex_exit(&spa->spa_scrub_lock);
		return (0);
	}

	/* Default bounds: everything up to the last synced txg. */
	mintxg = TXG_INITIAL - 1;
	maxtxg = spa_last_synced_txg(spa) + 1;

	mutex_enter(&rvd->vdev_dtl_lock);

	if (rvd->vdev_dtl_map.sm_space == 0) {
		/*
		 * The pool-wide DTL is empty.
		 * If this is a resilver, there's nothing to do except
		 * check whether any in-progress replacements have completed.
		 */
		if (type == POOL_SCRUB_RESILVER) {
			type = POOL_SCRUB_NONE;
			spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
		}
	} else {
		/*
		 * The pool-wide DTL is non-empty.
		 * If this is a normal scrub, upgrade to a resilver instead.
		 */
		if (type == POOL_SCRUB_EVERYTHING)
			type = POOL_SCRUB_RESILVER;
	}

	if (type == POOL_SCRUB_RESILVER) {
		/*
		 * Determine the resilvering boundaries.
		 *
		 * Note: (mintxg, maxtxg) is an open interval,
		 * i.e. mintxg and maxtxg themselves are not included.
		 *
		 * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1
		 * so we don't claim to resilver a txg that's still changing.
		 */
		ss = avl_first(&rvd->vdev_dtl_map.sm_root);
		mintxg = ss->ss_start - 1;
		ss = avl_last(&rvd->vdev_dtl_map.sm_root);
		maxtxg = MIN(ss->ss_end, maxtxg);

		spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
	}

	mutex_exit(&rvd->vdev_dtl_lock);

	/* Publish the new scrub state. */
	spa->spa_scrub_stop = 0;
	spa->spa_scrub_type = type;
	spa->spa_scrub_restart_txg = 0;

	if (type != POOL_SCRUB_NONE) {
		/* Set up the traverse handle and launch the scrub thread. */
		spa->spa_scrub_mintxg = mintxg;
		spa->spa_scrub_maxtxg = maxtxg;
		spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL,
		    ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL,
		    ZIO_FLAG_CANFAIL);
		traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg);
		spa->spa_scrub_thread = thread_create(NULL, 0,
		    spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri);
	}

	mutex_exit(&spa->spa_scrub_lock);

	return (0);
}
3600fa9e4066Sahrens 
3601ea8dc4b6Seschrock /*
3602ea8dc4b6Seschrock  * ==========================================================================
3603ea8dc4b6Seschrock  * SPA async task processing
3604ea8dc4b6Seschrock  * ==========================================================================
3605ea8dc4b6Seschrock  */
3606ea8dc4b6Seschrock 
3607ea8dc4b6Seschrock static void
36083d7072f8Seschrock spa_async_remove(spa_t *spa, vdev_t *vd)
3609fa9e4066Sahrens {
3610ea8dc4b6Seschrock 	vdev_t *tvd;
3611ea8dc4b6Seschrock 	int c;
3612fa9e4066Sahrens 
36133d7072f8Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
36143d7072f8Seschrock 		tvd = vd->vdev_child[c];
36153d7072f8Seschrock 		if (tvd->vdev_remove_wanted) {
36163d7072f8Seschrock 			tvd->vdev_remove_wanted = 0;
36173d7072f8Seschrock 			vdev_set_state(tvd, B_FALSE, VDEV_STATE_REMOVED,
36183d7072f8Seschrock 			    VDEV_AUX_NONE);
36190a4e9518Sgw 			vdev_clear(spa, tvd, B_TRUE);
36203d7072f8Seschrock 			vdev_config_dirty(tvd->vdev_top);
3621ea8dc4b6Seschrock 		}
36223d7072f8Seschrock 		spa_async_remove(spa, tvd);
3623ea8dc4b6Seschrock 	}
3624ea8dc4b6Seschrock }
3625fa9e4066Sahrens 
/*
 * Worker thread for asynchronous SPA maintenance tasks.  Snapshots
 * and clears the pending task mask, handles each requested task in
 * turn, then clears spa_async_thread, wakes waiters, and exits.
 * Only one instance runs at a time; see spa_async_dispatch().
 */
static void
spa_async_thread(spa_t *spa)
{
	int tasks;
	uint64_t txg;

	ASSERT(spa->spa_sync_on);

	/* Atomically claim everything queued so far. */
	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks = 0;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if the config needs to be updated.
	 */
	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
		mutex_enter(&spa_namespace_lock);
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * See if any devices need to be marked REMOVED.
	 *
	 * XXX - We avoid doing this when we are in
	 * I/O failure state since spa_vdev_enter() grabs
	 * the namespace lock and would not be able to obtain
	 * the writer config lock.
	 */
	if (tasks & SPA_ASYNC_REMOVE &&
	    spa_state(spa) != POOL_STATE_IO_FAILURE) {
		txg = spa_vdev_enter(spa);
		spa_async_remove(spa, spa->spa_root_vdev);
		(void) spa_vdev_exit(spa, NULL, txg, 0);
	}

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_RESILVER_DONE)
		spa_vdev_resilver_done(spa);

	/*
	 * Kick off a scrub.  When starting a RESILVER scrub (or an EVERYTHING
	 * scrub which can become a resilver), we need to hold
	 * spa_namespace_lock() because the sysevent we post via
	 * spa_event_notify() needs to get the name of the pool.
	 */
	if (tasks & SPA_ASYNC_SCRUB) {
		mutex_enter(&spa_namespace_lock);
		VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER) {
		mutex_enter(&spa_namespace_lock);
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}
3699ea8dc4b6Seschrock 
/*
 * Block new async task processing and wait for the current async
 * thread, if any, to finish.  Suspension is counted (calls may
 * nest); each call must be balanced by spa_async_resume().
 */
void
spa_async_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_suspended++;
	/* spa_async_thread() broadcasts spa_async_cv on exit. */
	while (spa->spa_async_thread != NULL)
		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
	mutex_exit(&spa->spa_async_lock);
}
3709ea8dc4b6Seschrock 
/*
 * Drop one level of async suspension.  Pending tasks are not started
 * here; they are picked up by the next spa_async_dispatch(), which
 * only runs when spa_async_suspended reaches zero.
 */
void
spa_async_resume(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	ASSERT(spa->spa_async_suspended != 0);
	spa->spa_async_suspended--;
	mutex_exit(&spa->spa_async_lock);
}
3718ea8dc4b6Seschrock 
/*
 * Spawn the async worker thread when there is queued work and nothing
 * prevents it: tasks are pending, we are not suspended, and no worker
 * already exists.  The rootdir writability check presumably avoids
 * async work (e.g. config cache updates) while the root filesystem is
 * absent or read-only — NOTE(review): confirm against
 * spa_config_update() requirements.
 */
static void
spa_async_dispatch(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
	    spa->spa_async_thread == NULL &&
	    rootdir != NULL && !vn_is_readonly(rootdir))
		spa->spa_async_thread = thread_create(NULL, 0,
		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&spa->spa_async_lock);
}
3730ea8dc4b6Seschrock 
/*
 * Queue one or more SPA_ASYNC_* tasks ('task' is a bitmask) for the
 * async thread.  The work is actually started later, by
 * spa_async_dispatch().
 */
void
spa_async_request(spa_t *spa, int task)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_tasks |= task;
	mutex_exit(&spa->spa_async_lock);
}
3738fa9e4066Sahrens 
3739fa9e4066Sahrens /*
3740fa9e4066Sahrens  * ==========================================================================
3741fa9e4066Sahrens  * SPA syncing routines
3742fa9e4066Sahrens  * ==========================================================================
3743fa9e4066Sahrens  */
3744fa9e4066Sahrens 
3745fa9e4066Sahrens static void
3746fa9e4066Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
3747fa9e4066Sahrens {
3748fa9e4066Sahrens 	bplist_t *bpl = &spa->spa_sync_bplist;
3749fa9e4066Sahrens 	dmu_tx_t *tx;
3750fa9e4066Sahrens 	blkptr_t blk;
3751fa9e4066Sahrens 	uint64_t itor = 0;
3752fa9e4066Sahrens 	zio_t *zio;
3753fa9e4066Sahrens 	int error;
3754fa9e4066Sahrens 	uint8_t c = 1;
3755fa9e4066Sahrens 
3756fa9e4066Sahrens 	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD);
3757fa9e4066Sahrens 
3758fa9e4066Sahrens 	while (bplist_iterate(bpl, &itor, &blk) == 0)
3759fa9e4066Sahrens 		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL));
3760fa9e4066Sahrens 
3761fa9e4066Sahrens 	error = zio_wait(zio);
3762fa9e4066Sahrens 	ASSERT3U(error, ==, 0);
3763fa9e4066Sahrens 
3764fa9e4066Sahrens 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3765fa9e4066Sahrens 	bplist_vacate(bpl, tx);
3766fa9e4066Sahrens 
3767fa9e4066Sahrens 	/*
3768fa9e4066Sahrens 	 * Pre-dirty the first block so we sync to convergence faster.
3769fa9e4066Sahrens 	 * (Usually only the first block is needed.)
3770fa9e4066Sahrens 	 */
3771fa9e4066Sahrens 	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
3772fa9e4066Sahrens 	dmu_tx_commit(tx);
3773fa9e4066Sahrens }
3774fa9e4066Sahrens 
3775fa9e4066Sahrens static void
377699653d4eSeschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3777fa9e4066Sahrens {
3778fa9e4066Sahrens 	char *packed = NULL;
3779fa9e4066Sahrens 	size_t nvsize = 0;
3780fa9e4066Sahrens 	dmu_buf_t *db;
3781fa9e4066Sahrens 
378299653d4eSeschrock 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3783fa9e4066Sahrens 
3784fa9e4066Sahrens 	packed = kmem_alloc(nvsize, KM_SLEEP);
3785fa9e4066Sahrens 
378699653d4eSeschrock 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3787ea8dc4b6Seschrock 	    KM_SLEEP) == 0);
3788fa9e4066Sahrens 
378999653d4eSeschrock 	dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx);
3790fa9e4066Sahrens 
3791fa9e4066Sahrens 	kmem_free(packed, nvsize);
3792fa9e4066Sahrens 
379399653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3794fa9e4066Sahrens 	dmu_buf_will_dirty(db, tx);
3795fa9e4066Sahrens 	*(uint64_t *)db->db_data = nvsize;
3796ea8dc4b6Seschrock 	dmu_buf_rele(db, FTAG);
3797fa9e4066Sahrens }
3798fa9e4066Sahrens 
379999653d4eSeschrock static void
3800fa94a07fSbrendan spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3801fa94a07fSbrendan     const char *config, const char *entry)
380299653d4eSeschrock {
380399653d4eSeschrock 	nvlist_t *nvroot;
3804fa94a07fSbrendan 	nvlist_t **list;
380599653d4eSeschrock 	int i;
380699653d4eSeschrock 
3807fa94a07fSbrendan 	if (!sav->sav_sync)
380899653d4eSeschrock 		return;
380999653d4eSeschrock 
381099653d4eSeschrock 	/*
3811fa94a07fSbrendan 	 * Update the MOS nvlist describing the list of available devices.
3812fa94a07fSbrendan 	 * spa_validate_aux() will have already made sure this nvlist is
38133d7072f8Seschrock 	 * valid and the vdevs are labeled appropriately.
381499653d4eSeschrock 	 */
3815fa94a07fSbrendan 	if (sav->sav_object == 0) {
3816fa94a07fSbrendan 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3817fa94a07fSbrendan 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3818fa94a07fSbrendan 		    sizeof (uint64_t), tx);
381999653d4eSeschrock 		VERIFY(zap_update(spa->spa_meta_objset,
3820fa94a07fSbrendan 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3821fa94a07fSbrendan 		    &sav->sav_object, tx) == 0);
382299653d4eSeschrock 	}
382399653d4eSeschrock 
382499653d4eSeschrock 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3825fa94a07fSbrendan 	if (sav->sav_count == 0) {
3826fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
382799653d4eSeschrock 	} else {
3828fa94a07fSbrendan 		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3829fa94a07fSbrendan 		for (i = 0; i < sav->sav_count; i++)
3830fa94a07fSbrendan 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
3831fa94a07fSbrendan 			    B_FALSE, B_FALSE, B_TRUE);
3832fa94a07fSbrendan 		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
3833fa94a07fSbrendan 		    sav->sav_count) == 0);
3834fa94a07fSbrendan 		for (i = 0; i < sav->sav_count; i++)
3835fa94a07fSbrendan 			nvlist_free(list[i]);
3836fa94a07fSbrendan 		kmem_free(list, sav->sav_count * sizeof (void *));
383799653d4eSeschrock 	}
383899653d4eSeschrock 
3839fa94a07fSbrendan 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
384006eeb2adSek 	nvlist_free(nvroot);
384199653d4eSeschrock 
3842fa94a07fSbrendan 	sav->sav_sync = B_FALSE;
384399653d4eSeschrock }
384499653d4eSeschrock 
384599653d4eSeschrock static void
384699653d4eSeschrock spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
384799653d4eSeschrock {
384899653d4eSeschrock 	nvlist_t *config;
384999653d4eSeschrock 
385099653d4eSeschrock 	if (list_is_empty(&spa->spa_dirty_list))
385199653d4eSeschrock 		return;
385299653d4eSeschrock 
385399653d4eSeschrock 	config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE);
385499653d4eSeschrock 
385599653d4eSeschrock 	if (spa->spa_config_syncing)
385699653d4eSeschrock 		nvlist_free(spa->spa_config_syncing);
385799653d4eSeschrock 	spa->spa_config_syncing = config;
385899653d4eSeschrock 
385999653d4eSeschrock 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
386099653d4eSeschrock }
386199653d4eSeschrock 
/*
 * Set zpool properties.
 *
 * Sync-task callback (arg1 = spa_t *, arg2 = nvlist of properties):
 * applies each property in the list to the in-core spa and, for
 * persistent properties, to the pool-props ZAP object in the MOS.
 */
static void
spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	spa_t *spa = arg1;
	objset_t *mos = spa->spa_meta_objset;
	nvlist_t *nvp = arg2;
	nvpair_t *elem;
	uint64_t intval;
	char *strval, *slash;
	zpool_prop_t prop;
	const char *propname;
	zprop_type_t proptype;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPOOL_PROP_VERSION:
			/*
			 * Only set version for non-zpool-creation cases
			 * (set/import). spa_create() needs special care
			 * for version setting.
			 */
			if (tx->tx_txg != TXG_INITIAL) {
				VERIFY(nvpair_value_uint64(elem,
				    &intval) == 0);
				/* Upgrades only, and never past SPA_VERSION. */
				ASSERT(intval <= SPA_VERSION);
				ASSERT(intval >= spa_version(spa));
				spa->spa_uberblock.ub_version = intval;
				vdev_config_dirty(spa->spa_root_vdev);
			}
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property. It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'cachefile' is a non-persistent property, but note
			 * an async request that the config cache needs to be
			 * updated.
			 */
			VERIFY(nvpair_value_string(elem, &strval) == 0);
			/* Release any previously-set location first. */
			if (spa->spa_config_dir)
				spa_strfree(spa->spa_config_dir);
			if (spa->spa_config_file)
				spa_strfree(spa->spa_config_file);

			if (strval[0] == '\0') {
				/* Empty string: use the default cache file. */
				spa->spa_config_dir = NULL;
				spa->spa_config_file = NULL;
			} else if (strcmp(strval, "none") == 0) {
				/* "none": don't cache this pool's config. */
				spa->spa_config_dir = spa_strdup(strval);
				spa->spa_config_file = NULL;
			} else {
				/*
				 * If the cachefile is in the root directory,
				 * we will end up with an empty string for
				 * spa_config_dir.  This value is only ever
				 * used when concatenated with '/', so an empty
				 * string still behaves correctly and keeps the
				 * rest of the code simple.
				 */
				slash = strrchr(strval, '/');
				ASSERT(slash != NULL);
				*slash = '\0';
				/*
				 * The default location is stored as NULLs,
				 * so detect it and normalize.
				 */
				if (strcmp(strval, spa_config_dir) == 0 &&
				    strcmp(slash + 1, ZPOOL_CACHE_FILE) == 0) {
					spa->spa_config_dir = NULL;
					spa->spa_config_file = NULL;
				} else {
					spa->spa_config_dir =
					    spa_strdup(strval);
					spa->spa_config_file =
					    spa_strdup(slash + 1);
				}
			}
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
			break;
		default:
			/*
			 * Set pool property values in the poolprops mos object.
			 * Create that object lazily on first use.
			 */
			mutex_enter(&spa->spa_props_lock);
			if (spa->spa_pool_props_object == 0) {
				/* NOTE: shadows the outer 'mos' (same value). */
				objset_t *mos = spa->spa_meta_objset;

				VERIFY((spa->spa_pool_props_object =
				    zap_create(mos, DMU_OT_POOL_PROPS,
				    DMU_OT_NONE, 0, tx)) > 0);

				VERIFY(zap_update(mos,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    8, 1, &spa->spa_pool_props_object, tx)
				    == 0);
			}
			mutex_exit(&spa->spa_props_lock);

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				VERIFY(nvpair_value_string(elem, &strval) == 0);
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx) == 0);

			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				VERIFY(nvpair_value_uint64(elem, &intval) == 0);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					/* Reject values with no index name. */
					VERIFY(zpool_prop_index_to_string(
					    prop, intval, &unused) == 0);
				}
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx) == 0);
			} else {
				ASSERT(0); /* not allowed */
			}

			/* Mirror selected properties into the in-core spa. */
			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			default:
				break;
			}
		}

		/*
		 * log internal history if this is not a zpool create
		 *
		 * NOTE(review): 'intval' is logged for every property,
		 * including string-valued ones where it may hold a stale
		 * or uninitialized value — confirm this is intended.
		 */
		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
		    tx->tx_txg != TXG_INITIAL) {
			spa_history_internal_log(LOG_POOL_PROPSET,
			    spa, tx, cr, "%s %lld %s",
			    nvpair_name(elem), intval, spa->spa_name);
		}
	}
}
4017b1b8ab34Slling 
4018fa9e4066Sahrens /*
4019fa9e4066Sahrens  * Sync the specified transaction group.  New blocks may be dirtied as
4020fa9e4066Sahrens  * part of the process, so we iterate until it converges.
4021fa9e4066Sahrens  */
4022fa9e4066Sahrens void
4023fa9e4066Sahrens spa_sync(spa_t *spa, uint64_t txg)
4024fa9e4066Sahrens {
4025fa9e4066Sahrens 	dsl_pool_t *dp = spa->spa_dsl_pool;
4026fa9e4066Sahrens 	objset_t *mos = spa->spa_meta_objset;
4027fa9e4066Sahrens 	bplist_t *bpl = &spa->spa_sync_bplist;
40280373e76bSbonwick 	vdev_t *rvd = spa->spa_root_vdev;
4029fa9e4066Sahrens 	vdev_t *vd;
403017f17c2dSbonwick 	vdev_t *svd[SPA_DVAS_PER_BP];
403117f17c2dSbonwick 	int svdcount = 0;
4032fa9e4066Sahrens 	dmu_tx_t *tx;
4033fa9e4066Sahrens 	int dirty_vdevs;
4034fa9e4066Sahrens 
4035fa9e4066Sahrens 	/*
4036fa9e4066Sahrens 	 * Lock out configuration changes.
4037fa9e4066Sahrens 	 */
4038ea8dc4b6Seschrock 	spa_config_enter(spa, RW_READER, FTAG);
4039fa9e4066Sahrens 
4040fa9e4066Sahrens 	spa->spa_syncing_txg = txg;
4041fa9e4066Sahrens 	spa->spa_sync_pass = 0;
4042fa9e4066Sahrens 
4043ea8dc4b6Seschrock 	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
4044fa9e4066Sahrens 
404599653d4eSeschrock 	tx = dmu_tx_create_assigned(dp, txg);
404699653d4eSeschrock 
404799653d4eSeschrock 	/*
4048e7437265Sahrens 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
404999653d4eSeschrock 	 * set spa_deflate if we have no raid-z vdevs.
405099653d4eSeschrock 	 */
4051e7437265Sahrens 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
4052e7437265Sahrens 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
405399653d4eSeschrock 		int i;
405499653d4eSeschrock 
405599653d4eSeschrock 		for (i = 0; i < rvd->vdev_children; i++) {
405699653d4eSeschrock 			vd = rvd->vdev_child[i];
405799653d4eSeschrock 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
405899653d4eSeschrock 				break;
405999653d4eSeschrock 		}
406099653d4eSeschrock 		if (i == rvd->vdev_children) {
406199653d4eSeschrock 			spa->spa_deflate = TRUE;
406299653d4eSeschrock 			VERIFY(0 == zap_add(spa->spa_meta_objset,
406399653d4eSeschrock 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
406499653d4eSeschrock 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
406599653d4eSeschrock 		}
406699653d4eSeschrock 	}
406799653d4eSeschrock 
4068fa9e4066Sahrens 	/*
4069fa9e4066Sahrens 	 * If anything has changed in this txg, push the deferred frees
4070fa9e4066Sahrens 	 * from the previous txg.  If not, leave them alone so that we
4071fa9e4066Sahrens 	 * don't generate work on an otherwise idle system.
4072fa9e4066Sahrens 	 */
4073fa9e4066Sahrens 	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
40741615a317Sek 	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
40751615a317Sek 	    !txg_list_empty(&dp->dp_sync_tasks, txg))
4076fa9e4066Sahrens 		spa_sync_deferred_frees(spa, txg);
4077fa9e4066Sahrens 
4078fa9e4066Sahrens 	/*
4079fa9e4066Sahrens 	 * Iterate to convergence.
4080fa9e4066Sahrens 	 */
4081fa9e4066Sahrens 	do {
4082fa9e4066Sahrens 		spa->spa_sync_pass++;
4083fa9e4066Sahrens 
4084fa9e4066Sahrens 		spa_sync_config_object(spa, tx);
4085fa94a07fSbrendan 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
4086fa94a07fSbrendan 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
4087fa94a07fSbrendan 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
4088fa94a07fSbrendan 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
4089ea8dc4b6Seschrock 		spa_errlog_sync(spa, txg);
4090fa9e4066Sahrens 		dsl_pool_sync(dp, txg);
4091fa9e4066Sahrens 
4092fa9e4066Sahrens 		dirty_vdevs = 0;
4093fa9e4066Sahrens 		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
4094fa9e4066Sahrens 			vdev_sync(vd, txg);
4095fa9e4066Sahrens 			dirty_vdevs++;
4096fa9e4066Sahrens 		}
4097fa9e4066Sahrens 
4098fa9e4066Sahrens 		bplist_sync(bpl, tx);
4099fa9e4066Sahrens 	} while (dirty_vdevs);
4100fa9e4066Sahrens 
4101fa9e4066Sahrens 	bplist_close(bpl);
4102fa9e4066Sahrens 
4103fa9e4066Sahrens 	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
4104fa9e4066Sahrens 
4105fa9e4066Sahrens 	/*
4106fa9e4066Sahrens 	 * Rewrite the vdev configuration (which includes the uberblock)
4107fa9e4066Sahrens 	 * to commit the transaction group.
41080373e76bSbonwick 	 *
410917f17c2dSbonwick 	 * If there are no dirty vdevs, we sync the uberblock to a few
411017f17c2dSbonwick 	 * random top-level vdevs that are known to be visible in the
411117f17c2dSbonwick 	 * config cache (see spa_vdev_add() for details).  If there *are*
411217f17c2dSbonwick 	 * dirty vdevs -- or if the sync to our random subset fails --
411317f17c2dSbonwick 	 * then sync the uberblock to all vdevs.
41140373e76bSbonwick 	 */
411517f17c2dSbonwick 	if (list_is_empty(&spa->spa_dirty_list)) {
41160373e76bSbonwick 		int children = rvd->vdev_children;
41170373e76bSbonwick 		int c0 = spa_get_random(children);
41180373e76bSbonwick 		int c;
41190373e76bSbonwick 
41200373e76bSbonwick 		for (c = 0; c < children; c++) {
41210373e76bSbonwick 			vd = rvd->vdev_child[(c0 + c) % children];
412217f17c2dSbonwick 			if (vd->vdev_ms_array == 0 || vd->vdev_islog)
41230373e76bSbonwick 				continue;
412417f17c2dSbonwick 			svd[svdcount++] = vd;
412517f17c2dSbonwick 			if (svdcount == SPA_DVAS_PER_BP)
41260373e76bSbonwick 				break;
41270373e76bSbonwick 		}
41280373e76bSbonwick 	}
412917f17c2dSbonwick 	if (svdcount == 0 || vdev_config_sync(svd, svdcount, txg) != 0)
413017f17c2dSbonwick 		VERIFY3U(vdev_config_sync(rvd->vdev_child,
413117f17c2dSbonwick 		    rvd->vdev_children, txg), ==, 0);
41320373e76bSbonwick 
413399653d4eSeschrock 	dmu_tx_commit(tx);
413499653d4eSeschrock 
41350373e76bSbonwick 	/*
41360373e76bSbonwick 	 * Clear the dirty config list.
4137fa9e4066Sahrens 	 */
41380373e76bSbonwick 	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
41390373e76bSbonwick 		vdev_config_clean(vd);
41400373e76bSbonwick 
41410373e76bSbonwick 	/*
41420373e76bSbonwick 	 * Now that the new config has synced transactionally,
41430373e76bSbonwick 	 * let it become visible to the config cache.
41440373e76bSbonwick 	 */
41450373e76bSbonwick 	if (spa->spa_config_syncing != NULL) {
41460373e76bSbonwick 		spa_config_set(spa, spa->spa_config_syncing);
41470373e76bSbonwick 		spa->spa_config_txg = txg;
41480373e76bSbonwick 		spa->spa_config_syncing = NULL;
41490373e76bSbonwick 	}
4150fa9e4066Sahrens 
4151fa9e4066Sahrens 	/*
4152fa9e4066Sahrens 	 * Make a stable copy of the fully synced uberblock.
4153fa9e4066Sahrens 	 * We use this as the root for pool traversals.
4154fa9e4066Sahrens 	 */
4155fa9e4066Sahrens 	spa->spa_traverse_wanted = 1;	/* tells traverse_more() to stop */
4156fa9e4066Sahrens 
4157fa9e4066Sahrens 	spa_scrub_suspend(spa);		/* stop scrubbing and finish I/Os */
4158fa9e4066Sahrens 
4159fa9e4066Sahrens 	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
4160fa9e4066Sahrens 	spa->spa_traverse_wanted = 0;
4161fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
4162fa9e4066Sahrens 	rw_exit(&spa->spa_traverse_lock);
4163fa9e4066Sahrens 
4164fa9e4066Sahrens 	spa_scrub_resume(spa);		/* resume scrub with new ubsync */
4165fa9e4066Sahrens 
4166fa9e4066Sahrens 	/*
4167fa9e4066Sahrens 	 * Clean up the ZIL records for the synced txg.
4168fa9e4066Sahrens 	 */
4169fa9e4066Sahrens 	dsl_pool_zil_clean(dp);
4170fa9e4066Sahrens 
4171fa9e4066Sahrens 	/*
4172fa9e4066Sahrens 	 * Update usable space statistics.
4173fa9e4066Sahrens 	 */
4174fa9e4066Sahrens 	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
4175fa9e4066Sahrens 		vdev_sync_done(vd, txg);
4176fa9e4066Sahrens 
4177fa9e4066Sahrens 	/*
4178fa9e4066Sahrens 	 * It had better be the case that we didn't dirty anything
417999653d4eSeschrock 	 * since vdev_config_sync().
4180fa9e4066Sahrens 	 */
4181fa9e4066Sahrens 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4182fa9e4066Sahrens 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4183fa9e4066Sahrens 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4184fa9e4066Sahrens 	ASSERT(bpl->bpl_queue == NULL);
4185fa9e4066Sahrens 
4186ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
4187ea8dc4b6Seschrock 
4188ea8dc4b6Seschrock 	/*
4189ea8dc4b6Seschrock 	 * If any async tasks have been requested, kick them off.
4190ea8dc4b6Seschrock 	 */
4191ea8dc4b6Seschrock 	spa_async_dispatch(spa);
4192fa9e4066Sahrens }
4193fa9e4066Sahrens 
4194fa9e4066Sahrens /*
4195fa9e4066Sahrens  * Sync all pools.  We don't want to hold the namespace lock across these
4196fa9e4066Sahrens  * operations, so we take a reference on the spa_t and drop the lock during the
4197fa9e4066Sahrens  * sync.
4198fa9e4066Sahrens  */
4199fa9e4066Sahrens void
4200fa9e4066Sahrens spa_sync_allpools(void)
4201fa9e4066Sahrens {
4202fa9e4066Sahrens 	spa_t *spa = NULL;
4203fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
4204fa9e4066Sahrens 	while ((spa = spa_next(spa)) != NULL) {
4205fa9e4066Sahrens 		if (spa_state(spa) != POOL_STATE_ACTIVE)
4206fa9e4066Sahrens 			continue;
4207fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
4208fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
4209fa9e4066Sahrens 		txg_wait_synced(spa_get_dsl(spa), 0);
4210fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
4211fa9e4066Sahrens 		spa_close(spa, FTAG);
4212fa9e4066Sahrens 	}
4213fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
4214fa9e4066Sahrens }
4215fa9e4066Sahrens 
4216fa9e4066Sahrens /*
4217fa9e4066Sahrens  * ==========================================================================
4218fa9e4066Sahrens  * Miscellaneous routines
4219fa9e4066Sahrens  * ==========================================================================
4220fa9e4066Sahrens  */
4221fa9e4066Sahrens 
/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.  The open
		 * reference taken first keeps this spa_t alive across the
		 * window where the lock is released.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		/* Terminate any in-progress scrub before tearing down. */
		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
		spa_close(spa, FTAG);

		/*
		 * Dereferencing spa after spa_close() is safe here because
		 * spa_namespace_lock is still held; the reference was only
		 * needed across the lock drop above.  Unload/deactivate any
		 * pool that is still initialized before removing it from
		 * the namespace.
		 */
		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}
4256ea8dc4b6Seschrock 
4257ea8dc4b6Seschrock vdev_t *
4258ea8dc4b6Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid)
4259ea8dc4b6Seschrock {
4260ea8dc4b6Seschrock 	return (vdev_lookup_by_guid(spa->spa_root_vdev, guid));
4261ea8dc4b6Seschrock }
4262eaca9bbdSeschrock 
4263eaca9bbdSeschrock void
4264990b4856Slling spa_upgrade(spa_t *spa, uint64_t version)
4265eaca9bbdSeschrock {
4266eaca9bbdSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
4267eaca9bbdSeschrock 
4268eaca9bbdSeschrock 	/*
4269eaca9bbdSeschrock 	 * This should only be called for a non-faulted pool, and since a
4270eaca9bbdSeschrock 	 * future version would result in an unopenable pool, this shouldn't be
4271eaca9bbdSeschrock 	 * possible.
4272eaca9bbdSeschrock 	 */
4273e7437265Sahrens 	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4274990b4856Slling 	ASSERT(version >= spa->spa_uberblock.ub_version);
4275eaca9bbdSeschrock 
4276990b4856Slling 	spa->spa_uberblock.ub_version = version;
4277eaca9bbdSeschrock 	vdev_config_dirty(spa->spa_root_vdev);
4278eaca9bbdSeschrock 
4279eaca9bbdSeschrock 	spa_config_exit(spa, FTAG);
428099653d4eSeschrock 
428199653d4eSeschrock 	txg_wait_synced(spa_get_dsl(spa), 0);
428299653d4eSeschrock }
428399653d4eSeschrock 
428499653d4eSeschrock boolean_t
428599653d4eSeschrock spa_has_spare(spa_t *spa, uint64_t guid)
428699653d4eSeschrock {
428799653d4eSeschrock 	int i;
428839c23413Seschrock 	uint64_t spareguid;
4289fa94a07fSbrendan 	spa_aux_vdev_t *sav = &spa->spa_spares;
429099653d4eSeschrock 
4291fa94a07fSbrendan 	for (i = 0; i < sav->sav_count; i++)
4292fa94a07fSbrendan 		if (sav->sav_vdevs[i]->vdev_guid == guid)
429399653d4eSeschrock 			return (B_TRUE);
429499653d4eSeschrock 
4295fa94a07fSbrendan 	for (i = 0; i < sav->sav_npending; i++) {
4296fa94a07fSbrendan 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4297fa94a07fSbrendan 		    &spareguid) == 0 && spareguid == guid)
429839c23413Seschrock 			return (B_TRUE);
429939c23413Seschrock 	}
430039c23413Seschrock 
430199653d4eSeschrock 	return (B_FALSE);
4302eaca9bbdSeschrock }
4303b1b8ab34Slling 
/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t		*ev;
	sysevent_attr_list_t	*attr = NULL;
	sysevent_value_t	value;
	sysevent_id_t		eid;

	/* SE_SLEEP: block for memory rather than fail the allocation. */
	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	/* Attribute: pool name. */
	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	/* Attribute: pool guid. */
	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	/* Optional attributes: vdev guid and, if known, its device path. */
	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	/*
	 * On success the event takes ownership of the attribute list;
	 * clear 'attr' so the cleanup path below doesn't free it twice.
	 */
	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	/* Free the attribute list only if it was never attached to 'ev'. */
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
4361