xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa.c (revision 416e0cd8)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5ea8dc4b6Seschrock  * Common Development and Distribution License (the "License").
6ea8dc4b6Seschrock  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
2199653d4eSeschrock 
22fa9e4066Sahrens /*
23c67d9675Seschrock  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24fa9e4066Sahrens  * Use is subject to license terms.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
28fa9e4066Sahrens 
29fa9e4066Sahrens /*
30fa9e4066Sahrens  * This file contains all the routines used when modifying on-disk SPA state.
31fa9e4066Sahrens  * This includes opening, importing, destroying, exporting a pool, and syncing a
32fa9e4066Sahrens  * pool.
33fa9e4066Sahrens  */
34fa9e4066Sahrens 
35fa9e4066Sahrens #include <sys/zfs_context.h>
36ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h>
37fa9e4066Sahrens #include <sys/spa_impl.h>
38fa9e4066Sahrens #include <sys/zio.h>
39fa9e4066Sahrens #include <sys/zio_checksum.h>
40fa9e4066Sahrens #include <sys/zio_compress.h>
41fa9e4066Sahrens #include <sys/dmu.h>
42fa9e4066Sahrens #include <sys/dmu_tx.h>
43fa9e4066Sahrens #include <sys/zap.h>
44fa9e4066Sahrens #include <sys/zil.h>
45fa9e4066Sahrens #include <sys/vdev_impl.h>
46fa9e4066Sahrens #include <sys/metaslab.h>
47fa9e4066Sahrens #include <sys/uberblock_impl.h>
48fa9e4066Sahrens #include <sys/txg.h>
49fa9e4066Sahrens #include <sys/avl.h>
50fa9e4066Sahrens #include <sys/dmu_traverse.h>
51fa9e4066Sahrens #include <sys/unique.h>
52fa9e4066Sahrens #include <sys/dsl_pool.h>
53fa9e4066Sahrens #include <sys/dsl_dir.h>
54fa9e4066Sahrens #include <sys/dsl_prop.h>
55fa9e4066Sahrens #include <sys/fs/zfs.h>
56fa9e4066Sahrens #include <sys/callb.h>
57fa9e4066Sahrens 
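/*
 * Number of worker threads in each of the per-zio-type issue and intr
 * taskqs created by spa_activate() below.  Being a plain global int,
 * it can be tuned without recompiling (e.g. via /etc/system).
 */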
58*416e0cd8Sek int zio_taskq_threads = 8;
59*416e0cd8Sek 
60fa9e4066Sahrens /*
61fa9e4066Sahrens  * ==========================================================================
62fa9e4066Sahrens  * SPA state manipulation (open/create/destroy/import/export)
63fa9e4066Sahrens  * ==========================================================================
64fa9e4066Sahrens  */
65fa9e4066Sahrens 
66ea8dc4b6Seschrock static int
67ea8dc4b6Seschrock spa_error_entry_compare(const void *a, const void *b)
68ea8dc4b6Seschrock {
69ea8dc4b6Seschrock 	spa_error_entry_t *sa = (spa_error_entry_t *)a;
70ea8dc4b6Seschrock 	spa_error_entry_t *sb = (spa_error_entry_t *)b;
71ea8dc4b6Seschrock 	int ret;
72ea8dc4b6Seschrock 
73ea8dc4b6Seschrock 	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
74ea8dc4b6Seschrock 	    sizeof (zbookmark_t));
75ea8dc4b6Seschrock 
76ea8dc4b6Seschrock 	if (ret < 0)
77ea8dc4b6Seschrock 		return (-1);
78ea8dc4b6Seschrock 	else if (ret > 0)
79ea8dc4b6Seschrock 		return (1);
80ea8dc4b6Seschrock 	else
81ea8dc4b6Seschrock 		return (0);
82ea8dc4b6Seschrock }
83ea8dc4b6Seschrock 
84ea8dc4b6Seschrock /*
85ea8dc4b6Seschrock  * Utility function which retrieves copies of the current error lists and
86ea8dc4b6Seschrock  * re-initializes them in the process.
87ea8dc4b6Seschrock  */
88ea8dc4b6Seschrock void
89ea8dc4b6Seschrock spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
90ea8dc4b6Seschrock {
91ea8dc4b6Seschrock 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
92ea8dc4b6Seschrock 
93ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
94ea8dc4b6Seschrock 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
95ea8dc4b6Seschrock 
96ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_scrub,
97ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
98ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
99ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_last,
100ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
101ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
102ea8dc4b6Seschrock }
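/*
 * A hypothetical caller sketch (not part of this file) illustrating the
 * intended usage: swap the lists out under spa_errlist_lock, then walk
 * and free the private copies without holding the lock.
 */
static void
example_drain_errlists(spa_t *spa)
{
	avl_tree_t last, scrub;
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);
	spa_get_errlists(spa, &last, &scrub);
	mutex_exit(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&last, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
	avl_destroy(&last);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&scrub, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
	avl_destroy(&scrub);
}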
103ea8dc4b6Seschrock 
104fa9e4066Sahrens /*
105fa9e4066Sahrens  * Activate an uninitialized pool.
106fa9e4066Sahrens  */
107fa9e4066Sahrens static void
108fa9e4066Sahrens spa_activate(spa_t *spa)
109fa9e4066Sahrens {
110fa9e4066Sahrens 	int t;
111fa9e4066Sahrens 
112fa9e4066Sahrens 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
113fa9e4066Sahrens 
114fa9e4066Sahrens 	spa->spa_state = POOL_STATE_ACTIVE;
115fa9e4066Sahrens 
116fa9e4066Sahrens 	spa->spa_normal_class = metaslab_class_create();
117fa9e4066Sahrens 
118fa9e4066Sahrens 	for (t = 0; t < ZIO_TYPES; t++) {
119fa9e4066Sahrens 		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
120*416e0cd8Sek 		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
121fa9e4066Sahrens 		    TASKQ_PREPOPULATE);
122fa9e4066Sahrens 		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
123*416e0cd8Sek 		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
124fa9e4066Sahrens 		    TASKQ_PREPOPULATE);
125fa9e4066Sahrens 	}
126fa9e4066Sahrens 
127fa9e4066Sahrens 	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);
128fa9e4066Sahrens 
1295ad82045Snd 	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
1305ad82045Snd 	mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
1315ad82045Snd 	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
1325ad82045Snd 	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
1335ad82045Snd 	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
1345ad82045Snd 	mutex_init(&spa->spa_config_lock.scl_lock, NULL, MUTEX_DEFAULT, NULL);
1355ad82045Snd 	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
13606eeb2adSek 	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
1375ad82045Snd 
138fa9e4066Sahrens 	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
139fa9e4066Sahrens 	    offsetof(vdev_t, vdev_dirty_node));
140fa9e4066Sahrens 
141fa9e4066Sahrens 	txg_list_create(&spa->spa_vdev_txg_list,
142fa9e4066Sahrens 	    offsetof(struct vdev, vdev_txg_node));
143ea8dc4b6Seschrock 
144ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_scrub,
145ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
146ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
147ea8dc4b6Seschrock 	avl_create(&spa->spa_errlist_last,
148ea8dc4b6Seschrock 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
149ea8dc4b6Seschrock 	    offsetof(spa_error_entry_t, se_avl));
150fa9e4066Sahrens }
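/*
 * For illustration only: once the pool is active, I/O of a given zio
 * type is fanned out to the matching taskq roughly as below.  This
 * helper is hypothetical; the real dispatch logic lives in zio.c.
 */
static void
example_dispatch_issue(spa_t *spa, zio_t *zio, task_func_t *func)
{
	(void) taskq_dispatch(spa->spa_zio_issue_taskq[zio->io_type],
	    func, zio, TQ_SLEEP);
}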
151fa9e4066Sahrens 
152fa9e4066Sahrens /*
153fa9e4066Sahrens  * Opposite of spa_activate().
154fa9e4066Sahrens  */
155fa9e4066Sahrens static void
156fa9e4066Sahrens spa_deactivate(spa_t *spa)
157fa9e4066Sahrens {
158fa9e4066Sahrens 	int t;
159fa9e4066Sahrens 
160fa9e4066Sahrens 	ASSERT(spa->spa_sync_on == B_FALSE);
161fa9e4066Sahrens 	ASSERT(spa->spa_dsl_pool == NULL);
162fa9e4066Sahrens 	ASSERT(spa->spa_root_vdev == NULL);
163fa9e4066Sahrens 
164fa9e4066Sahrens 	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
165fa9e4066Sahrens 
166fa9e4066Sahrens 	txg_list_destroy(&spa->spa_vdev_txg_list);
167fa9e4066Sahrens 
168fa9e4066Sahrens 	list_destroy(&spa->spa_dirty_list);
169fa9e4066Sahrens 
170fa9e4066Sahrens 	rw_destroy(&spa->spa_traverse_lock);
171fa9e4066Sahrens 
172fa9e4066Sahrens 	for (t = 0; t < ZIO_TYPES; t++) {
173fa9e4066Sahrens 		taskq_destroy(spa->spa_zio_issue_taskq[t]);
174fa9e4066Sahrens 		taskq_destroy(spa->spa_zio_intr_taskq[t]);
175fa9e4066Sahrens 		spa->spa_zio_issue_taskq[t] = NULL;
176fa9e4066Sahrens 		spa->spa_zio_intr_taskq[t] = NULL;
177fa9e4066Sahrens 	}
178fa9e4066Sahrens 
179fa9e4066Sahrens 	metaslab_class_destroy(spa->spa_normal_class);
180fa9e4066Sahrens 	spa->spa_normal_class = NULL;
181fa9e4066Sahrens 
182ea8dc4b6Seschrock 	/*
183ea8dc4b6Seschrock 	 * If this was part of an import or the open otherwise failed, we may
184ea8dc4b6Seschrock 	 * still have errors left in the queues.  Empty them just in case.
185ea8dc4b6Seschrock 	 */
186ea8dc4b6Seschrock 	spa_errlog_drain(spa);
187ea8dc4b6Seschrock 
188ea8dc4b6Seschrock 	avl_destroy(&spa->spa_errlist_scrub);
189ea8dc4b6Seschrock 	avl_destroy(&spa->spa_errlist_last);
190ea8dc4b6Seschrock 
191fa9e4066Sahrens 	spa->spa_state = POOL_STATE_UNINITIALIZED;
192fa9e4066Sahrens }
193fa9e4066Sahrens 
194fa9e4066Sahrens /*
195fa9e4066Sahrens  * Verify a pool configuration, and construct the vdev tree appropriately.  This
196fa9e4066Sahrens  * will create all the necessary vdevs in the appropriate layout, with each vdev
197fa9e4066Sahrens  * in the CLOSED state.  This will prep the pool before open/creation/import.
198fa9e4066Sahrens  * All vdev validation is done by the vdev_alloc() routine.
199fa9e4066Sahrens  */
20099653d4eSeschrock static int
20199653d4eSeschrock spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
20299653d4eSeschrock     uint_t id, int atype)
203fa9e4066Sahrens {
204fa9e4066Sahrens 	nvlist_t **child;
205fa9e4066Sahrens 	uint_t c, children;
20699653d4eSeschrock 	int error;
207fa9e4066Sahrens 
20899653d4eSeschrock 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
20999653d4eSeschrock 		return (error);
210fa9e4066Sahrens 
21199653d4eSeschrock 	if ((*vdp)->vdev_ops->vdev_op_leaf)
21299653d4eSeschrock 		return (0);
213fa9e4066Sahrens 
214fa9e4066Sahrens 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
215fa9e4066Sahrens 	    &child, &children) != 0) {
21699653d4eSeschrock 		vdev_free(*vdp);
21799653d4eSeschrock 		*vdp = NULL;
21899653d4eSeschrock 		return (EINVAL);
219fa9e4066Sahrens 	}
220fa9e4066Sahrens 
221fa9e4066Sahrens 	for (c = 0; c < children; c++) {
22299653d4eSeschrock 		vdev_t *vd;
22399653d4eSeschrock 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
22499653d4eSeschrock 		    atype)) != 0) {
22599653d4eSeschrock 			vdev_free(*vdp);
22699653d4eSeschrock 			*vdp = NULL;
22799653d4eSeschrock 			return (error);
228fa9e4066Sahrens 		}
229fa9e4066Sahrens 	}
230fa9e4066Sahrens 
23199653d4eSeschrock 	ASSERT(*vdp != NULL);
23299653d4eSeschrock 
23399653d4eSeschrock 	return (0);
234fa9e4066Sahrens }
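/*
 * For reference, the nvlist handed to spa_config_parse() is shaped
 * (schematically) as below; ZPOOL_CONFIG_CHILDREN arrays may nest to
 * arbitrary depth, and the paths/guids here are made up:
 *
 *	type="root"
 *	    children[0]: type="mirror"
 *		children[0]: type="disk" path="/dev/dsk/..." guid=...
 *		children[1]: type="disk" path="/dev/dsk/..." guid=...
 */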
235fa9e4066Sahrens 
236fa9e4066Sahrens /*
237fa9e4066Sahrens  * Opposite of spa_load().
238fa9e4066Sahrens  */
239fa9e4066Sahrens static void
240fa9e4066Sahrens spa_unload(spa_t *spa)
241fa9e4066Sahrens {
24299653d4eSeschrock 	int i;
24399653d4eSeschrock 
244ea8dc4b6Seschrock 	/*
245ea8dc4b6Seschrock 	 * Stop async tasks.
246ea8dc4b6Seschrock 	 */
247ea8dc4b6Seschrock 	spa_async_suspend(spa);
248ea8dc4b6Seschrock 
249fa9e4066Sahrens 	/*
250fa9e4066Sahrens 	 * Stop syncing.
251fa9e4066Sahrens 	 */
252fa9e4066Sahrens 	if (spa->spa_sync_on) {
253fa9e4066Sahrens 		txg_sync_stop(spa->spa_dsl_pool);
254fa9e4066Sahrens 		spa->spa_sync_on = B_FALSE;
255fa9e4066Sahrens 	}
256fa9e4066Sahrens 
257fa9e4066Sahrens 	/*
258fa9e4066Sahrens 	 * Wait for any outstanding prefetch I/O to complete.
259fa9e4066Sahrens 	 */
260ea8dc4b6Seschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
261ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
262fa9e4066Sahrens 
263fa9e4066Sahrens 	/*
264fa9e4066Sahrens 	 * Close the dsl pool.
265fa9e4066Sahrens 	 */
266fa9e4066Sahrens 	if (spa->spa_dsl_pool) {
267fa9e4066Sahrens 		dsl_pool_close(spa->spa_dsl_pool);
268fa9e4066Sahrens 		spa->spa_dsl_pool = NULL;
269fa9e4066Sahrens 	}
270fa9e4066Sahrens 
271fa9e4066Sahrens 	/*
272fa9e4066Sahrens 	 * Close all vdevs.
273fa9e4066Sahrens 	 */
2740e34b6a7Sbonwick 	if (spa->spa_root_vdev)
275fa9e4066Sahrens 		vdev_free(spa->spa_root_vdev);
2760e34b6a7Sbonwick 	ASSERT(spa->spa_root_vdev == NULL);
277ea8dc4b6Seschrock 
27899653d4eSeschrock 	for (i = 0; i < spa->spa_nspares; i++)
27999653d4eSeschrock 		vdev_free(spa->spa_spares[i]);
28099653d4eSeschrock 	if (spa->spa_spares) {
28199653d4eSeschrock 		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
28299653d4eSeschrock 		spa->spa_spares = NULL;
28399653d4eSeschrock 	}
28499653d4eSeschrock 	if (spa->spa_sparelist) {
28599653d4eSeschrock 		nvlist_free(spa->spa_sparelist);
28699653d4eSeschrock 		spa->spa_sparelist = NULL;
28799653d4eSeschrock 	}
28899653d4eSeschrock 
289ea8dc4b6Seschrock 	spa->spa_async_suspended = 0;
290fa9e4066Sahrens }
291fa9e4066Sahrens 
29299653d4eSeschrock /*
29399653d4eSeschrock  * Load (or re-load) the current list of vdevs describing the active spares for
29499653d4eSeschrock  * this pool.  When this is called, we have some form of basic information in
29599653d4eSeschrock  * 'spa_sparelist'.  We parse this into vdevs, try to open them, and then
29699653d4eSeschrock  * re-generate a more complete list including status information.
29799653d4eSeschrock  */
29899653d4eSeschrock static void
29999653d4eSeschrock spa_load_spares(spa_t *spa)
30099653d4eSeschrock {
30199653d4eSeschrock 	nvlist_t **spares;
30299653d4eSeschrock 	uint_t nspares;
30399653d4eSeschrock 	int i;
30499653d4eSeschrock 
30599653d4eSeschrock 	/*
30699653d4eSeschrock 	 * First, close and free any existing spare vdevs.
30799653d4eSeschrock 	 */
30899653d4eSeschrock 	for (i = 0; i < spa->spa_nspares; i++) {
30999653d4eSeschrock 		vdev_close(spa->spa_spares[i]);
31099653d4eSeschrock 		vdev_free(spa->spa_spares[i]);
31199653d4eSeschrock 	}
31299653d4eSeschrock 	if (spa->spa_spares)
31399653d4eSeschrock 		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
31499653d4eSeschrock 
31599653d4eSeschrock 	if (spa->spa_sparelist == NULL)
31699653d4eSeschrock 		nspares = 0;
31799653d4eSeschrock 	else
31899653d4eSeschrock 		VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
31999653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
32099653d4eSeschrock 
32199653d4eSeschrock 	spa->spa_nspares = (int)nspares;
32299653d4eSeschrock 	spa->spa_spares = NULL;
32399653d4eSeschrock 
32499653d4eSeschrock 	if (nspares == 0)
32599653d4eSeschrock 		return;
32699653d4eSeschrock 
32799653d4eSeschrock 	/*
32899653d4eSeschrock 	 * Construct the array of vdevs, opening them to get status in the
32999653d4eSeschrock 	 * process.
33099653d4eSeschrock 	 */
33199653d4eSeschrock 	spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
33299653d4eSeschrock 	for (i = 0; i < spa->spa_nspares; i++) {
33399653d4eSeschrock 		vdev_t *vd;
33499653d4eSeschrock 
33599653d4eSeschrock 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
33699653d4eSeschrock 		    VDEV_ALLOC_SPARE) == 0);
33799653d4eSeschrock 		ASSERT(vd != NULL);
33899653d4eSeschrock 
33999653d4eSeschrock 		spa->spa_spares[i] = vd;
34099653d4eSeschrock 
34199653d4eSeschrock 		if (vdev_open(vd) != 0)
34299653d4eSeschrock 			continue;
34399653d4eSeschrock 
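		/*
		 * A spare is its own top-level vdev; note that before
		 * validating its label below.
		 */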
34499653d4eSeschrock 		vd->vdev_top = vd;
34599653d4eSeschrock 		(void) vdev_validate_spare(vd);
34699653d4eSeschrock 	}
34799653d4eSeschrock 
34899653d4eSeschrock 	/*
34999653d4eSeschrock 	 * Recompute the stashed list of spares, with status information
35099653d4eSeschrock 	 * this time.
35199653d4eSeschrock 	 */
35299653d4eSeschrock 	VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
35399653d4eSeschrock 	    DATA_TYPE_NVLIST_ARRAY) == 0);
35499653d4eSeschrock 
35599653d4eSeschrock 	spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
35699653d4eSeschrock 	for (i = 0; i < spa->spa_nspares; i++)
35799653d4eSeschrock 		spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
35899653d4eSeschrock 		    B_TRUE, B_TRUE);
35999653d4eSeschrock 	VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
36099653d4eSeschrock 	    spares, spa->spa_nspares) == 0);
36199653d4eSeschrock 	for (i = 0; i < spa->spa_nspares; i++)
36299653d4eSeschrock 		nvlist_free(spares[i]);
36399653d4eSeschrock 	kmem_free(spares, spa->spa_nspares * sizeof (void *));
36499653d4eSeschrock }
36599653d4eSeschrock 
36699653d4eSeschrock static int
36799653d4eSeschrock load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
36899653d4eSeschrock {
36999653d4eSeschrock 	dmu_buf_t *db;
37099653d4eSeschrock 	char *packed = NULL;
37199653d4eSeschrock 	size_t nvsize = 0;
37299653d4eSeschrock 	int error;
37399653d4eSeschrock 	*value = NULL;
37499653d4eSeschrock 
37599653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
37699653d4eSeschrock 	nvsize = *(uint64_t *)db->db_data;
37799653d4eSeschrock 	dmu_buf_rele(db, FTAG);
37899653d4eSeschrock 
37999653d4eSeschrock 	packed = kmem_alloc(nvsize, KM_SLEEP);
38099653d4eSeschrock 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
38199653d4eSeschrock 	if (error == 0)
38299653d4eSeschrock 		error = nvlist_unpack(packed, nvsize, value, 0);
38399653d4eSeschrock 	kmem_free(packed, nvsize);
38499653d4eSeschrock 
38599653d4eSeschrock 	return (error);
38699653d4eSeschrock }
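/*
 * Illustrative inverse of load_nvlist() (a sketch only; the real writer
 * lives in the sync path): pack the nvlist, write it into the object,
 * and record the packed size in the bonus buffer.
 */
static void
example_store_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	char *packed;
	size_t nvsize;

	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
	packed = kmem_alloc(nvsize, KM_SLEEP);
	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);
	dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx);
	kmem_free(packed, nvsize);

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = (uint64_t)nvsize;
	dmu_buf_rele(db, FTAG);
}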
38799653d4eSeschrock 
388fa9e4066Sahrens /*
389fa9e4066Sahrens  * Load an existing storage pool, using the pool's builtin spa_config as a
390ea8dc4b6Seschrock  * source of configuration information.
391fa9e4066Sahrens  */
392fa9e4066Sahrens static int
393ea8dc4b6Seschrock spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
394fa9e4066Sahrens {
395fa9e4066Sahrens 	int error = 0;
396fa9e4066Sahrens 	nvlist_t *nvroot = NULL;
397fa9e4066Sahrens 	vdev_t *rvd;
398fa9e4066Sahrens 	uberblock_t *ub = &spa->spa_uberblock;
3990373e76bSbonwick 	uint64_t config_cache_txg = spa->spa_config_txg;
400fa9e4066Sahrens 	uint64_t pool_guid;
40199653d4eSeschrock 	uint64_t version;
402fa9e4066Sahrens 	zio_t *zio;
403fa9e4066Sahrens 
404ea8dc4b6Seschrock 	spa->spa_load_state = state;
4050373e76bSbonwick 
406fa9e4066Sahrens 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
407a9926bf0Sbonwick 	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
408ea8dc4b6Seschrock 		error = EINVAL;
409ea8dc4b6Seschrock 		goto out;
410ea8dc4b6Seschrock 	}
411fa9e4066Sahrens 
41299653d4eSeschrock 	/*
41399653d4eSeschrock 	 * Versioning wasn't explicitly added to the label until later, so if
41499653d4eSeschrock 	 * it's not present, treat it as the initial version.
41599653d4eSeschrock 	 */
41699653d4eSeschrock 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
41799653d4eSeschrock 		version = ZFS_VERSION_INITIAL;
41899653d4eSeschrock 
419a9926bf0Sbonwick 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
420a9926bf0Sbonwick 	    &spa->spa_config_txg);
421a9926bf0Sbonwick 
4220373e76bSbonwick 	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
423ea8dc4b6Seschrock 	    spa_guid_exists(pool_guid, 0)) {
424ea8dc4b6Seschrock 		error = EEXIST;
425ea8dc4b6Seschrock 		goto out;
426ea8dc4b6Seschrock 	}
427fa9e4066Sahrens 
428b5989ec7Seschrock 	spa->spa_load_guid = pool_guid;
429b5989ec7Seschrock 
430fa9e4066Sahrens 	/*
43199653d4eSeschrock 	 * Parse the configuration into a vdev tree.  We explicitly set the
43299653d4eSeschrock 	 * value that will be returned by spa_version() since parsing the
43399653d4eSeschrock 	 * configuration requires knowing the version number.
434fa9e4066Sahrens 	 */
435ea8dc4b6Seschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
43699653d4eSeschrock 	spa->spa_ubsync.ub_version = version;
43799653d4eSeschrock 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
438ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
439fa9e4066Sahrens 
44099653d4eSeschrock 	if (error != 0)
441ea8dc4b6Seschrock 		goto out;
442fa9e4066Sahrens 
4430e34b6a7Sbonwick 	ASSERT(spa->spa_root_vdev == rvd);
444fa9e4066Sahrens 	ASSERT(spa_guid(spa) == pool_guid);
445fa9e4066Sahrens 
446fa9e4066Sahrens 	/*
447fa9e4066Sahrens 	 * Try to open all vdevs, loading each label in the process.
448fa9e4066Sahrens 	 */
449ea8dc4b6Seschrock 	if (vdev_open(rvd) != 0) {
450ea8dc4b6Seschrock 		error = ENXIO;
451ea8dc4b6Seschrock 		goto out;
452ea8dc4b6Seschrock 	}
453fa9e4066Sahrens 
454560e6e96Seschrock 	/*
455560e6e96Seschrock 	 * Validate the labels for all leaf vdevs.  We need to grab the config
456560e6e96Seschrock 	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
457560e6e96Seschrock 	 * flag.
458560e6e96Seschrock 	 */
459560e6e96Seschrock 	spa_config_enter(spa, RW_READER, FTAG);
460560e6e96Seschrock 	error = vdev_validate(rvd);
461560e6e96Seschrock 	spa_config_exit(spa, FTAG);
462560e6e96Seschrock 
463560e6e96Seschrock 	if (error != 0) {
464560e6e96Seschrock 		error = EBADF;
465560e6e96Seschrock 		goto out;
466560e6e96Seschrock 	}
467560e6e96Seschrock 
468560e6e96Seschrock 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
469560e6e96Seschrock 		error = ENXIO;
470560e6e96Seschrock 		goto out;
471560e6e96Seschrock 	}
472560e6e96Seschrock 
473fa9e4066Sahrens 	/*
474fa9e4066Sahrens 	 * Find the best uberblock.
475fa9e4066Sahrens 	 */
476fa9e4066Sahrens 	bzero(ub, sizeof (uberblock_t));
477fa9e4066Sahrens 
478fa9e4066Sahrens 	zio = zio_root(spa, NULL, NULL,
479fa9e4066Sahrens 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
480fa9e4066Sahrens 	vdev_uberblock_load(zio, rvd, ub);
481fa9e4066Sahrens 	error = zio_wait(zio);
482fa9e4066Sahrens 
483fa9e4066Sahrens 	/*
484fa9e4066Sahrens 	 * If we weren't able to find a single valid uberblock, return failure.
485fa9e4066Sahrens 	 */
486fa9e4066Sahrens 	if (ub->ub_txg == 0) {
487eaca9bbdSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
488eaca9bbdSeschrock 		    VDEV_AUX_CORRUPT_DATA);
489ea8dc4b6Seschrock 		error = ENXIO;
490ea8dc4b6Seschrock 		goto out;
491ea8dc4b6Seschrock 	}
492ea8dc4b6Seschrock 
493ea8dc4b6Seschrock 	/*
494ea8dc4b6Seschrock 	 * If the pool is newer than the code, we can't open it.
495ea8dc4b6Seschrock 	 */
496eaca9bbdSeschrock 	if (ub->ub_version > ZFS_VERSION) {
497eaca9bbdSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
498eaca9bbdSeschrock 		    VDEV_AUX_VERSION_NEWER);
499ea8dc4b6Seschrock 		error = ENOTSUP;
500ea8dc4b6Seschrock 		goto out;
501fa9e4066Sahrens 	}
502fa9e4066Sahrens 
503fa9e4066Sahrens 	/*
504fa9e4066Sahrens 	 * If the vdev guid sum doesn't match the uberblock, we have an
505fa9e4066Sahrens 	 * incomplete configuration.
506fa9e4066Sahrens 	 */
507ecc2d604Sbonwick 	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
508ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
509ea8dc4b6Seschrock 		    VDEV_AUX_BAD_GUID_SUM);
510ea8dc4b6Seschrock 		error = ENXIO;
511ea8dc4b6Seschrock 		goto out;
512fa9e4066Sahrens 	}
513fa9e4066Sahrens 
514fa9e4066Sahrens 	/*
515fa9e4066Sahrens 	 * Initialize internal SPA structures.
516fa9e4066Sahrens 	 */
517fa9e4066Sahrens 	spa->spa_state = POOL_STATE_ACTIVE;
518fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
519fa9e4066Sahrens 	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
520ea8dc4b6Seschrock 	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
521ea8dc4b6Seschrock 	if (error) {
522ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
523ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
524ea8dc4b6Seschrock 		goto out;
525ea8dc4b6Seschrock 	}
526fa9e4066Sahrens 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
527fa9e4066Sahrens 
528ea8dc4b6Seschrock 	if (zap_lookup(spa->spa_meta_objset,
529fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
530ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
531ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
532ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
533ea8dc4b6Seschrock 		error = EIO;
534ea8dc4b6Seschrock 		goto out;
535ea8dc4b6Seschrock 	}
536fa9e4066Sahrens 
537fa9e4066Sahrens 	if (!mosconfig) {
53899653d4eSeschrock 		nvlist_t *newconfig;
539fa9e4066Sahrens 
54099653d4eSeschrock 		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
541ea8dc4b6Seschrock 			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
542ea8dc4b6Seschrock 			    VDEV_AUX_CORRUPT_DATA);
543ea8dc4b6Seschrock 			error = EIO;
544ea8dc4b6Seschrock 			goto out;
545ea8dc4b6Seschrock 		}
546fa9e4066Sahrens 
547fa9e4066Sahrens 		spa_config_set(spa, newconfig);
548fa9e4066Sahrens 		spa_unload(spa);
549fa9e4066Sahrens 		spa_deactivate(spa);
550fa9e4066Sahrens 		spa_activate(spa);
551fa9e4066Sahrens 
552ea8dc4b6Seschrock 		return (spa_load(spa, newconfig, state, B_TRUE));
553fa9e4066Sahrens 	}
554fa9e4066Sahrens 
555ea8dc4b6Seschrock 	if (zap_lookup(spa->spa_meta_objset,
556fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
557ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
558ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
559ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
560ea8dc4b6Seschrock 		error = EIO;
561ea8dc4b6Seschrock 		goto out;
562ea8dc4b6Seschrock 	}
563fa9e4066Sahrens 
56499653d4eSeschrock 	/*
56599653d4eSeschrock 	 * Load the bit that tells us to use the new accounting function
56699653d4eSeschrock 	 * (raid-z deflation).  If we have an older pool, this will not
56799653d4eSeschrock 	 * be present.
56899653d4eSeschrock 	 */
56999653d4eSeschrock 	error = zap_lookup(spa->spa_meta_objset,
57099653d4eSeschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
57199653d4eSeschrock 	    sizeof (uint64_t), 1, &spa->spa_deflate);
57299653d4eSeschrock 	if (error != 0 && error != ENOENT) {
57399653d4eSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
57499653d4eSeschrock 		    VDEV_AUX_CORRUPT_DATA);
57599653d4eSeschrock 		error = EIO;
57699653d4eSeschrock 		goto out;
57799653d4eSeschrock 	}
57899653d4eSeschrock 
579fa9e4066Sahrens 	/*
580ea8dc4b6Seschrock 	 * Load the persistent error log.  If we have an older pool, this will
581ea8dc4b6Seschrock 	 * not be present.
582fa9e4066Sahrens 	 */
583ea8dc4b6Seschrock 	error = zap_lookup(spa->spa_meta_objset,
584ea8dc4b6Seschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
585ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
586d80c45e0Sbonwick 	if (error != 0 && error != ENOENT) {
587ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
588ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
589ea8dc4b6Seschrock 		error = EIO;
590ea8dc4b6Seschrock 		goto out;
591ea8dc4b6Seschrock 	}
592ea8dc4b6Seschrock 
593ea8dc4b6Seschrock 	error = zap_lookup(spa->spa_meta_objset,
594ea8dc4b6Seschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
595ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
596ea8dc4b6Seschrock 	if (error != 0 && error != ENOENT) {
597ea8dc4b6Seschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
598ea8dc4b6Seschrock 		    VDEV_AUX_CORRUPT_DATA);
599ea8dc4b6Seschrock 		error = EIO;
600ea8dc4b6Seschrock 		goto out;
601ea8dc4b6Seschrock 	}
602ea8dc4b6Seschrock 
60306eeb2adSek 	/*
60406eeb2adSek 	 * Load the history object.  If we have an older pool, this
60506eeb2adSek 	 * will not be present.
60606eeb2adSek 	 */
60706eeb2adSek 	error = zap_lookup(spa->spa_meta_objset,
60806eeb2adSek 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
60906eeb2adSek 	    sizeof (uint64_t), 1, &spa->spa_history);
61006eeb2adSek 	if (error != 0 && error != ENOENT) {
61106eeb2adSek 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
61206eeb2adSek 		    VDEV_AUX_CORRUPT_DATA);
61306eeb2adSek 		error = EIO;
61406eeb2adSek 		goto out;
61506eeb2adSek 	}
61606eeb2adSek 
61799653d4eSeschrock 	/*
61899653d4eSeschrock 	 * Load any hot spares for this pool.
61999653d4eSeschrock 	 */
62099653d4eSeschrock 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
62199653d4eSeschrock 	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
62299653d4eSeschrock 	if (error != 0 && error != ENOENT) {
62399653d4eSeschrock 		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
62499653d4eSeschrock 		    VDEV_AUX_CORRUPT_DATA);
62599653d4eSeschrock 		error = EIO;
62699653d4eSeschrock 		goto out;
62799653d4eSeschrock 	}
62899653d4eSeschrock 	if (error == 0) {
62999653d4eSeschrock 		ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
63099653d4eSeschrock 		if (load_nvlist(spa, spa->spa_spares_object,
63199653d4eSeschrock 		    &spa->spa_sparelist) != 0) {
63299653d4eSeschrock 			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
63399653d4eSeschrock 			    VDEV_AUX_CORRUPT_DATA);
63499653d4eSeschrock 			error = EIO;
63599653d4eSeschrock 			goto out;
63699653d4eSeschrock 		}
63799653d4eSeschrock 
63899653d4eSeschrock 		spa_config_enter(spa, RW_WRITER, FTAG);
63999653d4eSeschrock 		spa_load_spares(spa);
64099653d4eSeschrock 		spa_config_exit(spa, FTAG);
64199653d4eSeschrock 	}
64299653d4eSeschrock 
643ea8dc4b6Seschrock 	/*
644560e6e96Seschrock 	 * Load the vdev state for all toplevel vdevs.
645ea8dc4b6Seschrock 	 */
646560e6e96Seschrock 	vdev_load(rvd);
6470373e76bSbonwick 
648fa9e4066Sahrens 	/*
649fa9e4066Sahrens 	 * Propagate the leaf DTLs (dirty time logs) we just loaded all the way up the tree.
650fa9e4066Sahrens 	 */
651ea8dc4b6Seschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
652fa9e4066Sahrens 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
653ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
654fa9e4066Sahrens 
655fa9e4066Sahrens 	/*
656fa9e4066Sahrens 	 * Check the state of the root vdev.  If it can't be opened, it
657fa9e4066Sahrens 	 * indicates one or more toplevel vdevs are faulted.
658fa9e4066Sahrens 	 */
659ea8dc4b6Seschrock 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
660ea8dc4b6Seschrock 		error = ENXIO;
661ea8dc4b6Seschrock 		goto out;
662ea8dc4b6Seschrock 	}
663fa9e4066Sahrens 
664ea8dc4b6Seschrock 	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
6655dabedeeSbonwick 		dmu_tx_t *tx;
6660373e76bSbonwick 		int need_update = B_FALSE;
6670373e76bSbonwick 		int c;
6685dabedeeSbonwick 
6690373e76bSbonwick 		/*
6700373e76bSbonwick 		 * Claim log blocks that haven't been committed yet.
6710373e76bSbonwick 		 * This must all happen in a single txg.
6720373e76bSbonwick 		 */
6735dabedeeSbonwick 		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
674fa9e4066Sahrens 		    spa_first_txg(spa));
6750b69c2f0Sahrens 		(void) dmu_objset_find(spa->spa_name,
6760b69c2f0Sahrens 		    zil_claim, tx, DS_FIND_CHILDREN);
677fa9e4066Sahrens 		dmu_tx_commit(tx);
678fa9e4066Sahrens 
679fa9e4066Sahrens 		spa->spa_sync_on = B_TRUE;
680fa9e4066Sahrens 		txg_sync_start(spa->spa_dsl_pool);
681fa9e4066Sahrens 
682fa9e4066Sahrens 		/*
683fa9e4066Sahrens 		 * Wait for all claims to sync.
684fa9e4066Sahrens 		 */
685fa9e4066Sahrens 		txg_wait_synced(spa->spa_dsl_pool, 0);
6860e34b6a7Sbonwick 
6870e34b6a7Sbonwick 		/*
6880373e76bSbonwick 		 * If the config cache is stale, or we have uninitialized
6890373e76bSbonwick 		 * metaslabs (see spa_vdev_add()), then update the config.
6900e34b6a7Sbonwick 		 */
6910373e76bSbonwick 		if (config_cache_txg != spa->spa_config_txg ||
6920373e76bSbonwick 		    state == SPA_LOAD_IMPORT)
6930373e76bSbonwick 			need_update = B_TRUE;
6940373e76bSbonwick 
6950373e76bSbonwick 		for (c = 0; c < rvd->vdev_children; c++)
6960373e76bSbonwick 			if (rvd->vdev_child[c]->vdev_ms_array == 0)
6970373e76bSbonwick 				need_update = B_TRUE;
6980e34b6a7Sbonwick 
6990e34b6a7Sbonwick 		/*
7000373e76bSbonwick 		 * Update the config cache asynchronously in case we're the
7010373e76bSbonwick 		 * root pool, in which case the config cache isn't writable yet.
7020e34b6a7Sbonwick 		 */
7030373e76bSbonwick 		if (need_update)
7040373e76bSbonwick 			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
705fa9e4066Sahrens 	}
706fa9e4066Sahrens 
707ea8dc4b6Seschrock 	error = 0;
708ea8dc4b6Seschrock out:
70999653d4eSeschrock 	if (error && error != EBADF)
710ea8dc4b6Seschrock 		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
711ea8dc4b6Seschrock 	spa->spa_load_state = SPA_LOAD_NONE;
712ea8dc4b6Seschrock 	spa->spa_ena = 0;
713ea8dc4b6Seschrock 
714ea8dc4b6Seschrock 	return (error);
715fa9e4066Sahrens }
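/*
 * In outline, a successful spa_load() has: parsed and opened the vdev
 * tree, selected the best uberblock, opened the DSL pool, loaded the
 * MOS directory objects (config, deflate bit, error logs, history,
 * hot spares), and, for writable loads, claimed any uncommitted ZIL
 * blocks and started the pool's sync thread.
 */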
716fa9e4066Sahrens 
717fa9e4066Sahrens /*
718fa9e4066Sahrens  * Pool Open/Import
719fa9e4066Sahrens  *
720fa9e4066Sahrens  * The import case is identical to an open except that the configuration is sent
721fa9e4066Sahrens  * down from userland, instead of grabbed from the configuration cache.  For the
722fa9e4066Sahrens  * case of an open, the pool configuration will exist in the
723fa9e4066Sahrens  * POOL_STATE_UNINITIALIZED state.
724fa9e4066Sahrens  *
725fa9e4066Sahrens  * The stats information (gen/count/ustats) is used to gather vdev statistics at
726fa9e4066Sahrens  * the same time as opening the pool, without having to keep around the spa_t in
727fa9e4066Sahrens  * some ambiguous state.
728fa9e4066Sahrens  */
729fa9e4066Sahrens static int
730fa9e4066Sahrens spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
731fa9e4066Sahrens {
732fa9e4066Sahrens 	spa_t *spa;
733fa9e4066Sahrens 	int error;
734fa9e4066Sahrens 	int loaded = B_FALSE;
735fa9e4066Sahrens 	int locked = B_FALSE;
736fa9e4066Sahrens 
737fa9e4066Sahrens 	*spapp = NULL;
738fa9e4066Sahrens 
739fa9e4066Sahrens 	/*
740fa9e4066Sahrens 	 * As disgusting as this is, we need to support recursive calls to this
741fa9e4066Sahrens 	 * function because dsl_dir_open() is called during spa_load(), and ends
742fa9e4066Sahrens 	 * up calling spa_open() again.  The real fix is to figure out how to
743fa9e4066Sahrens 	 * avoid dsl_dir_open() calling this in the first place.
744fa9e4066Sahrens 	 */
745fa9e4066Sahrens 	if (mutex_owner(&spa_namespace_lock) != curthread) {
746fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
747fa9e4066Sahrens 		locked = B_TRUE;
748fa9e4066Sahrens 	}
749fa9e4066Sahrens 
750fa9e4066Sahrens 	if ((spa = spa_lookup(pool)) == NULL) {
751fa9e4066Sahrens 		if (locked)
752fa9e4066Sahrens 			mutex_exit(&spa_namespace_lock);
753fa9e4066Sahrens 		return (ENOENT);
754fa9e4066Sahrens 	}
755fa9e4066Sahrens 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
756fa9e4066Sahrens 
757fa9e4066Sahrens 		spa_activate(spa);
758fa9e4066Sahrens 
7590373e76bSbonwick 		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);
760fa9e4066Sahrens 
761fa9e4066Sahrens 		if (error == EBADF) {
762fa9e4066Sahrens 			/*
763560e6e96Seschrock 			 * If vdev_validate() returns failure (indicated by
764560e6e96Seschrock 			 * EBADF), one of the vdev labels records that the pool
765560e6e96Seschrock 			 * has been exported or destroyed.  In that case the
766560e6e96Seschrock 			 * config cache is out of sync and we should remove the
767560e6e96Seschrock 			 * pool from the namespace.
768fa9e4066Sahrens 			 */
76999653d4eSeschrock 			zfs_post_ok(spa, NULL);
770fa9e4066Sahrens 			spa_unload(spa);
771fa9e4066Sahrens 			spa_deactivate(spa);
772fa9e4066Sahrens 			spa_remove(spa);
773fa9e4066Sahrens 			spa_config_sync();
774fa9e4066Sahrens 			if (locked)
775fa9e4066Sahrens 				mutex_exit(&spa_namespace_lock);
776fa9e4066Sahrens 			return (ENOENT);
777ea8dc4b6Seschrock 		}
778ea8dc4b6Seschrock 
779ea8dc4b6Seschrock 		if (error) {
780fa9e4066Sahrens 			/*
781fa9e4066Sahrens 			 * We can't open the pool, but we still have useful
782fa9e4066Sahrens 			 * information: the state of each vdev after the
783fa9e4066Sahrens 			 * attempted vdev_open().  Return this to the user.
784fa9e4066Sahrens 			 */
7850373e76bSbonwick 			if (config != NULL && spa->spa_root_vdev != NULL) {
7860373e76bSbonwick 				spa_config_enter(spa, RW_READER, FTAG);
787fa9e4066Sahrens 				*config = spa_config_generate(spa, NULL, -1ULL,
788fa9e4066Sahrens 				    B_TRUE);
7890373e76bSbonwick 				spa_config_exit(spa, FTAG);
7900373e76bSbonwick 			}
791fa9e4066Sahrens 			spa_unload(spa);
792fa9e4066Sahrens 			spa_deactivate(spa);
793ea8dc4b6Seschrock 			spa->spa_last_open_failed = B_TRUE;
794fa9e4066Sahrens 			if (locked)
795fa9e4066Sahrens 				mutex_exit(&spa_namespace_lock);
796fa9e4066Sahrens 			*spapp = NULL;
797fa9e4066Sahrens 			return (error);
798ea8dc4b6Seschrock 		} else {
799ea8dc4b6Seschrock 			zfs_post_ok(spa, NULL);
800ea8dc4b6Seschrock 			spa->spa_last_open_failed = B_FALSE;
801fa9e4066Sahrens 		}
802fa9e4066Sahrens 
803fa9e4066Sahrens 		loaded = B_TRUE;
804fa9e4066Sahrens 	}
805fa9e4066Sahrens 
806fa9e4066Sahrens 	spa_open_ref(spa, tag);
807fa9e4066Sahrens 	if (locked)
808fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
809fa9e4066Sahrens 
810fa9e4066Sahrens 	*spapp = spa;
811fa9e4066Sahrens 
812fa9e4066Sahrens 	if (config != NULL) {
813ea8dc4b6Seschrock 		spa_config_enter(spa, RW_READER, FTAG);
814fa9e4066Sahrens 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
815ea8dc4b6Seschrock 		spa_config_exit(spa, FTAG);
816fa9e4066Sahrens 	}
817fa9e4066Sahrens 
818fa9e4066Sahrens 	/*
819fa9e4066Sahrens 	 * If we just loaded the pool, resilver anything that's out of date.
820fa9e4066Sahrens 	 */
821fa9e4066Sahrens 	if (loaded && (spa_mode & FWRITE))
822fa9e4066Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
823fa9e4066Sahrens 
824fa9e4066Sahrens 	return (0);
825fa9e4066Sahrens }
826fa9e4066Sahrens 
827fa9e4066Sahrens int
828fa9e4066Sahrens spa_open(const char *name, spa_t **spapp, void *tag)
829fa9e4066Sahrens {
830fa9e4066Sahrens 	return (spa_open_common(name, spapp, tag, NULL));
831fa9e4066Sahrens }
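/*
 * Typical consumer pattern (a sketch, not part of this file): spa_open()
 * takes a reference on the pool under 'tag', which must be dropped with
 * spa_close() using the same tag.
 */
static int
example_with_pool(const char *name)
{
	spa_t *spa;
	int error;

	if ((error = spa_open(name, &spa, FTAG)) != 0)
		return (error);

	/* ... operate on the open pool here ... */

	spa_close(spa, FTAG);
	return (0);
}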
832fa9e4066Sahrens 
833ea8dc4b6Seschrock /*
834ea8dc4b6Seschrock  * Lookup the given spa_t, incrementing the inject count in the process,
835ea8dc4b6Seschrock  * preventing it from being exported or destroyed.
836ea8dc4b6Seschrock  */
837ea8dc4b6Seschrock spa_t *
838ea8dc4b6Seschrock spa_inject_addref(char *name)
839ea8dc4b6Seschrock {
840ea8dc4b6Seschrock 	spa_t *spa;
841ea8dc4b6Seschrock 
842ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
843ea8dc4b6Seschrock 	if ((spa = spa_lookup(name)) == NULL) {
844ea8dc4b6Seschrock 		mutex_exit(&spa_namespace_lock);
845ea8dc4b6Seschrock 		return (NULL);
846ea8dc4b6Seschrock 	}
847ea8dc4b6Seschrock 	spa->spa_inject_ref++;
848ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
849ea8dc4b6Seschrock 
850ea8dc4b6Seschrock 	return (spa);
851ea8dc4b6Seschrock }
852ea8dc4b6Seschrock 
853ea8dc4b6Seschrock void
854ea8dc4b6Seschrock spa_inject_delref(spa_t *spa)
855ea8dc4b6Seschrock {
856ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
857ea8dc4b6Seschrock 	spa->spa_inject_ref--;
858ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
859ea8dc4b6Seschrock }
860ea8dc4b6Seschrock 
86199653d4eSeschrock static void
86299653d4eSeschrock spa_add_spares(spa_t *spa, nvlist_t *config)
86399653d4eSeschrock {
86499653d4eSeschrock 	nvlist_t **spares;
86599653d4eSeschrock 	uint_t i, nspares;
86699653d4eSeschrock 	nvlist_t *nvroot;
86799653d4eSeschrock 	uint64_t guid;
86899653d4eSeschrock 	vdev_stat_t *vs;
86999653d4eSeschrock 	uint_t vsc;
87099653d4eSeschrock 
87199653d4eSeschrock 	if (spa->spa_nspares == 0)
87299653d4eSeschrock 		return;
87399653d4eSeschrock 
87499653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist(config,
87599653d4eSeschrock 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
87699653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
87799653d4eSeschrock 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
87899653d4eSeschrock 	if (nspares != 0) {
87999653d4eSeschrock 		VERIFY(nvlist_add_nvlist_array(nvroot,
88099653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
88199653d4eSeschrock 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
88299653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
88399653d4eSeschrock 
88499653d4eSeschrock 		/*
88599653d4eSeschrock 		 * Go through and find any spares which have since been
88699653d4eSeschrock 		 * repurposed as active spares.  If this is the case, update
88799653d4eSeschrock 		 * their status appropriately.
88899653d4eSeschrock 		 */
88999653d4eSeschrock 		for (i = 0; i < nspares; i++) {
89099653d4eSeschrock 			VERIFY(nvlist_lookup_uint64(spares[i],
89199653d4eSeschrock 			    ZPOOL_CONFIG_GUID, &guid) == 0);
89299653d4eSeschrock 			if (spa_spare_inuse(guid)) {
89399653d4eSeschrock 				VERIFY(nvlist_lookup_uint64_array(
89499653d4eSeschrock 				    spares[i], ZPOOL_CONFIG_STATS,
89599653d4eSeschrock 				    (uint64_t **)&vs, &vsc) == 0);
89699653d4eSeschrock 				vs->vs_state = VDEV_STATE_CANT_OPEN;
89799653d4eSeschrock 				vs->vs_aux = VDEV_AUX_SPARED;
89899653d4eSeschrock 			}
89999653d4eSeschrock 		}
90099653d4eSeschrock 	}
90199653d4eSeschrock }
90299653d4eSeschrock 
903fa9e4066Sahrens int
904ea8dc4b6Seschrock spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
905fa9e4066Sahrens {
906fa9e4066Sahrens 	int error;
907fa9e4066Sahrens 	spa_t *spa;
908fa9e4066Sahrens 
909fa9e4066Sahrens 	*config = NULL;
910fa9e4066Sahrens 	error = spa_open_common(name, &spa, FTAG, config);
911fa9e4066Sahrens 
91299653d4eSeschrock 	if (spa && *config != NULL) {
913ea8dc4b6Seschrock 		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
914ea8dc4b6Seschrock 		    spa_get_errlog_size(spa)) == 0);
915ea8dc4b6Seschrock 
91699653d4eSeschrock 		spa_add_spares(spa, *config);
91799653d4eSeschrock 	}
91899653d4eSeschrock 
919ea8dc4b6Seschrock 	/*
920ea8dc4b6Seschrock 	 * We want to get the alternate root even for faulted pools, so we cheat
921ea8dc4b6Seschrock 	 * and call spa_lookup() directly.
922ea8dc4b6Seschrock 	 */
923ea8dc4b6Seschrock 	if (altroot) {
924ea8dc4b6Seschrock 		if (spa == NULL) {
925ea8dc4b6Seschrock 			mutex_enter(&spa_namespace_lock);
926ea8dc4b6Seschrock 			spa = spa_lookup(name);
927ea8dc4b6Seschrock 			if (spa)
928ea8dc4b6Seschrock 				spa_altroot(spa, altroot, buflen);
929ea8dc4b6Seschrock 			else
930ea8dc4b6Seschrock 				altroot[0] = '\0';
931ea8dc4b6Seschrock 			spa = NULL;
932ea8dc4b6Seschrock 			mutex_exit(&spa_namespace_lock);
933ea8dc4b6Seschrock 		} else {
934ea8dc4b6Seschrock 			spa_altroot(spa, altroot, buflen);
935ea8dc4b6Seschrock 		}
936ea8dc4b6Seschrock 	}
937ea8dc4b6Seschrock 
938fa9e4066Sahrens 	if (spa != NULL)
939fa9e4066Sahrens 		spa_close(spa, FTAG);
940fa9e4066Sahrens 
941fa9e4066Sahrens 	return (error);
942fa9e4066Sahrens }
943fa9e4066Sahrens 
94499653d4eSeschrock /*
94599653d4eSeschrock  * Validate that the 'spares' array is well formed.  We must have an array of
94699653d4eSeschrock  * nvlists, each of which describes a valid leaf vdev.
94799653d4eSeschrock  */
94899653d4eSeschrock static int
94999653d4eSeschrock spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
95099653d4eSeschrock {
95199653d4eSeschrock 	nvlist_t **spares;
95299653d4eSeschrock 	uint_t i, nspares;
95399653d4eSeschrock 	vdev_t *vd;
95499653d4eSeschrock 	int error;
95599653d4eSeschrock 
95699653d4eSeschrock 	/*
95799653d4eSeschrock 	 * It's acceptable to have no spares specified.
95899653d4eSeschrock 	 */
95999653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
96099653d4eSeschrock 	    &spares, &nspares) != 0)
96199653d4eSeschrock 		return (0);
96299653d4eSeschrock 
96399653d4eSeschrock 	if (nspares == 0)
96499653d4eSeschrock 		return (EINVAL);
96599653d4eSeschrock 
96699653d4eSeschrock 	/*
96799653d4eSeschrock 	 * Make sure the pool is formatted with a version that supports hot
96899653d4eSeschrock 	 * spares.
96999653d4eSeschrock 	 */
97099653d4eSeschrock 	if (spa_version(spa) < ZFS_VERSION_SPARES)
97199653d4eSeschrock 		return (ENOTSUP);
97299653d4eSeschrock 
97399653d4eSeschrock 	for (i = 0; i < nspares; i++) {
97499653d4eSeschrock 		if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0,
97599653d4eSeschrock 		    mode)) != 0)
97699653d4eSeschrock 			return (error);
97799653d4eSeschrock 
97899653d4eSeschrock 		if (!vd->vdev_ops->vdev_op_leaf) {
97999653d4eSeschrock 			vdev_free(vd);
98099653d4eSeschrock 			return (EINVAL);
98199653d4eSeschrock 		}
98299653d4eSeschrock 
98399653d4eSeschrock 		if ((error = vdev_open(vd)) != 0) {
98499653d4eSeschrock 			vdev_free(vd);
98599653d4eSeschrock 			return (error);
98699653d4eSeschrock 		}
98799653d4eSeschrock 
98899653d4eSeschrock 		vd->vdev_top = vd;
98999653d4eSeschrock 		if ((error = vdev_label_spare(vd, crtxg)) != 0) {
99099653d4eSeschrock 			vdev_free(vd);
99199653d4eSeschrock 			return (error);
99299653d4eSeschrock 		}
99399653d4eSeschrock 
99499653d4eSeschrock 		VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID,
99599653d4eSeschrock 		    vd->vdev_guid) == 0);
99699653d4eSeschrock 
99799653d4eSeschrock 		vdev_free(vd);
99899653d4eSeschrock 	}
99999653d4eSeschrock 
100099653d4eSeschrock 	return (0);
100199653d4eSeschrock }
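/*
 * Schematically, a ZPOOL_CONFIG_SPARES array that passes the validation
 * above looks like the following (illustrative values only); each entry
 * must parse as a leaf vdev, open cleanly, and accept a spare label:
 *
 *	spares[0]: type="disk" path="/dev/dsk/..."
 *	spares[1]: type="disk" path="/dev/dsk/..."
 */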
100299653d4eSeschrock 
1003fa9e4066Sahrens /*
1004fa9e4066Sahrens  * Pool Creation
1005fa9e4066Sahrens  */
1006fa9e4066Sahrens int
10070373e76bSbonwick spa_create(const char *pool, nvlist_t *nvroot, const char *altroot)
1008fa9e4066Sahrens {
1009fa9e4066Sahrens 	spa_t *spa;
10100373e76bSbonwick 	vdev_t *rvd;
1011fa9e4066Sahrens 	dsl_pool_t *dp;
1012fa9e4066Sahrens 	dmu_tx_t *tx;
101399653d4eSeschrock 	int c, error = 0;
1014fa9e4066Sahrens 	uint64_t txg = TXG_INITIAL;
101599653d4eSeschrock 	nvlist_t **spares;
101699653d4eSeschrock 	uint_t nspares;
1017fa9e4066Sahrens 
1018fa9e4066Sahrens 	/*
1019fa9e4066Sahrens 	 * If this pool already exists, return failure.
1020fa9e4066Sahrens 	 */
1021fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
1022fa9e4066Sahrens 	if (spa_lookup(pool) != NULL) {
1023fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1024fa9e4066Sahrens 		return (EEXIST);
1025fa9e4066Sahrens 	}
1026fa9e4066Sahrens 
1027fa9e4066Sahrens 	/*
1028fa9e4066Sahrens 	 * Allocate a new spa_t structure.
1029fa9e4066Sahrens 	 */
10300373e76bSbonwick 	spa = spa_add(pool, altroot);
1031fa9e4066Sahrens 	spa_activate(spa);
1032fa9e4066Sahrens 
1033fa9e4066Sahrens 	spa->spa_uberblock.ub_txg = txg - 1;
1034eaca9bbdSeschrock 	spa->spa_uberblock.ub_version = ZFS_VERSION;
1035fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
1036fa9e4066Sahrens 
10370373e76bSbonwick 	/*
10380373e76bSbonwick 	 * Create the root vdev.
10390373e76bSbonwick 	 */
10400373e76bSbonwick 	spa_config_enter(spa, RW_WRITER, FTAG);
10410373e76bSbonwick 
104299653d4eSeschrock 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
10430373e76bSbonwick 
104499653d4eSeschrock 	ASSERT(error != 0 || rvd != NULL);
104599653d4eSeschrock 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
10460373e76bSbonwick 
104799653d4eSeschrock 	if (error == 0 && rvd->vdev_children == 0)
10480373e76bSbonwick 		error = EINVAL;
104999653d4eSeschrock 
105099653d4eSeschrock 	if (error == 0 &&
105199653d4eSeschrock 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
105299653d4eSeschrock 	    (error = spa_validate_spares(spa, nvroot, txg,
105399653d4eSeschrock 	    VDEV_ALLOC_ADD)) == 0) {
105499653d4eSeschrock 		for (c = 0; c < rvd->vdev_children; c++)
105599653d4eSeschrock 			vdev_init(rvd->vdev_child[c], txg);
105699653d4eSeschrock 		vdev_config_dirty(rvd);
10570373e76bSbonwick 	}
10580373e76bSbonwick 
10590373e76bSbonwick 	spa_config_exit(spa, FTAG);
1060fa9e4066Sahrens 
106199653d4eSeschrock 	if (error != 0) {
1062fa9e4066Sahrens 		spa_unload(spa);
1063fa9e4066Sahrens 		spa_deactivate(spa);
1064fa9e4066Sahrens 		spa_remove(spa);
1065fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1066fa9e4066Sahrens 		return (error);
1067fa9e4066Sahrens 	}
1068fa9e4066Sahrens 
106999653d4eSeschrock 	/*
107099653d4eSeschrock 	 * Get the list of spares, if specified.
107199653d4eSeschrock 	 */
107299653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
107399653d4eSeschrock 	    &spares, &nspares) == 0) {
107499653d4eSeschrock 		VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME,
107599653d4eSeschrock 		    KM_SLEEP) == 0);
107699653d4eSeschrock 		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
107799653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
107899653d4eSeschrock 		spa_config_enter(spa, RW_WRITER, FTAG);
107999653d4eSeschrock 		spa_load_spares(spa);
108099653d4eSeschrock 		spa_config_exit(spa, FTAG);
108199653d4eSeschrock 		spa->spa_sync_spares = B_TRUE;
108299653d4eSeschrock 	}
108399653d4eSeschrock 
1084fa9e4066Sahrens 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
1085fa9e4066Sahrens 	spa->spa_meta_objset = dp->dp_meta_objset;
1086fa9e4066Sahrens 
1087fa9e4066Sahrens 	tx = dmu_tx_create_assigned(dp, txg);
1088fa9e4066Sahrens 
1089fa9e4066Sahrens 	/*
1090fa9e4066Sahrens 	 * Create the pool config object.
1091fa9e4066Sahrens 	 */
1092fa9e4066Sahrens 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
1093fa9e4066Sahrens 	    DMU_OT_PACKED_NVLIST, 1 << 14,
1094fa9e4066Sahrens 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
1095fa9e4066Sahrens 
1096ea8dc4b6Seschrock 	if (zap_add(spa->spa_meta_objset,
1097fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
1098ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
1099ea8dc4b6Seschrock 		cmn_err(CE_PANIC, "failed to add pool config");
1100ea8dc4b6Seschrock 	}
1101fa9e4066Sahrens 
110299653d4eSeschrock 	/* Newly created pools are always deflated. */
110399653d4eSeschrock 	spa->spa_deflate = TRUE;
110499653d4eSeschrock 	if (zap_add(spa->spa_meta_objset,
110599653d4eSeschrock 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
110699653d4eSeschrock 	    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
110799653d4eSeschrock 		cmn_err(CE_PANIC, "failed to add deflate");
110899653d4eSeschrock 	}
110999653d4eSeschrock 
1110fa9e4066Sahrens 	/*
1111fa9e4066Sahrens 	 * Create the deferred-free bplist object.  Turn off compression
1112fa9e4066Sahrens 	 * because sync-to-convergence takes longer if the blocksize
1113fa9e4066Sahrens 	 * keeps changing.
1114fa9e4066Sahrens 	 */
1115fa9e4066Sahrens 	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
1116fa9e4066Sahrens 	    1 << 14, tx);
1117fa9e4066Sahrens 	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
1118fa9e4066Sahrens 	    ZIO_COMPRESS_OFF, tx);
1119fa9e4066Sahrens 
1120ea8dc4b6Seschrock 	if (zap_add(spa->spa_meta_objset,
1121fa9e4066Sahrens 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
1122ea8dc4b6Seschrock 	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
1123ea8dc4b6Seschrock 		cmn_err(CE_PANIC, "failed to add bplist");
1124ea8dc4b6Seschrock 	}
1125fa9e4066Sahrens 
112606eeb2adSek 	/*
112706eeb2adSek 	 * Create the pool's history object.
112806eeb2adSek 	 */
112906eeb2adSek 	spa_history_create_obj(spa, tx);
113006eeb2adSek 
1131fa9e4066Sahrens 	dmu_tx_commit(tx);
1132fa9e4066Sahrens 
1133fa9e4066Sahrens 	spa->spa_sync_on = B_TRUE;
1134fa9e4066Sahrens 	txg_sync_start(spa->spa_dsl_pool);
1135fa9e4066Sahrens 
1136fa9e4066Sahrens 	/*
1137fa9e4066Sahrens 	 * We explicitly wait for the first transaction to complete so that our
1138fa9e4066Sahrens 	 * bean counters are appropriately updated.
1139fa9e4066Sahrens 	 */
1140fa9e4066Sahrens 	txg_wait_synced(spa->spa_dsl_pool, txg);
1141fa9e4066Sahrens 
1142fa9e4066Sahrens 	spa_config_sync();
1143fa9e4066Sahrens 
1144fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
1145fa9e4066Sahrens 
1146fa9e4066Sahrens 	return (0);
1147fa9e4066Sahrens }
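/*
 * Hypothetical caller sketch showing a minimal single-disk nvroot for
 * spa_create(); in practice the nvlist is built in userland by libzfs
 * and arrives through the ioctl path.
 */
static int
example_create_pool(const char *name, const char *path)
{
	nvlist_t *nvroot, *disk;
	int error;

	VERIFY(nvlist_alloc(&disk, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, path) == 0);

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	error = spa_create(name, nvroot, NULL);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (error);
}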
1148fa9e4066Sahrens 
1149fa9e4066Sahrens /*
1150fa9e4066Sahrens  * Import the given pool into the system.  We set up the necessary spa_t and
1151fa9e4066Sahrens  * then call spa_load() to do the dirty work.
1152fa9e4066Sahrens  */
1153fa9e4066Sahrens int
11540373e76bSbonwick spa_import(const char *pool, nvlist_t *config, const char *altroot)
1155fa9e4066Sahrens {
1156fa9e4066Sahrens 	spa_t *spa;
1157fa9e4066Sahrens 	int error;
115899653d4eSeschrock 	nvlist_t *nvroot;
115999653d4eSeschrock 	nvlist_t **spares;
116099653d4eSeschrock 	uint_t nspares;
1161fa9e4066Sahrens 
1162fa9e4066Sahrens 	if (!(spa_mode & FWRITE))
1163fa9e4066Sahrens 		return (EROFS);
1164fa9e4066Sahrens 
1165fa9e4066Sahrens 	/*
1166fa9e4066Sahrens 	 * If a pool with this name exists, return failure.
1167fa9e4066Sahrens 	 */
1168fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
1169fa9e4066Sahrens 	if (spa_lookup(pool) != NULL) {
1170fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1171fa9e4066Sahrens 		return (EEXIST);
1172fa9e4066Sahrens 	}
1173fa9e4066Sahrens 
1174fa9e4066Sahrens 	/*
11750373e76bSbonwick 	 * Create and initialize the spa structure.
1176fa9e4066Sahrens 	 */
11770373e76bSbonwick 	spa = spa_add(pool, altroot);
1178fa9e4066Sahrens 	spa_activate(spa);
1179fa9e4066Sahrens 
11805dabedeeSbonwick 	/*
11810373e76bSbonwick 	 * Pass off the heavy lifting to spa_load().
1182ecc2d604Sbonwick 	 * Pass TRUE for mosconfig because the user-supplied config
1183ecc2d604Sbonwick 	 * is actually the one to trust when doing an import.
11845dabedeeSbonwick 	 */
1185ecc2d604Sbonwick 	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);
1186fa9e4066Sahrens 
118799653d4eSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
118899653d4eSeschrock 	/*
118999653d4eSeschrock 	 * Toss any existing sparelist, as it is no longer valid and
119099653d4eSeschrock 	 * conflicts with spa_has_spare().
119199653d4eSeschrock 	 */
119299653d4eSeschrock 	if (spa->spa_sparelist) {
119399653d4eSeschrock 		nvlist_free(spa->spa_sparelist);
119499653d4eSeschrock 		spa->spa_sparelist = NULL;
119599653d4eSeschrock 		spa_load_spares(spa);
119699653d4eSeschrock 	}
119799653d4eSeschrock 
119899653d4eSeschrock 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
119999653d4eSeschrock 	    &nvroot) == 0);
120099653d4eSeschrock 	if (error == 0)
120199653d4eSeschrock 		error = spa_validate_spares(spa, nvroot, -1ULL,
120299653d4eSeschrock 		    VDEV_ALLOC_SPARE);
120399653d4eSeschrock 	spa_config_exit(spa, FTAG);
120499653d4eSeschrock 
120599653d4eSeschrock 	if (error != 0) {
1206fa9e4066Sahrens 		spa_unload(spa);
1207fa9e4066Sahrens 		spa_deactivate(spa);
1208fa9e4066Sahrens 		spa_remove(spa);
1209fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1210fa9e4066Sahrens 		return (error);
1211fa9e4066Sahrens 	}
1212fa9e4066Sahrens 
121399653d4eSeschrock 	/*
121499653d4eSeschrock 	 * Override any spares as specified by the user, as these may have
121599653d4eSeschrock 	 * correct device names/devids, etc.
121699653d4eSeschrock 	 */
121799653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
121899653d4eSeschrock 	    &spares, &nspares) == 0) {
121999653d4eSeschrock 		if (spa->spa_sparelist)
122099653d4eSeschrock 			VERIFY(nvlist_remove(spa->spa_sparelist,
122199653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
122299653d4eSeschrock 		else
122399653d4eSeschrock 			VERIFY(nvlist_alloc(&spa->spa_sparelist,
122499653d4eSeschrock 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
122599653d4eSeschrock 		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
122699653d4eSeschrock 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
122799653d4eSeschrock 		spa_config_enter(spa, RW_WRITER, FTAG);
122899653d4eSeschrock 		spa_load_spares(spa);
122999653d4eSeschrock 		spa_config_exit(spa, FTAG);
123099653d4eSeschrock 		spa->spa_sync_spares = B_TRUE;
123199653d4eSeschrock 	}
123299653d4eSeschrock 
12330373e76bSbonwick 	/*
12340373e76bSbonwick 	 * Update the config cache to include the newly-imported pool.
12350373e76bSbonwick 	 */
12360373e76bSbonwick 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
12370373e76bSbonwick 
1238fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
1239fa9e4066Sahrens 
1240fa9e4066Sahrens 	/*
1241fa9e4066Sahrens 	 * Resilver anything that's out of date.
1242fa9e4066Sahrens 	 */
1243fa9e4066Sahrens 	if (spa_mode & FWRITE)
1244fa9e4066Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
1245fa9e4066Sahrens 
1246fa9e4066Sahrens 	return (0);
1247fa9e4066Sahrens }
1248fa9e4066Sahrens 
1249fa9e4066Sahrens /*
1250fa9e4066Sahrens  * This (illegal) pool name is used when temporarily importing a spa_t in order
1251fa9e4066Sahrens  * to get the vdev stats associated with the imported devices.
1252fa9e4066Sahrens  */
1253fa9e4066Sahrens #define	TRYIMPORT_NAME	"$import"
1254fa9e4066Sahrens 
1255fa9e4066Sahrens nvlist_t *
1256fa9e4066Sahrens spa_tryimport(nvlist_t *tryconfig)
1257fa9e4066Sahrens {
1258fa9e4066Sahrens 	nvlist_t *config = NULL;
1259fa9e4066Sahrens 	char *poolname;
1260fa9e4066Sahrens 	spa_t *spa;
1261fa9e4066Sahrens 	uint64_t state;
1262fa9e4066Sahrens 
1263fa9e4066Sahrens 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
1264fa9e4066Sahrens 		return (NULL);
1265fa9e4066Sahrens 
1266fa9e4066Sahrens 	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
1267fa9e4066Sahrens 		return (NULL);
1268fa9e4066Sahrens 
1269fa9e4066Sahrens 	/*
12700373e76bSbonwick 	 * Create and initialize the spa structure.
1271fa9e4066Sahrens 	 */
12720373e76bSbonwick 	mutex_enter(&spa_namespace_lock);
12730373e76bSbonwick 	spa = spa_add(TRYIMPORT_NAME, NULL);
1274fa9e4066Sahrens 	spa_activate(spa);
1275fa9e4066Sahrens 
1276fa9e4066Sahrens 	/*
12770373e76bSbonwick 	 * Pass off the heavy lifting to spa_load().
1278ecc2d604Sbonwick 	 * Pass TRUE for mosconfig because the user-supplied config
1279ecc2d604Sbonwick 	 * is actually the one to trust when doing an import.
1280fa9e4066Sahrens 	 */
1281ecc2d604Sbonwick 	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
1282fa9e4066Sahrens 
1283fa9e4066Sahrens 	/*
1284fa9e4066Sahrens 	 * If 'tryconfig' was at least parsable, return the current config.
1285fa9e4066Sahrens 	 */
1286fa9e4066Sahrens 	if (spa->spa_root_vdev != NULL) {
12870373e76bSbonwick 		spa_config_enter(spa, RW_READER, FTAG);
1288fa9e4066Sahrens 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
12890373e76bSbonwick 		spa_config_exit(spa, FTAG);
1290fa9e4066Sahrens 		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
1291fa9e4066Sahrens 		    poolname) == 0);
1292fa9e4066Sahrens 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1293fa9e4066Sahrens 		    state) == 0);
129499653d4eSeschrock 
129599653d4eSeschrock 		/*
129699653d4eSeschrock 		 * Add the list of hot spares.
129799653d4eSeschrock 		 */
129899653d4eSeschrock 		spa_add_spares(spa, config);
1299fa9e4066Sahrens 	}
1300fa9e4066Sahrens 
1301fa9e4066Sahrens 	spa_unload(spa);
1302fa9e4066Sahrens 	spa_deactivate(spa);
1303fa9e4066Sahrens 	spa_remove(spa);
1304fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
1305fa9e4066Sahrens 
1306fa9e4066Sahrens 	return (config);
1307fa9e4066Sahrens }
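/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller probing a device's configuration with spa_tryimport().  The
 * nvlist accessors are the standard libnvpair interfaces; the origin of
 * 'tryconfig' (typically assembled from vdev label data) is assumed.
 */
#if 0	/* example only */
static void
example_probe(nvlist_t *tryconfig)
{
	nvlist_t *config;
	char *name;
	uint64_t state;

	if ((config = spa_tryimport(tryconfig)) == NULL)
		return;		/* tryconfig wasn't even parsable */

	/* The returned config echoes the pool name and state. */
	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0);
	cmn_err(CE_NOTE, "pool %s, state %llu", name, (u_longlong_t)state);

	nvlist_free(config);
}
#endif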
1308fa9e4066Sahrens 
1309fa9e4066Sahrens /*
1310fa9e4066Sahrens  * Pool export/destroy
1311fa9e4066Sahrens  *
1312fa9e4066Sahrens  * The act of destroying or exporting a pool is very simple.  We make sure there
1313fa9e4066Sahrens  * is no more pending I/O and any references to the pool are gone.  Then, we
1314fa9e4066Sahrens  * update the pool state and sync all the labels to disk, removing the
1315fa9e4066Sahrens  * configuration from the cache afterwards.
1316fa9e4066Sahrens  */
1317fa9e4066Sahrens static int
131844cd46caSbillm spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
1319fa9e4066Sahrens {
1320fa9e4066Sahrens 	spa_t *spa;
1321fa9e4066Sahrens 
132244cd46caSbillm 	if (oldconfig)
132344cd46caSbillm 		*oldconfig = NULL;
132444cd46caSbillm 
1325fa9e4066Sahrens 	if (!(spa_mode & FWRITE))
1326fa9e4066Sahrens 		return (EROFS);
1327fa9e4066Sahrens 
1328fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
1329fa9e4066Sahrens 	if ((spa = spa_lookup(pool)) == NULL) {
1330fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
1331fa9e4066Sahrens 		return (ENOENT);
1332fa9e4066Sahrens 	}
1333fa9e4066Sahrens 
1334ea8dc4b6Seschrock 	/*
1335ea8dc4b6Seschrock 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
1336ea8dc4b6Seschrock 	 * reacquire the namespace lock, and see if we can export.
1337ea8dc4b6Seschrock 	 */
1338ea8dc4b6Seschrock 	spa_open_ref(spa, FTAG);
1339ea8dc4b6Seschrock 	mutex_exit(&spa_namespace_lock);
1340ea8dc4b6Seschrock 	spa_async_suspend(spa);
1341ea8dc4b6Seschrock 	mutex_enter(&spa_namespace_lock);
1342ea8dc4b6Seschrock 	spa_close(spa, FTAG);
1343ea8dc4b6Seschrock 
1344fa9e4066Sahrens 	/*
1345fa9e4066Sahrens 	 * The pool will be in core if it's openable,
1346fa9e4066Sahrens 	 * in which case we can modify its state.
1347fa9e4066Sahrens 	 */
1348fa9e4066Sahrens 	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
1349fa9e4066Sahrens 		/*
1350fa9e4066Sahrens 		 * Objsets may be open only because they're dirty, so we
1351fa9e4066Sahrens 		 * have to force it to sync before checking spa_refcnt.
1352fa9e4066Sahrens 		 */
1353fa9e4066Sahrens 		spa_scrub_suspend(spa);
1354fa9e4066Sahrens 		txg_wait_synced(spa->spa_dsl_pool, 0);
1355fa9e4066Sahrens 
1356ea8dc4b6Seschrock 		/*
1357ea8dc4b6Seschrock 		 * A pool cannot be exported or destroyed if there are active
1358ea8dc4b6Seschrock 		 * references.  If we are resetting a pool, allow references by
1359ea8dc4b6Seschrock 		 * fault injection handlers.
1360ea8dc4b6Seschrock 		 */
1361ea8dc4b6Seschrock 		if (!spa_refcount_zero(spa) ||
1362ea8dc4b6Seschrock 		    (spa->spa_inject_ref != 0 &&
1363ea8dc4b6Seschrock 		    new_state != POOL_STATE_UNINITIALIZED)) {
1364fa9e4066Sahrens 			spa_scrub_resume(spa);
1365ea8dc4b6Seschrock 			spa_async_resume(spa);
1366fa9e4066Sahrens 			mutex_exit(&spa_namespace_lock);
1367fa9e4066Sahrens 			return (EBUSY);
1368fa9e4066Sahrens 		}
1369fa9e4066Sahrens 
1370fa9e4066Sahrens 		spa_scrub_resume(spa);
1371fa9e4066Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
1372fa9e4066Sahrens 
1373fa9e4066Sahrens 		/*
1374fa9e4066Sahrens 		 * We want this to be reflected on every label,
1375fa9e4066Sahrens 		 * so mark them all dirty.  spa_unload() will do the
1376fa9e4066Sahrens 		 * final sync that pushes these changes out.
1377fa9e4066Sahrens 		 */
1378ea8dc4b6Seschrock 		if (new_state != POOL_STATE_UNINITIALIZED) {
13795dabedeeSbonwick 			spa_config_enter(spa, RW_WRITER, FTAG);
1380ea8dc4b6Seschrock 			spa->spa_state = new_state;
13810373e76bSbonwick 			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
1382ea8dc4b6Seschrock 			vdev_config_dirty(spa->spa_root_vdev);
13835dabedeeSbonwick 			spa_config_exit(spa, FTAG);
1384ea8dc4b6Seschrock 		}
1385fa9e4066Sahrens 	}
1386fa9e4066Sahrens 
1387fa9e4066Sahrens 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
1388fa9e4066Sahrens 		spa_unload(spa);
1389fa9e4066Sahrens 		spa_deactivate(spa);
1390fa9e4066Sahrens 	}
1391fa9e4066Sahrens 
139244cd46caSbillm 	if (oldconfig && spa->spa_config)
139344cd46caSbillm 		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
139444cd46caSbillm 
1395ea8dc4b6Seschrock 	if (new_state != POOL_STATE_UNINITIALIZED) {
1396ea8dc4b6Seschrock 		spa_remove(spa);
1397ea8dc4b6Seschrock 		spa_config_sync();
1398ea8dc4b6Seschrock 	}
1399fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
1400fa9e4066Sahrens 
1401fa9e4066Sahrens 	return (0);
1402fa9e4066Sahrens }
1403fa9e4066Sahrens 
1404fa9e4066Sahrens /*
1405fa9e4066Sahrens  * Destroy a storage pool.
1406fa9e4066Sahrens  */
1407fa9e4066Sahrens int
1408fa9e4066Sahrens spa_destroy(char *pool)
1409fa9e4066Sahrens {
141044cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
1411fa9e4066Sahrens }
1412fa9e4066Sahrens 
1413fa9e4066Sahrens /*
1414fa9e4066Sahrens  * Export a storage pool.
1415fa9e4066Sahrens  */
1416fa9e4066Sahrens int
141744cd46caSbillm spa_export(char *pool, nvlist_t **oldconfig)
1418fa9e4066Sahrens {
141944cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
1420fa9e4066Sahrens }
1421fa9e4066Sahrens 
1422ea8dc4b6Seschrock /*
1423ea8dc4b6Seschrock  * Similar to spa_export(), this unloads the spa_t without actually removing it
1424ea8dc4b6Seschrock  * from the namespace in any way.
1425ea8dc4b6Seschrock  */
1426ea8dc4b6Seschrock int
1427ea8dc4b6Seschrock spa_reset(char *pool)
1428ea8dc4b6Seschrock {
142944cd46caSbillm 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
1430ea8dc4b6Seschrock }
1431ea8dc4b6Seschrock 
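/*
 * Illustrative sketch (not part of the original source): exporting a pool
 * while capturing its final configuration.  The caller below is
 * hypothetical; it only shows the 'oldconfig' contract of spa_export(),
 * under which the caller owns (and must free) the duplicated nvlist.
 */
#if 0	/* example only */
static int
example_export(char *pool)
{
	nvlist_t *oldconfig = NULL;
	int error;

	if ((error = spa_export(pool, &oldconfig)) != 0)
		return (error);	/* e.g. EBUSY if still referenced */

	if (oldconfig != NULL)
		nvlist_free(oldconfig);	/* dup'd for us; free when done */

	return (0);
}
#endif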
1432ea8dc4b6Seschrock 
1433fa9e4066Sahrens /*
1434fa9e4066Sahrens  * ==========================================================================
1435fa9e4066Sahrens  * Device manipulation
1436fa9e4066Sahrens  * ==========================================================================
1437fa9e4066Sahrens  */
1438fa9e4066Sahrens 
1439fa9e4066Sahrens /*
1440fa9e4066Sahrens  * Add capacity to a storage pool.
1441fa9e4066Sahrens  */
1442fa9e4066Sahrens int
1443fa9e4066Sahrens spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
1444fa9e4066Sahrens {
1445fa9e4066Sahrens 	uint64_t txg;
14460373e76bSbonwick 	int c, error;
1447fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
14480e34b6a7Sbonwick 	vdev_t *vd, *tvd;
144999653d4eSeschrock 	nvlist_t **spares;
145099653d4eSeschrock 	uint_t i, nspares;
1451fa9e4066Sahrens 
1452fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
1453fa9e4066Sahrens 
145499653d4eSeschrock 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
145599653d4eSeschrock 	    VDEV_ALLOC_ADD)) != 0)
145699653d4eSeschrock 		return (spa_vdev_exit(spa, NULL, txg, error));
1457fa9e4066Sahrens 
145899653d4eSeschrock 	if ((error = spa_validate_spares(spa, nvroot, txg,
145999653d4eSeschrock 	    VDEV_ALLOC_ADD)) != 0)
146099653d4eSeschrock 		return (spa_vdev_exit(spa, vd, txg, error));
146199653d4eSeschrock 
146299653d4eSeschrock 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
146399653d4eSeschrock 	    &spares, &nspares) != 0)
146499653d4eSeschrock 		nspares = 0;
146599653d4eSeschrock 
146699653d4eSeschrock 	if (vd->vdev_children == 0 && nspares == 0)
1467fa9e4066Sahrens 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
1468fa9e4066Sahrens 
146999653d4eSeschrock 	if (vd->vdev_children != 0) {
147099653d4eSeschrock 		if ((error = vdev_create(vd, txg, B_FALSE)) != 0)
147199653d4eSeschrock 			return (spa_vdev_exit(spa, vd, txg, error));
1472fa9e4066Sahrens 
147399653d4eSeschrock 		/*
147499653d4eSeschrock 		 * Transfer each new top-level vdev from vd to rvd.
147599653d4eSeschrock 		 */
147699653d4eSeschrock 		for (c = 0; c < vd->vdev_children; c++) {
147799653d4eSeschrock 			tvd = vd->vdev_child[c];
147899653d4eSeschrock 			vdev_remove_child(vd, tvd);
147999653d4eSeschrock 			tvd->vdev_id = rvd->vdev_children;
148099653d4eSeschrock 			vdev_add_child(rvd, tvd);
148199653d4eSeschrock 			vdev_config_dirty(tvd);
148299653d4eSeschrock 		}
148399653d4eSeschrock 	}
148499653d4eSeschrock 
148599653d4eSeschrock 	if (nspares != 0) {
148699653d4eSeschrock 		if (spa->spa_sparelist != NULL) {
148799653d4eSeschrock 			nvlist_t **oldspares;
148899653d4eSeschrock 			uint_t oldnspares;
148999653d4eSeschrock 			nvlist_t **newspares;
149099653d4eSeschrock 
149199653d4eSeschrock 			VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
149299653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0);
149399653d4eSeschrock 
149499653d4eSeschrock 			newspares = kmem_alloc(sizeof (void *) *
149599653d4eSeschrock 			    (nspares + oldnspares), KM_SLEEP);
149699653d4eSeschrock 			for (i = 0; i < oldnspares; i++)
149799653d4eSeschrock 				VERIFY(nvlist_dup(oldspares[i],
149899653d4eSeschrock 				    &newspares[i], KM_SLEEP) == 0);
149999653d4eSeschrock 			for (i = 0; i < nspares; i++)
150099653d4eSeschrock 				VERIFY(nvlist_dup(spares[i],
150199653d4eSeschrock 				    &newspares[i + oldnspares],
150299653d4eSeschrock 				    KM_SLEEP) == 0);
150399653d4eSeschrock 
150499653d4eSeschrock 			VERIFY(nvlist_remove(spa->spa_sparelist,
150599653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
150699653d4eSeschrock 
150799653d4eSeschrock 			VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
150899653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, newspares,
150999653d4eSeschrock 			    nspares + oldnspares) == 0);
151099653d4eSeschrock 			for (i = 0; i < oldnspares + nspares; i++)
151199653d4eSeschrock 				nvlist_free(newspares[i]);
151299653d4eSeschrock 			kmem_free(newspares, (oldnspares + nspares) *
151399653d4eSeschrock 			    sizeof (void *));
151499653d4eSeschrock 		} else {
151599653d4eSeschrock 			VERIFY(nvlist_alloc(&spa->spa_sparelist,
151699653d4eSeschrock 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
151799653d4eSeschrock 			VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
151899653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
151999653d4eSeschrock 		}
152099653d4eSeschrock 
152199653d4eSeschrock 		spa_load_spares(spa);
152299653d4eSeschrock 		spa->spa_sync_spares = B_TRUE;
1523fa9e4066Sahrens 	}
1524fa9e4066Sahrens 
1525fa9e4066Sahrens 	/*
15260e34b6a7Sbonwick 	 * We have to be careful when adding new vdevs to an existing pool.
15270e34b6a7Sbonwick 	 * If other threads start allocating from these vdevs before we
15280e34b6a7Sbonwick 	 * sync the config cache, and we lose power, then upon reboot we may
15290e34b6a7Sbonwick 	 * fail to open the pool because there are DVAs that the config cache
15300e34b6a7Sbonwick 	 * can't translate.  Therefore, we first add the vdevs without
15310e34b6a7Sbonwick 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
15320373e76bSbonwick 	 * and then let spa_config_update() initialize the new metaslabs.
15330e34b6a7Sbonwick 	 *
15340e34b6a7Sbonwick 	 * spa_load() checks for added-but-not-initialized vdevs, so that
15350e34b6a7Sbonwick 	 * if we lose power at any point in this sequence, the remaining
15360e34b6a7Sbonwick 	 * steps will be completed the next time we load the pool.
15370e34b6a7Sbonwick 	 */
15380373e76bSbonwick 	(void) spa_vdev_exit(spa, vd, txg, 0);
15390e34b6a7Sbonwick 
15400373e76bSbonwick 	mutex_enter(&spa_namespace_lock);
15410373e76bSbonwick 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
15420373e76bSbonwick 	mutex_exit(&spa_namespace_lock);
1543fa9e4066Sahrens 
15440373e76bSbonwick 	return (0);
1545fa9e4066Sahrens }
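/*
 * Illustrative sketch (not part of the original source): the nvlist-array
 * merge idiom used by spa_vdev_add() above, in isolation.  Both arrays
 * are dup'd into a combined array, the old value is removed, the merged
 * array is added, and the temporary dups are freed (nvlist_add_nvlist_array
 * copies its input).  The function name is hypothetical.
 */
#if 0	/* example only */
static void
example_merge_array(nvlist_t *nvl, const char *name,
    nvlist_t **add, uint_t nadd)
{
	nvlist_t **old, **merged;
	uint_t i, nold;

	VERIFY(nvlist_lookup_nvlist_array(nvl, name, &old, &nold) == 0);

	merged = kmem_alloc((nold + nadd) * sizeof (void *), KM_SLEEP);
	for (i = 0; i < nold; i++)
		VERIFY(nvlist_dup(old[i], &merged[i], KM_SLEEP) == 0);
	for (i = 0; i < nadd; i++)
		VERIFY(nvlist_dup(add[i], &merged[nold + i], KM_SLEEP) == 0);

	VERIFY(nvlist_remove(nvl, name, DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(nvl, name, merged, nold + nadd) == 0);

	for (i = 0; i < nold + nadd; i++)
		nvlist_free(merged[i]);
	kmem_free(merged, (nold + nadd) * sizeof (void *));
}
#endif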
1546fa9e4066Sahrens 
1547fa9e4066Sahrens /*
1548fa9e4066Sahrens  * Attach a device to a mirror.  The arguments are the path to any device
1549fa9e4066Sahrens  * in the mirror, and the nvroot for the new device.  If the path specifies
1550fa9e4066Sahrens  * a device that is not mirrored, we automatically insert the mirror vdev.
1551fa9e4066Sahrens  *
1552fa9e4066Sahrens  * If 'replacing' is specified, the new device is intended to replace the
1553fa9e4066Sahrens  * existing device; in this case the two devices are made into their own
1554fa9e4066Sahrens  * mirror using the 'replacing' vdev, which is functionally identical to
1555fa9e4066Sahrens  * the mirror vdev (it actually reuses all the same ops) but has a few
1556fa9e4066Sahrens  * extra rules: you can't attach to it after it's been created, and upon
1557fa9e4066Sahrens  * completion of resilvering, the first disk (the one being replaced)
1558fa9e4066Sahrens  * is automatically detached.
1559fa9e4066Sahrens  */
1560fa9e4066Sahrens int
1561ea8dc4b6Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
1562fa9e4066Sahrens {
1563fa9e4066Sahrens 	uint64_t txg, open_txg;
1564fa9e4066Sahrens 	int error;
1565fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
1566fa9e4066Sahrens 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
156799653d4eSeschrock 	vdev_ops_t *pvops;
1568fa9e4066Sahrens 
1569fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
1570fa9e4066Sahrens 
1571ea8dc4b6Seschrock 	oldvd = vdev_lookup_by_guid(rvd, guid);
1572fa9e4066Sahrens 
1573fa9e4066Sahrens 	if (oldvd == NULL)
1574fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
1575fa9e4066Sahrens 
15760e34b6a7Sbonwick 	if (!oldvd->vdev_ops->vdev_op_leaf)
15770e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
15780e34b6a7Sbonwick 
1579fa9e4066Sahrens 	pvd = oldvd->vdev_parent;
1580fa9e4066Sahrens 
158199653d4eSeschrock 	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
158299653d4eSeschrock 	    VDEV_ALLOC_ADD)) != 0 || newrootvd->vdev_children != 1)
1583fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
1584fa9e4066Sahrens 
1585fa9e4066Sahrens 	newvd = newrootvd->vdev_child[0];
1586fa9e4066Sahrens 
1587fa9e4066Sahrens 	if (!newvd->vdev_ops->vdev_op_leaf)
1588fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
1589fa9e4066Sahrens 
159099653d4eSeschrock 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
1591fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, error));
1592fa9e4066Sahrens 
159399653d4eSeschrock 	if (!replacing) {
159499653d4eSeschrock 		/*
159599653d4eSeschrock 		 * For attach, the only allowable parent is a mirror or the root
159699653d4eSeschrock 		 * vdev.
159799653d4eSeschrock 		 */
159899653d4eSeschrock 		if (pvd->vdev_ops != &vdev_mirror_ops &&
159999653d4eSeschrock 		    pvd->vdev_ops != &vdev_root_ops)
160099653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
160199653d4eSeschrock 
160299653d4eSeschrock 		pvops = &vdev_mirror_ops;
160399653d4eSeschrock 	} else {
160499653d4eSeschrock 		/*
160599653d4eSeschrock 		 * Active hot spares can only be replaced by inactive hot
160699653d4eSeschrock 		 * spares.
160799653d4eSeschrock 		 */
160899653d4eSeschrock 		if (pvd->vdev_ops == &vdev_spare_ops &&
160999653d4eSeschrock 		    pvd->vdev_child[1] == oldvd &&
161099653d4eSeschrock 		    !spa_has_spare(spa, newvd->vdev_guid))
161199653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
161299653d4eSeschrock 
161399653d4eSeschrock 		/*
161499653d4eSeschrock 		 * If the source is a hot spare, and the parent isn't already a
161599653d4eSeschrock 		 * spare, then we want to create a new hot spare.  Otherwise, we
161699653d4eSeschrock 		 * want to create a replacing vdev.
161799653d4eSeschrock 		 */
161899653d4eSeschrock 		if (pvd->vdev_ops == &vdev_replacing_ops)
161999653d4eSeschrock 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
162099653d4eSeschrock 		else if (pvd->vdev_ops != &vdev_spare_ops &&
162199653d4eSeschrock 		    newvd->vdev_isspare)
162299653d4eSeschrock 			pvops = &vdev_spare_ops;
162399653d4eSeschrock 		else
162499653d4eSeschrock 			pvops = &vdev_replacing_ops;
162599653d4eSeschrock 	}
162699653d4eSeschrock 
16272a79c5feSlling 	/*
16282a79c5feSlling 	 * Compare the new device size with the replaceable/attachable
16292a79c5feSlling 	 * device size.
16302a79c5feSlling 	 */
16312a79c5feSlling 	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
1632fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
1633fa9e4066Sahrens 
1634ecc2d604Sbonwick 	/*
1635ecc2d604Sbonwick 	 * The new device cannot have a higher alignment requirement
1636ecc2d604Sbonwick 	 * than the top-level vdev.
1637ecc2d604Sbonwick 	 */
1638ecc2d604Sbonwick 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
1639fa9e4066Sahrens 		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
1640fa9e4066Sahrens 
1641fa9e4066Sahrens 	/*
1642fa9e4066Sahrens 	 * If this is an in-place replacement, update oldvd's path and devid
1643fa9e4066Sahrens 	 * to make it distinguishable from newvd, and unopenable from now on.
1644fa9e4066Sahrens 	 */
1645fa9e4066Sahrens 	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
1646fa9e4066Sahrens 		spa_strfree(oldvd->vdev_path);
1647fa9e4066Sahrens 		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
1648fa9e4066Sahrens 		    KM_SLEEP);
1649fa9e4066Sahrens 		(void) sprintf(oldvd->vdev_path, "%s/%s",
1650fa9e4066Sahrens 		    newvd->vdev_path, "old");
1651fa9e4066Sahrens 		if (oldvd->vdev_devid != NULL) {
1652fa9e4066Sahrens 			spa_strfree(oldvd->vdev_devid);
1653fa9e4066Sahrens 			oldvd->vdev_devid = NULL;
1654fa9e4066Sahrens 		}
1655fa9e4066Sahrens 	}
1656fa9e4066Sahrens 
1657fa9e4066Sahrens 	/*
165899653d4eSeschrock 	 * If the parent is not a mirror, or if we're replacing, insert the new
165999653d4eSeschrock 	 * mirror/replacing/spare vdev above oldvd.
1660fa9e4066Sahrens 	 */
1661fa9e4066Sahrens 	if (pvd->vdev_ops != pvops)
1662fa9e4066Sahrens 		pvd = vdev_add_parent(oldvd, pvops);
1663fa9e4066Sahrens 
1664fa9e4066Sahrens 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
1665fa9e4066Sahrens 	ASSERT(pvd->vdev_ops == pvops);
1666fa9e4066Sahrens 	ASSERT(oldvd->vdev_parent == pvd);
1667fa9e4066Sahrens 
1668fa9e4066Sahrens 	/*
1669fa9e4066Sahrens 	 * Extract the new device from its root and add it to pvd.
1670fa9e4066Sahrens 	 */
1671fa9e4066Sahrens 	vdev_remove_child(newrootvd, newvd);
1672fa9e4066Sahrens 	newvd->vdev_id = pvd->vdev_children;
1673fa9e4066Sahrens 	vdev_add_child(pvd, newvd);
1674fa9e4066Sahrens 
1675ea8dc4b6Seschrock 	/*
1676ea8dc4b6Seschrock 	 * If newvd is smaller than oldvd, but larger than its rsize,
1677ea8dc4b6Seschrock 	 * the addition of newvd may have decreased our parent's asize.
1678ea8dc4b6Seschrock 	 */
1679ea8dc4b6Seschrock 	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
1680ea8dc4b6Seschrock 
1681fa9e4066Sahrens 	tvd = newvd->vdev_top;
1682fa9e4066Sahrens 	ASSERT(pvd->vdev_top == tvd);
1683fa9e4066Sahrens 	ASSERT(tvd->vdev_parent == rvd);
1684fa9e4066Sahrens 
1685fa9e4066Sahrens 	vdev_config_dirty(tvd);
1686fa9e4066Sahrens 
1687fa9e4066Sahrens 	/*
1688fa9e4066Sahrens 	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
1689fa9e4066Sahrens 	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
1690fa9e4066Sahrens 	 */
1691fa9e4066Sahrens 	open_txg = txg + TXG_CONCURRENT_STATES - 1;
1692fa9e4066Sahrens 
1693fa9e4066Sahrens 	mutex_enter(&newvd->vdev_dtl_lock);
1694fa9e4066Sahrens 	space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
1695fa9e4066Sahrens 	    open_txg - TXG_INITIAL + 1);
1696fa9e4066Sahrens 	mutex_exit(&newvd->vdev_dtl_lock);
1697fa9e4066Sahrens 
1698ea8dc4b6Seschrock 	dprintf("attached %s in txg %llu\n", newvd->vdev_path, txg);
1699ea8dc4b6Seschrock 
1700fa9e4066Sahrens 	/*
1701fa9e4066Sahrens 	 * Mark newvd's DTL dirty in this txg.
1702fa9e4066Sahrens 	 */
1703ecc2d604Sbonwick 	vdev_dirty(tvd, VDD_DTL, newvd, txg);
1704fa9e4066Sahrens 
1705fa9e4066Sahrens 	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
1706fa9e4066Sahrens 
1707fa9e4066Sahrens 	/*
1708fa9e4066Sahrens 	 * Kick off a resilver to update newvd.
1709fa9e4066Sahrens 	 */
1710fa9e4066Sahrens 	VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
1711fa9e4066Sahrens 
1712fa9e4066Sahrens 	return (0);
1713fa9e4066Sahrens }
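/*
 * Illustrative sketch (not part of the original source): building the
 * one-child nvroot that spa_vdev_attach() expects.  VDEV_TYPE_ROOT,
 * VDEV_TYPE_DISK, and the ZPOOL_CONFIG_* names are the usual config
 * keys; the device path and function name are made up.
 */
#if 0	/* example only */
static int
example_attach(spa_t *spa, uint64_t guid, int replacing)
{
	nvlist_t *nvroot, *child;
	int error;

	VERIFY(nvlist_alloc(&child, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(child, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	VERIFY(nvlist_add_string(child, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c9t9d9s0") == 0);

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, 1) == 0);

	error = spa_vdev_attach(spa, guid, nvroot, replacing);

	nvlist_free(child);
	nvlist_free(nvroot);
	return (error);
}
#endif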
1714fa9e4066Sahrens 
1715fa9e4066Sahrens /*
1716fa9e4066Sahrens  * Detach a device from a mirror or replacing vdev.
1717fa9e4066Sahrens  * If 'replace_done' is specified, only detach if the parent
1718fa9e4066Sahrens  * is a replacing vdev.
1719fa9e4066Sahrens  */
1720fa9e4066Sahrens int
1721ea8dc4b6Seschrock spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
1722fa9e4066Sahrens {
1723fa9e4066Sahrens 	uint64_t txg;
1724fa9e4066Sahrens 	int c, t, error;
1725fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
1726fa9e4066Sahrens 	vdev_t *vd, *pvd, *cvd, *tvd;
172799653d4eSeschrock 	boolean_t unspare = B_FALSE;
172899653d4eSeschrock 	uint64_t unspare_guid;
1729fa9e4066Sahrens 
1730fa9e4066Sahrens 	txg = spa_vdev_enter(spa);
1731fa9e4066Sahrens 
1732ea8dc4b6Seschrock 	vd = vdev_lookup_by_guid(rvd, guid);
1733fa9e4066Sahrens 
1734fa9e4066Sahrens 	if (vd == NULL)
1735fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
1736fa9e4066Sahrens 
17370e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
17380e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
17390e34b6a7Sbonwick 
1740fa9e4066Sahrens 	pvd = vd->vdev_parent;
1741fa9e4066Sahrens 
1742fa9e4066Sahrens 	/*
1743fa9e4066Sahrens 	 * If replace_done is specified, only remove this device if it's
174499653d4eSeschrock 	 * the first child of a replacing vdev.  For the 'spare' vdev, either
174599653d4eSeschrock 	 * disk can be removed.
174699653d4eSeschrock 	 */
174799653d4eSeschrock 	if (replace_done) {
174899653d4eSeschrock 		if (pvd->vdev_ops == &vdev_replacing_ops) {
174999653d4eSeschrock 			if (vd->vdev_id != 0)
175099653d4eSeschrock 				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
175199653d4eSeschrock 		} else if (pvd->vdev_ops != &vdev_spare_ops) {
175299653d4eSeschrock 			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
175399653d4eSeschrock 		}
175499653d4eSeschrock 	}
175599653d4eSeschrock 
175699653d4eSeschrock 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
175799653d4eSeschrock 	    spa_version(spa) >= ZFS_VERSION_SPARES);
1758fa9e4066Sahrens 
1759fa9e4066Sahrens 	/*
176099653d4eSeschrock 	 * Only mirror, replacing, and spare vdevs support detach.
1761fa9e4066Sahrens 	 */
1762fa9e4066Sahrens 	if (pvd->vdev_ops != &vdev_replacing_ops &&
176399653d4eSeschrock 	    pvd->vdev_ops != &vdev_mirror_ops &&
176499653d4eSeschrock 	    pvd->vdev_ops != &vdev_spare_ops)
1765fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
1766fa9e4066Sahrens 
1767fa9e4066Sahrens 	/*
1768fa9e4066Sahrens 	 * If there's only one replica, you can't detach it.
1769fa9e4066Sahrens 	 */
1770fa9e4066Sahrens 	if (pvd->vdev_children <= 1)
1771fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
1772fa9e4066Sahrens 
1773fa9e4066Sahrens 	/*
1774fa9e4066Sahrens 	 * If all siblings have non-empty DTLs, this device may have the only
1775fa9e4066Sahrens 	 * valid copy of the data, which means we cannot safely detach it.
1776fa9e4066Sahrens 	 *
1777fa9e4066Sahrens 	 * XXX -- as in the vdev_offline() case, we really want a more
1778fa9e4066Sahrens 	 * precise DTL check.
1779fa9e4066Sahrens 	 */
1780fa9e4066Sahrens 	for (c = 0; c < pvd->vdev_children; c++) {
1781fa9e4066Sahrens 		uint64_t dirty;
1782fa9e4066Sahrens 
1783fa9e4066Sahrens 		cvd = pvd->vdev_child[c];
1784fa9e4066Sahrens 		if (cvd == vd)
1785fa9e4066Sahrens 			continue;
1786fa9e4066Sahrens 		if (vdev_is_dead(cvd))
1787fa9e4066Sahrens 			continue;
1788fa9e4066Sahrens 		mutex_enter(&cvd->vdev_dtl_lock);
1789fa9e4066Sahrens 		dirty = cvd->vdev_dtl_map.sm_space |
1790fa9e4066Sahrens 		    cvd->vdev_dtl_scrub.sm_space;
1791fa9e4066Sahrens 		mutex_exit(&cvd->vdev_dtl_lock);
1792fa9e4066Sahrens 		if (!dirty)
1793fa9e4066Sahrens 			break;
1794fa9e4066Sahrens 	}
179599653d4eSeschrock 
179699653d4eSeschrock 	/*
179799653d4eSeschrock 	 * If we are a replacing or spare vdev, then we can always detach the
179899653d4eSeschrock 	 * latter child, as that is how one cancels the operation.
179999653d4eSeschrock 	 */
180099653d4eSeschrock 	if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) &&
180199653d4eSeschrock 	    c == pvd->vdev_children)
1802fa9e4066Sahrens 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
1803fa9e4066Sahrens 
180499653d4eSeschrock 	/*
180599653d4eSeschrock 	 * If we are detaching the original disk from a spare, then it implies
180699653d4eSeschrock 	 * that the spare should become a real disk, and be removed from the
180799653d4eSeschrock 	 * active spare list for the pool.
180899653d4eSeschrock 	 */
180999653d4eSeschrock 	if (pvd->vdev_ops == &vdev_spare_ops &&
181099653d4eSeschrock 	    vd->vdev_id == 0)
181199653d4eSeschrock 		unspare = B_TRUE;
181299653d4eSeschrock 
1813fa9e4066Sahrens 	/*
1814fa9e4066Sahrens 	 * Erase the disk labels so the disk can be used for other things.
1815fa9e4066Sahrens 	 * This must be done after all other error cases are handled,
1816fa9e4066Sahrens 	 * but before we disembowel vd (so we can still do I/O to it).
1817fa9e4066Sahrens 	 * But if we can't do it, don't treat the error as fatal --
1818fa9e4066Sahrens 	 * it may be that the unwritability of the disk is the reason
1819fa9e4066Sahrens 	 * it's being detached!
1820fa9e4066Sahrens 	 */
182199653d4eSeschrock 	error = vdev_label_init(vd, 0, B_FALSE);
1822fa9e4066Sahrens 	if (error)
1823fa9e4066Sahrens 		dprintf("unable to erase labels on %s\n", vdev_description(vd));
1824fa9e4066Sahrens 
1825fa9e4066Sahrens 	/*
1826fa9e4066Sahrens 	 * Remove vd from its parent and compact the parent's children.
1827fa9e4066Sahrens 	 */
1828fa9e4066Sahrens 	vdev_remove_child(pvd, vd);
1829fa9e4066Sahrens 	vdev_compact_children(pvd);
1830fa9e4066Sahrens 
1831fa9e4066Sahrens 	/*
1832fa9e4066Sahrens 	 * Remember one of the remaining children so we can get tvd below.
1833fa9e4066Sahrens 	 */
1834fa9e4066Sahrens 	cvd = pvd->vdev_child[0];
1835fa9e4066Sahrens 
183699653d4eSeschrock 	/*
183799653d4eSeschrock 	 * If we need to remove the remaining child from the list of hot spares,
183899653d4eSeschrock 	 * do it now, marking the vdev as no longer a spare in the process.  We
183999653d4eSeschrock 	 * must do this before vdev_remove_parent(), because that can change the
184099653d4eSeschrock 	 * GUID if it creates a new toplevel GUID.
184199653d4eSeschrock 	 */
184299653d4eSeschrock 	if (unspare) {
184399653d4eSeschrock 		ASSERT(cvd->vdev_isspare);
184499653d4eSeschrock 		spa_spare_remove(cvd->vdev_guid);
184599653d4eSeschrock 		cvd->vdev_isspare = B_FALSE;
184699653d4eSeschrock 		unspare_guid = cvd->vdev_guid;
184799653d4eSeschrock 	}
184899653d4eSeschrock 
1849fa9e4066Sahrens 	/*
1850fa9e4066Sahrens 	 * If the parent mirror/replacing vdev only has one child,
1851fa9e4066Sahrens 	 * the parent is no longer needed.  Remove it from the tree.
1852fa9e4066Sahrens 	 */
1853fa9e4066Sahrens 	if (pvd->vdev_children == 1)
1854fa9e4066Sahrens 		vdev_remove_parent(cvd);
1855fa9e4066Sahrens 
1856fa9e4066Sahrens 	/*
1857fa9e4066Sahrens 	 * We don't set tvd until now because the parent we just removed
1858fa9e4066Sahrens 	 * may have been the previous top-level vdev.
1859fa9e4066Sahrens 	 */
1860fa9e4066Sahrens 	tvd = cvd->vdev_top;
1861fa9e4066Sahrens 	ASSERT(tvd->vdev_parent == rvd);
1862fa9e4066Sahrens 
1863fa9e4066Sahrens 	/*
1864fa9e4066Sahrens 	 * Reopen this top-level vdev to reassess health after detach.
1865fa9e4066Sahrens 	 */
1866ea8dc4b6Seschrock 	vdev_reopen(tvd);
1867fa9e4066Sahrens 
1868fa9e4066Sahrens 	/*
1869fa9e4066Sahrens 	 * If the device we just detached was smaller than the others,
1870ecc2d604Sbonwick 	 * it may be possible to add metaslabs (i.e. grow the pool).
1871ecc2d604Sbonwick 	 * vdev_metaslab_init() can't fail because the existing metaslabs
1872ecc2d604Sbonwick 	 * are already in core, so there's nothing to read from disk.
1873fa9e4066Sahrens 	 */
1874ecc2d604Sbonwick 	VERIFY(vdev_metaslab_init(tvd, txg) == 0);
1875fa9e4066Sahrens 
1876fa9e4066Sahrens 	vdev_config_dirty(tvd);
1877fa9e4066Sahrens 
1878fa9e4066Sahrens 	/*
1879fa9e4066Sahrens 	 * Mark vd's DTL as dirty in this txg.
1880fa9e4066Sahrens 	 * vdev_dtl_sync() will see that vd->vdev_detached is set
1881fa9e4066Sahrens 	 * and free vd's DTL object in syncing context.
1882fa9e4066Sahrens 	 * But first make sure we're not on any *other* txg's DTL list,
1883fa9e4066Sahrens 	 * to prevent vd from being accessed after it's freed.
1884fa9e4066Sahrens 	 */
1885fa9e4066Sahrens 	for (t = 0; t < TXG_SIZE; t++)
1886fa9e4066Sahrens 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
1887ecc2d604Sbonwick 	vd->vdev_detached = B_TRUE;
1888ecc2d604Sbonwick 	vdev_dirty(tvd, VDD_DTL, vd, txg);
1889fa9e4066Sahrens 
1890ea8dc4b6Seschrock 	dprintf("detached %s in txg %llu\n", vd->vdev_path, txg);
1891fa9e4066Sahrens 
189299653d4eSeschrock 	error = spa_vdev_exit(spa, vd, txg, 0);
189399653d4eSeschrock 
189499653d4eSeschrock 	/*
189599653d4eSeschrock 	 * If we are supposed to remove the given vdev from the list of spares,
189699653d4eSeschrock 	 * iterate over all pools in the system and replace it if it's present.
189799653d4eSeschrock 	 */
189899653d4eSeschrock 	if (unspare) {
189999653d4eSeschrock 		spa = NULL;
190099653d4eSeschrock 		mutex_enter(&spa_namespace_lock);
190199653d4eSeschrock 		while ((spa = spa_next(spa)) != NULL) {
190299653d4eSeschrock 			if (spa->spa_state != POOL_STATE_ACTIVE)
190399653d4eSeschrock 				continue;
190499653d4eSeschrock 
190599653d4eSeschrock 			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
190699653d4eSeschrock 		}
190799653d4eSeschrock 		mutex_exit(&spa_namespace_lock);
190899653d4eSeschrock 	}
190999653d4eSeschrock 
191099653d4eSeschrock 	return (error);
191199653d4eSeschrock }
191299653d4eSeschrock 
191399653d4eSeschrock /*
191499653d4eSeschrock  * Remove a device from the pool.  Currently, this supports removing only hot
191599653d4eSeschrock  * spares.
191699653d4eSeschrock  */
191799653d4eSeschrock int
191899653d4eSeschrock spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
191999653d4eSeschrock {
192099653d4eSeschrock 	vdev_t *vd;
192199653d4eSeschrock 	nvlist_t **spares, *nv, **newspares;
192299653d4eSeschrock 	uint_t i, j, nspares;
192399653d4eSeschrock 	int ret = 0;
192499653d4eSeschrock 
192599653d4eSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
192699653d4eSeschrock 
192799653d4eSeschrock 	vd = spa_lookup_by_guid(spa, guid);
192899653d4eSeschrock 
192999653d4eSeschrock 	nv = NULL;
193099653d4eSeschrock 	if (spa->spa_spares != NULL &&
193199653d4eSeschrock 	    nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
193299653d4eSeschrock 	    &spares, &nspares) == 0) {
193399653d4eSeschrock 		for (i = 0; i < nspares; i++) {
193499653d4eSeschrock 			uint64_t theguid;
193599653d4eSeschrock 
193699653d4eSeschrock 			VERIFY(nvlist_lookup_uint64(spares[i],
193799653d4eSeschrock 			    ZPOOL_CONFIG_GUID, &theguid) == 0);
193899653d4eSeschrock 			if (theguid == guid) {
193999653d4eSeschrock 				nv = spares[i];
194099653d4eSeschrock 				break;
194199653d4eSeschrock 			}
194299653d4eSeschrock 		}
194399653d4eSeschrock 	}
194499653d4eSeschrock 
194599653d4eSeschrock 	/*
194699653d4eSeschrock 	 * We only support removing a hot spare, and only if it's not currently
194799653d4eSeschrock 	 * in use in this pool.
194899653d4eSeschrock 	 */
194999653d4eSeschrock 	if (nv == NULL && vd == NULL) {
195099653d4eSeschrock 		ret = ENOENT;
195199653d4eSeschrock 		goto out;
195299653d4eSeschrock 	}
195399653d4eSeschrock 
195499653d4eSeschrock 	if (nv == NULL && vd != NULL) {
195599653d4eSeschrock 		ret = ENOTSUP;
195699653d4eSeschrock 		goto out;
195799653d4eSeschrock 	}
195899653d4eSeschrock 
195999653d4eSeschrock 	if (!unspare && nv != NULL && vd != NULL) {
196099653d4eSeschrock 		ret = EBUSY;
196199653d4eSeschrock 		goto out;
196299653d4eSeschrock 	}
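	/*
	 * Summary of the checks above (nv: entry in the spare list,
	 * vd: active vdev in this pool):
	 *
	 *	nv == NULL, vd == NULL	ENOENT	not a known spare
	 *	nv == NULL, vd != NULL	ENOTSUP	ordinary vdev; use detach
	 *	nv != NULL, vd != NULL	EBUSY	spare is in use (unless
	 *					'unspare' is set)
	 *	nv != NULL, vd == NULL	ok	inactive spare; remove it
	 */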
196399653d4eSeschrock 
196499653d4eSeschrock 	if (nspares == 1) {
196599653d4eSeschrock 		newspares = NULL;
196699653d4eSeschrock 	} else {
196799653d4eSeschrock 		newspares = kmem_alloc((nspares - 1) * sizeof (void *),
196899653d4eSeschrock 		    KM_SLEEP);
196999653d4eSeschrock 		for (i = 0, j = 0; i < nspares; i++) {
197099653d4eSeschrock 			if (spares[i] != nv)
197199653d4eSeschrock 				VERIFY(nvlist_dup(spares[i],
197299653d4eSeschrock 				    &newspares[j++], KM_SLEEP) == 0);
197399653d4eSeschrock 		}
197499653d4eSeschrock 	}
197599653d4eSeschrock 
197699653d4eSeschrock 	VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
197799653d4eSeschrock 	    DATA_TYPE_NVLIST_ARRAY) == 0);
197899653d4eSeschrock 	VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
197999653d4eSeschrock 	    newspares, nspares - 1) == 0);
198099653d4eSeschrock 	for (i = 0; i < nspares - 1; i++)
198199653d4eSeschrock 		nvlist_free(newspares[i]);
198299653d4eSeschrock 	kmem_free(newspares, (nspares - 1) * sizeof (void *));
198399653d4eSeschrock 	spa_load_spares(spa);
198499653d4eSeschrock 	spa->spa_sync_spares = B_TRUE;
198599653d4eSeschrock 
198699653d4eSeschrock out:
198799653d4eSeschrock 	spa_config_exit(spa, FTAG);
198899653d4eSeschrock 
198999653d4eSeschrock 	return (ret);
1990fa9e4066Sahrens }
1991fa9e4066Sahrens 
1992fa9e4066Sahrens /*
1993ea8dc4b6Seschrock  * Find any device that's done replacing, so we can detach it.
1994fa9e4066Sahrens  */
1995ea8dc4b6Seschrock static vdev_t *
1996ea8dc4b6Seschrock spa_vdev_replace_done_hunt(vdev_t *vd)
1997fa9e4066Sahrens {
1998ea8dc4b6Seschrock 	vdev_t *newvd, *oldvd;
1999fa9e4066Sahrens 	int c;
2000fa9e4066Sahrens 
2001ea8dc4b6Seschrock 	for (c = 0; c < vd->vdev_children; c++) {
2002ea8dc4b6Seschrock 		oldvd = spa_vdev_replace_done_hunt(vd->vdev_child[c]);
2003ea8dc4b6Seschrock 		if (oldvd != NULL)
2004ea8dc4b6Seschrock 			return (oldvd);
2005ea8dc4b6Seschrock 	}
2006fa9e4066Sahrens 
2007fa9e4066Sahrens 	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
2008ea8dc4b6Seschrock 		oldvd = vd->vdev_child[0];
2009ea8dc4b6Seschrock 		newvd = vd->vdev_child[1];
2010ea8dc4b6Seschrock 
2011ea8dc4b6Seschrock 		mutex_enter(&newvd->vdev_dtl_lock);
2012ea8dc4b6Seschrock 		if (newvd->vdev_dtl_map.sm_space == 0 &&
2013ea8dc4b6Seschrock 		    newvd->vdev_dtl_scrub.sm_space == 0) {
2014ea8dc4b6Seschrock 			mutex_exit(&newvd->vdev_dtl_lock);
2015ea8dc4b6Seschrock 			return (oldvd);
2016fa9e4066Sahrens 		}
2017ea8dc4b6Seschrock 		mutex_exit(&newvd->vdev_dtl_lock);
2018fa9e4066Sahrens 	}
2019ea8dc4b6Seschrock 
2020ea8dc4b6Seschrock 	return (NULL);
2021fa9e4066Sahrens }
2022fa9e4066Sahrens 
2023ea8dc4b6Seschrock static void
2024fa9e4066Sahrens spa_vdev_replace_done(spa_t *spa)
2025fa9e4066Sahrens {
2026ea8dc4b6Seschrock 	vdev_t *vd;
202799653d4eSeschrock 	vdev_t *pvd;
2028ea8dc4b6Seschrock 	uint64_t guid;
202999653d4eSeschrock 	uint64_t pguid = 0;
2030ea8dc4b6Seschrock 
2031ea8dc4b6Seschrock 	spa_config_enter(spa, RW_READER, FTAG);
2032ea8dc4b6Seschrock 
2033ea8dc4b6Seschrock 	while ((vd = spa_vdev_replace_done_hunt(spa->spa_root_vdev)) != NULL) {
2034ea8dc4b6Seschrock 		guid = vd->vdev_guid;
203599653d4eSeschrock 		/*
203699653d4eSeschrock 		 * If we have just finished replacing a hot spared device, then
203799653d4eSeschrock 		 * we need to detach the parent's first child (the original hot
203899653d4eSeschrock 		 * spare) as well.
203999653d4eSeschrock 		 */
204099653d4eSeschrock 		pvd = vd->vdev_parent;
204199653d4eSeschrock 		if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
204299653d4eSeschrock 		    pvd->vdev_id == 0) {
204399653d4eSeschrock 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
204499653d4eSeschrock 			ASSERT(pvd->vdev_parent->vdev_children == 2);
204599653d4eSeschrock 			pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
204699653d4eSeschrock 		}
2047ea8dc4b6Seschrock 		spa_config_exit(spa, FTAG);
2048ea8dc4b6Seschrock 		if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
2049ea8dc4b6Seschrock 			return;
205099653d4eSeschrock 		if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
205199653d4eSeschrock 			return;
2052ea8dc4b6Seschrock 		spa_config_enter(spa, RW_READER, FTAG);
2053fa9e4066Sahrens 	}
2054fa9e4066Sahrens 
2055ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
2056fa9e4066Sahrens }
2057fa9e4066Sahrens 
2058c67d9675Seschrock /*
2059c67d9675Seschrock  * Update the stored path for this vdev.  Dirty the vdev configuration, relying
2060c67d9675Seschrock  * on spa_vdev_enter/exit() to synchronize the labels and cache.
2061c67d9675Seschrock  */
2062c67d9675Seschrock int
2063c67d9675Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
2064c67d9675Seschrock {
2065c67d9675Seschrock 	vdev_t *rvd, *vd;
2066c67d9675Seschrock 	uint64_t txg;
2067c67d9675Seschrock 
2068c67d9675Seschrock 	rvd = spa->spa_root_vdev;
2069c67d9675Seschrock 
2070c67d9675Seschrock 	txg = spa_vdev_enter(spa);
2071c67d9675Seschrock 
207299653d4eSeschrock 	if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
207399653d4eSeschrock 		/*
207499653d4eSeschrock 		 * Determine if this is a reference to a hot spare.  In that
207599653d4eSeschrock 		 * case, update the path as stored in the spare list.
207699653d4eSeschrock 		 */
207799653d4eSeschrock 		nvlist_t **spares;
207899653d4eSeschrock 		uint_t i, nspares;
207999653d4eSeschrock 		if (spa->spa_sparelist != NULL) {
208099653d4eSeschrock 			VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
208199653d4eSeschrock 			    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
208299653d4eSeschrock 			for (i = 0; i < nspares; i++) {
208399653d4eSeschrock 				uint64_t theguid;
208499653d4eSeschrock 				VERIFY(nvlist_lookup_uint64(spares[i],
208599653d4eSeschrock 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
208699653d4eSeschrock 				if (theguid == guid)
208799653d4eSeschrock 					break;
208899653d4eSeschrock 			}
208999653d4eSeschrock 
209099653d4eSeschrock 			if (i == nspares)
209199653d4eSeschrock 				return (spa_vdev_exit(spa, NULL, txg, ENOENT));
209299653d4eSeschrock 
209399653d4eSeschrock 			VERIFY(nvlist_add_string(spares[i],
209499653d4eSeschrock 			    ZPOOL_CONFIG_PATH, newpath) == 0);
209599653d4eSeschrock 			spa_load_spares(spa);
209699653d4eSeschrock 			spa->spa_sync_spares = B_TRUE;
209799653d4eSeschrock 			return (spa_vdev_exit(spa, NULL, txg, 0));
209899653d4eSeschrock 		} else {
209999653d4eSeschrock 			return (spa_vdev_exit(spa, NULL, txg, ENOENT));
210099653d4eSeschrock 		}
210199653d4eSeschrock 	}
2102c67d9675Seschrock 
21030e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
21040e34b6a7Sbonwick 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
21050e34b6a7Sbonwick 
2106c67d9675Seschrock 	spa_strfree(vd->vdev_path);
2107c67d9675Seschrock 	vd->vdev_path = spa_strdup(newpath);
2108c67d9675Seschrock 
2109c67d9675Seschrock 	vdev_config_dirty(vd->vdev_top);
2110c67d9675Seschrock 
2111c67d9675Seschrock 	return (spa_vdev_exit(spa, NULL, txg, 0));
2112c67d9675Seschrock }
2113c67d9675Seschrock 
2114fa9e4066Sahrens /*
2115fa9e4066Sahrens  * ==========================================================================
2116fa9e4066Sahrens  * SPA Scrubbing
2117fa9e4066Sahrens  * ==========================================================================
2118fa9e4066Sahrens  */
2119fa9e4066Sahrens 
2120ea8dc4b6Seschrock void
2121ea8dc4b6Seschrock spa_scrub_throttle(spa_t *spa, int direction)
2122ea8dc4b6Seschrock {
2123ea8dc4b6Seschrock 	mutex_enter(&spa->spa_scrub_lock);
2124ea8dc4b6Seschrock 	spa->spa_scrub_throttled += direction;
2125ea8dc4b6Seschrock 	ASSERT(spa->spa_scrub_throttled >= 0);
2126ea8dc4b6Seschrock 	if (spa->spa_scrub_throttled == 0)
2127ea8dc4b6Seschrock 		cv_broadcast(&spa->spa_scrub_io_cv);
2128ea8dc4b6Seschrock 	mutex_exit(&spa->spa_scrub_lock);
2129ea8dc4b6Seschrock }
2130fa9e4066Sahrens 
2131fa9e4066Sahrens static void
2132fa9e4066Sahrens spa_scrub_io_done(zio_t *zio)
2133fa9e4066Sahrens {
2134fa9e4066Sahrens 	spa_t *spa = zio->io_spa;
2135fa9e4066Sahrens 
2136fa9e4066Sahrens 	zio_buf_free(zio->io_data, zio->io_size);
2137fa9e4066Sahrens 
2138fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
2139ea8dc4b6Seschrock 	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
214044cd46caSbillm 		vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev;
2141ea8dc4b6Seschrock 		spa->spa_scrub_errors++;
2142fa9e4066Sahrens 		mutex_enter(&vd->vdev_stat_lock);
2143fa9e4066Sahrens 		vd->vdev_stat.vs_scrub_errors++;
2144fa9e4066Sahrens 		mutex_exit(&vd->vdev_stat_lock);
2145fa9e4066Sahrens 	}
2146ea8dc4b6Seschrock 	if (--spa->spa_scrub_inflight == 0) {
2147ea8dc4b6Seschrock 		cv_broadcast(&spa->spa_scrub_io_cv);
2148ea8dc4b6Seschrock 		ASSERT(spa->spa_scrub_throttled == 0);
2149ea8dc4b6Seschrock 	}
2150ea8dc4b6Seschrock 	mutex_exit(&spa->spa_scrub_lock);
2151fa9e4066Sahrens }
2152fa9e4066Sahrens 
2153fa9e4066Sahrens static void
2154ea8dc4b6Seschrock spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags,
2155ea8dc4b6Seschrock     zbookmark_t *zb)
2156fa9e4066Sahrens {
2157fa9e4066Sahrens 	size_t size = BP_GET_LSIZE(bp);
2158fa9e4066Sahrens 	void *data = zio_buf_alloc(size);
2159fa9e4066Sahrens 
2160fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
2161fa9e4066Sahrens 	spa->spa_scrub_inflight++;
2162fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
2163fa9e4066Sahrens 
2164ea8dc4b6Seschrock 	if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
2165ea8dc4b6Seschrock 		flags |= ZIO_FLAG_SPECULATIVE;	/* intent log block */
2166ea8dc4b6Seschrock 
2167d80c45e0Sbonwick 	flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
2168ea8dc4b6Seschrock 
2169fa9e4066Sahrens 	zio_nowait(zio_read(NULL, spa, bp, data, size,
2170ea8dc4b6Seschrock 	    spa_scrub_io_done, NULL, priority, flags, zb));
2171fa9e4066Sahrens }
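/*
 * Illustrative sketch (not part of the original source): the in-flight
 * accounting protocol used by the scrub I/O above, reduced to its
 * essentials.  The issuing side bumps a counter under the lock; the done
 * callback decrements it and broadcasts when it reaches zero, so a
 * drainer can cv_wait() until everything has landed.  Function names
 * are hypothetical.
 */
#if 0	/* example only */
static void
example_issue(spa_t *spa)
{
	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight++;	/* before issuing the async I/O */
	mutex_exit(&spa->spa_scrub_lock);
	/* ... zio_nowait(...) with a done callback that decrements ... */
}

static void
example_drain(spa_t *spa)
{
	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_scrub_inflight != 0)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	mutex_exit(&spa->spa_scrub_lock);
}
#endif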
2172fa9e4066Sahrens 
2173fa9e4066Sahrens /* ARGSUSED */
2174fa9e4066Sahrens static int
2175fa9e4066Sahrens spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a)
2176fa9e4066Sahrens {
2177fa9e4066Sahrens 	blkptr_t *bp = &bc->bc_blkptr;
217844cd46caSbillm 	vdev_t *vd = spa->spa_root_vdev;
217944cd46caSbillm 	dva_t *dva = bp->blk_dva;
218044cd46caSbillm 	int needs_resilver = B_FALSE;
218144cd46caSbillm 	int d;
2182fa9e4066Sahrens 
218344cd46caSbillm 	if (bc->bc_errno) {
2184fa9e4066Sahrens 		/*
2185fa9e4066Sahrens 		 * We can't scrub this block, but we can continue to scrub
2186fa9e4066Sahrens 		 * the rest of the pool.  Note the error and move along.
2187fa9e4066Sahrens 		 */
2188fa9e4066Sahrens 		mutex_enter(&spa->spa_scrub_lock);
2189fa9e4066Sahrens 		spa->spa_scrub_errors++;
2190fa9e4066Sahrens 		mutex_exit(&spa->spa_scrub_lock);
2191fa9e4066Sahrens 
219244cd46caSbillm 		mutex_enter(&vd->vdev_stat_lock);
219344cd46caSbillm 		vd->vdev_stat.vs_scrub_errors++;
219444cd46caSbillm 		mutex_exit(&vd->vdev_stat_lock);
2195fa9e4066Sahrens 
2196fa9e4066Sahrens 		return (ERESTART);
2197fa9e4066Sahrens 	}
2198fa9e4066Sahrens 
2199fa9e4066Sahrens 	ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg);
2200fa9e4066Sahrens 
220144cd46caSbillm 	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
220244cd46caSbillm 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]));
2203fa9e4066Sahrens 
220444cd46caSbillm 		ASSERT(vd != NULL);
220544cd46caSbillm 
220644cd46caSbillm 		/*
220744cd46caSbillm 		 * Keep track of how much data we've examined so that
220844cd46caSbillm 		 * zpool(1M) status can make useful progress reports.
220944cd46caSbillm 		 */
221044cd46caSbillm 		mutex_enter(&vd->vdev_stat_lock);
221144cd46caSbillm 		vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]);
221244cd46caSbillm 		mutex_exit(&vd->vdev_stat_lock);
221344cd46caSbillm 
221444cd46caSbillm 		if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) {
221544cd46caSbillm 			if (DVA_GET_GANG(&dva[d])) {
221644cd46caSbillm 				/*
221744cd46caSbillm 				 * Gang members may be spread across multiple
221844cd46caSbillm 				 * vdevs, so the best we can do is look at the
221944cd46caSbillm 				 * pool-wide DTL.
222044cd46caSbillm 				 * XXX -- it would be better to change our
222144cd46caSbillm 				 * allocation policy to ensure that this can't
222244cd46caSbillm 				 * happen.
222344cd46caSbillm 				 */
222444cd46caSbillm 				vd = spa->spa_root_vdev;
222544cd46caSbillm 			}
222644cd46caSbillm 			if (vdev_dtl_contains(&vd->vdev_dtl_map,
222744cd46caSbillm 			    bp->blk_birth, 1))
222844cd46caSbillm 				needs_resilver = B_TRUE;
2229fa9e4066Sahrens 		}
223044cd46caSbillm 	}
223144cd46caSbillm 
223244cd46caSbillm 	if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING)
2233fa9e4066Sahrens 		spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB,
2234ea8dc4b6Seschrock 		    ZIO_FLAG_SCRUB, &bc->bc_bookmark);
223544cd46caSbillm 	else if (needs_resilver)
223644cd46caSbillm 		spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER,
223744cd46caSbillm 		    ZIO_FLAG_RESILVER, &bc->bc_bookmark);
2238fa9e4066Sahrens 
2239fa9e4066Sahrens 	return (0);
2240fa9e4066Sahrens }
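/*
 * Worked example for the resilver decision in spa_scrub_cb() above
 * (illustrative, not from the original source): suppose a device was
 * offline for txgs 48-52, so its DTL contains the segment [48, 53).
 * A block with blk_birth == 50 satisfies
 * vdev_dtl_contains(&vd->vdev_dtl_map, 50, 1) and is reissued with
 * ZIO_FLAG_RESILVER; a block born in txg 47 is skipped, since the
 * device already holds a good copy of it.
 */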
2241fa9e4066Sahrens 
2242fa9e4066Sahrens static void
2243fa9e4066Sahrens spa_scrub_thread(spa_t *spa)
2244fa9e4066Sahrens {
2245fa9e4066Sahrens 	callb_cpr_t cprinfo;
2246fa9e4066Sahrens 	traverse_handle_t *th = spa->spa_scrub_th;
2247fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2248fa9e4066Sahrens 	pool_scrub_type_t scrub_type = spa->spa_scrub_type;
2249fa9e4066Sahrens 	int error = 0;
2250fa9e4066Sahrens 	boolean_t complete;
2251fa9e4066Sahrens 
2252fa9e4066Sahrens 	CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG);
2253fa9e4066Sahrens 
2254f0aa80d4Sbonwick 	/*
2255f0aa80d4Sbonwick 	 * If we're restarting due to a snapshot create/delete,
2256f0aa80d4Sbonwick 	 * wait for that to complete.
2257f0aa80d4Sbonwick 	 */
2258f0aa80d4Sbonwick 	txg_wait_synced(spa_get_dsl(spa), 0);
2259f0aa80d4Sbonwick 
2260ea8dc4b6Seschrock 	dprintf("start %s mintxg=%llu maxtxg=%llu\n",
2261ea8dc4b6Seschrock 	    scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
2262ea8dc4b6Seschrock 	    spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg);
2263ea8dc4b6Seschrock 
2264ea8dc4b6Seschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
2265ea8dc4b6Seschrock 	vdev_reopen(rvd);		/* purge all vdev caches */
2266fa9e4066Sahrens 	vdev_config_dirty(rvd);		/* rewrite all disk labels */
2267fa9e4066Sahrens 	vdev_scrub_stat_update(rvd, scrub_type, B_FALSE);
2268ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
2269fa9e4066Sahrens 
2270fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
2271fa9e4066Sahrens 	spa->spa_scrub_errors = 0;
2272fa9e4066Sahrens 	spa->spa_scrub_active = 1;
2273ea8dc4b6Seschrock 	ASSERT(spa->spa_scrub_inflight == 0);
2274ea8dc4b6Seschrock 	ASSERT(spa->spa_scrub_throttled == 0);
2275fa9e4066Sahrens 
2276fa9e4066Sahrens 	while (!spa->spa_scrub_stop) {
2277fa9e4066Sahrens 		CALLB_CPR_SAFE_BEGIN(&cprinfo);
2278ea8dc4b6Seschrock 		while (spa->spa_scrub_suspended) {
2279fa9e4066Sahrens 			spa->spa_scrub_active = 0;
2280fa9e4066Sahrens 			cv_broadcast(&spa->spa_scrub_cv);
2281fa9e4066Sahrens 			cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
2282fa9e4066Sahrens 			spa->spa_scrub_active = 1;
2283fa9e4066Sahrens 		}
2284fa9e4066Sahrens 		CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock);
2285fa9e4066Sahrens 
2286fa9e4066Sahrens 		if (spa->spa_scrub_restart_txg != 0)
2287fa9e4066Sahrens 			break;
2288fa9e4066Sahrens 
2289fa9e4066Sahrens 		mutex_exit(&spa->spa_scrub_lock);
2290fa9e4066Sahrens 		error = traverse_more(th);
2291fa9e4066Sahrens 		mutex_enter(&spa->spa_scrub_lock);
2292fa9e4066Sahrens 		if (error != EAGAIN)
2293fa9e4066Sahrens 			break;
2294ea8dc4b6Seschrock 
2295ea8dc4b6Seschrock 		while (spa->spa_scrub_throttled > 0)
2296ea8dc4b6Seschrock 			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2297fa9e4066Sahrens 	}
2298fa9e4066Sahrens 
2299fa9e4066Sahrens 	while (spa->spa_scrub_inflight)
2300fa9e4066Sahrens 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2301fa9e4066Sahrens 
23025dabedeeSbonwick 	spa->spa_scrub_active = 0;
23035dabedeeSbonwick 	cv_broadcast(&spa->spa_scrub_cv);
23045dabedeeSbonwick 
23055dabedeeSbonwick 	mutex_exit(&spa->spa_scrub_lock);
23065dabedeeSbonwick 
23075dabedeeSbonwick 	spa_config_enter(spa, RW_WRITER, FTAG);
23085dabedeeSbonwick 
23095dabedeeSbonwick 	mutex_enter(&spa->spa_scrub_lock);
23105dabedeeSbonwick 
23115dabedeeSbonwick 	/*
23125dabedeeSbonwick 	 * Note: we check spa_scrub_restart_txg under both spa_scrub_lock
23135dabedeeSbonwick 	 * AND the spa config lock to synchronize with any config changes
23145dabedeeSbonwick 	 * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit().
23155dabedeeSbonwick 	 */
2316fa9e4066Sahrens 	if (spa->spa_scrub_restart_txg != 0)
2317fa9e4066Sahrens 		error = ERESTART;
2318fa9e4066Sahrens 
2319ea8dc4b6Seschrock 	if (spa->spa_scrub_stop)
2320ea8dc4b6Seschrock 		error = EINTR;
2321ea8dc4b6Seschrock 
2322fa9e4066Sahrens 	/*
2323ea8dc4b6Seschrock 	 * Even if there were uncorrectable errors, we consider the scrub
2324ea8dc4b6Seschrock 	 * completed.  The downside is that if there is a transient error during
2325ea8dc4b6Seschrock 	 * a resilver, we won't resilver the data properly to the target.  But
2326ea8dc4b6Seschrock 	 * if the damage is permanent (more likely) we will resilver forever,
2327ea8dc4b6Seschrock 	 * which isn't really acceptable.  Since there is enough information for
2328ea8dc4b6Seschrock 	 * the user to know what has failed and why, this seems like a more
2329ea8dc4b6Seschrock 	 * tractable approach.
2330fa9e4066Sahrens 	 */
2331ea8dc4b6Seschrock 	complete = (error == 0);
2332fa9e4066Sahrens 
2333ea8dc4b6Seschrock 	dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n",
2334ea8dc4b6Seschrock 	    scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
2335fa9e4066Sahrens 	    spa->spa_scrub_maxtxg, complete ? "done" : "FAILED",
2336fa9e4066Sahrens 	    error, spa->spa_scrub_errors, spa->spa_scrub_stop);
2337fa9e4066Sahrens 
2338fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
2339fa9e4066Sahrens 
2340fa9e4066Sahrens 	/*
2341fa9e4066Sahrens 	 * If the scrub/resilver completed, update all DTLs to reflect this.
2342fa9e4066Sahrens 	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
2343fa9e4066Sahrens 	 */
2344fa9e4066Sahrens 	vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1,
2345fa9e4066Sahrens 	    complete ? spa->spa_scrub_maxtxg : 0, B_TRUE);
2346fa9e4066Sahrens 	vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete);
2347ea8dc4b6Seschrock 	spa_errlog_rotate(spa);
23485dabedeeSbonwick 
2349ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
2350fa9e4066Sahrens 
2351fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
2352fa9e4066Sahrens 
2353ea8dc4b6Seschrock 	/*
2354ea8dc4b6Seschrock 	 * We may have finished replacing a device.
2355ea8dc4b6Seschrock 	 * Let the async thread assess this and handle the detach.
2356ea8dc4b6Seschrock 	 */
2357ea8dc4b6Seschrock 	spa_async_request(spa, SPA_ASYNC_REPLACE_DONE);
2358fa9e4066Sahrens 
2359fa9e4066Sahrens 	/*
2360fa9e4066Sahrens 	 * If we were told to restart, our final act is to start a new scrub.
2361fa9e4066Sahrens 	 */
2362fa9e4066Sahrens 	if (error == ERESTART)
2363ea8dc4b6Seschrock 		spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ?
2364ea8dc4b6Seschrock 		    SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB);
2365fa9e4066Sahrens 
2366ea8dc4b6Seschrock 	spa->spa_scrub_type = POOL_SCRUB_NONE;
2367ea8dc4b6Seschrock 	spa->spa_scrub_active = 0;
2368ea8dc4b6Seschrock 	spa->spa_scrub_thread = NULL;
2369ea8dc4b6Seschrock 	cv_broadcast(&spa->spa_scrub_cv);
2370fa9e4066Sahrens 	CALLB_CPR_EXIT(&cprinfo);	/* drops &spa->spa_scrub_lock */
2371fa9e4066Sahrens 	thread_exit();
2372fa9e4066Sahrens }
2373fa9e4066Sahrens 
2374fa9e4066Sahrens void
2375fa9e4066Sahrens spa_scrub_suspend(spa_t *spa)
2376fa9e4066Sahrens {
2377fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
2378ea8dc4b6Seschrock 	spa->spa_scrub_suspended++;
2379fa9e4066Sahrens 	while (spa->spa_scrub_active) {
2380fa9e4066Sahrens 		cv_broadcast(&spa->spa_scrub_cv);
2381fa9e4066Sahrens 		cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
2382fa9e4066Sahrens 	}
2383fa9e4066Sahrens 	while (spa->spa_scrub_inflight)
2384fa9e4066Sahrens 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
2385fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
2386fa9e4066Sahrens }
2387fa9e4066Sahrens 
2388fa9e4066Sahrens void
2389fa9e4066Sahrens spa_scrub_resume(spa_t *spa)
2390fa9e4066Sahrens {
2391fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
2392ea8dc4b6Seschrock 	ASSERT(spa->spa_scrub_suspended != 0);
2393ea8dc4b6Seschrock 	if (--spa->spa_scrub_suspended == 0)
2394fa9e4066Sahrens 		cv_broadcast(&spa->spa_scrub_cv);
2395fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
2396fa9e4066Sahrens }
2397fa9e4066Sahrens 
2398fa9e4066Sahrens void
2399fa9e4066Sahrens spa_scrub_restart(spa_t *spa, uint64_t txg)
2400fa9e4066Sahrens {
2401fa9e4066Sahrens 	/*
2402fa9e4066Sahrens 	 * Something happened (e.g. snapshot create/delete) that means
2403fa9e4066Sahrens 	 * we must restart any in-progress scrubs.  The itinerary will
2404fa9e4066Sahrens 	 * fix this properly.
2405fa9e4066Sahrens 	 */
2406fa9e4066Sahrens 	mutex_enter(&spa->spa_scrub_lock);
2407fa9e4066Sahrens 	spa->spa_scrub_restart_txg = txg;
2408fa9e4066Sahrens 	mutex_exit(&spa->spa_scrub_lock);
2409fa9e4066Sahrens }
2410fa9e4066Sahrens 
2411ea8dc4b6Seschrock int
2412ea8dc4b6Seschrock spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force)
2413fa9e4066Sahrens {
2414fa9e4066Sahrens 	space_seg_t *ss;
2415fa9e4066Sahrens 	uint64_t mintxg, maxtxg;
2416fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2417fa9e4066Sahrens 
2418fa9e4066Sahrens 	if ((uint_t)type >= POOL_SCRUB_TYPES)
2419fa9e4066Sahrens 		return (ENOTSUP);
2420fa9e4066Sahrens 
2421ea8dc4b6Seschrock 	mutex_enter(&spa->spa_scrub_lock);
2422ea8dc4b6Seschrock 
2423fa9e4066Sahrens 	/*
2424fa9e4066Sahrens 	 * If there's a scrub or resilver already in progress, stop it.
2425fa9e4066Sahrens 	 */
2426fa9e4066Sahrens 	while (spa->spa_scrub_thread != NULL) {
2427fa9e4066Sahrens 		/*
2428fa9e4066Sahrens 		 * Don't stop a resilver unless forced.
2429fa9e4066Sahrens 		 */
2430ea8dc4b6Seschrock 		if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) {
2431ea8dc4b6Seschrock 			mutex_exit(&spa->spa_scrub_lock);
2432fa9e4066Sahrens 			return (EBUSY);
2433ea8dc4b6Seschrock 		}
2434fa9e4066Sahrens 		spa->spa_scrub_stop = 1;
2435fa9e4066Sahrens 		cv_broadcast(&spa->spa_scrub_cv);
2436fa9e4066Sahrens 		cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
2437fa9e4066Sahrens 	}
2438fa9e4066Sahrens 
2439fa9e4066Sahrens 	/*
2440fa9e4066Sahrens 	 * Terminate the previous traverse.
2441fa9e4066Sahrens 	 */
2442fa9e4066Sahrens 	if (spa->spa_scrub_th != NULL) {
2443fa9e4066Sahrens 		traverse_fini(spa->spa_scrub_th);
2444fa9e4066Sahrens 		spa->spa_scrub_th = NULL;
2445fa9e4066Sahrens 	}
2446fa9e4066Sahrens 
2447ea8dc4b6Seschrock 	if (rvd == NULL) {
2448ea8dc4b6Seschrock 		ASSERT(spa->spa_scrub_stop == 0);
2449ea8dc4b6Seschrock 		ASSERT(spa->spa_scrub_type == type);
2450ea8dc4b6Seschrock 		ASSERT(spa->spa_scrub_restart_txg == 0);
2451ea8dc4b6Seschrock 		mutex_exit(&spa->spa_scrub_lock);
2452ea8dc4b6Seschrock 		return (0);
2453ea8dc4b6Seschrock 	}
2454fa9e4066Sahrens 
2455fa9e4066Sahrens 	mintxg = TXG_INITIAL - 1;
2456fa9e4066Sahrens 	maxtxg = spa_last_synced_txg(spa) + 1;
2457fa9e4066Sahrens 
2458ea8dc4b6Seschrock 	mutex_enter(&rvd->vdev_dtl_lock);
2459fa9e4066Sahrens 
2460ea8dc4b6Seschrock 	if (rvd->vdev_dtl_map.sm_space == 0) {
2461ea8dc4b6Seschrock 		/*
2462ea8dc4b6Seschrock 		 * The pool-wide DTL is empty.
2463ecc2d604Sbonwick 		 * If this is a resilver, there's nothing to do except
2464ecc2d604Sbonwick 		 * check whether any in-progress replacements have completed.
2465ea8dc4b6Seschrock 		 */
2466ecc2d604Sbonwick 		if (type == POOL_SCRUB_RESILVER) {
2467ea8dc4b6Seschrock 			type = POOL_SCRUB_NONE;
2468ecc2d604Sbonwick 			spa_async_request(spa, SPA_ASYNC_REPLACE_DONE);
2469ecc2d604Sbonwick 		}
2470ea8dc4b6Seschrock 	} else {
2471ea8dc4b6Seschrock 		/*
2472ea8dc4b6Seschrock 		 * The pool-wide DTL is non-empty.
2473ea8dc4b6Seschrock 		 * If this is a normal scrub, upgrade to a resilver instead.
2474ea8dc4b6Seschrock 		 */
2475ea8dc4b6Seschrock 		if (type == POOL_SCRUB_EVERYTHING)
2476ea8dc4b6Seschrock 			type = POOL_SCRUB_RESILVER;
2477ea8dc4b6Seschrock 	}
2478fa9e4066Sahrens 
2479ea8dc4b6Seschrock 	if (type == POOL_SCRUB_RESILVER) {
2480fa9e4066Sahrens 		/*
2481fa9e4066Sahrens 		 * Determine the resilvering boundaries.
2482fa9e4066Sahrens 		 *
2483fa9e4066Sahrens 		 * Note: (mintxg, maxtxg) is an open interval,
2484fa9e4066Sahrens 		 * i.e. mintxg and maxtxg themselves are not included.
2485fa9e4066Sahrens 		 *
2486fa9e4066Sahrens 		 * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1
2487fa9e4066Sahrens 		 * so we don't claim to resilver a txg that's still changing.
2488fa9e4066Sahrens 		 */
2489fa9e4066Sahrens 		ss = avl_first(&rvd->vdev_dtl_map.sm_root);
2490ea8dc4b6Seschrock 		mintxg = ss->ss_start - 1;
2491fa9e4066Sahrens 		ss = avl_last(&rvd->vdev_dtl_map.sm_root);
2492ea8dc4b6Seschrock 		maxtxg = MIN(ss->ss_end, maxtxg);
2493fa9e4066Sahrens 	}
2494fa9e4066Sahrens 
2495ea8dc4b6Seschrock 	mutex_exit(&rvd->vdev_dtl_lock);
2496ea8dc4b6Seschrock 
2497ea8dc4b6Seschrock 	spa->spa_scrub_stop = 0;
2498ea8dc4b6Seschrock 	spa->spa_scrub_type = type;
2499ea8dc4b6Seschrock 	spa->spa_scrub_restart_txg = 0;
2500ea8dc4b6Seschrock 
2501ea8dc4b6Seschrock 	if (type != POOL_SCRUB_NONE) {
2502ea8dc4b6Seschrock 		spa->spa_scrub_mintxg = mintxg;
2503fa9e4066Sahrens 		spa->spa_scrub_maxtxg = maxtxg;
2504fa9e4066Sahrens 		spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL,
25050373e76bSbonwick 		    ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL,
25060373e76bSbonwick 		    ZIO_FLAG_CANFAIL);
2507fa9e4066Sahrens 		traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg);
2508fa9e4066Sahrens 		spa->spa_scrub_thread = thread_create(NULL, 0,
2509fa9e4066Sahrens 		    spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri);
2510fa9e4066Sahrens 	}
2511fa9e4066Sahrens 
2512ea8dc4b6Seschrock 	mutex_exit(&spa->spa_scrub_lock);
2513ea8dc4b6Seschrock 
2514fa9e4066Sahrens 	return (0);
2515fa9e4066Sahrens }
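
/*
 * Worked example of the (mintxg, maxtxg) open interval above (illustrative,
 * not part of the original source; assumes DTL segments are end-exclusive):
 * suppose the pool-wide DTL says txgs 100-149 are missing and
 * spa_last_synced_txg() is 200.  Then
 *
 *	ss_start = 100  =>  mintxg = 99
 *	ss_end   = 150  =>  maxtxg = MIN(150, 201) = 150
 *
 * and the traverse visits exactly txgs 100..149, since both endpoints of the
 * open interval (99, 150) are excluded.  If instead the last DTL entry ran
 * past the last synced txg, maxtxg would be clamped to
 * spa_last_synced_txg() + 1 so that a still-open txg is never claimed.
 */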
2516fa9e4066Sahrens 
2517ea8dc4b6Seschrock /*
2518ea8dc4b6Seschrock  * ==========================================================================
2519ea8dc4b6Seschrock  * SPA async task processing
2520ea8dc4b6Seschrock  * ==========================================================================
2521ea8dc4b6Seschrock  */
2522ea8dc4b6Seschrock 
2523ea8dc4b6Seschrock static void
2524ea8dc4b6Seschrock spa_async_reopen(spa_t *spa)
2525fa9e4066Sahrens {
2526ea8dc4b6Seschrock 	vdev_t *rvd = spa->spa_root_vdev;
2527ea8dc4b6Seschrock 	vdev_t *tvd;
2528ea8dc4b6Seschrock 	int c;
2529fa9e4066Sahrens 
2530ea8dc4b6Seschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
2531ea8dc4b6Seschrock 
2532ea8dc4b6Seschrock 	for (c = 0; c < rvd->vdev_children; c++) {
2533ea8dc4b6Seschrock 		tvd = rvd->vdev_child[c];
2534ea8dc4b6Seschrock 		if (tvd->vdev_reopen_wanted) {
2535ea8dc4b6Seschrock 			tvd->vdev_reopen_wanted = 0;
2536ea8dc4b6Seschrock 			vdev_reopen(tvd);
2537ea8dc4b6Seschrock 		}
2538ea8dc4b6Seschrock 	}
2539ea8dc4b6Seschrock 
2540ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
2541ea8dc4b6Seschrock }
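
/*
 * Illustrative sketch (not part of the original source): the producer side
 * of the vdev_reopen_wanted handshake consumed above.  A hypothetical fault
 * path would mark the top-level vdev and queue the async task; the flag and
 * the request call match this file, but the surrounding error logic is
 * assumed for illustration.
 */
static void
example_flag_reopen(vdev_t *tvd)	/* hypothetical */
{
	tvd->vdev_reopen_wanted = 1;
	spa_async_request(tvd->vdev_spa, SPA_ASYNC_REOPEN);
}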
2542fa9e4066Sahrens 
2543ea8dc4b6Seschrock static void
2544ea8dc4b6Seschrock spa_async_thread(spa_t *spa)
2545ea8dc4b6Seschrock {
2546ea8dc4b6Seschrock 	int tasks;
2547ea8dc4b6Seschrock 
2548ea8dc4b6Seschrock 	ASSERT(spa->spa_sync_on);
2549ea8dc4b6Seschrock 
2550ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
2551ea8dc4b6Seschrock 	tasks = spa->spa_async_tasks;
2552ea8dc4b6Seschrock 	spa->spa_async_tasks = 0;
2553ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
2554ea8dc4b6Seschrock 
25550373e76bSbonwick 	/*
25560373e76bSbonwick 	 * See if the config needs to be updated.
25570373e76bSbonwick 	 */
25580373e76bSbonwick 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
25590373e76bSbonwick 		mutex_enter(&spa_namespace_lock);
25600373e76bSbonwick 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
25610373e76bSbonwick 		mutex_exit(&spa_namespace_lock);
25620373e76bSbonwick 	}
25630373e76bSbonwick 
2564ea8dc4b6Seschrock 	/*
2565ea8dc4b6Seschrock 	 * See if any devices need to be reopened.
2566ea8dc4b6Seschrock 	 */
2567ea8dc4b6Seschrock 	if (tasks & SPA_ASYNC_REOPEN)
2568ea8dc4b6Seschrock 		spa_async_reopen(spa);
2569ea8dc4b6Seschrock 
2570ea8dc4b6Seschrock 	/*
2571ea8dc4b6Seschrock 	 * If any devices are done replacing, detach them.
2572ea8dc4b6Seschrock 	 */
2573ea8dc4b6Seschrock 	if (tasks & SPA_ASYNC_REPLACE_DONE)
2574fa9e4066Sahrens 		spa_vdev_replace_done(spa);
2575fa9e4066Sahrens 
2576ea8dc4b6Seschrock 	/*
2577ea8dc4b6Seschrock 	 * Kick off a scrub.
2578ea8dc4b6Seschrock 	 */
2579ea8dc4b6Seschrock 	if (tasks & SPA_ASYNC_SCRUB)
2580ea8dc4b6Seschrock 		VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0);
2581ea8dc4b6Seschrock 
2582ea8dc4b6Seschrock 	/*
2583ea8dc4b6Seschrock 	 * Kick off a resilver.
2584ea8dc4b6Seschrock 	 */
2585ea8dc4b6Seschrock 	if (tasks & SPA_ASYNC_RESILVER)
2586ea8dc4b6Seschrock 		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
2587ea8dc4b6Seschrock 
2588ea8dc4b6Seschrock 	/*
2589ea8dc4b6Seschrock 	 * Let the world know that we're done.
2590ea8dc4b6Seschrock 	 */
2591ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
2592ea8dc4b6Seschrock 	spa->spa_async_thread = NULL;
2593ea8dc4b6Seschrock 	cv_broadcast(&spa->spa_async_cv);
2594ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
2595ea8dc4b6Seschrock 	thread_exit();
2596ea8dc4b6Seschrock }
2597ea8dc4b6Seschrock 
2598ea8dc4b6Seschrock void
2599ea8dc4b6Seschrock spa_async_suspend(spa_t *spa)
2600ea8dc4b6Seschrock {
2601ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
2602ea8dc4b6Seschrock 	spa->spa_async_suspended++;
2603ea8dc4b6Seschrock 	while (spa->spa_async_thread != NULL)
2604ea8dc4b6Seschrock 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
2605ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
2606ea8dc4b6Seschrock }
2607ea8dc4b6Seschrock 
2608ea8dc4b6Seschrock void
2609ea8dc4b6Seschrock spa_async_resume(spa_t *spa)
2610ea8dc4b6Seschrock {
2611ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
2612ea8dc4b6Seschrock 	ASSERT(spa->spa_async_suspended != 0);
2613ea8dc4b6Seschrock 	spa->spa_async_suspended--;
2614ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
2615ea8dc4b6Seschrock }
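
/*
 * Illustrative sketch (not part of the original source): like the scrub
 * suspend/resume pair, these calls bracket operations that must not race
 * with the async thread.  spa_evict_all() below suspends async tasks for
 * good before tearing a pool down; a hypothetical reversible caller would
 * pair the two:
 */
static void
example_quiesce_async(spa_t *spa)	/* hypothetical */
{
	spa_async_suspend(spa);		/* waits for spa_async_thread to exit */
	/* ... reconfigure without async interference ... */
	spa_async_resume(spa);		/* allows spa_async_dispatch() again */
}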
2616ea8dc4b6Seschrock 
2617ea8dc4b6Seschrock static void
2618ea8dc4b6Seschrock spa_async_dispatch(spa_t *spa)
2619ea8dc4b6Seschrock {
2620ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
2621ea8dc4b6Seschrock 	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
26220373e76bSbonwick 	    spa->spa_async_thread == NULL &&
26230373e76bSbonwick 	    rootdir != NULL && !vn_is_readonly(rootdir))
2624ea8dc4b6Seschrock 		spa->spa_async_thread = thread_create(NULL, 0,
2625ea8dc4b6Seschrock 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
2626ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
2627ea8dc4b6Seschrock }
2628ea8dc4b6Seschrock 
2629ea8dc4b6Seschrock void
2630ea8dc4b6Seschrock spa_async_request(spa_t *spa, int task)
2631ea8dc4b6Seschrock {
2632ea8dc4b6Seschrock 	mutex_enter(&spa->spa_async_lock);
2633ea8dc4b6Seschrock 	spa->spa_async_tasks |= task;
2634ea8dc4b6Seschrock 	mutex_exit(&spa->spa_async_lock);
2635fa9e4066Sahrens }
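
/*
 * Illustrative sketch (not part of the original source): tasks are a bitmask,
 * so several requests coalesce into one wakeup.  Requests are cheap and can
 * be made from almost any context; the work itself runs once
 * spa_async_dispatch() fires it, which spa_sync() does at the end of every
 * txg -- see the SPA_ASYNC_REPLACE_DONE request in spa_scrub() above.
 */
static void
example_request_resilver(spa_t *spa)	/* hypothetical */
{
	spa_async_request(spa, SPA_ASYNC_RESILVER);
	/* picked up by spa_async_thread() after the next spa_sync() */
}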
2636fa9e4066Sahrens 
2637fa9e4066Sahrens /*
2638fa9e4066Sahrens  * ==========================================================================
2639fa9e4066Sahrens  * SPA syncing routines
2640fa9e4066Sahrens  * ==========================================================================
2641fa9e4066Sahrens  */
2642fa9e4066Sahrens 
2643fa9e4066Sahrens static void
2644fa9e4066Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
2645fa9e4066Sahrens {
2646fa9e4066Sahrens 	bplist_t *bpl = &spa->spa_sync_bplist;
2647fa9e4066Sahrens 	dmu_tx_t *tx;
2648fa9e4066Sahrens 	blkptr_t blk;
2649fa9e4066Sahrens 	uint64_t itor = 0;
2650fa9e4066Sahrens 	zio_t *zio;
2651fa9e4066Sahrens 	int error;
2652fa9e4066Sahrens 	uint8_t c = 1;
2653fa9e4066Sahrens 
2654fa9e4066Sahrens 	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD);
2655fa9e4066Sahrens 
2656fa9e4066Sahrens 	while (bplist_iterate(bpl, &itor, &blk) == 0)
2657fa9e4066Sahrens 		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL));
2658fa9e4066Sahrens 
2659fa9e4066Sahrens 	error = zio_wait(zio);
2660fa9e4066Sahrens 	ASSERT3U(error, ==, 0);
2661fa9e4066Sahrens 
2662fa9e4066Sahrens 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2663fa9e4066Sahrens 	bplist_vacate(bpl, tx);
2664fa9e4066Sahrens 
2665fa9e4066Sahrens 	/*
2666fa9e4066Sahrens 	 * Pre-dirty the first block so we sync to convergence faster.
2667fa9e4066Sahrens 	 * (Usually only the first block is needed.)
2668fa9e4066Sahrens 	 */
2669fa9e4066Sahrens 	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
2670fa9e4066Sahrens 	dmu_tx_commit(tx);
2671fa9e4066Sahrens }
2672fa9e4066Sahrens 
2673fa9e4066Sahrens static void
267499653d4eSeschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
2675fa9e4066Sahrens {
2676fa9e4066Sahrens 	char *packed = NULL;
2677fa9e4066Sahrens 	size_t nvsize = 0;
2678fa9e4066Sahrens 	dmu_buf_t *db;
2679fa9e4066Sahrens 
268099653d4eSeschrock 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
2681fa9e4066Sahrens 
2682fa9e4066Sahrens 	packed = kmem_alloc(nvsize, KM_SLEEP);
2683fa9e4066Sahrens 
268499653d4eSeschrock 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
2685ea8dc4b6Seschrock 	    KM_SLEEP) == 0);
2686fa9e4066Sahrens 
268799653d4eSeschrock 	dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx);
2688fa9e4066Sahrens 
2689fa9e4066Sahrens 	kmem_free(packed, nvsize);
2690fa9e4066Sahrens 
269199653d4eSeschrock 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
2692fa9e4066Sahrens 	dmu_buf_will_dirty(db, tx);
2693fa9e4066Sahrens 	*(uint64_t *)db->db_data = nvsize;
2694ea8dc4b6Seschrock 	dmu_buf_rele(db, FTAG);
2695fa9e4066Sahrens }
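
/*
 * Illustrative sketch (not part of the original source): the read side of
 * the layout written above -- nvlist size in the bonus buffer, XDR-packed
 * bytes in the object data.  The dmu_read()/nvlist_unpack() signatures are
 * assumed to match this era of the DMU; treat this as a sketch rather than
 * the file's actual load path.
 */
static int
example_load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **nvp)	/* hypothetical */
{
	dmu_buf_t *db;
	size_t nvsize;
	char *packed;
	int error;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;	/* stored by spa_sync_nvlist() */
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, nvp, 0);
	kmem_free(packed, nvsize);

	return (error);
}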
2696fa9e4066Sahrens 
269799653d4eSeschrock static void
269899653d4eSeschrock spa_sync_spares(spa_t *spa, dmu_tx_t *tx)
269999653d4eSeschrock {
270099653d4eSeschrock 	nvlist_t *nvroot;
270199653d4eSeschrock 	nvlist_t **spares;
270299653d4eSeschrock 	int i;
270399653d4eSeschrock 
270499653d4eSeschrock 	if (!spa->spa_sync_spares)
270599653d4eSeschrock 		return;
270699653d4eSeschrock 
270799653d4eSeschrock 	/*
270899653d4eSeschrock 	 * Update the MOS nvlist describing the list of available spares.
270999653d4eSeschrock 	 * spa_validate_spares() will have already made sure this nvlist is
271099653d4eSeschrock 	 * valid and the vdevs are labelled appropriately.
271199653d4eSeschrock 	 */
271299653d4eSeschrock 	if (spa->spa_spares_object == 0) {
271399653d4eSeschrock 		spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset,
271499653d4eSeschrock 		    DMU_OT_PACKED_NVLIST, 1 << 14,
271599653d4eSeschrock 		    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
271699653d4eSeschrock 		VERIFY(zap_update(spa->spa_meta_objset,
271799653d4eSeschrock 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES,
271899653d4eSeschrock 		    sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0);
271999653d4eSeschrock 	}
272099653d4eSeschrock 
272199653d4eSeschrock 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
272299653d4eSeschrock 	if (spa->spa_nspares == 0) {
272399653d4eSeschrock 		VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
272499653d4eSeschrock 		    NULL, 0) == 0);
272599653d4eSeschrock 	} else {
272699653d4eSeschrock 		spares = kmem_alloc(spa->spa_nspares * sizeof (void *),
272799653d4eSeschrock 		    KM_SLEEP);
272899653d4eSeschrock 		for (i = 0; i < spa->spa_nspares; i++)
272999653d4eSeschrock 			spares[i] = vdev_config_generate(spa,
273099653d4eSeschrock 			    spa->spa_spares[i], B_FALSE, B_TRUE);
273199653d4eSeschrock 		VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
273299653d4eSeschrock 		    spares, spa->spa_nspares) == 0);
273399653d4eSeschrock 		for (i = 0; i < spa->spa_nspares; i++)
273499653d4eSeschrock 			nvlist_free(spares[i]);
273599653d4eSeschrock 		kmem_free(spares, spa->spa_nspares * sizeof (void *));
273699653d4eSeschrock 	}
273799653d4eSeschrock 
273899653d4eSeschrock 	spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx);
273906eeb2adSek 	nvlist_free(nvroot);
274099653d4eSeschrock 
274199653d4eSeschrock 	spa->spa_sync_spares = B_FALSE;
274299653d4eSeschrock }
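
/*
 * Illustrative sketch (not part of the original source): the producer side
 * of the spa_sync_spares handshake.  Code that changes the spare list --
 * spa_vdev_add(), for instance -- is assumed here to set spa_sync_spares and
 * dirty the config so the next spa_sync() rewrites the MOS nvlist above.
 */
static void
example_spares_changed(spa_t *spa)	/* hypothetical */
{
	spa->spa_sync_spares = B_TRUE;
	vdev_config_dirty(spa->spa_root_vdev);	/* as in spa_upgrade() below */
}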
274399653d4eSeschrock 
274499653d4eSeschrock static void
274599653d4eSeschrock spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
274699653d4eSeschrock {
274799653d4eSeschrock 	nvlist_t *config;
274899653d4eSeschrock 
274999653d4eSeschrock 	if (list_is_empty(&spa->spa_dirty_list))
275099653d4eSeschrock 		return;
275199653d4eSeschrock 
275299653d4eSeschrock 	config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE);
275399653d4eSeschrock 
275499653d4eSeschrock 	if (spa->spa_config_syncing)
275599653d4eSeschrock 		nvlist_free(spa->spa_config_syncing);
275699653d4eSeschrock 	spa->spa_config_syncing = config;
275799653d4eSeschrock 
275899653d4eSeschrock 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
275999653d4eSeschrock }
276099653d4eSeschrock 
2761fa9e4066Sahrens /*
2762fa9e4066Sahrens  * Sync the specified transaction group.  New blocks may be dirtied as
2763fa9e4066Sahrens  * part of the process, so we iterate until it converges.
2764fa9e4066Sahrens  */
2765fa9e4066Sahrens void
2766fa9e4066Sahrens spa_sync(spa_t *spa, uint64_t txg)
2767fa9e4066Sahrens {
2768fa9e4066Sahrens 	dsl_pool_t *dp = spa->spa_dsl_pool;
2769fa9e4066Sahrens 	objset_t *mos = spa->spa_meta_objset;
2770fa9e4066Sahrens 	bplist_t *bpl = &spa->spa_sync_bplist;
27710373e76bSbonwick 	vdev_t *rvd = spa->spa_root_vdev;
2772fa9e4066Sahrens 	vdev_t *vd;
2773fa9e4066Sahrens 	dmu_tx_t *tx;
2774fa9e4066Sahrens 	int dirty_vdevs;
2775fa9e4066Sahrens 
2776fa9e4066Sahrens 	/*
2777fa9e4066Sahrens 	 * Lock out configuration changes.
2778fa9e4066Sahrens 	 */
2779ea8dc4b6Seschrock 	spa_config_enter(spa, RW_READER, FTAG);
2780fa9e4066Sahrens 
2781fa9e4066Sahrens 	spa->spa_syncing_txg = txg;
2782fa9e4066Sahrens 	spa->spa_sync_pass = 0;
2783fa9e4066Sahrens 
2784ea8dc4b6Seschrock 	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
2785fa9e4066Sahrens 
278699653d4eSeschrock 	tx = dmu_tx_create_assigned(dp, txg);
278799653d4eSeschrock 
278899653d4eSeschrock 	/*
278999653d4eSeschrock 	 * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg,
279099653d4eSeschrock 	 * set spa_deflate if we have no raid-z vdevs.
279199653d4eSeschrock 	 */
279299653d4eSeschrock 	if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE &&
279399653d4eSeschrock 	    spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) {
279499653d4eSeschrock 		int i;
279599653d4eSeschrock 
279699653d4eSeschrock 		for (i = 0; i < rvd->vdev_children; i++) {
279799653d4eSeschrock 			vd = rvd->vdev_child[i];
279899653d4eSeschrock 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
279999653d4eSeschrock 				break;
280099653d4eSeschrock 		}
280199653d4eSeschrock 		if (i == rvd->vdev_children) {
280299653d4eSeschrock 			spa->spa_deflate = TRUE;
280399653d4eSeschrock 			VERIFY(0 == zap_add(spa->spa_meta_objset,
280499653d4eSeschrock 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
280599653d4eSeschrock 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
280699653d4eSeschrock 		}
280799653d4eSeschrock 	}
280899653d4eSeschrock 
2809fa9e4066Sahrens 	/*
2810fa9e4066Sahrens 	 * If anything has changed in this txg, push the deferred frees
2811fa9e4066Sahrens 	 * from the previous txg.  If not, leave them alone so that we
2812fa9e4066Sahrens 	 * don't generate work on an otherwise idle system.
2813fa9e4066Sahrens 	 */
2814fa9e4066Sahrens 	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
28151615a317Sek 	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
28161615a317Sek 	    !txg_list_empty(&dp->dp_sync_tasks, txg))
2817fa9e4066Sahrens 		spa_sync_deferred_frees(spa, txg);
2818fa9e4066Sahrens 
2819fa9e4066Sahrens 	/*
2820fa9e4066Sahrens 	 * Iterate to convergence.
2821fa9e4066Sahrens 	 */
2822fa9e4066Sahrens 	do {
2823fa9e4066Sahrens 		spa->spa_sync_pass++;
2824fa9e4066Sahrens 
2825fa9e4066Sahrens 		spa_sync_config_object(spa, tx);
282699653d4eSeschrock 		spa_sync_spares(spa, tx);
2827ea8dc4b6Seschrock 		spa_errlog_sync(spa, txg);
2828fa9e4066Sahrens 		dsl_pool_sync(dp, txg);
2829fa9e4066Sahrens 
2830fa9e4066Sahrens 		dirty_vdevs = 0;
2831fa9e4066Sahrens 		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) != NULL) {
2832fa9e4066Sahrens 			vdev_sync(vd, txg);
2833fa9e4066Sahrens 			dirty_vdevs++;
2834fa9e4066Sahrens 		}
2835fa9e4066Sahrens 
2836fa9e4066Sahrens 		bplist_sync(bpl, tx);
2837fa9e4066Sahrens 	} while (dirty_vdevs);
2838fa9e4066Sahrens 
2839fa9e4066Sahrens 	bplist_close(bpl);
2840fa9e4066Sahrens 
2841fa9e4066Sahrens 	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
2842fa9e4066Sahrens 
2843fa9e4066Sahrens 	/*
2844fa9e4066Sahrens 	 * Rewrite the vdev configuration (which includes the uberblock)
2845fa9e4066Sahrens 	 * to commit the transaction group.
28460373e76bSbonwick 	 *
28470373e76bSbonwick 	 * If there are any dirty vdevs, sync the uberblock to all vdevs.
28480373e76bSbonwick 	 * Otherwise, pick a random top-level vdev that's known to be
28490373e76bSbonwick 	 * visible in the config cache (see spa_vdev_add() for details).
28510373e76bSbonwick 	 * If the write fails, try the next vdev until we've tried them all.
28510373e76bSbonwick 	 */
28520373e76bSbonwick 	if (!list_is_empty(&spa->spa_dirty_list)) {
28530373e76bSbonwick 		VERIFY(vdev_config_sync(rvd, txg) == 0);
28540373e76bSbonwick 	} else {
28550373e76bSbonwick 		int children = rvd->vdev_children;
28560373e76bSbonwick 		int c0 = spa_get_random(children);
28570373e76bSbonwick 		int c;
28580373e76bSbonwick 
28590373e76bSbonwick 		for (c = 0; c < children; c++) {
28600373e76bSbonwick 			vd = rvd->vdev_child[(c0 + c) % children];
28610373e76bSbonwick 			if (vd->vdev_ms_array == 0)
28620373e76bSbonwick 				continue;
28630373e76bSbonwick 			if (vdev_config_sync(vd, txg) == 0)
28640373e76bSbonwick 				break;
28650373e76bSbonwick 		}
28660373e76bSbonwick 		if (c == children)
28670373e76bSbonwick 			VERIFY(vdev_config_sync(rvd, txg) == 0);
28680373e76bSbonwick 	}
28690373e76bSbonwick 
287099653d4eSeschrock 	dmu_tx_commit(tx);
287199653d4eSeschrock 
28720373e76bSbonwick 	/*
28730373e76bSbonwick 	 * Clear the dirty config list.
2874fa9e4066Sahrens 	 */
28750373e76bSbonwick 	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
28760373e76bSbonwick 		vdev_config_clean(vd);
28770373e76bSbonwick 
28780373e76bSbonwick 	/*
28790373e76bSbonwick 	 * Now that the new config has synced transactionally,
28800373e76bSbonwick 	 * let it become visible to the config cache.
28810373e76bSbonwick 	 */
28820373e76bSbonwick 	if (spa->spa_config_syncing != NULL) {
28830373e76bSbonwick 		spa_config_set(spa, spa->spa_config_syncing);
28840373e76bSbonwick 		spa->spa_config_txg = txg;
28850373e76bSbonwick 		spa->spa_config_syncing = NULL;
28860373e76bSbonwick 	}
2887fa9e4066Sahrens 
2888fa9e4066Sahrens 	/*
2889fa9e4066Sahrens 	 * Make a stable copy of the fully synced uberblock.
2890fa9e4066Sahrens 	 * We use this as the root for pool traversals.
2891fa9e4066Sahrens 	 */
2892fa9e4066Sahrens 	spa->spa_traverse_wanted = 1;	/* tells traverse_more() to stop */
2893fa9e4066Sahrens 
2894fa9e4066Sahrens 	spa_scrub_suspend(spa);		/* stop scrubbing and finish I/Os */
2895fa9e4066Sahrens 
2896fa9e4066Sahrens 	rw_enter(&spa->spa_traverse_lock, RW_WRITER);
2897fa9e4066Sahrens 	spa->spa_traverse_wanted = 0;
2898fa9e4066Sahrens 	spa->spa_ubsync = spa->spa_uberblock;
2899fa9e4066Sahrens 	rw_exit(&spa->spa_traverse_lock);
2900fa9e4066Sahrens 
2901fa9e4066Sahrens 	spa_scrub_resume(spa);		/* resume scrub with new ubsync */
2902fa9e4066Sahrens 
2903fa9e4066Sahrens 	/*
2904fa9e4066Sahrens 	 * Clean up the ZIL records for the synced txg.
2905fa9e4066Sahrens 	 */
2906fa9e4066Sahrens 	dsl_pool_zil_clean(dp);
2907fa9e4066Sahrens 
2908fa9e4066Sahrens 	/*
2909fa9e4066Sahrens 	 * Update usable space statistics.
2910fa9e4066Sahrens 	 */
2911fa9e4066Sahrens 	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) != NULL)
2912fa9e4066Sahrens 		vdev_sync_done(vd, txg);
2913fa9e4066Sahrens 
2914fa9e4066Sahrens 	/*
2915fa9e4066Sahrens 	 * It had better be the case that we didn't dirty anything
291699653d4eSeschrock 	 * since vdev_config_sync().
2917fa9e4066Sahrens 	 */
2918fa9e4066Sahrens 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
2919fa9e4066Sahrens 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
2920fa9e4066Sahrens 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
2921fa9e4066Sahrens 	ASSERT(bpl->bpl_queue == NULL);
2922fa9e4066Sahrens 
2923ea8dc4b6Seschrock 	spa_config_exit(spa, FTAG);
2924ea8dc4b6Seschrock 
2925ea8dc4b6Seschrock 	/*
2926ea8dc4b6Seschrock 	 * If any async tasks have been requested, kick them off.
2927ea8dc4b6Seschrock 	 */
2928ea8dc4b6Seschrock 	spa_async_dispatch(spa);
2929fa9e4066Sahrens }
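
/*
 * Illustrative sketch (not part of the original source): spa_sync() itself
 * runs in the pool's txg sync thread; other threads force and wait for a
 * sync indirectly, as spa_sync_allpools() and spa_upgrade() do below.  A
 * minimal hypothetical caller:
 */
static void
example_wait_for_sync(spa_t *spa)	/* hypothetical */
{
	/* 0 means "the currently open txg"; returns once spa_sync() completes it */
	txg_wait_synced(spa_get_dsl(spa), 0);
}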
2930fa9e4066Sahrens 
2931fa9e4066Sahrens /*
2932fa9e4066Sahrens  * Sync all pools.  We don't want to hold the namespace lock across these
2933fa9e4066Sahrens  * operations, so we take a reference on the spa_t and drop the lock during the
2934fa9e4066Sahrens  * sync.
2935fa9e4066Sahrens  */
2936fa9e4066Sahrens void
2937fa9e4066Sahrens spa_sync_allpools(void)
2938fa9e4066Sahrens {
2939fa9e4066Sahrens 	spa_t *spa = NULL;
2940fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
2941fa9e4066Sahrens 	while ((spa = spa_next(spa)) != NULL) {
2942fa9e4066Sahrens 		if (spa_state(spa) != POOL_STATE_ACTIVE)
2943fa9e4066Sahrens 			continue;
2944fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
2945fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2946fa9e4066Sahrens 		txg_wait_synced(spa_get_dsl(spa), 0);
2947fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
2948fa9e4066Sahrens 		spa_close(spa, FTAG);
2949fa9e4066Sahrens 	}
2950fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
2951fa9e4066Sahrens }
2952fa9e4066Sahrens 
2953fa9e4066Sahrens /*
2954fa9e4066Sahrens  * ==========================================================================
2955fa9e4066Sahrens  * Miscellaneous routines
2956fa9e4066Sahrens  * ==========================================================================
2957fa9e4066Sahrens  */
2958fa9e4066Sahrens 
2959fa9e4066Sahrens /*
2960fa9e4066Sahrens  * Remove all pools in the system.
2961fa9e4066Sahrens  */
2962fa9e4066Sahrens void
2963fa9e4066Sahrens spa_evict_all(void)
2964fa9e4066Sahrens {
2965fa9e4066Sahrens 	spa_t *spa;
2966fa9e4066Sahrens 
2967fa9e4066Sahrens 	/*
2968fa9e4066Sahrens 	 * Remove all cached state.  All pools should be closed now,
2969fa9e4066Sahrens 	 * so every spa in the AVL tree should be unreferenced.
2970fa9e4066Sahrens 	 */
2971fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
2972fa9e4066Sahrens 	while ((spa = spa_next(NULL)) != NULL) {
2973fa9e4066Sahrens 		/*
2974ea8dc4b6Seschrock 		 * Stop async tasks.  The async thread may need to detach
2975ea8dc4b6Seschrock 		 * a device that's been replaced, which requires grabbing
2976ea8dc4b6Seschrock 		 * spa_namespace_lock, so we must drop it here.
2977fa9e4066Sahrens 		 */
2978fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
2979fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
2980ea8dc4b6Seschrock 		spa_async_suspend(spa);
2981fa9e4066Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
2982fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
2983fa9e4066Sahrens 		spa_close(spa, FTAG);
2984fa9e4066Sahrens 
2985fa9e4066Sahrens 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
2986fa9e4066Sahrens 			spa_unload(spa);
2987fa9e4066Sahrens 			spa_deactivate(spa);
2988fa9e4066Sahrens 		}
2989fa9e4066Sahrens 		spa_remove(spa);
2990fa9e4066Sahrens 	}
2991fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
2992fa9e4066Sahrens }
2993ea8dc4b6Seschrock 
2994ea8dc4b6Seschrock vdev_t *
2995ea8dc4b6Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid)
2996ea8dc4b6Seschrock {
2997ea8dc4b6Seschrock 	return (vdev_lookup_by_guid(spa->spa_root_vdev, guid));
2998ea8dc4b6Seschrock }
2999eaca9bbdSeschrock 
3000eaca9bbdSeschrock void
3001eaca9bbdSeschrock spa_upgrade(spa_t *spa)
3002eaca9bbdSeschrock {
3003eaca9bbdSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
3004eaca9bbdSeschrock 
3005eaca9bbdSeschrock 	/*
3006eaca9bbdSeschrock 	 * This should only be called for a non-faulted pool, and since a
3007eaca9bbdSeschrock 	 * pool with a version beyond ZFS_VERSION would be unopenable, a
3008eaca9bbdSeschrock 	 * version greater than ZFS_VERSION shouldn't be possible here.
3009eaca9bbdSeschrock 	 */
3010eaca9bbdSeschrock 	ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION);
3011eaca9bbdSeschrock 
3012eaca9bbdSeschrock 	spa->spa_uberblock.ub_version = ZFS_VERSION;
3013eaca9bbdSeschrock 	vdev_config_dirty(spa->spa_root_vdev);
3014eaca9bbdSeschrock 
3015eaca9bbdSeschrock 	spa_config_exit(spa, FTAG);
301699653d4eSeschrock 
301799653d4eSeschrock 	txg_wait_synced(spa_get_dsl(spa), 0);
301899653d4eSeschrock }
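
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller -- e.g. the pool-upgrade ioctl path -- would check the on-disk
 * version before asking for the rewrite, mirroring the assertion above.
 */
static void
example_maybe_upgrade(spa_t *spa)	/* hypothetical */
{
	if (spa->spa_uberblock.ub_version < ZFS_VERSION)
		spa_upgrade(spa);	/* dirties the config and waits for sync */
}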
301999653d4eSeschrock 
302099653d4eSeschrock boolean_t
302199653d4eSeschrock spa_has_spare(spa_t *spa, uint64_t guid)
302299653d4eSeschrock {
302399653d4eSeschrock 	int i;
302499653d4eSeschrock 
302599653d4eSeschrock 	for (i = 0; i < spa->spa_nspares; i++)
302699653d4eSeschrock 		if (spa->spa_spares[i]->vdev_guid == guid)
302799653d4eSeschrock 			return (B_TRUE);
302899653d4eSeschrock 
302999653d4eSeschrock 	return (B_FALSE);
3030eaca9bbdSeschrock }
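
/*
 * Illustrative sketch (not part of the original source): spa_has_spare() is
 * a linear scan of the configured spares, which is fine for the small spare
 * counts involved.  A hypothetical caller rejecting a device that is already
 * a spare:
 */
static int
example_check_not_spare(spa_t *spa, vdev_t *vd)	/* hypothetical */
{
	if (spa_has_spare(spa, vd->vdev_guid))
		return (EBUSY);
	return (0);
}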
3031