xref: /illumos-gate/usr/src/uts/common/fs/zfs/vdev.c (revision b24ab676)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5441d80aaSlling  * Common Development and Distribution License (the "License").
6441d80aaSlling  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
2199653d4eSeschrock 
22fa9e4066Sahrens /*
23a3f829aeSBill Moore  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24fa9e4066Sahrens  * Use is subject to license terms.
25fa9e4066Sahrens  */
26fa9e4066Sahrens 
27fa9e4066Sahrens #include <sys/zfs_context.h>
28ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h>
29fa9e4066Sahrens #include <sys/spa.h>
30fa9e4066Sahrens #include <sys/spa_impl.h>
31fa9e4066Sahrens #include <sys/dmu.h>
32fa9e4066Sahrens #include <sys/dmu_tx.h>
33fa9e4066Sahrens #include <sys/vdev_impl.h>
34fa9e4066Sahrens #include <sys/uberblock_impl.h>
35fa9e4066Sahrens #include <sys/metaslab.h>
36fa9e4066Sahrens #include <sys/metaslab_impl.h>
37fa9e4066Sahrens #include <sys/space_map.h>
38fa9e4066Sahrens #include <sys/zio.h>
39fa9e4066Sahrens #include <sys/zap.h>
40fa9e4066Sahrens #include <sys/fs/zfs.h>
41c5904d13Seschrock #include <sys/arc.h>
42e6ca193dSGeorge Wilson #include <sys/zil.h>
43fa9e4066Sahrens 
44fa9e4066Sahrens /*
45fa9e4066Sahrens  * Virtual device management.
46fa9e4066Sahrens  */
47fa9e4066Sahrens 
48fa9e4066Sahrens static vdev_ops_t *vdev_ops_table[] = {
49fa9e4066Sahrens 	&vdev_root_ops,
50fa9e4066Sahrens 	&vdev_raidz_ops,
51fa9e4066Sahrens 	&vdev_mirror_ops,
52fa9e4066Sahrens 	&vdev_replacing_ops,
5399653d4eSeschrock 	&vdev_spare_ops,
54fa9e4066Sahrens 	&vdev_disk_ops,
55fa9e4066Sahrens 	&vdev_file_ops,
56fa9e4066Sahrens 	&vdev_missing_ops,
5788ecc943SGeorge Wilson 	&vdev_hole_ops,
58fa9e4066Sahrens 	NULL
59fa9e4066Sahrens };
60fa9e4066Sahrens 
61088f3894Sahrens /* maximum scrub/resilver I/O queue per leaf vdev */
62088f3894Sahrens int zfs_scrub_limit = 10;
6305b2b3b8Smishra 
64fa9e4066Sahrens /*
65fa9e4066Sahrens  * Given a vdev type, return the appropriate ops vector.
66fa9e4066Sahrens  */
67fa9e4066Sahrens static vdev_ops_t *
68fa9e4066Sahrens vdev_getops(const char *type)
69fa9e4066Sahrens {
70fa9e4066Sahrens 	vdev_ops_t *ops, **opspp;
71fa9e4066Sahrens 
72fa9e4066Sahrens 	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
73fa9e4066Sahrens 		if (strcmp(ops->vdev_op_type, type) == 0)
74fa9e4066Sahrens 			break;
75fa9e4066Sahrens 
76fa9e4066Sahrens 	return (ops);
77fa9e4066Sahrens }
78fa9e4066Sahrens 
79fa9e4066Sahrens /*
80fa9e4066Sahrens  * Default asize function: return the MAX of psize with the asize of
81fa9e4066Sahrens  * all children.  This is what's used by anything other than RAID-Z.
82fa9e4066Sahrens  */
83fa9e4066Sahrens uint64_t
84fa9e4066Sahrens vdev_default_asize(vdev_t *vd, uint64_t psize)
85fa9e4066Sahrens {
86ecc2d604Sbonwick 	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
87fa9e4066Sahrens 	uint64_t csize;
88fa9e4066Sahrens 
89573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++) {
90fa9e4066Sahrens 		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
91fa9e4066Sahrens 		asize = MAX(asize, csize);
92fa9e4066Sahrens 	}
93fa9e4066Sahrens 
94fa9e4066Sahrens 	return (asize);
95fa9e4066Sahrens }
96fa9e4066Sahrens 
972a79c5feSlling /*
98573ca77eSGeorge Wilson  * Get the minimum allocatable size. We define the allocatable size as
99573ca77eSGeorge Wilson  * the vdev's asize rounded to the nearest metaslab. This allows us to
100573ca77eSGeorge Wilson  * replace or attach devices which don't have the same physical size but
101573ca77eSGeorge Wilson  * can still satisfy the same number of allocations.
1022a79c5feSlling  */
1032a79c5feSlling uint64_t
104573ca77eSGeorge Wilson vdev_get_min_asize(vdev_t *vd)
1052a79c5feSlling {
106573ca77eSGeorge Wilson 	vdev_t *pvd = vd->vdev_parent;
1072a79c5feSlling 
108573ca77eSGeorge Wilson 	/*
109573ca77eSGeorge Wilson 	 * If our parent is NULL (inactive spare or cache) or is the root,
110573ca77eSGeorge Wilson 	 * just return our own asize.
111573ca77eSGeorge Wilson 	 */
112573ca77eSGeorge Wilson 	if (pvd == NULL)
113573ca77eSGeorge Wilson 		return (vd->vdev_asize);
1142a79c5feSlling 
1152a79c5feSlling 	/*
116573ca77eSGeorge Wilson 	 * The top-level vdev just returns the allocatable size rounded
117573ca77eSGeorge Wilson 	 * to the nearest metaslab.
1182a79c5feSlling 	 */
119573ca77eSGeorge Wilson 	if (vd == vd->vdev_top)
120573ca77eSGeorge Wilson 		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
1212a79c5feSlling 
122573ca77eSGeorge Wilson 	/*
123573ca77eSGeorge Wilson 	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
124573ca77eSGeorge Wilson 	 * so each child must provide at least 1/Nth of its asize.
125573ca77eSGeorge Wilson 	 */
126573ca77eSGeorge Wilson 	if (pvd->vdev_ops == &vdev_raidz_ops)
127573ca77eSGeorge Wilson 		return (pvd->vdev_min_asize / pvd->vdev_children);
1282a79c5feSlling 
129573ca77eSGeorge Wilson 	return (pvd->vdev_min_asize);
130573ca77eSGeorge Wilson }
1312a79c5feSlling 
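/*
 * Recompute the minimum allocatable size for this vdev and, recursively,
 * for all of its children.
 */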
132573ca77eSGeorge Wilson void
133573ca77eSGeorge Wilson vdev_set_min_asize(vdev_t *vd)
134573ca77eSGeorge Wilson {
135573ca77eSGeorge Wilson 	vd->vdev_min_asize = vdev_get_min_asize(vd);
136573ca77eSGeorge Wilson 
137573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++)
138573ca77eSGeorge Wilson 		vdev_set_min_asize(vd->vdev_child[c]);
1392a79c5feSlling }
1402a79c5feSlling 
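/*
 * Look up a top-level vdev by its index in the root vdev's child array.
 * Returns NULL if the index is out of range.
 */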
141fa9e4066Sahrens vdev_t *
142fa9e4066Sahrens vdev_lookup_top(spa_t *spa, uint64_t vdev)
143fa9e4066Sahrens {
144fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
145fa9e4066Sahrens 
146e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
147e05725b1Sbonwick 
148088f3894Sahrens 	if (vdev < rvd->vdev_children) {
149088f3894Sahrens 		ASSERT(rvd->vdev_child[vdev] != NULL);
150fa9e4066Sahrens 		return (rvd->vdev_child[vdev]);
151088f3894Sahrens 	}
152fa9e4066Sahrens 
153fa9e4066Sahrens 	return (NULL);
154fa9e4066Sahrens }
155fa9e4066Sahrens 
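/*
 * Recursively search the subtree rooted at 'vd' for the vdev with the
 * given guid; returns NULL if it isn't found.
 */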
156fa9e4066Sahrens vdev_t *
157fa9e4066Sahrens vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
158fa9e4066Sahrens {
159fa9e4066Sahrens 	vdev_t *mvd;
160fa9e4066Sahrens 
1610e34b6a7Sbonwick 	if (vd->vdev_guid == guid)
162fa9e4066Sahrens 		return (vd);
163fa9e4066Sahrens 
164573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++)
165fa9e4066Sahrens 		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
166fa9e4066Sahrens 		    NULL)
167fa9e4066Sahrens 			return (mvd);
168fa9e4066Sahrens 
169fa9e4066Sahrens 	return (NULL);
170fa9e4066Sahrens }
171fa9e4066Sahrens 
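/*
 * Link 'cvd' into its new parent's child array, growing the array as
 * needed, and add the child's guid sum to every ancestor.
 */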
172fa9e4066Sahrens void
173fa9e4066Sahrens vdev_add_child(vdev_t *pvd, vdev_t *cvd)
174fa9e4066Sahrens {
175fa9e4066Sahrens 	size_t oldsize, newsize;
176fa9e4066Sahrens 	uint64_t id = cvd->vdev_id;
177fa9e4066Sahrens 	vdev_t **newchild;
178fa9e4066Sahrens 
179e14bb325SJeff Bonwick 	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
180fa9e4066Sahrens 	ASSERT(cvd->vdev_parent == NULL);
181fa9e4066Sahrens 
182fa9e4066Sahrens 	cvd->vdev_parent = pvd;
183fa9e4066Sahrens 
184fa9e4066Sahrens 	if (pvd == NULL)
185fa9e4066Sahrens 		return;
186fa9e4066Sahrens 
187fa9e4066Sahrens 	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
188fa9e4066Sahrens 
189fa9e4066Sahrens 	oldsize = pvd->vdev_children * sizeof (vdev_t *);
190fa9e4066Sahrens 	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
191fa9e4066Sahrens 	newsize = pvd->vdev_children * sizeof (vdev_t *);
192fa9e4066Sahrens 
193fa9e4066Sahrens 	newchild = kmem_zalloc(newsize, KM_SLEEP);
194fa9e4066Sahrens 	if (pvd->vdev_child != NULL) {
195fa9e4066Sahrens 		bcopy(pvd->vdev_child, newchild, oldsize);
196fa9e4066Sahrens 		kmem_free(pvd->vdev_child, oldsize);
197fa9e4066Sahrens 	}
198fa9e4066Sahrens 
199fa9e4066Sahrens 	pvd->vdev_child = newchild;
200fa9e4066Sahrens 	pvd->vdev_child[id] = cvd;
201fa9e4066Sahrens 
202fa9e4066Sahrens 	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
203fa9e4066Sahrens 	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
204fa9e4066Sahrens 
205fa9e4066Sahrens 	/*
206fa9e4066Sahrens 	 * Walk up all ancestors to update guid sum.
207fa9e4066Sahrens 	 */
208fa9e4066Sahrens 	for (; pvd != NULL; pvd = pvd->vdev_parent)
209fa9e4066Sahrens 		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
21005b2b3b8Smishra 
21105b2b3b8Smishra 	if (cvd->vdev_ops->vdev_op_leaf)
21205b2b3b8Smishra 		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
213fa9e4066Sahrens }
214fa9e4066Sahrens 
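/*
 * Unlink 'cvd' from its parent's child array and subtract its guid sum
 * from every ancestor; the array is freed once its last child is removed.
 */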
215fa9e4066Sahrens void
216fa9e4066Sahrens vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
217fa9e4066Sahrens {
218fa9e4066Sahrens 	int c;
219fa9e4066Sahrens 	uint_t id = cvd->vdev_id;
220fa9e4066Sahrens 
221fa9e4066Sahrens 	ASSERT(cvd->vdev_parent == pvd);
222fa9e4066Sahrens 
223fa9e4066Sahrens 	if (pvd == NULL)
224fa9e4066Sahrens 		return;
225fa9e4066Sahrens 
226fa9e4066Sahrens 	ASSERT(id < pvd->vdev_children);
227fa9e4066Sahrens 	ASSERT(pvd->vdev_child[id] == cvd);
228fa9e4066Sahrens 
229fa9e4066Sahrens 	pvd->vdev_child[id] = NULL;
230fa9e4066Sahrens 	cvd->vdev_parent = NULL;
231fa9e4066Sahrens 
232fa9e4066Sahrens 	for (c = 0; c < pvd->vdev_children; c++)
233fa9e4066Sahrens 		if (pvd->vdev_child[c])
234fa9e4066Sahrens 			break;
235fa9e4066Sahrens 
236fa9e4066Sahrens 	if (c == pvd->vdev_children) {
237fa9e4066Sahrens 		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
238fa9e4066Sahrens 		pvd->vdev_child = NULL;
239fa9e4066Sahrens 		pvd->vdev_children = 0;
240fa9e4066Sahrens 	}
241fa9e4066Sahrens 
242fa9e4066Sahrens 	/*
243fa9e4066Sahrens 	 * Walk up all ancestors to update guid sum.
244fa9e4066Sahrens 	 */
245fa9e4066Sahrens 	for (; pvd != NULL; pvd = pvd->vdev_parent)
246fa9e4066Sahrens 		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
24705b2b3b8Smishra 
24805b2b3b8Smishra 	if (cvd->vdev_ops->vdev_op_leaf)
24905b2b3b8Smishra 		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
250fa9e4066Sahrens }
251fa9e4066Sahrens 
252fa9e4066Sahrens /*
253fa9e4066Sahrens  * Remove any holes in the child array.
254fa9e4066Sahrens  */
255fa9e4066Sahrens void
256fa9e4066Sahrens vdev_compact_children(vdev_t *pvd)
257fa9e4066Sahrens {
258fa9e4066Sahrens 	vdev_t **newchild, *cvd;
259fa9e4066Sahrens 	int oldc = pvd->vdev_children;
260573ca77eSGeorge Wilson 	int newc;
261fa9e4066Sahrens 
262e14bb325SJeff Bonwick 	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
263fa9e4066Sahrens 
264573ca77eSGeorge Wilson 	for (int c = newc = 0; c < oldc; c++)
265fa9e4066Sahrens 		if (pvd->vdev_child[c])
266fa9e4066Sahrens 			newc++;
267fa9e4066Sahrens 
268fa9e4066Sahrens 	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
269fa9e4066Sahrens 
270573ca77eSGeorge Wilson 	for (int c = newc = 0; c < oldc; c++) {
271fa9e4066Sahrens 		if ((cvd = pvd->vdev_child[c]) != NULL) {
272fa9e4066Sahrens 			newchild[newc] = cvd;
273fa9e4066Sahrens 			cvd->vdev_id = newc++;
274fa9e4066Sahrens 		}
275fa9e4066Sahrens 	}
276fa9e4066Sahrens 
277fa9e4066Sahrens 	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
278fa9e4066Sahrens 	pvd->vdev_child = newchild;
279fa9e4066Sahrens 	pvd->vdev_children = newc;
280fa9e4066Sahrens }
281fa9e4066Sahrens 
282fa9e4066Sahrens /*
283fa9e4066Sahrens  * Allocate and minimally initialize a vdev_t.
284fa9e4066Sahrens  */
28588ecc943SGeorge Wilson vdev_t *
286fa9e4066Sahrens vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
287fa9e4066Sahrens {
288fa9e4066Sahrens 	vdev_t *vd;
289fa9e4066Sahrens 
290fa9e4066Sahrens 	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
291fa9e4066Sahrens 
2920e34b6a7Sbonwick 	if (spa->spa_root_vdev == NULL) {
2930e34b6a7Sbonwick 		ASSERT(ops == &vdev_root_ops);
2940e34b6a7Sbonwick 		spa->spa_root_vdev = vd;
2950e34b6a7Sbonwick 	}
2960e34b6a7Sbonwick 
29788ecc943SGeorge Wilson 	if (guid == 0 && ops != &vdev_hole_ops) {
2980e34b6a7Sbonwick 		if (spa->spa_root_vdev == vd) {
2990e34b6a7Sbonwick 			/*
3000e34b6a7Sbonwick 			 * The root vdev's guid will also be the pool guid,
3010e34b6a7Sbonwick 			 * which must be unique among all pools.
3020e34b6a7Sbonwick 			 */
3030e34b6a7Sbonwick 			while (guid == 0 || spa_guid_exists(guid, 0))
3040e34b6a7Sbonwick 				guid = spa_get_random(-1ULL);
3050e34b6a7Sbonwick 		} else {
3060e34b6a7Sbonwick 			/*
3070e34b6a7Sbonwick 			 * Any other vdev's guid must be unique within the pool.
3080e34b6a7Sbonwick 			 */
3090e34b6a7Sbonwick 			while (guid == 0 ||
3100e34b6a7Sbonwick 			    spa_guid_exists(spa_guid(spa), guid))
3110e34b6a7Sbonwick 				guid = spa_get_random(-1ULL);
3120e34b6a7Sbonwick 		}
3130e34b6a7Sbonwick 		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
3140e34b6a7Sbonwick 	}
3150e34b6a7Sbonwick 
316fa9e4066Sahrens 	vd->vdev_spa = spa;
317fa9e4066Sahrens 	vd->vdev_id = id;
318fa9e4066Sahrens 	vd->vdev_guid = guid;
319fa9e4066Sahrens 	vd->vdev_guid_sum = guid;
320fa9e4066Sahrens 	vd->vdev_ops = ops;
321fa9e4066Sahrens 	vd->vdev_state = VDEV_STATE_CLOSED;
32288ecc943SGeorge Wilson 	vd->vdev_ishole = (ops == &vdev_hole_ops);
323fa9e4066Sahrens 
324fa9e4066Sahrens 	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
3255ad82045Snd 	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
326e14bb325SJeff Bonwick 	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
3278ad4d6ddSJeff Bonwick 	for (int t = 0; t < DTL_TYPES; t++) {
3288ad4d6ddSJeff Bonwick 		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
3298ad4d6ddSJeff Bonwick 		    &vd->vdev_dtl_lock);
3308ad4d6ddSJeff Bonwick 	}
331fa9e4066Sahrens 	txg_list_create(&vd->vdev_ms_list,
332fa9e4066Sahrens 	    offsetof(struct metaslab, ms_txg_node));
333fa9e4066Sahrens 	txg_list_create(&vd->vdev_dtl_list,
334fa9e4066Sahrens 	    offsetof(struct vdev, vdev_dtl_node));
335fa9e4066Sahrens 	vd->vdev_stat.vs_timestamp = gethrtime();
3363d7072f8Seschrock 	vdev_queue_init(vd);
3373d7072f8Seschrock 	vdev_cache_init(vd);
338fa9e4066Sahrens 
339fa9e4066Sahrens 	return (vd);
340fa9e4066Sahrens }
341fa9e4066Sahrens 
342fa9e4066Sahrens /*
343fa9e4066Sahrens  * Allocate a new vdev.  The 'alloctype' is used to control whether we are
344fa9e4066Sahrens  * creating a new vdev or loading an existing one - the behavior is slightly
345fa9e4066Sahrens  * different for each case.
346fa9e4066Sahrens  */
34799653d4eSeschrock int
34899653d4eSeschrock vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
34999653d4eSeschrock     int alloctype)
350fa9e4066Sahrens {
351fa9e4066Sahrens 	vdev_ops_t *ops;
352fa9e4066Sahrens 	char *type;
3538654d025Sperrin 	uint64_t guid = 0, islog, nparity;
354fa9e4066Sahrens 	vdev_t *vd;
355fa9e4066Sahrens 
356e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
357fa9e4066Sahrens 
358fa9e4066Sahrens 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
35999653d4eSeschrock 		return (EINVAL);
360fa9e4066Sahrens 
361fa9e4066Sahrens 	if ((ops = vdev_getops(type)) == NULL)
36299653d4eSeschrock 		return (EINVAL);
363fa9e4066Sahrens 
364fa9e4066Sahrens 	/*
365fa9e4066Sahrens 	 * If this is a load, get the vdev guid from the nvlist.
366fa9e4066Sahrens 	 * Otherwise, vdev_alloc_common() will generate one for us.
367fa9e4066Sahrens 	 */
368fa9e4066Sahrens 	if (alloctype == VDEV_ALLOC_LOAD) {
369fa9e4066Sahrens 		uint64_t label_id;
370fa9e4066Sahrens 
371fa9e4066Sahrens 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
372fa9e4066Sahrens 		    label_id != id)
37399653d4eSeschrock 			return (EINVAL);
374fa9e4066Sahrens 
375fa9e4066Sahrens 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
37699653d4eSeschrock 			return (EINVAL);
37799653d4eSeschrock 	} else if (alloctype == VDEV_ALLOC_SPARE) {
37899653d4eSeschrock 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
37999653d4eSeschrock 			return (EINVAL);
380fa94a07fSbrendan 	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
381fa94a07fSbrendan 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
382fa94a07fSbrendan 			return (EINVAL);
38321ecdf64SLin Ling 	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
38421ecdf64SLin Ling 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
38521ecdf64SLin Ling 			return (EINVAL);
386fa9e4066Sahrens 	}
387fa9e4066Sahrens 
38899653d4eSeschrock 	/*
38999653d4eSeschrock 	 * The first allocated vdev must be of type 'root'.
39099653d4eSeschrock 	 */
39199653d4eSeschrock 	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
39299653d4eSeschrock 		return (EINVAL);
39399653d4eSeschrock 
3948654d025Sperrin 	/*
3958654d025Sperrin 	 * Determine whether we're a log vdev.
3968654d025Sperrin 	 */
3978654d025Sperrin 	islog = 0;
3988654d025Sperrin 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
399990b4856Slling 	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
4008654d025Sperrin 		return (ENOTSUP);
401fa9e4066Sahrens 
40288ecc943SGeorge Wilson 	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
40388ecc943SGeorge Wilson 		return (ENOTSUP);
40488ecc943SGeorge Wilson 
40599653d4eSeschrock 	/*
4068654d025Sperrin 	 * Set the nparity property for RAID-Z vdevs.
40799653d4eSeschrock 	 */
4088654d025Sperrin 	nparity = -1ULL;
40999653d4eSeschrock 	if (ops == &vdev_raidz_ops) {
41099653d4eSeschrock 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
4118654d025Sperrin 		    &nparity) == 0) {
412*b24ab676SJeff Bonwick 			if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
41399653d4eSeschrock 				return (EINVAL);
41499653d4eSeschrock 			/*
415f94275ceSAdam Leventhal 			 * Previous versions could only support 1 or 2 parity
416f94275ceSAdam Leventhal 			 * devices.
41799653d4eSeschrock 			 */
418f94275ceSAdam Leventhal 			if (nparity > 1 &&
419f94275ceSAdam Leventhal 			    spa_version(spa) < SPA_VERSION_RAIDZ2)
420f94275ceSAdam Leventhal 				return (ENOTSUP);
421f94275ceSAdam Leventhal 			if (nparity > 2 &&
422f94275ceSAdam Leventhal 			    spa_version(spa) < SPA_VERSION_RAIDZ3)
42399653d4eSeschrock 				return (ENOTSUP);
42499653d4eSeschrock 		} else {
42599653d4eSeschrock 			/*
42699653d4eSeschrock 			 * We require the parity to be specified for SPAs that
42799653d4eSeschrock 			 * support multiple parity levels.
42899653d4eSeschrock 			 */
429f94275ceSAdam Leventhal 			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
43099653d4eSeschrock 				return (EINVAL);
43199653d4eSeschrock 			/*
43299653d4eSeschrock 			 * Otherwise, we default to 1 parity device for RAID-Z.
43399653d4eSeschrock 			 */
4348654d025Sperrin 			nparity = 1;
43599653d4eSeschrock 		}
43699653d4eSeschrock 	} else {
4378654d025Sperrin 		nparity = 0;
43899653d4eSeschrock 	}
4398654d025Sperrin 	ASSERT(nparity != -1ULL);
4408654d025Sperrin 
4418654d025Sperrin 	vd = vdev_alloc_common(spa, id, guid, ops);
4428654d025Sperrin 
4438654d025Sperrin 	vd->vdev_islog = islog;
4448654d025Sperrin 	vd->vdev_nparity = nparity;
4458654d025Sperrin 
4468654d025Sperrin 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
4478654d025Sperrin 		vd->vdev_path = spa_strdup(vd->vdev_path);
4488654d025Sperrin 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
4498654d025Sperrin 		vd->vdev_devid = spa_strdup(vd->vdev_devid);
4508654d025Sperrin 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
4518654d025Sperrin 	    &vd->vdev_physpath) == 0)
4528654d025Sperrin 		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
4536809eb4eSEric Schrock 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
4546809eb4eSEric Schrock 		vd->vdev_fru = spa_strdup(vd->vdev_fru);
45599653d4eSeschrock 
456afefbcddSeschrock 	/*
457afefbcddSeschrock 	 * Set the whole_disk property.  If it's not specified, leave the value
458afefbcddSeschrock 	 * as -1.
459afefbcddSeschrock 	 */
460afefbcddSeschrock 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
461afefbcddSeschrock 	    &vd->vdev_wholedisk) != 0)
462afefbcddSeschrock 		vd->vdev_wholedisk = -1ULL;
463afefbcddSeschrock 
464ea8dc4b6Seschrock 	/*
465ea8dc4b6Seschrock 	 * Look for the 'not present' flag.  This will only be set if the device
466ea8dc4b6Seschrock 	 * was not present at the time of import.
467ea8dc4b6Seschrock 	 */
4686809eb4eSEric Schrock 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
4696809eb4eSEric Schrock 	    &vd->vdev_not_present);
470ea8dc4b6Seschrock 
471ecc2d604Sbonwick 	/*
472ecc2d604Sbonwick 	 * Get the alignment requirement.
473ecc2d604Sbonwick 	 */
474ecc2d604Sbonwick 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
475ecc2d604Sbonwick 
47688ecc943SGeorge Wilson 	/*
47788ecc943SGeorge Wilson 	 * Retrieve the vdev creation time.
47888ecc943SGeorge Wilson 	 */
47988ecc943SGeorge Wilson 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
48088ecc943SGeorge Wilson 	    &vd->vdev_crtxg);
48188ecc943SGeorge Wilson 
482fa9e4066Sahrens 	/*
483fa9e4066Sahrens 	 * If we're a top-level vdev, try to load the allocation parameters.
484fa9e4066Sahrens 	 */
485fa9e4066Sahrens 	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
486fa9e4066Sahrens 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
487fa9e4066Sahrens 		    &vd->vdev_ms_array);
488fa9e4066Sahrens 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
489fa9e4066Sahrens 		    &vd->vdev_ms_shift);
490fa9e4066Sahrens 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
491fa9e4066Sahrens 		    &vd->vdev_asize);
492fa9e4066Sahrens 	}
493fa9e4066Sahrens 
494fa9e4066Sahrens 	/*
4953d7072f8Seschrock 	 * If we're a leaf vdev, try to load the DTL object and other state.
496fa9e4066Sahrens 	 */
497c5904d13Seschrock 	if (vd->vdev_ops->vdev_op_leaf &&
49821ecdf64SLin Ling 	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
49921ecdf64SLin Ling 	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
500c5904d13Seschrock 		if (alloctype == VDEV_ALLOC_LOAD) {
501c5904d13Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
5028ad4d6ddSJeff Bonwick 			    &vd->vdev_dtl_smo.smo_object);
503c5904d13Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
504c5904d13Seschrock 			    &vd->vdev_unspare);
505c5904d13Seschrock 		}
50621ecdf64SLin Ling 
50721ecdf64SLin Ling 		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
50821ecdf64SLin Ling 			uint64_t spare = 0;
50921ecdf64SLin Ling 
51021ecdf64SLin Ling 			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
51121ecdf64SLin Ling 			    &spare) == 0 && spare)
51221ecdf64SLin Ling 				spa_spare_add(vd);
51321ecdf64SLin Ling 		}
51421ecdf64SLin Ling 
515ecc2d604Sbonwick 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
516ecc2d604Sbonwick 		    &vd->vdev_offline);
517c5904d13Seschrock 
5183d7072f8Seschrock 		/*
5193d7072f8Seschrock 		 * When importing a pool, we want to ignore the persistent fault
5203d7072f8Seschrock 		 * state, as the diagnosis made on another system may not be
521069f55e2SEric Schrock 		 * valid in the current context.  Local vdevs will
522069f55e2SEric Schrock 		 * remain in the faulted state.
5233d7072f8Seschrock 		 */
5243d7072f8Seschrock 		if (spa->spa_load_state == SPA_LOAD_OPEN) {
5253d7072f8Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
5263d7072f8Seschrock 			    &vd->vdev_faulted);
5273d7072f8Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
5283d7072f8Seschrock 			    &vd->vdev_degraded);
5293d7072f8Seschrock 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
5303d7072f8Seschrock 			    &vd->vdev_removed);
531069f55e2SEric Schrock 
532069f55e2SEric Schrock 			if (vd->vdev_faulted || vd->vdev_degraded) {
533069f55e2SEric Schrock 				char *aux;
534069f55e2SEric Schrock 
535069f55e2SEric Schrock 				vd->vdev_label_aux =
536069f55e2SEric Schrock 				    VDEV_AUX_ERR_EXCEEDED;
537069f55e2SEric Schrock 				if (nvlist_lookup_string(nv,
538069f55e2SEric Schrock 				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
539069f55e2SEric Schrock 				    strcmp(aux, "external") == 0)
540069f55e2SEric Schrock 					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
541069f55e2SEric Schrock 			}
5423d7072f8Seschrock 		}
543fa9e4066Sahrens 	}
544fa9e4066Sahrens 
545fa9e4066Sahrens 	/*
546fa9e4066Sahrens 	 * Add ourselves to the parent's list of children.
547fa9e4066Sahrens 	 */
548fa9e4066Sahrens 	vdev_add_child(parent, vd);
549fa9e4066Sahrens 
55099653d4eSeschrock 	*vdp = vd;
55199653d4eSeschrock 
55299653d4eSeschrock 	return (0);
553fa9e4066Sahrens }
554fa9e4066Sahrens 
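/*
 * Close a vdev, free all of its children, and release all of the state
 * associated with it.
 */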
555fa9e4066Sahrens void
556fa9e4066Sahrens vdev_free(vdev_t *vd)
557fa9e4066Sahrens {
5583d7072f8Seschrock 	spa_t *spa = vd->vdev_spa;
559fa9e4066Sahrens 
560fa9e4066Sahrens 	/*
561fa9e4066Sahrens 	 * vdev_free() implies closing the vdev first.  This is simpler than
562fa9e4066Sahrens 	 * trying to ensure complicated semantics for all callers.
563fa9e4066Sahrens 	 */
564fa9e4066Sahrens 	vdev_close(vd);
565fa9e4066Sahrens 
566e14bb325SJeff Bonwick 	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
567*b24ab676SJeff Bonwick 	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
568fa9e4066Sahrens 
569fa9e4066Sahrens 	/*
570fa9e4066Sahrens 	 * Free all children.
571fa9e4066Sahrens 	 */
572573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++)
573fa9e4066Sahrens 		vdev_free(vd->vdev_child[c]);
574fa9e4066Sahrens 
575fa9e4066Sahrens 	ASSERT(vd->vdev_child == NULL);
576fa9e4066Sahrens 	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
577fa9e4066Sahrens 
578fa9e4066Sahrens 	/*
579fa9e4066Sahrens 	 * Discard allocation state.
580fa9e4066Sahrens 	 */
581fa9e4066Sahrens 	if (vd == vd->vdev_top)
582fa9e4066Sahrens 		vdev_metaslab_fini(vd);
583fa9e4066Sahrens 
584fa9e4066Sahrens 	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
58599653d4eSeschrock 	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
586fa9e4066Sahrens 	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
587fa9e4066Sahrens 
588fa9e4066Sahrens 	/*
589fa9e4066Sahrens 	 * Remove this vdev from its parent's child list.
590fa9e4066Sahrens 	 */
591fa9e4066Sahrens 	vdev_remove_child(vd->vdev_parent, vd);
592fa9e4066Sahrens 
593fa9e4066Sahrens 	ASSERT(vd->vdev_parent == NULL);
594fa9e4066Sahrens 
5953d7072f8Seschrock 	/*
5963d7072f8Seschrock 	 * Clean up vdev structure.
5973d7072f8Seschrock 	 */
5983d7072f8Seschrock 	vdev_queue_fini(vd);
5993d7072f8Seschrock 	vdev_cache_fini(vd);
6003d7072f8Seschrock 
6013d7072f8Seschrock 	if (vd->vdev_path)
6023d7072f8Seschrock 		spa_strfree(vd->vdev_path);
6033d7072f8Seschrock 	if (vd->vdev_devid)
6043d7072f8Seschrock 		spa_strfree(vd->vdev_devid);
6053d7072f8Seschrock 	if (vd->vdev_physpath)
6063d7072f8Seschrock 		spa_strfree(vd->vdev_physpath);
6076809eb4eSEric Schrock 	if (vd->vdev_fru)
6086809eb4eSEric Schrock 		spa_strfree(vd->vdev_fru);
6093d7072f8Seschrock 
6103d7072f8Seschrock 	if (vd->vdev_isspare)
6113d7072f8Seschrock 		spa_spare_remove(vd);
612fa94a07fSbrendan 	if (vd->vdev_isl2cache)
613fa94a07fSbrendan 		spa_l2cache_remove(vd);
6143d7072f8Seschrock 
6153d7072f8Seschrock 	txg_list_destroy(&vd->vdev_ms_list);
6163d7072f8Seschrock 	txg_list_destroy(&vd->vdev_dtl_list);
6178ad4d6ddSJeff Bonwick 
6183d7072f8Seschrock 	mutex_enter(&vd->vdev_dtl_lock);
6198ad4d6ddSJeff Bonwick 	for (int t = 0; t < DTL_TYPES; t++) {
6208ad4d6ddSJeff Bonwick 		space_map_unload(&vd->vdev_dtl[t]);
6218ad4d6ddSJeff Bonwick 		space_map_destroy(&vd->vdev_dtl[t]);
6228ad4d6ddSJeff Bonwick 	}
6233d7072f8Seschrock 	mutex_exit(&vd->vdev_dtl_lock);
6248ad4d6ddSJeff Bonwick 
6253d7072f8Seschrock 	mutex_destroy(&vd->vdev_dtl_lock);
6263d7072f8Seschrock 	mutex_destroy(&vd->vdev_stat_lock);
627e14bb325SJeff Bonwick 	mutex_destroy(&vd->vdev_probe_lock);
6283d7072f8Seschrock 
6293d7072f8Seschrock 	if (vd == spa->spa_root_vdev)
6303d7072f8Seschrock 		spa->spa_root_vdev = NULL;
6313d7072f8Seschrock 
6323d7072f8Seschrock 	kmem_free(vd, sizeof (vdev_t));
633fa9e4066Sahrens }
634fa9e4066Sahrens 
635fa9e4066Sahrens /*
636fa9e4066Sahrens  * Transfer top-level vdev state from svd to tvd.
637fa9e4066Sahrens  */
638fa9e4066Sahrens static void
639fa9e4066Sahrens vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
640fa9e4066Sahrens {
641fa9e4066Sahrens 	spa_t *spa = svd->vdev_spa;
642fa9e4066Sahrens 	metaslab_t *msp;
643fa9e4066Sahrens 	vdev_t *vd;
644fa9e4066Sahrens 	int t;
645fa9e4066Sahrens 
646fa9e4066Sahrens 	ASSERT(tvd == tvd->vdev_top);
647fa9e4066Sahrens 
648fa9e4066Sahrens 	tvd->vdev_ms_array = svd->vdev_ms_array;
649fa9e4066Sahrens 	tvd->vdev_ms_shift = svd->vdev_ms_shift;
650fa9e4066Sahrens 	tvd->vdev_ms_count = svd->vdev_ms_count;
651fa9e4066Sahrens 
652fa9e4066Sahrens 	svd->vdev_ms_array = 0;
653fa9e4066Sahrens 	svd->vdev_ms_shift = 0;
654fa9e4066Sahrens 	svd->vdev_ms_count = 0;
655fa9e4066Sahrens 
656fa9e4066Sahrens 	tvd->vdev_mg = svd->vdev_mg;
657fa9e4066Sahrens 	tvd->vdev_ms = svd->vdev_ms;
658fa9e4066Sahrens 
659fa9e4066Sahrens 	svd->vdev_mg = NULL;
660fa9e4066Sahrens 	svd->vdev_ms = NULL;
661ecc2d604Sbonwick 
662ecc2d604Sbonwick 	if (tvd->vdev_mg != NULL)
663ecc2d604Sbonwick 		tvd->vdev_mg->mg_vd = tvd;
664fa9e4066Sahrens 
665fa9e4066Sahrens 	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
666fa9e4066Sahrens 	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
66799653d4eSeschrock 	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
668fa9e4066Sahrens 
669fa9e4066Sahrens 	svd->vdev_stat.vs_alloc = 0;
670fa9e4066Sahrens 	svd->vdev_stat.vs_space = 0;
67199653d4eSeschrock 	svd->vdev_stat.vs_dspace = 0;
672fa9e4066Sahrens 
673fa9e4066Sahrens 	for (t = 0; t < TXG_SIZE; t++) {
674fa9e4066Sahrens 		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
675fa9e4066Sahrens 			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
676fa9e4066Sahrens 		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
677fa9e4066Sahrens 			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
678fa9e4066Sahrens 		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
679fa9e4066Sahrens 			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
680fa9e4066Sahrens 	}
681fa9e4066Sahrens 
682e14bb325SJeff Bonwick 	if (list_link_active(&svd->vdev_config_dirty_node)) {
683fa9e4066Sahrens 		vdev_config_clean(svd);
684fa9e4066Sahrens 		vdev_config_dirty(tvd);
685fa9e4066Sahrens 	}
686fa9e4066Sahrens 
687e14bb325SJeff Bonwick 	if (list_link_active(&svd->vdev_state_dirty_node)) {
688e14bb325SJeff Bonwick 		vdev_state_clean(svd);
689e14bb325SJeff Bonwick 		vdev_state_dirty(tvd);
690e14bb325SJeff Bonwick 	}
691e14bb325SJeff Bonwick 
69299653d4eSeschrock 	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
69399653d4eSeschrock 	svd->vdev_deflate_ratio = 0;
6948654d025Sperrin 
6958654d025Sperrin 	tvd->vdev_islog = svd->vdev_islog;
6968654d025Sperrin 	svd->vdev_islog = 0;
697fa9e4066Sahrens }
698fa9e4066Sahrens 
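/*
 * Point 'vd' and all of its descendants at the new top-level vdev 'tvd'.
 */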
699fa9e4066Sahrens static void
700fa9e4066Sahrens vdev_top_update(vdev_t *tvd, vdev_t *vd)
701fa9e4066Sahrens {
702fa9e4066Sahrens 	if (vd == NULL)
703fa9e4066Sahrens 		return;
704fa9e4066Sahrens 
705fa9e4066Sahrens 	vd->vdev_top = tvd;
706fa9e4066Sahrens 
707573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++)
708fa9e4066Sahrens 		vdev_top_update(tvd, vd->vdev_child[c]);
709fa9e4066Sahrens }
710fa9e4066Sahrens 
711fa9e4066Sahrens /*
712fa9e4066Sahrens  * Add a mirror/replacing vdev above an existing vdev.
713fa9e4066Sahrens  */
714fa9e4066Sahrens vdev_t *
715fa9e4066Sahrens vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
716fa9e4066Sahrens {
717fa9e4066Sahrens 	spa_t *spa = cvd->vdev_spa;
718fa9e4066Sahrens 	vdev_t *pvd = cvd->vdev_parent;
719fa9e4066Sahrens 	vdev_t *mvd;
720fa9e4066Sahrens 
721e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
722fa9e4066Sahrens 
723fa9e4066Sahrens 	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
724ecc2d604Sbonwick 
725ecc2d604Sbonwick 	mvd->vdev_asize = cvd->vdev_asize;
726573ca77eSGeorge Wilson 	mvd->vdev_min_asize = cvd->vdev_min_asize;
727ecc2d604Sbonwick 	mvd->vdev_ashift = cvd->vdev_ashift;
728ecc2d604Sbonwick 	mvd->vdev_state = cvd->vdev_state;
72988ecc943SGeorge Wilson 	mvd->vdev_crtxg = cvd->vdev_crtxg;
730ecc2d604Sbonwick 
731fa9e4066Sahrens 	vdev_remove_child(pvd, cvd);
732fa9e4066Sahrens 	vdev_add_child(pvd, mvd);
733fa9e4066Sahrens 	cvd->vdev_id = mvd->vdev_children;
734fa9e4066Sahrens 	vdev_add_child(mvd, cvd);
735fa9e4066Sahrens 	vdev_top_update(cvd->vdev_top, cvd->vdev_top);
736fa9e4066Sahrens 
737fa9e4066Sahrens 	if (mvd == mvd->vdev_top)
738fa9e4066Sahrens 		vdev_top_transfer(cvd, mvd);
739fa9e4066Sahrens 
740fa9e4066Sahrens 	return (mvd);
741fa9e4066Sahrens }
742fa9e4066Sahrens 
743fa9e4066Sahrens /*
744fa9e4066Sahrens  * Remove a 1-way mirror/replacing vdev from the tree.
745fa9e4066Sahrens  */
746fa9e4066Sahrens void
747fa9e4066Sahrens vdev_remove_parent(vdev_t *cvd)
748fa9e4066Sahrens {
749fa9e4066Sahrens 	vdev_t *mvd = cvd->vdev_parent;
750fa9e4066Sahrens 	vdev_t *pvd = mvd->vdev_parent;
751fa9e4066Sahrens 
752e14bb325SJeff Bonwick 	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
753fa9e4066Sahrens 
754fa9e4066Sahrens 	ASSERT(mvd->vdev_children == 1);
755fa9e4066Sahrens 	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
75699653d4eSeschrock 	    mvd->vdev_ops == &vdev_replacing_ops ||
75799653d4eSeschrock 	    mvd->vdev_ops == &vdev_spare_ops);
758ecc2d604Sbonwick 	cvd->vdev_ashift = mvd->vdev_ashift;
759fa9e4066Sahrens 
760fa9e4066Sahrens 	vdev_remove_child(mvd, cvd);
761fa9e4066Sahrens 	vdev_remove_child(pvd, mvd);
7628ad4d6ddSJeff Bonwick 
76399653d4eSeschrock 	/*
764e14bb325SJeff Bonwick 	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
765e14bb325SJeff Bonwick 	 * Otherwise, we could have detached an offline device, and when we
766e14bb325SJeff Bonwick 	 * go to import the pool we'll think we have two top-level vdevs,
767e14bb325SJeff Bonwick 	 * instead of a different version of the same top-level vdev.
76899653d4eSeschrock 	 */
7698ad4d6ddSJeff Bonwick 	if (mvd->vdev_top == mvd) {
7708ad4d6ddSJeff Bonwick 		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
7718ad4d6ddSJeff Bonwick 		cvd->vdev_guid += guid_delta;
7728ad4d6ddSJeff Bonwick 		cvd->vdev_guid_sum += guid_delta;
7738ad4d6ddSJeff Bonwick 	}
774e14bb325SJeff Bonwick 	cvd->vdev_id = mvd->vdev_id;
775e14bb325SJeff Bonwick 	vdev_add_child(pvd, cvd);
776fa9e4066Sahrens 	vdev_top_update(cvd->vdev_top, cvd->vdev_top);
777fa9e4066Sahrens 
778fa9e4066Sahrens 	if (cvd == cvd->vdev_top)
779fa9e4066Sahrens 		vdev_top_transfer(mvd, cvd);
780fa9e4066Sahrens 
781fa9e4066Sahrens 	ASSERT(mvd->vdev_children == 0);
782fa9e4066Sahrens 	vdev_free(mvd);
783fa9e4066Sahrens }
784fa9e4066Sahrens 
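/*
 * Create or extend the metaslab array for a top-level vdev.  When loading
 * an existing pool (txg == 0), the space map object for each new metaslab
 * is read from the vdev's metaslab array in the MOS.
 */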
785ea8dc4b6Seschrock int
786fa9e4066Sahrens vdev_metaslab_init(vdev_t *vd, uint64_t txg)
787fa9e4066Sahrens {
788fa9e4066Sahrens 	spa_t *spa = vd->vdev_spa;
789ecc2d604Sbonwick 	objset_t *mos = spa->spa_meta_objset;
7908654d025Sperrin 	metaslab_class_t *mc;
791ecc2d604Sbonwick 	uint64_t m;
792fa9e4066Sahrens 	uint64_t oldc = vd->vdev_ms_count;
793fa9e4066Sahrens 	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
794ecc2d604Sbonwick 	metaslab_t **mspp;
795ecc2d604Sbonwick 	int error;
796fa9e4066Sahrens 
79788ecc943SGeorge Wilson 	/*
79888ecc943SGeorge Wilson 	 * This vdev is not being allocated from yet or is a hole.
79988ecc943SGeorge Wilson 	 */
80088ecc943SGeorge Wilson 	if (vd->vdev_ms_shift == 0)
8010e34b6a7Sbonwick 		return (0);
8020e34b6a7Sbonwick 
80388ecc943SGeorge Wilson 	ASSERT(!vd->vdev_ishole);
80488ecc943SGeorge Wilson 
805e6ca193dSGeorge Wilson 	/*
806e6ca193dSGeorge Wilson 	 * Compute the raidz-deflation ratio.  Note, we hard-code
807e6ca193dSGeorge Wilson 	 * in 128k (1 << 17) because it is the current "typical" blocksize.
808e6ca193dSGeorge Wilson 	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
809e6ca193dSGeorge Wilson 	 * or we will inconsistently account for existing bp's.
810e6ca193dSGeorge Wilson 	 */
811e6ca193dSGeorge Wilson 	vd->vdev_deflate_ratio = (1 << 17) /
812e6ca193dSGeorge Wilson 	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
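	/*
	 * A worked example (SPA_MINBLOCKSHIFT is 9, i.e. 512-byte blocks):
	 * a plain disk maps a 128k psize to a 128k asize, giving a ratio of
	 * 131072 / (131072 >> 9) = 512; a raidz vdev maps 128k to a larger
	 * asize (data plus parity), so its ratio is proportionally smaller.
	 */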
813e6ca193dSGeorge Wilson 
814fa9e4066Sahrens 	ASSERT(oldc <= newc);
815fa9e4066Sahrens 
8168654d025Sperrin 	if (vd->vdev_islog)
817*b24ab676SJeff Bonwick 		mc = spa_log_class(spa);
8188654d025Sperrin 	else
819*b24ab676SJeff Bonwick 		mc = spa_normal_class(spa);
8208654d025Sperrin 
821ecc2d604Sbonwick 	if (vd->vdev_mg == NULL)
822ecc2d604Sbonwick 		vd->vdev_mg = metaslab_group_create(mc, vd);
823fa9e4066Sahrens 
824ecc2d604Sbonwick 	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
825fa9e4066Sahrens 
826ecc2d604Sbonwick 	if (oldc != 0) {
827ecc2d604Sbonwick 		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
828ecc2d604Sbonwick 		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
829ecc2d604Sbonwick 	}
830fa9e4066Sahrens 
831ecc2d604Sbonwick 	vd->vdev_ms = mspp;
832ecc2d604Sbonwick 	vd->vdev_ms_count = newc;
833fa9e4066Sahrens 
834ecc2d604Sbonwick 	for (m = oldc; m < newc; m++) {
835ecc2d604Sbonwick 		space_map_obj_t smo = { 0, 0, 0 };
836ecc2d604Sbonwick 		if (txg == 0) {
837ecc2d604Sbonwick 			uint64_t object = 0;
838ecc2d604Sbonwick 			error = dmu_read(mos, vd->vdev_ms_array,
8397bfdf011SNeil Perrin 			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
8407bfdf011SNeil Perrin 			    DMU_READ_PREFETCH);
841ecc2d604Sbonwick 			if (error)
842ecc2d604Sbonwick 				return (error);
843ecc2d604Sbonwick 			if (object != 0) {
844ecc2d604Sbonwick 				dmu_buf_t *db;
845ecc2d604Sbonwick 				error = dmu_bonus_hold(mos, object, FTAG, &db);
846ecc2d604Sbonwick 				if (error)
847ecc2d604Sbonwick 					return (error);
8481934e92fSmaybee 				ASSERT3U(db->db_size, >=, sizeof (smo));
8491934e92fSmaybee 				bcopy(db->db_data, &smo, sizeof (smo));
850ecc2d604Sbonwick 				ASSERT3U(smo.smo_object, ==, object);
851ea8dc4b6Seschrock 				dmu_buf_rele(db, FTAG);
852fa9e4066Sahrens 			}
853fa9e4066Sahrens 		}
854ecc2d604Sbonwick 		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
855ecc2d604Sbonwick 		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
856fa9e4066Sahrens 	}
857fa9e4066Sahrens 
858ea8dc4b6Seschrock 	return (0);
859fa9e4066Sahrens }
860fa9e4066Sahrens 
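/*
 * Tear down the metaslab array created by vdev_metaslab_init().
 */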
861fa9e4066Sahrens void
862fa9e4066Sahrens vdev_metaslab_fini(vdev_t *vd)
863fa9e4066Sahrens {
864fa9e4066Sahrens 	uint64_t m;
865fa9e4066Sahrens 	uint64_t count = vd->vdev_ms_count;
866fa9e4066Sahrens 
867fa9e4066Sahrens 	if (vd->vdev_ms != NULL) {
868fa9e4066Sahrens 		for (m = 0; m < count; m++)
869ecc2d604Sbonwick 			if (vd->vdev_ms[m] != NULL)
870ecc2d604Sbonwick 				metaslab_fini(vd->vdev_ms[m]);
871fa9e4066Sahrens 		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
872fa9e4066Sahrens 		vd->vdev_ms = NULL;
873fa9e4066Sahrens 	}
874fa9e4066Sahrens }
875fa9e4066Sahrens 
876e14bb325SJeff Bonwick typedef struct vdev_probe_stats {
877e14bb325SJeff Bonwick 	boolean_t	vps_readable;
878e14bb325SJeff Bonwick 	boolean_t	vps_writeable;
879e14bb325SJeff Bonwick 	int		vps_flags;
880e14bb325SJeff Bonwick } vdev_probe_stats_t;
881e14bb325SJeff Bonwick 
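/*
 * Completion callback for the I/Os issued by vdev_probe().  The read and
 * write stages record their results in the vdev_probe_stats_t; the final
 * NULL zio evaluates them, updates vdev_cant_read/vdev_cant_write, and
 * fails any waiting zios if the device is no longer accessible.
 */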
882e14bb325SJeff Bonwick static void
883e14bb325SJeff Bonwick vdev_probe_done(zio_t *zio)
8840a4e9518Sgw {
8858ad4d6ddSJeff Bonwick 	spa_t *spa = zio->io_spa;
886a3f829aeSBill Moore 	vdev_t *vd = zio->io_vd;
887e14bb325SJeff Bonwick 	vdev_probe_stats_t *vps = zio->io_private;
888a3f829aeSBill Moore 
889a3f829aeSBill Moore 	ASSERT(vd->vdev_probe_zio != NULL);
890e14bb325SJeff Bonwick 
891e14bb325SJeff Bonwick 	if (zio->io_type == ZIO_TYPE_READ) {
892e14bb325SJeff Bonwick 		if (zio->io_error == 0)
893e14bb325SJeff Bonwick 			vps->vps_readable = 1;
8948ad4d6ddSJeff Bonwick 		if (zio->io_error == 0 && spa_writeable(spa)) {
895a3f829aeSBill Moore 			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
896e14bb325SJeff Bonwick 			    zio->io_offset, zio->io_size, zio->io_data,
897e14bb325SJeff Bonwick 			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
898e14bb325SJeff Bonwick 			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
899e14bb325SJeff Bonwick 		} else {
900e14bb325SJeff Bonwick 			zio_buf_free(zio->io_data, zio->io_size);
901e14bb325SJeff Bonwick 		}
902e14bb325SJeff Bonwick 	} else if (zio->io_type == ZIO_TYPE_WRITE) {
903e14bb325SJeff Bonwick 		if (zio->io_error == 0)
904e14bb325SJeff Bonwick 			vps->vps_writeable = 1;
905e14bb325SJeff Bonwick 		zio_buf_free(zio->io_data, zio->io_size);
906e14bb325SJeff Bonwick 	} else if (zio->io_type == ZIO_TYPE_NULL) {
907a3f829aeSBill Moore 		zio_t *pio;
908e14bb325SJeff Bonwick 
909e14bb325SJeff Bonwick 		vd->vdev_cant_read |= !vps->vps_readable;
910e14bb325SJeff Bonwick 		vd->vdev_cant_write |= !vps->vps_writeable;
911e14bb325SJeff Bonwick 
912e14bb325SJeff Bonwick 		if (vdev_readable(vd) &&
9138ad4d6ddSJeff Bonwick 		    (vdev_writeable(vd) || !spa_writeable(spa))) {
914e14bb325SJeff Bonwick 			zio->io_error = 0;
915e14bb325SJeff Bonwick 		} else {
916e14bb325SJeff Bonwick 			ASSERT(zio->io_error != 0);
917e14bb325SJeff Bonwick 			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
9188ad4d6ddSJeff Bonwick 			    spa, vd, NULL, 0, 0);
919e14bb325SJeff Bonwick 			zio->io_error = ENXIO;
920e14bb325SJeff Bonwick 		}
921a3f829aeSBill Moore 
922a3f829aeSBill Moore 		mutex_enter(&vd->vdev_probe_lock);
923a3f829aeSBill Moore 		ASSERT(vd->vdev_probe_zio == zio);
924a3f829aeSBill Moore 		vd->vdev_probe_zio = NULL;
925a3f829aeSBill Moore 		mutex_exit(&vd->vdev_probe_lock);
926a3f829aeSBill Moore 
927a3f829aeSBill Moore 		while ((pio = zio_walk_parents(zio)) != NULL)
928a3f829aeSBill Moore 			if (!vdev_accessible(vd, pio))
929a3f829aeSBill Moore 				pio->io_error = ENXIO;
930a3f829aeSBill Moore 
931e14bb325SJeff Bonwick 		kmem_free(vps, sizeof (*vps));
932e14bb325SJeff Bonwick 	}
933e14bb325SJeff Bonwick }
9340a4e9518Sgw 
935e14bb325SJeff Bonwick /*
936e14bb325SJeff Bonwick  * Determine whether this device is accessible by reading and writing
937e14bb325SJeff Bonwick  * to several known locations: the pad regions of each vdev label
938e14bb325SJeff Bonwick  * but the first (which we leave alone in case it contains a VTOC).
939e14bb325SJeff Bonwick  */
940e14bb325SJeff Bonwick zio_t *
941a3f829aeSBill Moore vdev_probe(vdev_t *vd, zio_t *zio)
942e14bb325SJeff Bonwick {
943e14bb325SJeff Bonwick 	spa_t *spa = vd->vdev_spa;
944a3f829aeSBill Moore 	vdev_probe_stats_t *vps = NULL;
945a3f829aeSBill Moore 	zio_t *pio;
946a3f829aeSBill Moore 
947a3f829aeSBill Moore 	ASSERT(vd->vdev_ops->vdev_op_leaf);
9480a4e9518Sgw 
949a3f829aeSBill Moore 	/*
950a3f829aeSBill Moore 	 * Don't probe the probe.
951a3f829aeSBill Moore 	 */
952a3f829aeSBill Moore 	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
953a3f829aeSBill Moore 		return (NULL);
954e14bb325SJeff Bonwick 
955a3f829aeSBill Moore 	/*
956a3f829aeSBill Moore 	 * To prevent 'probe storms' when a device fails, we create
957a3f829aeSBill Moore 	 * just one probe i/o at a time.  All zios that want to probe
958a3f829aeSBill Moore 	 * this vdev will become parents of the probe io.
959a3f829aeSBill Moore 	 */
960a3f829aeSBill Moore 	mutex_enter(&vd->vdev_probe_lock);
961e14bb325SJeff Bonwick 
962a3f829aeSBill Moore 	if ((pio = vd->vdev_probe_zio) == NULL) {
963a3f829aeSBill Moore 		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
964a3f829aeSBill Moore 
965a3f829aeSBill Moore 		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
966a3f829aeSBill Moore 		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
9678956713aSEric Schrock 		    ZIO_FLAG_TRYHARD;
968a3f829aeSBill Moore 
969a3f829aeSBill Moore 		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
970a3f829aeSBill Moore 			/*
971a3f829aeSBill Moore 			 * vdev_cant_read and vdev_cant_write can only
972a3f829aeSBill Moore 			 * transition from TRUE to FALSE when we have the
973a3f829aeSBill Moore 			 * SCL_ZIO lock as writer; otherwise they can only
974a3f829aeSBill Moore 			 * transition from FALSE to TRUE.  This ensures that
975a3f829aeSBill Moore 			 * any zio looking at these values can assume that
976a3f829aeSBill Moore 			 * failures persist for the life of the I/O.  That's
977a3f829aeSBill Moore 			 * important because when a device has intermittent
978a3f829aeSBill Moore 			 * connectivity problems, we want to ensure that
979a3f829aeSBill Moore 			 * they're ascribed to the device (ENXIO) and not
980a3f829aeSBill Moore 			 * the zio (EIO).
981a3f829aeSBill Moore 			 *
982a3f829aeSBill Moore 			 * Since we hold SCL_ZIO as writer here, clear both
983a3f829aeSBill Moore 			 * values so the probe can reevaluate from first
984a3f829aeSBill Moore 			 * principles.
985a3f829aeSBill Moore 			 */
986a3f829aeSBill Moore 			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
987a3f829aeSBill Moore 			vd->vdev_cant_read = B_FALSE;
988a3f829aeSBill Moore 			vd->vdev_cant_write = B_FALSE;
989a3f829aeSBill Moore 		}
990a3f829aeSBill Moore 
991a3f829aeSBill Moore 		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
992a3f829aeSBill Moore 		    vdev_probe_done, vps,
993a3f829aeSBill Moore 		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
994a3f829aeSBill Moore 
995a3f829aeSBill Moore 		if (zio != NULL) {
996a3f829aeSBill Moore 			vd->vdev_probe_wanted = B_TRUE;
997a3f829aeSBill Moore 			spa_async_request(spa, SPA_ASYNC_PROBE);
998a3f829aeSBill Moore 		}
999e14bb325SJeff Bonwick 	}
1000e14bb325SJeff Bonwick 
1001a3f829aeSBill Moore 	if (zio != NULL)
1002a3f829aeSBill Moore 		zio_add_child(zio, pio);
1003e14bb325SJeff Bonwick 
1004a3f829aeSBill Moore 	mutex_exit(&vd->vdev_probe_lock);
1005e14bb325SJeff Bonwick 
1006a3f829aeSBill Moore 	if (vps == NULL) {
1007a3f829aeSBill Moore 		ASSERT(zio != NULL);
1008a3f829aeSBill Moore 		return (NULL);
1009a3f829aeSBill Moore 	}
1010e14bb325SJeff Bonwick 
1011e14bb325SJeff Bonwick 	for (int l = 1; l < VDEV_LABELS; l++) {
1012a3f829aeSBill Moore 		zio_nowait(zio_read_phys(pio, vd,
1013e14bb325SJeff Bonwick 		    vdev_label_offset(vd->vdev_psize, l,
1014f83ffe1aSLin Ling 		    offsetof(vdev_label_t, vl_pad2)),
1015f83ffe1aSLin Ling 		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
1016e14bb325SJeff Bonwick 		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1017e14bb325SJeff Bonwick 		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1018e14bb325SJeff Bonwick 	}
1019e14bb325SJeff Bonwick 
1020a3f829aeSBill Moore 	if (zio == NULL)
1021a3f829aeSBill Moore 		return (pio);
1022a3f829aeSBill Moore 
1023a3f829aeSBill Moore 	zio_nowait(pio);
1024a3f829aeSBill Moore 	return (NULL);
10250a4e9518Sgw }
10260a4e9518Sgw 
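/*
 * Taskq callback used by vdev_open_children() to open one child vdev.
 */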
1027f64c0e34SEric Taylor static void
1028f64c0e34SEric Taylor vdev_open_child(void *arg)
1029f64c0e34SEric Taylor {
1030f64c0e34SEric Taylor 	vdev_t *vd = arg;
1031f64c0e34SEric Taylor 
1032f64c0e34SEric Taylor 	vd->vdev_open_thread = curthread;
1033f64c0e34SEric Taylor 	vd->vdev_open_error = vdev_open(vd);
1034f64c0e34SEric Taylor 	vd->vdev_open_thread = NULL;
1035f64c0e34SEric Taylor }
1036f64c0e34SEric Taylor 
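/*
 * Returns B_TRUE if this vdev or any of its descendants is backed by a
 * zvol (i.e. its path lives under ZVOL_DIR).
 */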
1037681d9761SEric Taylor boolean_t
1038681d9761SEric Taylor vdev_uses_zvols(vdev_t *vd)
1039681d9761SEric Taylor {
1040681d9761SEric Taylor 	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
1041681d9761SEric Taylor 	    strlen(ZVOL_DIR)) == 0)
1042681d9761SEric Taylor 		return (B_TRUE);
1043681d9761SEric Taylor 	for (int c = 0; c < vd->vdev_children; c++)
1044681d9761SEric Taylor 		if (vdev_uses_zvols(vd->vdev_child[c]))
1045681d9761SEric Taylor 			return (B_TRUE);
1046681d9761SEric Taylor 	return (B_FALSE);
1047681d9761SEric Taylor }
1048681d9761SEric Taylor 
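/*
 * Open all children of the given vdev, normally in parallel on a taskq.
 */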
1049f64c0e34SEric Taylor void
1050f64c0e34SEric Taylor vdev_open_children(vdev_t *vd)
1051f64c0e34SEric Taylor {
1052f64c0e34SEric Taylor 	taskq_t *tq;
1053f64c0e34SEric Taylor 	int children = vd->vdev_children;
1054f64c0e34SEric Taylor 
1055681d9761SEric Taylor 	/*
1056681d9761SEric Taylor 	 * In order to handle pools on top of zvols, do the opens
1057681d9761SEric Taylor 	 * in a single thread so that the same thread holds the
1058681d9761SEric Taylor 	 * spa_namespace_lock.
1059681d9761SEric Taylor 	 */
1060681d9761SEric Taylor 	if (vdev_uses_zvols(vd)) {
1061681d9761SEric Taylor 		for (int c = 0; c < children; c++)
1062681d9761SEric Taylor 			vd->vdev_child[c]->vdev_open_error =
1063681d9761SEric Taylor 			    vdev_open(vd->vdev_child[c]);
1064681d9761SEric Taylor 		return;
1065681d9761SEric Taylor 	}
1066f64c0e34SEric Taylor 	tq = taskq_create("vdev_open", children, minclsyspri,
1067f64c0e34SEric Taylor 	    children, children, TASKQ_PREPOPULATE);
1068f64c0e34SEric Taylor 
1069f64c0e34SEric Taylor 	for (int c = 0; c < children; c++)
1070f64c0e34SEric Taylor 		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
1071f64c0e34SEric Taylor 		    TQ_SLEEP) != NULL);
1072f64c0e34SEric Taylor 
1073f64c0e34SEric Taylor 	taskq_destroy(tq);
1074f64c0e34SEric Taylor }
1075f64c0e34SEric Taylor 
1076fa9e4066Sahrens /*
1077fa9e4066Sahrens  * Prepare a virtual device for access.
1078fa9e4066Sahrens  */
1079fa9e4066Sahrens int
1080fa9e4066Sahrens vdev_open(vdev_t *vd)
1081fa9e4066Sahrens {
10828ad4d6ddSJeff Bonwick 	spa_t *spa = vd->vdev_spa;
1083fa9e4066Sahrens 	int error;
1084fa9e4066Sahrens 	uint64_t osize = 0;
1085fa9e4066Sahrens 	uint64_t asize, psize;
1086ecc2d604Sbonwick 	uint64_t ashift = 0;
1087fa9e4066Sahrens 
1088f64c0e34SEric Taylor 	ASSERT(vd->vdev_open_thread == curthread ||
1089f64c0e34SEric Taylor 	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1090fa9e4066Sahrens 	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1091fa9e4066Sahrens 	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1092fa9e4066Sahrens 	    vd->vdev_state == VDEV_STATE_OFFLINE);
1093fa9e4066Sahrens 
1094fa9e4066Sahrens 	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1095e6ca193dSGeorge Wilson 	vd->vdev_cant_read = B_FALSE;
1096e6ca193dSGeorge Wilson 	vd->vdev_cant_write = B_FALSE;
1097573ca77eSGeorge Wilson 	vd->vdev_min_asize = vdev_get_min_asize(vd);
1098fa9e4066Sahrens 
1099069f55e2SEric Schrock 	/*
1100069f55e2SEric Schrock 	 * If this vdev is not removed, check its fault status.  If it's
1101069f55e2SEric Schrock 	 * faulted, bail out of the open.
1102069f55e2SEric Schrock 	 */
11033d7072f8Seschrock 	if (!vd->vdev_removed && vd->vdev_faulted) {
11043d7072f8Seschrock 		ASSERT(vd->vdev_children == 0);
1105069f55e2SEric Schrock 		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1106069f55e2SEric Schrock 		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
11073d7072f8Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1108069f55e2SEric Schrock 		    vd->vdev_label_aux);
11093d7072f8Seschrock 		return (ENXIO);
11103d7072f8Seschrock 	} else if (vd->vdev_offline) {
1111fa9e4066Sahrens 		ASSERT(vd->vdev_children == 0);
1112ea8dc4b6Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
1113fa9e4066Sahrens 		return (ENXIO);
1114fa9e4066Sahrens 	}
1115fa9e4066Sahrens 
1116fa9e4066Sahrens 	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
1117fa9e4066Sahrens 
1118095bcd66SGeorge Wilson 	/*
1119095bcd66SGeorge Wilson 	 * Reset the vdev_reopening flag so that we actually close
1120095bcd66SGeorge Wilson 	 * the vdev on error.
1121095bcd66SGeorge Wilson 	 */
1122095bcd66SGeorge Wilson 	vd->vdev_reopening = B_FALSE;
1123ea8dc4b6Seschrock 	if (zio_injection_enabled && error == 0)
11248956713aSEric Schrock 		error = zio_handle_device_injection(vd, NULL, ENXIO);
1125ea8dc4b6Seschrock 
1126fa9e4066Sahrens 	if (error) {
11273d7072f8Seschrock 		if (vd->vdev_removed &&
11283d7072f8Seschrock 		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
11293d7072f8Seschrock 			vd->vdev_removed = B_FALSE;
11303d7072f8Seschrock 
1131ea8dc4b6Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1132fa9e4066Sahrens 		    vd->vdev_stat.vs_aux);
1133fa9e4066Sahrens 		return (error);
1134fa9e4066Sahrens 	}
1135fa9e4066Sahrens 
11363d7072f8Seschrock 	vd->vdev_removed = B_FALSE;
11373d7072f8Seschrock 
1138096d22d4SEric Schrock 	/*
1139096d22d4SEric Schrock 	 * Recheck the faulted flag now that we have confirmed that
1140096d22d4SEric Schrock 	 * the vdev is accessible.  If we're faulted, bail.
1141096d22d4SEric Schrock 	 */
1142096d22d4SEric Schrock 	if (vd->vdev_faulted) {
1143096d22d4SEric Schrock 		ASSERT(vd->vdev_children == 0);
1144096d22d4SEric Schrock 		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
1145096d22d4SEric Schrock 		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
1146096d22d4SEric Schrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
1147096d22d4SEric Schrock 		    vd->vdev_label_aux);
1148096d22d4SEric Schrock 		return (ENXIO);
1149096d22d4SEric Schrock 	}
1150096d22d4SEric Schrock 
11513d7072f8Seschrock 	if (vd->vdev_degraded) {
11523d7072f8Seschrock 		ASSERT(vd->vdev_children == 0);
11533d7072f8Seschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
11543d7072f8Seschrock 		    VDEV_AUX_ERR_EXCEEDED);
11553d7072f8Seschrock 	} else {
1156069f55e2SEric Schrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
11573d7072f8Seschrock 	}
1158fa9e4066Sahrens 
115988ecc943SGeorge Wilson 	/*
116088ecc943SGeorge Wilson 	 * For hole or missing vdevs we just return success.
116188ecc943SGeorge Wilson 	 */
116288ecc943SGeorge Wilson 	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
116388ecc943SGeorge Wilson 		return (0);
116488ecc943SGeorge Wilson 
1165573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++) {
1166ea8dc4b6Seschrock 		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
1167ea8dc4b6Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
1168ea8dc4b6Seschrock 			    VDEV_AUX_NONE);
1169ea8dc4b6Seschrock 			break;
1170ea8dc4b6Seschrock 		}
1171573ca77eSGeorge Wilson 	}
1172fa9e4066Sahrens 
1173fa9e4066Sahrens 	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
1174fa9e4066Sahrens 
1175fa9e4066Sahrens 	if (vd->vdev_children == 0) {
1176fa9e4066Sahrens 		if (osize < SPA_MINDEVSIZE) {
1177ea8dc4b6Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1178ea8dc4b6Seschrock 			    VDEV_AUX_TOO_SMALL);
1179fa9e4066Sahrens 			return (EOVERFLOW);
1180fa9e4066Sahrens 		}
1181fa9e4066Sahrens 		psize = osize;
1182fa9e4066Sahrens 		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
1183fa9e4066Sahrens 	} else {
1184ecc2d604Sbonwick 		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
1185fa9e4066Sahrens 		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
1186ea8dc4b6Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1187ea8dc4b6Seschrock 			    VDEV_AUX_TOO_SMALL);
1188fa9e4066Sahrens 			return (EOVERFLOW);
1189fa9e4066Sahrens 		}
1190fa9e4066Sahrens 		psize = 0;
1191fa9e4066Sahrens 		asize = osize;
1192fa9e4066Sahrens 	}
1193fa9e4066Sahrens 
1194fa9e4066Sahrens 	vd->vdev_psize = psize;
1195fa9e4066Sahrens 
1196573ca77eSGeorge Wilson 	/*
1197573ca77eSGeorge Wilson 	 * Make sure the allocatable size hasn't shrunk.
1198573ca77eSGeorge Wilson 	 */
1199573ca77eSGeorge Wilson 	if (asize < vd->vdev_min_asize) {
1200573ca77eSGeorge Wilson 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1201573ca77eSGeorge Wilson 		    VDEV_AUX_BAD_LABEL);
1202573ca77eSGeorge Wilson 		return (EINVAL);
1203573ca77eSGeorge Wilson 	}
1204573ca77eSGeorge Wilson 
1205fa9e4066Sahrens 	if (vd->vdev_asize == 0) {
1206fa9e4066Sahrens 		/*
1207fa9e4066Sahrens 		 * This is the first-ever open, so use the computed values.
1208ecc2d604Sbonwick 		 * For testing purposes, a higher ashift can be requested.
1209fa9e4066Sahrens 		 */
1210fa9e4066Sahrens 		vd->vdev_asize = asize;
1211ecc2d604Sbonwick 		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
1212fa9e4066Sahrens 	} else {
1213fa9e4066Sahrens 		/*
1214fa9e4066Sahrens 		 * Make sure the alignment requirement hasn't increased.
1215fa9e4066Sahrens 		 */
1216ecc2d604Sbonwick 		if (ashift > vd->vdev_top->vdev_ashift) {
1217ea8dc4b6Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1218ea8dc4b6Seschrock 			    VDEV_AUX_BAD_LABEL);
1219fa9e4066Sahrens 			return (EINVAL);
1220fa9e4066Sahrens 		}
1221573ca77eSGeorge Wilson 	}
1222fa9e4066Sahrens 
1223573ca77eSGeorge Wilson 	/*
1224573ca77eSGeorge Wilson 	 * If all children are healthy and the asize has increased,
1225573ca77eSGeorge Wilson 	 * then we've experienced dynamic LUN growth.  If automatic
1226573ca77eSGeorge Wilson 	 * expansion is enabled then use the additional space.
1227573ca77eSGeorge Wilson 	 */
1228573ca77eSGeorge Wilson 	if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
1229573ca77eSGeorge Wilson 	    (vd->vdev_expanding || spa->spa_autoexpand))
1230573ca77eSGeorge Wilson 		vd->vdev_asize = asize;
1231fa9e4066Sahrens 
1232573ca77eSGeorge Wilson 	vdev_set_min_asize(vd);
1233fa9e4066Sahrens 
12340a4e9518Sgw 	/*
12350a4e9518Sgw 	 * Ensure we can issue some IO before declaring the
12360a4e9518Sgw 	 * vdev open for business.
12370a4e9518Sgw 	 */
1238e14bb325SJeff Bonwick 	if (vd->vdev_ops->vdev_op_leaf &&
1239e14bb325SJeff Bonwick 	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
12400a4e9518Sgw 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1241e14bb325SJeff Bonwick 		    VDEV_AUX_IO_FAILURE);
12420a4e9518Sgw 		return (error);
12430a4e9518Sgw 	}
12440a4e9518Sgw 
1245088f3894Sahrens 	/*
1246088f3894Sahrens 	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
12478ad4d6ddSJeff Bonwick 	 * resilver.  But don't do this if we are doing a reopen for a scrub,
12488ad4d6ddSJeff Bonwick 	 * since this would just restart the scrub we are already doing.
1249088f3894Sahrens 	 */
12508ad4d6ddSJeff Bonwick 	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
12518ad4d6ddSJeff Bonwick 	    vdev_resilver_needed(vd, NULL, NULL))
12528ad4d6ddSJeff Bonwick 		spa_async_request(spa, SPA_ASYNC_RESILVER);
1253088f3894Sahrens 
1254fa9e4066Sahrens 	return (0);
1255fa9e4066Sahrens }
1256fa9e4066Sahrens 
1257560e6e96Seschrock /*
1258560e6e96Seschrock  * Called once the vdevs are all opened, this routine validates the label
1259560e6e96Seschrock  * contents.  This needs to be done before vdev_load() so that we don't
12603d7072f8Seschrock  * inadvertently do repair I/Os to the wrong device.
1261560e6e96Seschrock  *
1262560e6e96Seschrock  * This function will only return failure if one of the vdevs indicates that it
1263560e6e96Seschrock  * has since been destroyed or exported.  This is only possible if
1264560e6e96Seschrock  * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
1265560e6e96Seschrock  * will be updated but the function will return 0.
1266560e6e96Seschrock  */
1267560e6e96Seschrock int
1268560e6e96Seschrock vdev_validate(vdev_t *vd)
1269560e6e96Seschrock {
1270560e6e96Seschrock 	spa_t *spa = vd->vdev_spa;
1271560e6e96Seschrock 	nvlist_t *label;
1272e14bb325SJeff Bonwick 	uint64_t guid, top_guid;
1273560e6e96Seschrock 	uint64_t state;
1274560e6e96Seschrock 
1275573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++)
1276560e6e96Seschrock 		if (vdev_validate(vd->vdev_child[c]) != 0)
12770bf246f5Smc 			return (EBADF);
1278560e6e96Seschrock 
1279b5989ec7Seschrock 	/*
1280b5989ec7Seschrock 	 * If the device has already failed, or was marked offline, don't do
1281b5989ec7Seschrock 	 * any further validation.  Otherwise, label I/O will fail and we will
1282b5989ec7Seschrock 	 * overwrite the previous state.
1283b5989ec7Seschrock 	 */
1284e14bb325SJeff Bonwick 	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
1285560e6e96Seschrock 
1286560e6e96Seschrock 		if ((label = vdev_label_read_config(vd)) == NULL) {
1287560e6e96Seschrock 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
1288560e6e96Seschrock 			    VDEV_AUX_BAD_LABEL);
1289560e6e96Seschrock 			return (0);
1290560e6e96Seschrock 		}
1291560e6e96Seschrock 
1292560e6e96Seschrock 		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
1293560e6e96Seschrock 		    &guid) != 0 || guid != spa_guid(spa)) {
1294560e6e96Seschrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1295560e6e96Seschrock 			    VDEV_AUX_CORRUPT_DATA);
1296560e6e96Seschrock 			nvlist_free(label);
1297560e6e96Seschrock 			return (0);
1298560e6e96Seschrock 		}
1299560e6e96Seschrock 
1300e14bb325SJeff Bonwick 		/*
1301e14bb325SJeff Bonwick 		 * If this vdev just became a top-level vdev because its
1302e14bb325SJeff Bonwick 		 * sibling was detached, it will have adopted the parent's
1303e14bb325SJeff Bonwick 		 * vdev guid -- but the label may or may not be on disk yet.
1304e14bb325SJeff Bonwick 		 * Fortunately, either version of the label will have the
1305e14bb325SJeff Bonwick 		 * same top guid, so if we're a top-level vdev, we can
1306e14bb325SJeff Bonwick 		 * safely compare to that instead.
1307e14bb325SJeff Bonwick 		 */
1308560e6e96Seschrock 		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
1309e14bb325SJeff Bonwick 		    &guid) != 0 ||
1310e14bb325SJeff Bonwick 		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
1311e14bb325SJeff Bonwick 		    &top_guid) != 0 ||
1312e14bb325SJeff Bonwick 		    (vd->vdev_guid != guid &&
1313e14bb325SJeff Bonwick 		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
1314560e6e96Seschrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1315560e6e96Seschrock 			    VDEV_AUX_CORRUPT_DATA);
1316560e6e96Seschrock 			nvlist_free(label);
1317560e6e96Seschrock 			return (0);
1318560e6e96Seschrock 		}
1319560e6e96Seschrock 
1320560e6e96Seschrock 		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1321560e6e96Seschrock 		    &state) != 0) {
1322560e6e96Seschrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1323560e6e96Seschrock 			    VDEV_AUX_CORRUPT_DATA);
1324560e6e96Seschrock 			nvlist_free(label);
1325560e6e96Seschrock 			return (0);
1326560e6e96Seschrock 		}
1327560e6e96Seschrock 
1328560e6e96Seschrock 		nvlist_free(label);
1329560e6e96Seschrock 
1330bc758434SLin Ling 		/*
1331bc758434SLin Ling 		 * If spa->spa_load_verbatim is true, no need to check the
1332bc758434SLin Ling 		 * state of the pool.
1333bc758434SLin Ling 		 */
1334bc758434SLin Ling 		if (!spa->spa_load_verbatim &&
1335bc758434SLin Ling 		    spa->spa_load_state == SPA_LOAD_OPEN &&
1336bc758434SLin Ling 		    state != POOL_STATE_ACTIVE)
13370bf246f5Smc 			return (EBADF);
1338560e6e96Seschrock 
133951ece835Seschrock 		/*
134051ece835Seschrock 		 * If we were able to open and validate a vdev that was
134151ece835Seschrock 		 * previously marked permanently unavailable, clear that state
134251ece835Seschrock 		 * now.
134351ece835Seschrock 		 */
134451ece835Seschrock 		if (vd->vdev_not_present)
134551ece835Seschrock 			vd->vdev_not_present = 0;
134651ece835Seschrock 	}
1347560e6e96Seschrock 
1348560e6e96Seschrock 	return (0);
1349560e6e96Seschrock }
1350560e6e96Seschrock 
1351fa9e4066Sahrens /*
1352fa9e4066Sahrens  * Close a virtual device.
1353fa9e4066Sahrens  */
1354fa9e4066Sahrens void
1355fa9e4066Sahrens vdev_close(vdev_t *vd)
1356fa9e4066Sahrens {
13578ad4d6ddSJeff Bonwick 	spa_t *spa = vd->vdev_spa;
1358095bcd66SGeorge Wilson 	vdev_t *pvd = vd->vdev_parent;
13598ad4d6ddSJeff Bonwick 
13608ad4d6ddSJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
13618ad4d6ddSJeff Bonwick 
1362095bcd66SGeorge Wilson 	if (pvd != NULL && pvd->vdev_reopening)
1363095bcd66SGeorge Wilson 		vd->vdev_reopening = pvd->vdev_reopening;
1364095bcd66SGeorge Wilson 
1365fa9e4066Sahrens 	vd->vdev_ops->vdev_op_close(vd);
1366fa9e4066Sahrens 
13673d7072f8Seschrock 	vdev_cache_purge(vd);
1368fa9e4066Sahrens 
1369560e6e96Seschrock 	/*
1370573ca77eSGeorge Wilson 	 * We record the previous state before we close it, so that if we are
1371560e6e96Seschrock 	 * doing a reopen(), we don't generate FMA ereports if we notice that
1372560e6e96Seschrock 	 * it's still faulted.
1373560e6e96Seschrock 	 */
1374560e6e96Seschrock 	vd->vdev_prevstate = vd->vdev_state;
1375560e6e96Seschrock 
1376fa9e4066Sahrens 	if (vd->vdev_offline)
1377fa9e4066Sahrens 		vd->vdev_state = VDEV_STATE_OFFLINE;
1378fa9e4066Sahrens 	else
1379fa9e4066Sahrens 		vd->vdev_state = VDEV_STATE_CLOSED;
1380ea8dc4b6Seschrock 	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
1381fa9e4066Sahrens }
1382fa9e4066Sahrens 
1383095bcd66SGeorge Wilson /*
1384095bcd66SGeorge Wilson  * Reopen all interior vdevs and any unopened leaves.  We don't actually
1385095bcd66SGeorge Wilson  * reopen leaf vdevs which had previously been opened as they might deadlock
1386095bcd66SGeorge Wilson  * on the spa_config_lock.  Instead we only obtain the leaf's physical size.
1387095bcd66SGeorge Wilson  * If the leaf has never been opened then open it, as usual.
1388095bcd66SGeorge Wilson  */
1389fa9e4066Sahrens void
1390ea8dc4b6Seschrock vdev_reopen(vdev_t *vd)
1391fa9e4066Sahrens {
1392ea8dc4b6Seschrock 	spa_t *spa = vd->vdev_spa;
1393fa9e4066Sahrens 
1394e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1395ea8dc4b6Seschrock 
1396095bcd66SGeorge Wilson 	vd->vdev_reopening = B_TRUE;
1397fa9e4066Sahrens 	vdev_close(vd);
1398fa9e4066Sahrens 	(void) vdev_open(vd);
1399fa9e4066Sahrens 
140039c23413Seschrock 	/*
140139c23413Seschrock 	 * Call vdev_validate() here to make sure we have the same device.
140239c23413Seschrock 	 * Otherwise, a device with an invalid label could be successfully
140339c23413Seschrock 	 * opened in response to vdev_reopen().
140439c23413Seschrock 	 */
1405c5904d13Seschrock 	if (vd->vdev_aux) {
1406c5904d13Seschrock 		(void) vdev_validate_aux(vd);
1407e14bb325SJeff Bonwick 		if (vdev_readable(vd) && vdev_writeable(vd) &&
14086809eb4eSEric Schrock 		    vd->vdev_aux == &spa->spa_l2cache &&
1409573ca77eSGeorge Wilson 		    !l2arc_vdev_present(vd))
1410573ca77eSGeorge Wilson 			l2arc_add_vdev(spa, vd);
1411c5904d13Seschrock 	} else {
1412c5904d13Seschrock 		(void) vdev_validate(vd);
1413c5904d13Seschrock 	}
141439c23413Seschrock 
1415fa9e4066Sahrens 	/*
14163d7072f8Seschrock 	 * Reassess parent vdev's health.
1417fa9e4066Sahrens 	 */
14183d7072f8Seschrock 	vdev_propagate_state(vd);
1419fa9e4066Sahrens }
1420fa9e4066Sahrens 
1421fa9e4066Sahrens int
142299653d4eSeschrock vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
1423fa9e4066Sahrens {
1424fa9e4066Sahrens 	int error;
1425fa9e4066Sahrens 
1426fa9e4066Sahrens 	/*
1427fa9e4066Sahrens 	 * Normally, partial opens (e.g. of a mirror) are allowed.
1428fa9e4066Sahrens 	 * For a create, however, we want to fail the request if
1429fa9e4066Sahrens 	 * there are any components we can't open.
1430fa9e4066Sahrens 	 */
1431fa9e4066Sahrens 	error = vdev_open(vd);
1432fa9e4066Sahrens 
1433fa9e4066Sahrens 	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
1434fa9e4066Sahrens 		vdev_close(vd);
1435fa9e4066Sahrens 		return (error ? error : ENXIO);
1436fa9e4066Sahrens 	}
1437fa9e4066Sahrens 
1438fa9e4066Sahrens 	/*
1439fa9e4066Sahrens 	 * Recursively initialize all labels.
1440fa9e4066Sahrens 	 */
144139c23413Seschrock 	if ((error = vdev_label_init(vd, txg, isreplacing ?
144239c23413Seschrock 	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
1443fa9e4066Sahrens 		vdev_close(vd);
1444fa9e4066Sahrens 		return (error);
1445fa9e4066Sahrens 	}
1446fa9e4066Sahrens 
1447fa9e4066Sahrens 	return (0);
1448fa9e4066Sahrens }
1449fa9e4066Sahrens 
14500e34b6a7Sbonwick void
1451573ca77eSGeorge Wilson vdev_metaslab_set_size(vdev_t *vd)
1452fa9e4066Sahrens {
1453fa9e4066Sahrens 	/*
1454fa9e4066Sahrens 	 * Aim for roughly 200 metaslabs per vdev.
1455fa9e4066Sahrens 	 */
1456fa9e4066Sahrens 	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
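	/*
	 * But never make a metaslab smaller than the maximum block size.
	 */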
1457fa9e4066Sahrens 	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
1458fa9e4066Sahrens }
1459fa9e4066Sahrens 
1460fa9e4066Sahrens void
1461ecc2d604Sbonwick vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
1462fa9e4066Sahrens {
1463ecc2d604Sbonwick 	ASSERT(vd == vd->vdev_top);
146488ecc943SGeorge Wilson 	ASSERT(!vd->vdev_ishole);
1465ecc2d604Sbonwick 	ASSERT(ISP2(flags));
1466fa9e4066Sahrens 
1467ecc2d604Sbonwick 	if (flags & VDD_METASLAB)
1468ecc2d604Sbonwick 		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
1469ecc2d604Sbonwick 
1470ecc2d604Sbonwick 	if (flags & VDD_DTL)
1471ecc2d604Sbonwick 		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
1472ecc2d604Sbonwick 
1473ecc2d604Sbonwick 	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
1474fa9e4066Sahrens }
1475fa9e4066Sahrens 
14768ad4d6ddSJeff Bonwick /*
14778ad4d6ddSJeff Bonwick  * DTLs.
14788ad4d6ddSJeff Bonwick  *
14798ad4d6ddSJeff Bonwick  * A vdev's DTL (dirty time log) is the set of transaction groups for which
14808ad4d6ddSJeff Bonwick  * the vdev has less than perfect replication.  There are four kinds of DTL:
14818ad4d6ddSJeff Bonwick  *
14828ad4d6ddSJeff Bonwick  * DTL_MISSING: txgs for which the vdev has no valid copies of the data
14838ad4d6ddSJeff Bonwick  *
14848ad4d6ddSJeff Bonwick  * DTL_PARTIAL: txgs for which data is available, but not fully replicated
14858ad4d6ddSJeff Bonwick  *
14868ad4d6ddSJeff Bonwick  * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
14878ad4d6ddSJeff Bonwick  *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
14888ad4d6ddSJeff Bonwick  *	txgs that was scrubbed.
14898ad4d6ddSJeff Bonwick  *
14908ad4d6ddSJeff Bonwick  * DTL_OUTAGE: txgs which cannot currently be read, whether due to
14918ad4d6ddSJeff Bonwick  *	persistent errors or just some device being offline.
14928ad4d6ddSJeff Bonwick  *	Unlike the other three, the DTL_OUTAGE map is not generally
14938ad4d6ddSJeff Bonwick  *	maintained; it's only computed when needed, typically to
14948ad4d6ddSJeff Bonwick  *	determine whether a device can be detached.
14958ad4d6ddSJeff Bonwick  *
14968ad4d6ddSJeff Bonwick  * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
14978ad4d6ddSJeff Bonwick  * either has the data or it doesn't.
14988ad4d6ddSJeff Bonwick  *
14998ad4d6ddSJeff Bonwick  * For interior vdevs such as mirror and RAID-Z the picture is more complex.
15008ad4d6ddSJeff Bonwick  * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
15018ad4d6ddSJeff Bonwick  * if any child is less than fully replicated, then so is its parent.
15028ad4d6ddSJeff Bonwick  * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
15038ad4d6ddSJeff Bonwick  * comprising only those txgs which appear in 'maxfaults' or more children;
15048ad4d6ddSJeff Bonwick  * those are the txgs we don't have enough replication to read.  For example,
15058ad4d6ddSJeff Bonwick  * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 3);
15068ad4d6ddSJeff Bonwick  * thus, its DTL_MISSING consists of the set of txgs that appear in more than
15078ad4d6ddSJeff Bonwick  * two child DTL_MISSING maps.
15088ad4d6ddSJeff Bonwick  *
15098ad4d6ddSJeff Bonwick  * It should be clear from the above that to compute the DTLs and outage maps
15108ad4d6ddSJeff Bonwick  * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
15118ad4d6ddSJeff Bonwick  * Therefore, that is all we keep on disk.  When loading the pool, or after
15128ad4d6ddSJeff Bonwick  * a configuration change, we generate all other DTLs from first principles.
15138ad4d6ddSJeff Bonwick  */
1514fa9e4066Sahrens void
15158ad4d6ddSJeff Bonwick vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1516fa9e4066Sahrens {
15178ad4d6ddSJeff Bonwick 	space_map_t *sm = &vd->vdev_dtl[t];
15188ad4d6ddSJeff Bonwick 
15198ad4d6ddSJeff Bonwick 	ASSERT(t < DTL_TYPES);
15208ad4d6ddSJeff Bonwick 	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
15218ad4d6ddSJeff Bonwick 
1522fa9e4066Sahrens 	mutex_enter(sm->sm_lock);
1523fa9e4066Sahrens 	if (!space_map_contains(sm, txg, size))
1524fa9e4066Sahrens 		space_map_add(sm, txg, size);
1525fa9e4066Sahrens 	mutex_exit(sm->sm_lock);
1526fa9e4066Sahrens }
1527fa9e4066Sahrens 
15288ad4d6ddSJeff Bonwick boolean_t
15298ad4d6ddSJeff Bonwick vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
1530fa9e4066Sahrens {
15318ad4d6ddSJeff Bonwick 	space_map_t *sm = &vd->vdev_dtl[t];
15328ad4d6ddSJeff Bonwick 	boolean_t dirty = B_FALSE;
1533fa9e4066Sahrens 
15348ad4d6ddSJeff Bonwick 	ASSERT(t < DTL_TYPES);
15358ad4d6ddSJeff Bonwick 	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
1536fa9e4066Sahrens 
1537fa9e4066Sahrens 	mutex_enter(sm->sm_lock);
15388ad4d6ddSJeff Bonwick 	if (sm->sm_space != 0)
15398ad4d6ddSJeff Bonwick 		dirty = space_map_contains(sm, txg, size);
1540fa9e4066Sahrens 	mutex_exit(sm->sm_lock);
1541fa9e4066Sahrens 
1542fa9e4066Sahrens 	return (dirty);
1543fa9e4066Sahrens }
1544fa9e4066Sahrens 
15458ad4d6ddSJeff Bonwick boolean_t
15468ad4d6ddSJeff Bonwick vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
15478ad4d6ddSJeff Bonwick {
15488ad4d6ddSJeff Bonwick 	space_map_t *sm = &vd->vdev_dtl[t];
15498ad4d6ddSJeff Bonwick 	boolean_t empty;
15508ad4d6ddSJeff Bonwick 
15518ad4d6ddSJeff Bonwick 	mutex_enter(sm->sm_lock);
15528ad4d6ddSJeff Bonwick 	empty = (sm->sm_space == 0);
15538ad4d6ddSJeff Bonwick 	mutex_exit(sm->sm_lock);
15548ad4d6ddSJeff Bonwick 
15558ad4d6ddSJeff Bonwick 	return (empty);
15568ad4d6ddSJeff Bonwick }
15578ad4d6ddSJeff Bonwick 
1558fa9e4066Sahrens /*
1559fa9e4066Sahrens  * Reassess DTLs after a config change or scrub completion.
1560fa9e4066Sahrens  */
1561fa9e4066Sahrens void
1562fa9e4066Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
1563fa9e4066Sahrens {
1564ea8dc4b6Seschrock 	spa_t *spa = vd->vdev_spa;
15658ad4d6ddSJeff Bonwick 	avl_tree_t reftree;
15668ad4d6ddSJeff Bonwick 	int minref;
1567fa9e4066Sahrens 
15688ad4d6ddSJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1569fa9e4066Sahrens 
15708ad4d6ddSJeff Bonwick 	for (int c = 0; c < vd->vdev_children; c++)
15718ad4d6ddSJeff Bonwick 		vdev_dtl_reassess(vd->vdev_child[c], txg,
15728ad4d6ddSJeff Bonwick 		    scrub_txg, scrub_done);
15738ad4d6ddSJeff Bonwick 
1574*b24ab676SJeff Bonwick 	if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
15758ad4d6ddSJeff Bonwick 		return;
15768ad4d6ddSJeff Bonwick 
15778ad4d6ddSJeff Bonwick 	if (vd->vdev_ops->vdev_op_leaf) {
1578fa9e4066Sahrens 		mutex_enter(&vd->vdev_dtl_lock);
1579088f3894Sahrens 		if (scrub_txg != 0 &&
1580088f3894Sahrens 		    (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
1581088f3894Sahrens 			/* XXX should check scrub_done? */
1582088f3894Sahrens 			/*
1583088f3894Sahrens 			 * We completed a scrub up to scrub_txg.  If we
1584088f3894Sahrens 			 * did it without rebooting, then the scrub dtl
1585088f3894Sahrens 			 * will be valid, so excise the old region and
1586088f3894Sahrens 			 * fold in the scrub dtl.  Otherwise, leave the
1587088f3894Sahrens 			 * dtl as-is if there was an error.
15888ad4d6ddSJeff Bonwick 			 *
15898ad4d6ddSJeff Bonwick 			 * There's a little trick here: to excise the beginning
15908ad4d6ddSJeff Bonwick 			 * of the DTL_MISSING map, we put it into a reference
15918ad4d6ddSJeff Bonwick 			 * tree and then add a segment with refcnt -1 that
15928ad4d6ddSJeff Bonwick 			 * covers the range [0, scrub_txg).  This means
15938ad4d6ddSJeff Bonwick 			 * that each txg in that range has refcnt -1 or 0.
15948ad4d6ddSJeff Bonwick 			 * We then add DTL_SCRUB with a refcnt of 2, so that
15958ad4d6ddSJeff Bonwick 			 * entries in the range [0, scrub_txg) will have a
15968ad4d6ddSJeff Bonwick 			 * positive refcnt -- either 1 or 2.  We then convert
15978ad4d6ddSJeff Bonwick 			 * the reference tree into the new DTL_MISSING map.
1598088f3894Sahrens 			 */
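			/*
			 * Worked example: a txg below scrub_txg that was in
			 * DTL_MISSING but not in DTL_SCRUB nets 1 - 1 = 0 and
			 * is excised; one still in DTL_SCRUB nets
			 * 1 - 1 + 2 = 2 and is kept; a missing txg at or
			 * above scrub_txg keeps its refcnt of 1 and is also
			 * kept.
			 */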
15998ad4d6ddSJeff Bonwick 			space_map_ref_create(&reftree);
16008ad4d6ddSJeff Bonwick 			space_map_ref_add_map(&reftree,
16018ad4d6ddSJeff Bonwick 			    &vd->vdev_dtl[DTL_MISSING], 1);
16028ad4d6ddSJeff Bonwick 			space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
16038ad4d6ddSJeff Bonwick 			space_map_ref_add_map(&reftree,
16048ad4d6ddSJeff Bonwick 			    &vd->vdev_dtl[DTL_SCRUB], 2);
16058ad4d6ddSJeff Bonwick 			space_map_ref_generate_map(&reftree,
16068ad4d6ddSJeff Bonwick 			    &vd->vdev_dtl[DTL_MISSING], 1);
16078ad4d6ddSJeff Bonwick 			space_map_ref_destroy(&reftree);
1608fa9e4066Sahrens 		}
16098ad4d6ddSJeff Bonwick 		space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
16108ad4d6ddSJeff Bonwick 		space_map_walk(&vd->vdev_dtl[DTL_MISSING],
16118ad4d6ddSJeff Bonwick 		    space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
1612fa9e4066Sahrens 		if (scrub_done)
16138ad4d6ddSJeff Bonwick 			space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
16148ad4d6ddSJeff Bonwick 		space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
16158ad4d6ddSJeff Bonwick 		if (!vdev_readable(vd))
16168ad4d6ddSJeff Bonwick 			space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
16178ad4d6ddSJeff Bonwick 		else
16188ad4d6ddSJeff Bonwick 			space_map_walk(&vd->vdev_dtl[DTL_MISSING],
16198ad4d6ddSJeff Bonwick 			    space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
1620fa9e4066Sahrens 		mutex_exit(&vd->vdev_dtl_lock);
1621088f3894Sahrens 
1622ecc2d604Sbonwick 		if (txg != 0)
1623ecc2d604Sbonwick 			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
1624fa9e4066Sahrens 		return;
1625fa9e4066Sahrens 	}
1626fa9e4066Sahrens 
1627fa9e4066Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
16288ad4d6ddSJeff Bonwick 	for (int t = 0; t < DTL_TYPES; t++) {
162999bb17e2SEric Taylor 		/* account for child's outage in parent's missing map */
163099bb17e2SEric Taylor 		int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
16318ad4d6ddSJeff Bonwick 		if (t == DTL_SCRUB)
16328ad4d6ddSJeff Bonwick 			continue;			/* leaf vdevs only */
16338ad4d6ddSJeff Bonwick 		if (t == DTL_PARTIAL)
16348ad4d6ddSJeff Bonwick 			minref = 1;			/* i.e. non-zero */
16358ad4d6ddSJeff Bonwick 		else if (vd->vdev_nparity != 0)
16368ad4d6ddSJeff Bonwick 			minref = vd->vdev_nparity + 1;	/* RAID-Z */
16378ad4d6ddSJeff Bonwick 		else
16388ad4d6ddSJeff Bonwick 			minref = vd->vdev_children;	/* any kind of mirror */
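		/*
		 * For example, a three-way mirror marks a txg missing only
		 * when all three children are missing it (minref == 3),
		 * whereas raidz2 (nparity == 2) marks it missing once it
		 * appears in three or more child maps (minref == 3 as well).
		 */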
16398ad4d6ddSJeff Bonwick 		space_map_ref_create(&reftree);
16408ad4d6ddSJeff Bonwick 		for (int c = 0; c < vd->vdev_children; c++) {
16418ad4d6ddSJeff Bonwick 			vdev_t *cvd = vd->vdev_child[c];
16428ad4d6ddSJeff Bonwick 			mutex_enter(&cvd->vdev_dtl_lock);
164399bb17e2SEric Taylor 			space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1);
16448ad4d6ddSJeff Bonwick 			mutex_exit(&cvd->vdev_dtl_lock);
16458ad4d6ddSJeff Bonwick 		}
16468ad4d6ddSJeff Bonwick 		space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
16478ad4d6ddSJeff Bonwick 		space_map_ref_destroy(&reftree);
1648fa9e4066Sahrens 	}
16498ad4d6ddSJeff Bonwick 	mutex_exit(&vd->vdev_dtl_lock);
1650fa9e4066Sahrens }
1651fa9e4066Sahrens 
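/*
 * Load a leaf vdev's persistent DTL_MISSING space map, if one exists in the
 * MOS, into memory.
 */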
1652fa9e4066Sahrens static int
1653fa9e4066Sahrens vdev_dtl_load(vdev_t *vd)
1654fa9e4066Sahrens {
1655fa9e4066Sahrens 	spa_t *spa = vd->vdev_spa;
16568ad4d6ddSJeff Bonwick 	space_map_obj_t *smo = &vd->vdev_dtl_smo;
1657ecc2d604Sbonwick 	objset_t *mos = spa->spa_meta_objset;
1658fa9e4066Sahrens 	dmu_buf_t *db;
1659fa9e4066Sahrens 	int error;
1660fa9e4066Sahrens 
1661fa9e4066Sahrens 	ASSERT(vd->vdev_children == 0);
1662fa9e4066Sahrens 
1663fa9e4066Sahrens 	if (smo->smo_object == 0)
1664fa9e4066Sahrens 		return (0);
1665fa9e4066Sahrens 
166688ecc943SGeorge Wilson 	ASSERT(!vd->vdev_ishole);
166788ecc943SGeorge Wilson 
1668ecc2d604Sbonwick 	if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
1669ea8dc4b6Seschrock 		return (error);
1670ecc2d604Sbonwick 
16711934e92fSmaybee 	ASSERT3U(db->db_size, >=, sizeof (*smo));
16721934e92fSmaybee 	bcopy(db->db_data, smo, sizeof (*smo));
1673ea8dc4b6Seschrock 	dmu_buf_rele(db, FTAG);
1674fa9e4066Sahrens 
1675fa9e4066Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
16768ad4d6ddSJeff Bonwick 	error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
16778ad4d6ddSJeff Bonwick 	    NULL, SM_ALLOC, smo, mos);
1678fa9e4066Sahrens 	mutex_exit(&vd->vdev_dtl_lock);
1679fa9e4066Sahrens 
1680fa9e4066Sahrens 	return (error);
1681fa9e4066Sahrens }
1682fa9e4066Sahrens 
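/*
 * Sync the in-core DTL_MISSING map out to the MOS for this txg, allocating
 * the space map object on first use and freeing it if the vdev has been
 * detached.
 */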
1683fa9e4066Sahrens void
1684fa9e4066Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg)
1685fa9e4066Sahrens {
1686fa9e4066Sahrens 	spa_t *spa = vd->vdev_spa;
16878ad4d6ddSJeff Bonwick 	space_map_obj_t *smo = &vd->vdev_dtl_smo;
16888ad4d6ddSJeff Bonwick 	space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
1689ecc2d604Sbonwick 	objset_t *mos = spa->spa_meta_objset;
1690fa9e4066Sahrens 	space_map_t smsync;
1691fa9e4066Sahrens 	kmutex_t smlock;
1692fa9e4066Sahrens 	dmu_buf_t *db;
1693fa9e4066Sahrens 	dmu_tx_t *tx;
1694fa9e4066Sahrens 
169588ecc943SGeorge Wilson 	ASSERT(!vd->vdev_ishole);
169688ecc943SGeorge Wilson 
1697fa9e4066Sahrens 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1698fa9e4066Sahrens 
1699fa9e4066Sahrens 	if (vd->vdev_detached) {
1700fa9e4066Sahrens 		if (smo->smo_object != 0) {
1701ecc2d604Sbonwick 			int err = dmu_object_free(mos, smo->smo_object, tx);
1702fa9e4066Sahrens 			ASSERT3U(err, ==, 0);
1703fa9e4066Sahrens 			smo->smo_object = 0;
1704fa9e4066Sahrens 		}
1705fa9e4066Sahrens 		dmu_tx_commit(tx);
1706fa9e4066Sahrens 		return;
1707fa9e4066Sahrens 	}
1708fa9e4066Sahrens 
1709fa9e4066Sahrens 	if (smo->smo_object == 0) {
1710fa9e4066Sahrens 		ASSERT(smo->smo_objsize == 0);
1711fa9e4066Sahrens 		ASSERT(smo->smo_alloc == 0);
1712ecc2d604Sbonwick 		smo->smo_object = dmu_object_alloc(mos,
1713fa9e4066Sahrens 		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1714fa9e4066Sahrens 		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1715fa9e4066Sahrens 		ASSERT(smo->smo_object != 0);
1716fa9e4066Sahrens 		vdev_config_dirty(vd->vdev_top);
1717fa9e4066Sahrens 	}
1718fa9e4066Sahrens 
1719fa9e4066Sahrens 	mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);
1720fa9e4066Sahrens 
1721fa9e4066Sahrens 	space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
1722fa9e4066Sahrens 	    &smlock);
1723fa9e4066Sahrens 
1724fa9e4066Sahrens 	mutex_enter(&smlock);
1725fa9e4066Sahrens 
1726fa9e4066Sahrens 	mutex_enter(&vd->vdev_dtl_lock);
1727ecc2d604Sbonwick 	space_map_walk(sm, space_map_add, &smsync);
1728fa9e4066Sahrens 	mutex_exit(&vd->vdev_dtl_lock);
1729fa9e4066Sahrens 
1730ecc2d604Sbonwick 	space_map_truncate(smo, mos, tx);
1731ecc2d604Sbonwick 	space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
1732fa9e4066Sahrens 
1733fa9e4066Sahrens 	space_map_destroy(&smsync);
1734fa9e4066Sahrens 
1735fa9e4066Sahrens 	mutex_exit(&smlock);
1736fa9e4066Sahrens 	mutex_destroy(&smlock);
1737fa9e4066Sahrens 
1738ecc2d604Sbonwick 	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1739fa9e4066Sahrens 	dmu_buf_will_dirty(db, tx);
17401934e92fSmaybee 	ASSERT3U(db->db_size, >=, sizeof (*smo));
17411934e92fSmaybee 	bcopy(smo, db->db_data, sizeof (*smo));
1742ea8dc4b6Seschrock 	dmu_buf_rele(db, FTAG);
1743fa9e4066Sahrens 
1744fa9e4066Sahrens 	dmu_tx_commit(tx);
1745fa9e4066Sahrens }
1746fa9e4066Sahrens 
17478ad4d6ddSJeff Bonwick /*
17488ad4d6ddSJeff Bonwick  * Determine whether the specified vdev can be offlined/detached/removed
17498ad4d6ddSJeff Bonwick  * without losing data.
17508ad4d6ddSJeff Bonwick  */
17518ad4d6ddSJeff Bonwick boolean_t
17528ad4d6ddSJeff Bonwick vdev_dtl_required(vdev_t *vd)
17538ad4d6ddSJeff Bonwick {
17548ad4d6ddSJeff Bonwick 	spa_t *spa = vd->vdev_spa;
17558ad4d6ddSJeff Bonwick 	vdev_t *tvd = vd->vdev_top;
17568ad4d6ddSJeff Bonwick 	uint8_t cant_read = vd->vdev_cant_read;
17578ad4d6ddSJeff Bonwick 	boolean_t required;
17588ad4d6ddSJeff Bonwick 
17598ad4d6ddSJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
17608ad4d6ddSJeff Bonwick 
17618ad4d6ddSJeff Bonwick 	if (vd == spa->spa_root_vdev || vd == tvd)
17628ad4d6ddSJeff Bonwick 		return (B_TRUE);
17638ad4d6ddSJeff Bonwick 
17648ad4d6ddSJeff Bonwick 	/*
17658ad4d6ddSJeff Bonwick 	 * Temporarily mark the device as unreadable, and then determine
17668ad4d6ddSJeff Bonwick 	 * whether this results in any DTL outages in the top-level vdev.
17678ad4d6ddSJeff Bonwick 	 * If not, we can safely offline/detach/remove the device.
17688ad4d6ddSJeff Bonwick 	 */
17698ad4d6ddSJeff Bonwick 	vd->vdev_cant_read = B_TRUE;
17708ad4d6ddSJeff Bonwick 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
17718ad4d6ddSJeff Bonwick 	required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
17728ad4d6ddSJeff Bonwick 	vd->vdev_cant_read = cant_read;
17738ad4d6ddSJeff Bonwick 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
17748ad4d6ddSJeff Bonwick 
17758ad4d6ddSJeff Bonwick 	return (required);
17768ad4d6ddSJeff Bonwick }
17778ad4d6ddSJeff Bonwick 
1778088f3894Sahrens /*
1779088f3894Sahrens  * Determine if resilver is needed, and if so the txg range.
1780088f3894Sahrens  */
1781088f3894Sahrens boolean_t
1782088f3894Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
1783088f3894Sahrens {
1784088f3894Sahrens 	boolean_t needed = B_FALSE;
1785088f3894Sahrens 	uint64_t thismin = UINT64_MAX;
1786088f3894Sahrens 	uint64_t thismax = 0;
1787088f3894Sahrens 
1788088f3894Sahrens 	if (vd->vdev_children == 0) {
1789088f3894Sahrens 		mutex_enter(&vd->vdev_dtl_lock);
17908ad4d6ddSJeff Bonwick 		if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
17918ad4d6ddSJeff Bonwick 		    vdev_writeable(vd)) {
1792088f3894Sahrens 			space_seg_t *ss;
1793088f3894Sahrens 
17948ad4d6ddSJeff Bonwick 			ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
1795088f3894Sahrens 			thismin = ss->ss_start - 1;
17968ad4d6ddSJeff Bonwick 			ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
1797088f3894Sahrens 			thismax = ss->ss_end;
1798088f3894Sahrens 			needed = B_TRUE;
1799088f3894Sahrens 		}
1800088f3894Sahrens 		mutex_exit(&vd->vdev_dtl_lock);
1801088f3894Sahrens 	} else {
18028ad4d6ddSJeff Bonwick 		for (int c = 0; c < vd->vdev_children; c++) {
1803088f3894Sahrens 			vdev_t *cvd = vd->vdev_child[c];
1804088f3894Sahrens 			uint64_t cmin, cmax;
1805088f3894Sahrens 
1806088f3894Sahrens 			if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
1807088f3894Sahrens 				thismin = MIN(thismin, cmin);
1808088f3894Sahrens 				thismax = MAX(thismax, cmax);
1809088f3894Sahrens 				needed = B_TRUE;
1810088f3894Sahrens 			}
1811088f3894Sahrens 		}
1812088f3894Sahrens 	}
1813088f3894Sahrens 
1814088f3894Sahrens 	if (needed && minp) {
1815088f3894Sahrens 		*minp = thismin;
1816088f3894Sahrens 		*maxp = thismax;
1817088f3894Sahrens 	}
1818088f3894Sahrens 	return (needed);
1819088f3894Sahrens }
1820088f3894Sahrens 
1821560e6e96Seschrock void
1822ea8dc4b6Seschrock vdev_load(vdev_t *vd)
1823fa9e4066Sahrens {
1824fa9e4066Sahrens 	/*
1825fa9e4066Sahrens 	 * Recursively load all children.
1826fa9e4066Sahrens 	 */
18278ad4d6ddSJeff Bonwick 	for (int c = 0; c < vd->vdev_children; c++)
1828560e6e96Seschrock 		vdev_load(vd->vdev_child[c]);
1829fa9e4066Sahrens 
1830fa9e4066Sahrens 	/*
18310e34b6a7Sbonwick 	 * If this is a top-level vdev, initialize its metaslabs.
1832fa9e4066Sahrens 	 */
183388ecc943SGeorge Wilson 	if (vd == vd->vdev_top && !vd->vdev_ishole &&
1834560e6e96Seschrock 	    (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
1835560e6e96Seschrock 	    vdev_metaslab_init(vd, 0) != 0))
1836560e6e96Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1837560e6e96Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1838fa9e4066Sahrens 
1839fa9e4066Sahrens 	/*
1840fa9e4066Sahrens 	 * If this is a leaf vdev, load its DTL.
1841fa9e4066Sahrens 	 */
1842560e6e96Seschrock 	if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
1843560e6e96Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
1844560e6e96Seschrock 		    VDEV_AUX_CORRUPT_DATA);
1845fa9e4066Sahrens }
1846fa9e4066Sahrens 
184799653d4eSeschrock /*
1848fa94a07fSbrendan  * The special vdev case is used for hot spares and l2cache devices.  Its
1849fa94a07fSbrendan  * sole purpose is to set the vdev state for the associated vdev.  To do this,
1850fa94a07fSbrendan  * we make sure that we can open the underlying device, then try to read the
1851fa94a07fSbrendan  * label, and make sure that the label is sane and that it hasn't been
1852fa94a07fSbrendan  * repurposed to another pool.
185399653d4eSeschrock  */
185499653d4eSeschrock int
1855fa94a07fSbrendan vdev_validate_aux(vdev_t *vd)
185699653d4eSeschrock {
185799653d4eSeschrock 	nvlist_t *label;
185899653d4eSeschrock 	uint64_t guid, version;
185999653d4eSeschrock 	uint64_t state;
186099653d4eSeschrock 
1861e14bb325SJeff Bonwick 	if (!vdev_readable(vd))
1862c5904d13Seschrock 		return (0);
1863c5904d13Seschrock 
186499653d4eSeschrock 	if ((label = vdev_label_read_config(vd)) == NULL) {
186599653d4eSeschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
186699653d4eSeschrock 		    VDEV_AUX_CORRUPT_DATA);
186799653d4eSeschrock 		return (-1);
186899653d4eSeschrock 	}
186999653d4eSeschrock 
187099653d4eSeschrock 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
1871e7437265Sahrens 	    version > SPA_VERSION ||
187299653d4eSeschrock 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
187399653d4eSeschrock 	    guid != vd->vdev_guid ||
187499653d4eSeschrock 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
187599653d4eSeschrock 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
187699653d4eSeschrock 		    VDEV_AUX_CORRUPT_DATA);
187799653d4eSeschrock 		nvlist_free(label);
187899653d4eSeschrock 		return (-1);
187999653d4eSeschrock 	}
188099653d4eSeschrock 
188199653d4eSeschrock 	/*
188299653d4eSeschrock 	 * We don't actually check the pool state here.  If it's in fact in
188399653d4eSeschrock 	 * use by another pool, we update this fact on the fly when requested.
188499653d4eSeschrock 	 */
188599653d4eSeschrock 	nvlist_free(label);
188699653d4eSeschrock 	return (0);
188799653d4eSeschrock }
188899653d4eSeschrock 
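/*
 * Free the objects this top-level vdev holds in the MOS: its DTL space map
 * object (if any), each metaslab's space map object, and the metaslab array
 * itself.  Used when the vdev is being removed from the pool.
 */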
188988ecc943SGeorge Wilson void
189088ecc943SGeorge Wilson vdev_remove(vdev_t *vd, uint64_t txg)
189188ecc943SGeorge Wilson {
189288ecc943SGeorge Wilson 	spa_t *spa = vd->vdev_spa;
189388ecc943SGeorge Wilson 	objset_t *mos = spa->spa_meta_objset;
189488ecc943SGeorge Wilson 	dmu_tx_t *tx;
189588ecc943SGeorge Wilson 
189688ecc943SGeorge Wilson 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
189788ecc943SGeorge Wilson 
189888ecc943SGeorge Wilson 	if (vd->vdev_dtl_smo.smo_object) {
189988ecc943SGeorge Wilson 		ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0);
190088ecc943SGeorge Wilson 		(void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
190188ecc943SGeorge Wilson 		vd->vdev_dtl_smo.smo_object = 0;
190288ecc943SGeorge Wilson 	}
190388ecc943SGeorge Wilson 
190488ecc943SGeorge Wilson 	if (vd->vdev_ms != NULL) {
190588ecc943SGeorge Wilson 		for (int m = 0; m < vd->vdev_ms_count; m++) {
190688ecc943SGeorge Wilson 			metaslab_t *msp = vd->vdev_ms[m];
190788ecc943SGeorge Wilson 
190888ecc943SGeorge Wilson 			if (msp == NULL || msp->ms_smo.smo_object == 0)
190988ecc943SGeorge Wilson 				continue;
191088ecc943SGeorge Wilson 
191188ecc943SGeorge Wilson 			ASSERT3U(msp->ms_smo.smo_alloc, ==, 0);
191288ecc943SGeorge Wilson 			(void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
191388ecc943SGeorge Wilson 			msp->ms_smo.smo_object = 0;
191488ecc943SGeorge Wilson 		}
191588ecc943SGeorge Wilson 	}
191688ecc943SGeorge Wilson 
191788ecc943SGeorge Wilson 	if (vd->vdev_ms_array) {
191888ecc943SGeorge Wilson 		(void) dmu_object_free(mos, vd->vdev_ms_array, tx);
191988ecc943SGeorge Wilson 		vd->vdev_ms_array = 0;
192088ecc943SGeorge Wilson 		vd->vdev_ms_shift = 0;
192188ecc943SGeorge Wilson 	}
192288ecc943SGeorge Wilson 	dmu_tx_commit(tx);
192388ecc943SGeorge Wilson }
192488ecc943SGeorge Wilson 
1925fa9e4066Sahrens void
1926fa9e4066Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg)
1927fa9e4066Sahrens {
1928fa9e4066Sahrens 	metaslab_t *msp;
1929fa9e4066Sahrens 
193088ecc943SGeorge Wilson 	ASSERT(!vd->vdev_ishole);
193188ecc943SGeorge Wilson 
1932fa9e4066Sahrens 	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
1933fa9e4066Sahrens 		metaslab_sync_done(msp, txg);
1934fa9e4066Sahrens }
1935fa9e4066Sahrens 
1936fa9e4066Sahrens void
1937fa9e4066Sahrens vdev_sync(vdev_t *vd, uint64_t txg)
1938fa9e4066Sahrens {
1939fa9e4066Sahrens 	spa_t *spa = vd->vdev_spa;
1940fa9e4066Sahrens 	vdev_t *lvd;
1941fa9e4066Sahrens 	metaslab_t *msp;
1942ecc2d604Sbonwick 	dmu_tx_t *tx;
1943fa9e4066Sahrens 
194488ecc943SGeorge Wilson 	ASSERT(!vd->vdev_ishole);
194588ecc943SGeorge Wilson 
1946ecc2d604Sbonwick 	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
1947ecc2d604Sbonwick 		ASSERT(vd == vd->vdev_top);
1948ecc2d604Sbonwick 		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1949ecc2d604Sbonwick 		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
1950ecc2d604Sbonwick 		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
1951ecc2d604Sbonwick 		ASSERT(vd->vdev_ms_array != 0);
1952ecc2d604Sbonwick 		vdev_config_dirty(vd);
1953ecc2d604Sbonwick 		dmu_tx_commit(tx);
1954ecc2d604Sbonwick 	}
1955fa9e4066Sahrens 
195688ecc943SGeorge Wilson 	if (vd->vdev_removing)
195788ecc943SGeorge Wilson 		vdev_remove(vd, txg);
195888ecc943SGeorge Wilson 
1959ecc2d604Sbonwick 	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
1960fa9e4066Sahrens 		metaslab_sync(msp, txg);
1961ecc2d604Sbonwick 		(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
1962ecc2d604Sbonwick 	}
1963fa9e4066Sahrens 
1964fa9e4066Sahrens 	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
1965fa9e4066Sahrens 		vdev_dtl_sync(lvd, txg);
1966fa9e4066Sahrens 
1967fa9e4066Sahrens 	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
1968fa9e4066Sahrens }
1969fa9e4066Sahrens 
1970fa9e4066Sahrens uint64_t
1971fa9e4066Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
1972fa9e4066Sahrens {
1973fa9e4066Sahrens 	return (vd->vdev_ops->vdev_op_asize(vd, psize));
1974fa9e4066Sahrens }
1975fa9e4066Sahrens 
19763d7072f8Seschrock /*
19773d7072f8Seschrock  * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
19783d7072f8Seschrock  * not be opened, and no I/O is attempted.
19793d7072f8Seschrock  */
1980fa9e4066Sahrens int
1981069f55e2SEric Schrock vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
1982fa9e4066Sahrens {
1983c5904d13Seschrock 	vdev_t *vd;
1984fa9e4066Sahrens 
19858f18d1faSGeorge Wilson 	spa_vdev_state_enter(spa, SCL_NONE);
1986fa9e4066Sahrens 
1987c5904d13Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
1988e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
1989e14bb325SJeff Bonwick 
19903d7072f8Seschrock 	if (!vd->vdev_ops->vdev_op_leaf)
1991e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
1992fa9e4066Sahrens 
1993069f55e2SEric Schrock 	/*
1994069f55e2SEric Schrock 	 * We don't directly use the aux state here, but if we do a
1995069f55e2SEric Schrock 	 * vdev_reopen(), we need this value to be present to remember why we
1996069f55e2SEric Schrock 	 * were faulted.
1997069f55e2SEric Schrock 	 */
1998069f55e2SEric Schrock 	vd->vdev_label_aux = aux;
1999069f55e2SEric Schrock 
20003d7072f8Seschrock 	/*
20013d7072f8Seschrock 	 * Faulted state takes precedence over degraded.
20023d7072f8Seschrock 	 */
20033d7072f8Seschrock 	vd->vdev_faulted = 1ULL;
20043d7072f8Seschrock 	vd->vdev_degraded = 0ULL;
2005069f55e2SEric Schrock 	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
20063d7072f8Seschrock 
20073d7072f8Seschrock 	/*
20086988b9faSDavid Marker 	 * If marking the vdev as faulted causes the top-level vdev to become
20093d7072f8Seschrock 	 * unavailable, then back off and simply mark the vdev as degraded
20103d7072f8Seschrock 	 * instead.
20113d7072f8Seschrock 	 */
20128f18d1faSGeorge Wilson 	if (vdev_is_dead(vd->vdev_top) && !vd->vdev_islog &&
20138f18d1faSGeorge Wilson 	    vd->vdev_aux == NULL) {
20143d7072f8Seschrock 		vd->vdev_degraded = 1ULL;
20153d7072f8Seschrock 		vd->vdev_faulted = 0ULL;
20163d7072f8Seschrock 
20173d7072f8Seschrock 		/*
20183d7072f8Seschrock 		 * If we reopen the device and it's not dead, only then do we
20193d7072f8Seschrock 		 * mark it degraded.
20203d7072f8Seschrock 		 */
20213d7072f8Seschrock 		vdev_reopen(vd);
20223d7072f8Seschrock 
2023069f55e2SEric Schrock 		if (vdev_readable(vd))
2024069f55e2SEric Schrock 			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
20253d7072f8Seschrock 	}
20263d7072f8Seschrock 
2027e14bb325SJeff Bonwick 	return (spa_vdev_state_exit(spa, vd, 0));
20283d7072f8Seschrock }
20293d7072f8Seschrock 
20303d7072f8Seschrock /*
20313d7072f8Seschrock  * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
20323d7072f8Seschrock  * user that something is wrong.  The vdev continues to operate as normal as far
20333d7072f8Seschrock  * as I/O is concerned.
20343d7072f8Seschrock  */
20353d7072f8Seschrock int
2036069f55e2SEric Schrock vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
20373d7072f8Seschrock {
2038c5904d13Seschrock 	vdev_t *vd;
20390a4e9518Sgw 
20408f18d1faSGeorge Wilson 	spa_vdev_state_enter(spa, SCL_NONE);
20413d7072f8Seschrock 
2042c5904d13Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2043e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
2044e14bb325SJeff Bonwick 
20450e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
2046e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
20470e34b6a7Sbonwick 
20483d7072f8Seschrock 	/*
20493d7072f8Seschrock 	 * If the vdev is already faulted, then don't do anything.
20503d7072f8Seschrock 	 */
2051e14bb325SJeff Bonwick 	if (vd->vdev_faulted || vd->vdev_degraded)
2052e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, 0));
20533d7072f8Seschrock 
20543d7072f8Seschrock 	vd->vdev_degraded = 1ULL;
20553d7072f8Seschrock 	if (!vdev_is_dead(vd))
20563d7072f8Seschrock 		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
2057069f55e2SEric Schrock 		    aux);
20583d7072f8Seschrock 
2059e14bb325SJeff Bonwick 	return (spa_vdev_state_exit(spa, vd, 0));
20603d7072f8Seschrock }
20613d7072f8Seschrock 
20623d7072f8Seschrock /*
20633d7072f8Seschrock  * Online the given vdev.  If 'unspare' is set, it implies two things.  First,
20643d7072f8Seschrock  * any attached spare device should be detached when the device finishes
20653d7072f8Seschrock  * resilvering.  Second, the online should be treated like a 'test' online case,
20663d7072f8Seschrock  * so no FMA events are generated if the device fails to open.
20673d7072f8Seschrock  */
20683d7072f8Seschrock int
2069e14bb325SJeff Bonwick vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
20703d7072f8Seschrock {
2071573ca77eSGeorge Wilson 	vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
20723d7072f8Seschrock 
20738f18d1faSGeorge Wilson 	spa_vdev_state_enter(spa, SCL_NONE);
20743d7072f8Seschrock 
2075c5904d13Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2076e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
20773d7072f8Seschrock 
20783d7072f8Seschrock 	if (!vd->vdev_ops->vdev_op_leaf)
2079e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
2080fa9e4066Sahrens 
2081573ca77eSGeorge Wilson 	tvd = vd->vdev_top;
2082fa9e4066Sahrens 	vd->vdev_offline = B_FALSE;
2083441d80aaSlling 	vd->vdev_tmpoffline = B_FALSE;
2084e14bb325SJeff Bonwick 	vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
2085e14bb325SJeff Bonwick 	vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
2086573ca77eSGeorge Wilson 
2087573ca77eSGeorge Wilson 	/* XXX - L2ARC 1.0 does not support expansion */
2088573ca77eSGeorge Wilson 	if (!vd->vdev_aux) {
2089573ca77eSGeorge Wilson 		for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2090573ca77eSGeorge Wilson 			pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
2091573ca77eSGeorge Wilson 	}
2092573ca77eSGeorge Wilson 
2093573ca77eSGeorge Wilson 	vdev_reopen(tvd);
20943d7072f8Seschrock 	vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
20953d7072f8Seschrock 
2096573ca77eSGeorge Wilson 	if (!vd->vdev_aux) {
2097573ca77eSGeorge Wilson 		for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
2098573ca77eSGeorge Wilson 			pvd->vdev_expanding = B_FALSE;
2099573ca77eSGeorge Wilson 	}
2100573ca77eSGeorge Wilson 
21013d7072f8Seschrock 	if (newstate)
21023d7072f8Seschrock 		*newstate = vd->vdev_state;
21033d7072f8Seschrock 	if ((flags & ZFS_ONLINE_UNSPARE) &&
21043d7072f8Seschrock 	    !vdev_is_dead(vd) && vd->vdev_parent &&
21053d7072f8Seschrock 	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
21063d7072f8Seschrock 	    vd->vdev_parent->vdev_child[0] == vd)
21073d7072f8Seschrock 		vd->vdev_unspare = B_TRUE;
2108fa9e4066Sahrens 
2109573ca77eSGeorge Wilson 	if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
2110573ca77eSGeorge Wilson 
2111573ca77eSGeorge Wilson 		/* XXX - L2ARC 1.0 does not support expansion */
2112573ca77eSGeorge Wilson 		if (vd->vdev_aux)
2113573ca77eSGeorge Wilson 			return (spa_vdev_state_exit(spa, vd, ENOTSUP));
2114573ca77eSGeorge Wilson 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2115573ca77eSGeorge Wilson 	}
21168ad4d6ddSJeff Bonwick 	return (spa_vdev_state_exit(spa, vd, 0));
2117fa9e4066Sahrens }
2118fa9e4066Sahrens 
21198f18d1faSGeorge Wilson int
21208f18d1faSGeorge Wilson vdev_offline_log(spa_t *spa)
21218f18d1faSGeorge Wilson {
21228f18d1faSGeorge Wilson 	int error = 0;
21238f18d1faSGeorge Wilson 
21248f18d1faSGeorge Wilson 	if ((error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
21258f18d1faSGeorge Wilson 	    NULL, DS_FIND_CHILDREN)) == 0) {
21268f18d1faSGeorge Wilson 
21278f18d1faSGeorge Wilson 		/*
21288f18d1faSGeorge Wilson 		 * We successfully offlined the log device; sync out the
21298f18d1faSGeorge Wilson 		 * current txg so that the "stubby" block can be removed
21308f18d1faSGeorge Wilson 		 * by zil_sync().
21318f18d1faSGeorge Wilson 		 */
21328f18d1faSGeorge Wilson 		txg_wait_synced(spa->spa_dsl_pool, 0);
21338f18d1faSGeorge Wilson 	}
21348f18d1faSGeorge Wilson 	return (error);
21358f18d1faSGeorge Wilson }
21368f18d1faSGeorge Wilson 
2137fa9e4066Sahrens int
21383d7072f8Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
2139fa9e4066Sahrens {
2140e6ca193dSGeorge Wilson 	vdev_t *vd, *tvd;
21418f18d1faSGeorge Wilson 	int error = 0;
21428f18d1faSGeorge Wilson 	uint64_t generation;
21438f18d1faSGeorge Wilson 	metaslab_group_t *mg;
21440a4e9518Sgw 
21458f18d1faSGeorge Wilson top:
21468f18d1faSGeorge Wilson 	spa_vdev_state_enter(spa, SCL_ALLOC);
2147fa9e4066Sahrens 
2148c5904d13Seschrock 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
2149e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, ENODEV));
2150fa9e4066Sahrens 
21510e34b6a7Sbonwick 	if (!vd->vdev_ops->vdev_op_leaf)
2152e14bb325SJeff Bonwick 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
21530e34b6a7Sbonwick 
2154e6ca193dSGeorge Wilson 	tvd = vd->vdev_top;
21558f18d1faSGeorge Wilson 	mg = tvd->vdev_mg;
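	/*
	 * Capture the expected config generation so that, after dropping and
	 * re-acquiring the state lock around the log-device offline below, we
	 * can tell whether the configuration changed underneath us.
	 */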
21568f18d1faSGeorge Wilson 	generation = spa->spa_config_generation + 1;
2157e6ca193dSGeorge Wilson 
2158fa9e4066Sahrens 	/*
2159ecc2d604Sbonwick 	 * If the device isn't already offline, try to offline it.
2160fa9e4066Sahrens 	 */
2161ecc2d604Sbonwick 	if (!vd->vdev_offline) {
2162ecc2d604Sbonwick 		/*
21638ad4d6ddSJeff Bonwick 		 * If this device has the only valid copy of some data,
2164e6ca193dSGeorge Wilson 		 * don't allow it to be offlined. Log devices are always
2165e6ca193dSGeorge Wilson 		 * expendable.
2166ecc2d604Sbonwick 		 */
2167e6ca193dSGeorge Wilson 		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2168e6ca193dSGeorge Wilson 		    vdev_dtl_required(vd))
2169e14bb325SJeff Bonwick 			return (spa_vdev_state_exit(spa, NULL, EBUSY));
2170fa9e4066Sahrens 
21718f18d1faSGeorge Wilson 		/*
2172*b24ab676SJeff Bonwick 		 * If the top-level is a slog and it has had allocations
2173*b24ab676SJeff Bonwick 		 * then proceed.  We check that the vdev's metaslab group
2174*b24ab676SJeff Bonwick 		 * is not NULL since it's possible that we may have just
2175*b24ab676SJeff Bonwick 		 * added this vdev but not yet initialized its metaslabs.
21768f18d1faSGeorge Wilson 		 */
21778f18d1faSGeorge Wilson 		if (tvd->vdev_islog && mg != NULL) {
21788f18d1faSGeorge Wilson 			/*
21798f18d1faSGeorge Wilson 			 * Prevent any future allocations.
21808f18d1faSGeorge Wilson 			 */
21818f18d1faSGeorge Wilson 			metaslab_class_remove(spa->spa_log_class, mg);
21828f18d1faSGeorge Wilson 			(void) spa_vdev_state_exit(spa, vd, 0);
21838f18d1faSGeorge Wilson 
21848f18d1faSGeorge Wilson 			error = vdev_offline_log(spa);
21858f18d1faSGeorge Wilson 
21868f18d1faSGeorge Wilson 			spa_vdev_state_enter(spa, SCL_ALLOC);
21878f18d1faSGeorge Wilson 
21888f18d1faSGeorge Wilson 			/*
21898f18d1faSGeorge Wilson 			 * Check to see if the config has changed.
21908f18d1faSGeorge Wilson 			 */
21918f18d1faSGeorge Wilson 			if (error || generation != spa->spa_config_generation) {
21928f18d1faSGeorge Wilson 				metaslab_class_add(spa->spa_log_class, mg);
21938f18d1faSGeorge Wilson 				if (error)
21948f18d1faSGeorge Wilson 					return (spa_vdev_state_exit(spa,
21958f18d1faSGeorge Wilson 					    vd, error));
21968f18d1faSGeorge Wilson 				(void) spa_vdev_state_exit(spa, vd, 0);
21978f18d1faSGeorge Wilson 				goto top;
21988f18d1faSGeorge Wilson 			}
21998f18d1faSGeorge Wilson 			ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0);
22008f18d1faSGeorge Wilson 		}
22018f18d1faSGeorge Wilson 
2202ecc2d604Sbonwick 		/*
2203ecc2d604Sbonwick 		 * Offline this device and reopen its top-level vdev.
2204e6ca193dSGeorge Wilson 		 * If the top-level vdev is a log device then just offline
2205e6ca193dSGeorge Wilson 		 * it. Otherwise, if this action results in the top-level
2206e6ca193dSGeorge Wilson 		 * vdev becoming unusable, undo it and fail the request.
2207ecc2d604Sbonwick 		 */
2208ecc2d604Sbonwick 		vd->vdev_offline = B_TRUE;
2209e6ca193dSGeorge Wilson 		vdev_reopen(tvd);
2210e6ca193dSGeorge Wilson 
2211e6ca193dSGeorge Wilson 		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
2212e6ca193dSGeorge Wilson 		    vdev_is_dead(tvd)) {
2213ecc2d604Sbonwick 			vd->vdev_offline = B_FALSE;
2214e6ca193dSGeorge Wilson 			vdev_reopen(tvd);
2215e14bb325SJeff Bonwick 			return (spa_vdev_state_exit(spa, NULL, EBUSY));
2216ecc2d604Sbonwick 		}
22178f18d1faSGeorge Wilson 
22188f18d1faSGeorge Wilson 		/*
22198f18d1faSGeorge Wilson 		 * Add the device back into the metaslab rotor so that
22208f18d1faSGeorge Wilson 		 * once we online the device it's open for business.
22218f18d1faSGeorge Wilson 		 */
22228f18d1faSGeorge Wilson 		if (tvd->vdev_islog && mg != NULL)
22238f18d1faSGeorge Wilson 			metaslab_class_add(spa->spa_log_class, mg);
2224fa9e4066Sahrens 	}
2225fa9e4066Sahrens 
2226e14bb325SJeff Bonwick 	vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
2227ecc2d604Sbonwick 
22288f18d1faSGeorge Wilson 	return (spa_vdev_state_exit(spa, vd, 0));
2229fa9e4066Sahrens }
2230fa9e4066Sahrens 
2231ea8dc4b6Seschrock /*
2232ea8dc4b6Seschrock  * Clear the error counts associated with this vdev.  Unlike vdev_online() and
2233ea8dc4b6Seschrock  * vdev_offline(), we assume the spa config is locked.  We also clear all
2234ea8dc4b6Seschrock  * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
2235ea8dc4b6Seschrock  */
2236ea8dc4b6Seschrock void
2237e14bb325SJeff Bonwick vdev_clear(spa_t *spa, vdev_t *vd)
2238fa9e4066Sahrens {
2239e14bb325SJeff Bonwick 	vdev_t *rvd = spa->spa_root_vdev;
2240e14bb325SJeff Bonwick 
2241e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2242fa9e4066Sahrens 
2243ea8dc4b6Seschrock 	if (vd == NULL)
2244e14bb325SJeff Bonwick 		vd = rvd;
2245fa9e4066Sahrens 
2246ea8dc4b6Seschrock 	vd->vdev_stat.vs_read_errors = 0;
2247ea8dc4b6Seschrock 	vd->vdev_stat.vs_write_errors = 0;
2248ea8dc4b6Seschrock 	vd->vdev_stat.vs_checksum_errors = 0;
2249fa9e4066Sahrens 
2250e14bb325SJeff Bonwick 	for (int c = 0; c < vd->vdev_children; c++)
2251e14bb325SJeff Bonwick 		vdev_clear(spa, vd->vdev_child[c]);
22523d7072f8Seschrock 
22533d7072f8Seschrock 	/*
22548a79c1b5Sek 	 * If we're in the FAULTED state or have experienced failed I/O, then
22558a79c1b5Sek 	 * clear the persistent state and attempt to reopen the device.  We
22568a79c1b5Sek 	 * also mark the vdev config dirty, so that the new faulted state is
22578a79c1b5Sek 	 * written out to disk.
22583d7072f8Seschrock 	 */
2259e14bb325SJeff Bonwick 	if (vd->vdev_faulted || vd->vdev_degraded ||
2260e14bb325SJeff Bonwick 	    !vdev_readable(vd) || !vdev_writeable(vd)) {
22618a79c1b5Sek 
2262096d22d4SEric Schrock 		/*
2263096d22d4SEric Schrock 		 * When reopening in response to a clear event, it may be due to
2264096d22d4SEric Schrock 		 * a fmadm repair request.  In this case, if the device is
2265096d22d4SEric Schrock 		 * still broken, we want to still post the ereport again.
2266096d22d4SEric Schrock 		 */
2267096d22d4SEric Schrock 		vd->vdev_forcefault = B_TRUE;
2268096d22d4SEric Schrock 
22693d7072f8Seschrock 		vd->vdev_faulted = vd->vdev_degraded = 0;
2270e14bb325SJeff Bonwick 		vd->vdev_cant_read = B_FALSE;
2271e14bb325SJeff Bonwick 		vd->vdev_cant_write = B_FALSE;
2272e14bb325SJeff Bonwick 
22733d7072f8Seschrock 		vdev_reopen(vd);
22743d7072f8Seschrock 
2275096d22d4SEric Schrock 		vd->vdev_forcefault = B_FALSE;
2276096d22d4SEric Schrock 
2277e14bb325SJeff Bonwick 		if (vd != rvd)
2278e14bb325SJeff Bonwick 			vdev_state_dirty(vd->vdev_top);
2279e14bb325SJeff Bonwick 
2280e14bb325SJeff Bonwick 		if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
2281bb8b5132Sek 			spa_async_request(spa, SPA_ASYNC_RESILVER);
22823d7072f8Seschrock 
22833d7072f8Seschrock 		spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
22843d7072f8Seschrock 	}
2285096d22d4SEric Schrock 
2286096d22d4SEric Schrock 	/*
2287096d22d4SEric Schrock 	 * When clearing a FMA-diagnosed fault, we always want to
2288096d22d4SEric Schrock 	 * unspare the device, as we assume that the original spare was
2289096d22d4SEric Schrock 	 * done in response to the FMA fault.
2290096d22d4SEric Schrock 	 */
2291096d22d4SEric Schrock 	if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
2292096d22d4SEric Schrock 	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
2293096d22d4SEric Schrock 	    vd->vdev_parent->vdev_child[0] == vd)
2294096d22d4SEric Schrock 		vd->vdev_unspare = B_TRUE;
2295fa9e4066Sahrens }
2296fa9e4066Sahrens 
2297e14bb325SJeff Bonwick boolean_t
2298e14bb325SJeff Bonwick vdev_is_dead(vdev_t *vd)
22990a4e9518Sgw {
230088ecc943SGeorge Wilson 	/*
230188ecc943SGeorge Wilson 	 * Holes and missing devices are always considered "dead".
230288ecc943SGeorge Wilson 	 * This simplifies the code since we don't have to check for
230388ecc943SGeorge Wilson 	 * these types of devices in the various code paths.
230488ecc943SGeorge Wilson 	 * Instead we rely on the fact that we skip over dead devices
230588ecc943SGeorge Wilson 	 * before issuing I/O to them.
230688ecc943SGeorge Wilson 	 */
230788ecc943SGeorge Wilson 	return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
230888ecc943SGeorge Wilson 	    vd->vdev_ops == &vdev_missing_ops);
23090a4e9518Sgw }
23100a4e9518Sgw 
2311e14bb325SJeff Bonwick boolean_t
2312e14bb325SJeff Bonwick vdev_readable(vdev_t *vd)
23130a4e9518Sgw {
2314e14bb325SJeff Bonwick 	return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
23150a4e9518Sgw }
23160a4e9518Sgw 
2317e14bb325SJeff Bonwick boolean_t
2318e14bb325SJeff Bonwick vdev_writeable(vdev_t *vd)
2319fa9e4066Sahrens {
2320e14bb325SJeff Bonwick 	return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
2321fa9e4066Sahrens }
2322fa9e4066Sahrens 
2323a31e6787SGeorge Wilson boolean_t
2324a31e6787SGeorge Wilson vdev_allocatable(vdev_t *vd)
2325a31e6787SGeorge Wilson {
23268ad4d6ddSJeff Bonwick 	uint64_t state = vd->vdev_state;
23278ad4d6ddSJeff Bonwick 
2328a31e6787SGeorge Wilson 	/*
23298ad4d6ddSJeff Bonwick 	 * We currently allow allocations from vdevs which may be in the
2330a31e6787SGeorge Wilson 	 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
2331a31e6787SGeorge Wilson 	 * fails to reopen then we'll catch it later when we're holding
23328ad4d6ddSJeff Bonwick 	 * the proper locks.  Note that we have to get the vdev state
23338ad4d6ddSJeff Bonwick 	 * in a local variable because although it changes atomically,
23348ad4d6ddSJeff Bonwick 	 * we're asking two separate questions about it.
2335a31e6787SGeorge Wilson 	 */
23368ad4d6ddSJeff Bonwick 	return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
233788ecc943SGeorge Wilson 	    !vd->vdev_cant_write && !vd->vdev_ishole && !vd->vdev_removing);
2338a31e6787SGeorge Wilson }
2339a31e6787SGeorge Wilson 
2340e14bb325SJeff Bonwick boolean_t
2341e14bb325SJeff Bonwick vdev_accessible(vdev_t *vd, zio_t *zio)
2342fa9e4066Sahrens {
2343e14bb325SJeff Bonwick 	ASSERT(zio->io_vd == vd);
2344fa9e4066Sahrens 
2345e14bb325SJeff Bonwick 	if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
2346e14bb325SJeff Bonwick 		return (B_FALSE);
2347fa9e4066Sahrens 
2348e14bb325SJeff Bonwick 	if (zio->io_type == ZIO_TYPE_READ)
2349e14bb325SJeff Bonwick 		return (!vd->vdev_cant_read);
2350fa9e4066Sahrens 
2351e14bb325SJeff Bonwick 	if (zio->io_type == ZIO_TYPE_WRITE)
2352e14bb325SJeff Bonwick 		return (!vd->vdev_cant_write);
2353fa9e4066Sahrens 
2354e14bb325SJeff Bonwick 	return (B_TRUE);
2355fa9e4066Sahrens }
2356fa9e4066Sahrens 
2357fa9e4066Sahrens /*
2358fa9e4066Sahrens  * Get statistics for the given vdev.
2359fa9e4066Sahrens  */
2360fa9e4066Sahrens void
2361fa9e4066Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
2362fa9e4066Sahrens {
2363fa9e4066Sahrens 	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
2364fa9e4066Sahrens 
2365fa9e4066Sahrens 	mutex_enter(&vd->vdev_stat_lock);
2366fa9e4066Sahrens 	bcopy(&vd->vdev_stat, vs, sizeof (*vs));
2367088f3894Sahrens 	vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors;
2368fa9e4066Sahrens 	vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
2369fa9e4066Sahrens 	vs->vs_state = vd->vdev_state;
2370573ca77eSGeorge Wilson 	vs->vs_rsize = vdev_get_min_asize(vd);
2371573ca77eSGeorge Wilson 	if (vd->vdev_ops->vdev_op_leaf)
2372573ca77eSGeorge Wilson 		vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
2373fa9e4066Sahrens 	mutex_exit(&vd->vdev_stat_lock);
2374fa9e4066Sahrens 
2375fa9e4066Sahrens 	/*
2376fa9e4066Sahrens 	 * If we're getting stats on the root vdev, aggregate the I/O counts
2377fa9e4066Sahrens 	 * over all top-level vdevs (i.e. the direct children of the root).
2378fa9e4066Sahrens 	 */
2379fa9e4066Sahrens 	if (vd == rvd) {
2380e14bb325SJeff Bonwick 		for (int c = 0; c < rvd->vdev_children; c++) {
2381fa9e4066Sahrens 			vdev_t *cvd = rvd->vdev_child[c];
2382fa9e4066Sahrens 			vdev_stat_t *cvs = &cvd->vdev_stat;
2383fa9e4066Sahrens 
2384fa9e4066Sahrens 			mutex_enter(&vd->vdev_stat_lock);
2385e14bb325SJeff Bonwick 			for (int t = 0; t < ZIO_TYPES; t++) {
2386fa9e4066Sahrens 				vs->vs_ops[t] += cvs->vs_ops[t];
2387fa9e4066Sahrens 				vs->vs_bytes[t] += cvs->vs_bytes[t];
2388fa9e4066Sahrens 			}
2389fa9e4066Sahrens 			vs->vs_scrub_examined += cvs->vs_scrub_examined;
2390fa9e4066Sahrens 			mutex_exit(&vd->vdev_stat_lock);
2391fa9e4066Sahrens 		}
2392fa9e4066Sahrens 	}
2393fa9e4066Sahrens }
2394fa9e4066Sahrens 
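/*
 * Editorial sketch -- not part of the original file.  A typical consumer
 * snapshots the pool-wide counters by asking the root vdev; the copy is
 * made under vdev_stat_lock inside vdev_get_stats(), so no extra locking
 * is assumed here.
 */
#if 0
static void
vdev_example_log_root_stats(spa_t *spa)
{
	vdev_stat_t vs;

	vdev_get_stats(spa->spa_root_vdev, &vs);
	cmn_err(CE_NOTE, "root vdev: %llu read ops, %llu write ops",
	    (u_longlong_t)vs.vs_ops[ZIO_TYPE_READ],
	    (u_longlong_t)vs.vs_ops[ZIO_TYPE_WRITE]);
}
#endif
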
2395fa94a07fSbrendan void
2396fa94a07fSbrendan vdev_clear_stats(vdev_t *vd)
2397fa94a07fSbrendan {
2398fa94a07fSbrendan 	mutex_enter(&vd->vdev_stat_lock);
2399fa94a07fSbrendan 	vd->vdev_stat.vs_space = 0;
2400fa94a07fSbrendan 	vd->vdev_stat.vs_dspace = 0;
2401fa94a07fSbrendan 	vd->vdev_stat.vs_alloc = 0;
2402fa94a07fSbrendan 	mutex_exit(&vd->vdev_stat_lock);
2403fa94a07fSbrendan }
2404fa94a07fSbrendan 
2405fa9e4066Sahrens void
2406e14bb325SJeff Bonwick vdev_stat_update(zio_t *zio, uint64_t psize)
2407fa9e4066Sahrens {
24088ad4d6ddSJeff Bonwick 	spa_t *spa = zio->io_spa;
24098ad4d6ddSJeff Bonwick 	vdev_t *rvd = spa->spa_root_vdev;
2410e14bb325SJeff Bonwick 	vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
2411fa9e4066Sahrens 	vdev_t *pvd;
2412fa9e4066Sahrens 	uint64_t txg = zio->io_txg;
2413fa9e4066Sahrens 	vdev_stat_t *vs = &vd->vdev_stat;
2414fa9e4066Sahrens 	zio_type_t type = zio->io_type;
2415fa9e4066Sahrens 	int flags = zio->io_flags;
2416fa9e4066Sahrens 
2417e14bb325SJeff Bonwick 	/*
2418e14bb325SJeff Bonwick 	 * If this i/o is a gang leader, it didn't do any actual work.
2419e14bb325SJeff Bonwick 	 */
2420e14bb325SJeff Bonwick 	if (zio->io_gang_tree)
2421e14bb325SJeff Bonwick 		return;
2422e14bb325SJeff Bonwick 
2423fa9e4066Sahrens 	if (zio->io_error == 0) {
2424e14bb325SJeff Bonwick 		/*
2425e14bb325SJeff Bonwick 		 * If this is a root i/o, don't count it -- we've already
2426e14bb325SJeff Bonwick 		 * counted the top-level vdevs, and vdev_get_stats() will
2427e14bb325SJeff Bonwick 		 * aggregate them when asked.  This reduces contention on
2428e14bb325SJeff Bonwick 		 * the root vdev_stat_lock and implicitly handles blocks
2429e14bb325SJeff Bonwick 		 * that compress away to holes, for which there is no i/o.
2430e14bb325SJeff Bonwick 		 * (Holes never create vdev children, so all the counters
2431e14bb325SJeff Bonwick 		 * remain zero, which is what we want.)
2432e14bb325SJeff Bonwick 		 *
2433e14bb325SJeff Bonwick 		 * Note: this only applies to successful i/o (io_error == 0)
2434e14bb325SJeff Bonwick 		 * because unlike i/o counts, errors are not additive.
2435e14bb325SJeff Bonwick 		 * When reading a ditto block, for example, failure of
2436e14bb325SJeff Bonwick 		 * one top-level vdev does not imply a root-level error.
2437e14bb325SJeff Bonwick 		 */
2438e14bb325SJeff Bonwick 		if (vd == rvd)
2439e14bb325SJeff Bonwick 			return;
2440e14bb325SJeff Bonwick 
2441e14bb325SJeff Bonwick 		ASSERT(vd == zio->io_vd);
24428ad4d6ddSJeff Bonwick 
24438ad4d6ddSJeff Bonwick 		if (flags & ZIO_FLAG_IO_BYPASS)
24448ad4d6ddSJeff Bonwick 			return;
24458ad4d6ddSJeff Bonwick 
24468ad4d6ddSJeff Bonwick 		mutex_enter(&vd->vdev_stat_lock);
24478ad4d6ddSJeff Bonwick 
2448e14bb325SJeff Bonwick 		if (flags & ZIO_FLAG_IO_REPAIR) {
2449d80c45e0Sbonwick 			if (flags & ZIO_FLAG_SCRUB_THREAD)
2450e14bb325SJeff Bonwick 				vs->vs_scrub_repaired += psize;
24518ad4d6ddSJeff Bonwick 			if (flags & ZIO_FLAG_SELF_HEAL)
2452e14bb325SJeff Bonwick 				vs->vs_self_healed += psize;
2453fa9e4066Sahrens 		}
24548ad4d6ddSJeff Bonwick 
24558ad4d6ddSJeff Bonwick 		vs->vs_ops[type]++;
24568ad4d6ddSJeff Bonwick 		vs->vs_bytes[type] += psize;
24578ad4d6ddSJeff Bonwick 
24588ad4d6ddSJeff Bonwick 		mutex_exit(&vd->vdev_stat_lock);
2459fa9e4066Sahrens 		return;
2460fa9e4066Sahrens 	}
2461fa9e4066Sahrens 
2462fa9e4066Sahrens 	if (flags & ZIO_FLAG_SPECULATIVE)
2463fa9e4066Sahrens 		return;
2464fa9e4066Sahrens 
24658956713aSEric Schrock 	/*
24668956713aSEric Schrock 	 * If this is an I/O error that is going to be retried, then ignore the
24678956713aSEric Schrock 	 * error.  Otherwise, the user may interpret B_FAILFAST I/O errors as
24688956713aSEric Schrock 	 * hard errors, when in reality they can happen for any number of
24698956713aSEric Schrock 	 * innocuous reasons (bus resets, MPxIO link failure, etc).
24708956713aSEric Schrock 	 */
24718956713aSEric Schrock 	if (zio->io_error == EIO &&
24728956713aSEric Schrock 	    !(zio->io_flags & ZIO_FLAG_IO_RETRY))
24738956713aSEric Schrock 		return;
24748956713aSEric Schrock 
24758f18d1faSGeorge Wilson 	/*
24768f18d1faSGeorge Wilson 	 * Intent log writes won't propagate their errors to the root
24778f18d1faSGeorge Wilson 	 * I/O, so don't mark these types of failures as pool-level
24788f18d1faSGeorge Wilson 	 * errors.
24798f18d1faSGeorge Wilson 	 */
24808f18d1faSGeorge Wilson 	if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
24818f18d1faSGeorge Wilson 		return;
24828f18d1faSGeorge Wilson 
2483e14bb325SJeff Bonwick 	mutex_enter(&vd->vdev_stat_lock);
2484b47119fdSGeorge Wilson 	if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
2485e14bb325SJeff Bonwick 		if (zio->io_error == ECKSUM)
2486e14bb325SJeff Bonwick 			vs->vs_checksum_errors++;
2487e14bb325SJeff Bonwick 		else
2488e14bb325SJeff Bonwick 			vs->vs_read_errors++;
2489fa9e4066Sahrens 	}
2490b47119fdSGeorge Wilson 	if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
2491e14bb325SJeff Bonwick 		vs->vs_write_errors++;
2492e14bb325SJeff Bonwick 	mutex_exit(&vd->vdev_stat_lock);
2493fa9e4066Sahrens 
24948ad4d6ddSJeff Bonwick 	if (type == ZIO_TYPE_WRITE && txg != 0 &&
24958ad4d6ddSJeff Bonwick 	    (!(flags & ZIO_FLAG_IO_REPAIR) ||
2496*b24ab676SJeff Bonwick 	    (flags & ZIO_FLAG_SCRUB_THREAD) ||
2497*b24ab676SJeff Bonwick 	    spa->spa_claiming)) {
24988ad4d6ddSJeff Bonwick 		/*
2499*b24ab676SJeff Bonwick 		 * This is either a normal write (not a repair), or it's
2500*b24ab676SJeff Bonwick 		 * a repair induced by the scrub thread, or it's a repair
2501*b24ab676SJeff Bonwick 		 * made by zil_claim() during spa_load() in the first txg.
2502*b24ab676SJeff Bonwick 		 * In the normal case, we commit the DTL change in the same
2503*b24ab676SJeff Bonwick 		 * txg as the block was born.  In the scrub-induced repair
2504*b24ab676SJeff Bonwick 		 * case, we know that scrubs run in first-pass syncing context,
2505*b24ab676SJeff Bonwick 		 * so we commit the DTL change in spa_syncing_txg(spa).
2506*b24ab676SJeff Bonwick 		 * In the zil_claim() case, we commit in spa_first_txg(spa).
25078ad4d6ddSJeff Bonwick 		 *
25088ad4d6ddSJeff Bonwick 		 * We currently do not make DTL entries for failed spontaneous
25098ad4d6ddSJeff Bonwick 		 * self-healing writes triggered by normal (non-scrubbing)
25108ad4d6ddSJeff Bonwick 		 * reads, because we have no transactional context in which to
25118ad4d6ddSJeff Bonwick 		 * do so -- and it's not clear that it'd be desirable anyway.
25128ad4d6ddSJeff Bonwick 		 */
25138ad4d6ddSJeff Bonwick 		if (vd->vdev_ops->vdev_op_leaf) {
25148ad4d6ddSJeff Bonwick 			uint64_t commit_txg = txg;
25158ad4d6ddSJeff Bonwick 			if (flags & ZIO_FLAG_SCRUB_THREAD) {
25168ad4d6ddSJeff Bonwick 				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
25178ad4d6ddSJeff Bonwick 				ASSERT(spa_sync_pass(spa) == 1);
25188ad4d6ddSJeff Bonwick 				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
2519*b24ab676SJeff Bonwick 				commit_txg = spa_syncing_txg(spa);
2520*b24ab676SJeff Bonwick 			} else if (spa->spa_claiming) {
2521*b24ab676SJeff Bonwick 				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
2522*b24ab676SJeff Bonwick 				commit_txg = spa_first_txg(spa);
25238ad4d6ddSJeff Bonwick 			}
2524*b24ab676SJeff Bonwick 			ASSERT(commit_txg >= spa_syncing_txg(spa));
25258ad4d6ddSJeff Bonwick 			if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
2526fa9e4066Sahrens 				return;
25278ad4d6ddSJeff Bonwick 			for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
25288ad4d6ddSJeff Bonwick 				vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
25298ad4d6ddSJeff Bonwick 			vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
2530fa9e4066Sahrens 		}
25318ad4d6ddSJeff Bonwick 		if (vd != rvd)
25328ad4d6ddSJeff Bonwick 			vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
2533fa9e4066Sahrens 	}
2534fa9e4066Sahrens }
2535fa9e4066Sahrens 
2536fa9e4066Sahrens void
2537fa9e4066Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
2538fa9e4066Sahrens {
2539fa9e4066Sahrens 	vdev_stat_t *vs = &vd->vdev_stat;
2540fa9e4066Sahrens 
2541573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++)
2542fa9e4066Sahrens 		vdev_scrub_stat_update(vd->vdev_child[c], type, complete);
2543fa9e4066Sahrens 
2544fa9e4066Sahrens 	mutex_enter(&vd->vdev_stat_lock);
2545fa9e4066Sahrens 
2546fa9e4066Sahrens 	if (type == POOL_SCRUB_NONE) {
2547fa9e4066Sahrens 		/*
2548fa9e4066Sahrens 		 * Update completion and end time.  Leave everything else alone
2549fa9e4066Sahrens 		 * so we can report what happened during the previous scrub.
2550fa9e4066Sahrens 		 */
2551fa9e4066Sahrens 		vs->vs_scrub_complete = complete;
2552fa9e4066Sahrens 		vs->vs_scrub_end = gethrestime_sec();
2553fa9e4066Sahrens 	} else {
2554fa9e4066Sahrens 		vs->vs_scrub_type = type;
2555fa9e4066Sahrens 		vs->vs_scrub_complete = 0;
2556fa9e4066Sahrens 		vs->vs_scrub_examined = 0;
2557fa9e4066Sahrens 		vs->vs_scrub_repaired = 0;
2558fa9e4066Sahrens 		vs->vs_scrub_start = gethrestime_sec();
2559fa9e4066Sahrens 		vs->vs_scrub_end = 0;
2560fa9e4066Sahrens 	}
2561fa9e4066Sahrens 
2562fa9e4066Sahrens 	mutex_exit(&vd->vdev_stat_lock);
2563fa9e4066Sahrens }
2564fa9e4066Sahrens 
2565fa9e4066Sahrens /*
2566*b24ab676SJeff Bonwick  * Update the in-core space usage stats for this vdev, its metaslab class,
2567*b24ab676SJeff Bonwick  * and the root vdev.
2568fa9e4066Sahrens  */
2569fa9e4066Sahrens void
2570*b24ab676SJeff Bonwick vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
2571*b24ab676SJeff Bonwick     int64_t space_delta)
2572fa9e4066Sahrens {
257399653d4eSeschrock 	int64_t dspace_delta = space_delta;
25748654d025Sperrin 	spa_t *spa = vd->vdev_spa;
25758654d025Sperrin 	vdev_t *rvd = spa->spa_root_vdev;
2576*b24ab676SJeff Bonwick 	metaslab_group_t *mg = vd->vdev_mg;
2577*b24ab676SJeff Bonwick 	metaslab_class_t *mc = mg ? mg->mg_class : NULL;
2578fa9e4066Sahrens 
25798654d025Sperrin 	ASSERT(vd == vd->vdev_top);
258099653d4eSeschrock 
25818654d025Sperrin 	/*
25828654d025Sperrin 	 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
25838654d025Sperrin 	 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
25848654d025Sperrin 	 * because the root vdev's psize-to-asize is simply the max of its
25858654d025Sperrin 	 * childrens', thus not accurate enough for us.
25868654d025Sperrin 	 * children's, thus not accurate enough for us.
25878654d025Sperrin 	ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
2588e6ca193dSGeorge Wilson 	ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
25898654d025Sperrin 	dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
25908654d025Sperrin 	    vd->vdev_deflate_ratio;
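	/*
	 * Editorial note (illustrative, relies on how vdev_deflate_ratio is
	 * computed elsewhere in this file): for a plain disk or mirror the
	 * ratio works out so that dspace_delta == space_delta, while for a
	 * RAID-Z vdev whose asize is, say, 1.25x its psize, dspace_delta
	 * comes to roughly 0.8x space_delta -- deflated space is accounted
	 * in "logical" rather than raw allocated bytes.
	 */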
25918654d025Sperrin 
25928654d025Sperrin 	mutex_enter(&vd->vdev_stat_lock);
25938654d025Sperrin 	vd->vdev_stat.vs_alloc += alloc_delta;
2594*b24ab676SJeff Bonwick 	vd->vdev_stat.vs_space += space_delta;
25958654d025Sperrin 	vd->vdev_stat.vs_dspace += dspace_delta;
25968654d025Sperrin 	mutex_exit(&vd->vdev_stat_lock);
25978654d025Sperrin 
2598*b24ab676SJeff Bonwick 	if (mc == spa_normal_class(spa)) {
2599fa94a07fSbrendan 		mutex_enter(&rvd->vdev_stat_lock);
2600fa94a07fSbrendan 		rvd->vdev_stat.vs_alloc += alloc_delta;
2601*b24ab676SJeff Bonwick 		rvd->vdev_stat.vs_space += space_delta;
2602fa94a07fSbrendan 		rvd->vdev_stat.vs_dspace += dspace_delta;
2603fa94a07fSbrendan 		mutex_exit(&rvd->vdev_stat_lock);
2604fa94a07fSbrendan 	}
2605*b24ab676SJeff Bonwick 
2606*b24ab676SJeff Bonwick 	if (mc != NULL) {
2607*b24ab676SJeff Bonwick 		ASSERT(rvd == vd->vdev_parent);
2608*b24ab676SJeff Bonwick 		ASSERT(vd->vdev_ms_count != 0);
2609*b24ab676SJeff Bonwick 
2610*b24ab676SJeff Bonwick 		metaslab_class_space_update(mc,
2611*b24ab676SJeff Bonwick 		    alloc_delta, defer_delta, space_delta, dspace_delta);
2612*b24ab676SJeff Bonwick 	}
2613fa9e4066Sahrens }
2614fa9e4066Sahrens 
2615fa9e4066Sahrens /*
2616fa9e4066Sahrens  * Mark a top-level vdev's config as dirty, placing it on the dirty list
2617fa9e4066Sahrens  * so that it will be written out next time the vdev configuration is synced.
2618fa9e4066Sahrens  * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
2619fa9e4066Sahrens  */
2620fa9e4066Sahrens void
2621fa9e4066Sahrens vdev_config_dirty(vdev_t *vd)
2622fa9e4066Sahrens {
2623fa9e4066Sahrens 	spa_t *spa = vd->vdev_spa;
2624fa9e4066Sahrens 	vdev_t *rvd = spa->spa_root_vdev;
2625fa9e4066Sahrens 	int c;
2626fa9e4066Sahrens 
2627c5904d13Seschrock 	/*
26286809eb4eSEric Schrock 	 * If this is an aux vdev (as with l2cache and spare devices), then we
26296809eb4eSEric Schrock 	 * update the vdev config manually and set the sync flag.
2630c5904d13Seschrock 	 */
2631c5904d13Seschrock 	if (vd->vdev_aux != NULL) {
2632c5904d13Seschrock 		spa_aux_vdev_t *sav = vd->vdev_aux;
2633c5904d13Seschrock 		nvlist_t **aux;
2634c5904d13Seschrock 		uint_t naux;
2635c5904d13Seschrock 
2636c5904d13Seschrock 		for (c = 0; c < sav->sav_count; c++) {
2637c5904d13Seschrock 			if (sav->sav_vdevs[c] == vd)
2638c5904d13Seschrock 				break;
2639c5904d13Seschrock 		}
2640c5904d13Seschrock 
2641e14bb325SJeff Bonwick 		if (c == sav->sav_count) {
2642e14bb325SJeff Bonwick 			/*
2643e14bb325SJeff Bonwick 			 * We're being removed.  There's nothing more to do.
2644e14bb325SJeff Bonwick 			 */
2645e14bb325SJeff Bonwick 			ASSERT(sav->sav_sync == B_TRUE);
2646e14bb325SJeff Bonwick 			return;
2647e14bb325SJeff Bonwick 		}
2648e14bb325SJeff Bonwick 
2649c5904d13Seschrock 		sav->sav_sync = B_TRUE;
2650c5904d13Seschrock 
26516809eb4eSEric Schrock 		if (nvlist_lookup_nvlist_array(sav->sav_config,
26526809eb4eSEric Schrock 		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
26536809eb4eSEric Schrock 			VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
26546809eb4eSEric Schrock 			    ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
26556809eb4eSEric Schrock 		}
2656c5904d13Seschrock 
2657c5904d13Seschrock 		ASSERT(c < naux);
2658c5904d13Seschrock 
2659c5904d13Seschrock 		/*
2660c5904d13Seschrock 		 * Setting the nvlist in the middle of the array is a little
2661c5904d13Seschrock 		 * sketchy, but it will work.
2662c5904d13Seschrock 		 */
2663c5904d13Seschrock 		nvlist_free(aux[c]);
2664c5904d13Seschrock 		aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);
2665c5904d13Seschrock 
2666c5904d13Seschrock 		return;
2667c5904d13Seschrock 	}
2668c5904d13Seschrock 
26695dabedeeSbonwick 	/*
2670e14bb325SJeff Bonwick 	 * The dirty list is protected by the SCL_CONFIG lock.  The caller
2671e14bb325SJeff Bonwick 	 * must either hold SCL_CONFIG as writer, or must be the sync thread
2672e14bb325SJeff Bonwick 	 * (which holds SCL_CONFIG as reader).  There's only one sync thread,
26735dabedeeSbonwick 	 * so this is sufficient to ensure mutual exclusion.
26745dabedeeSbonwick 	 */
2675e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
2676e14bb325SJeff Bonwick 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2677e14bb325SJeff Bonwick 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
26785dabedeeSbonwick 
2679fa9e4066Sahrens 	if (vd == rvd) {
2680fa9e4066Sahrens 		for (c = 0; c < rvd->vdev_children; c++)
2681fa9e4066Sahrens 			vdev_config_dirty(rvd->vdev_child[c]);
2682fa9e4066Sahrens 	} else {
2683fa9e4066Sahrens 		ASSERT(vd == vd->vdev_top);
2684fa9e4066Sahrens 
268588ecc943SGeorge Wilson 		if (!list_link_active(&vd->vdev_config_dirty_node) &&
268688ecc943SGeorge Wilson 		    !vd->vdev_ishole)
2687e14bb325SJeff Bonwick 			list_insert_head(&spa->spa_config_dirty_list, vd);
2688fa9e4066Sahrens 	}
2689fa9e4066Sahrens }
2690fa9e4066Sahrens 
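/*
 * Editorial sketch -- not part of the original file.  A typical
 * administrative path changes a top-level vdev and then marks its config
 * dirty so the next config sync writes it out; 'vdev_some_setting' is a
 * hypothetical field used only for illustration.
 */
#if 0
static void
vdev_example_change_setting(spa_t *spa, vdev_t *vd, uint64_t new_value)
{
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
	vd->vdev_some_setting = new_value;	/* hypothetical field */
	vdev_config_dirty(vd->vdev_top);
	spa_config_exit(spa, SCL_CONFIG, FTAG);
}
#endif
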
2691fa9e4066Sahrens void
2692fa9e4066Sahrens vdev_config_clean(vdev_t *vd)
2693fa9e4066Sahrens {
26945dabedeeSbonwick 	spa_t *spa = vd->vdev_spa;
26955dabedeeSbonwick 
2696e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
2697e14bb325SJeff Bonwick 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2698e14bb325SJeff Bonwick 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
26995dabedeeSbonwick 
2700e14bb325SJeff Bonwick 	ASSERT(list_link_active(&vd->vdev_config_dirty_node));
2701e14bb325SJeff Bonwick 	list_remove(&spa->spa_config_dirty_list, vd);
2702e14bb325SJeff Bonwick }
2703e14bb325SJeff Bonwick 
2704e14bb325SJeff Bonwick /*
2705e14bb325SJeff Bonwick  * Mark a top-level vdev's state as dirty, so that the next pass of
2706e14bb325SJeff Bonwick  * spa_sync() can convert this into vdev_config_dirty().  We distinguish
2707e14bb325SJeff Bonwick  * the state changes from larger config changes because they require
2708e14bb325SJeff Bonwick  * much less locking, and are often needed for administrative actions.
2709e14bb325SJeff Bonwick  */
2710e14bb325SJeff Bonwick void
2711e14bb325SJeff Bonwick vdev_state_dirty(vdev_t *vd)
2712e14bb325SJeff Bonwick {
2713e14bb325SJeff Bonwick 	spa_t *spa = vd->vdev_spa;
2714e14bb325SJeff Bonwick 
2715e14bb325SJeff Bonwick 	ASSERT(vd == vd->vdev_top);
2716e14bb325SJeff Bonwick 
2717e14bb325SJeff Bonwick 	/*
2718e14bb325SJeff Bonwick 	 * The state list is protected by the SCL_STATE lock.  The caller
2719e14bb325SJeff Bonwick 	 * must either hold SCL_STATE as writer, or must be the sync thread
2720e14bb325SJeff Bonwick 	 * (which holds SCL_STATE as reader).  There's only one sync thread,
2721e14bb325SJeff Bonwick 	 * so this is sufficient to ensure mutual exclusion.
2722e14bb325SJeff Bonwick 	 */
2723e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
2724e14bb325SJeff Bonwick 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2725e14bb325SJeff Bonwick 	    spa_config_held(spa, SCL_STATE, RW_READER)));
2726e14bb325SJeff Bonwick 
2727*b24ab676SJeff Bonwick 	if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole)
2728e14bb325SJeff Bonwick 		list_insert_head(&spa->spa_state_dirty_list, vd);
2729e14bb325SJeff Bonwick }
2730e14bb325SJeff Bonwick 
2731e14bb325SJeff Bonwick void
2732e14bb325SJeff Bonwick vdev_state_clean(vdev_t *vd)
2733e14bb325SJeff Bonwick {
2734e14bb325SJeff Bonwick 	spa_t *spa = vd->vdev_spa;
2735e14bb325SJeff Bonwick 
2736e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
2737e14bb325SJeff Bonwick 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2738e14bb325SJeff Bonwick 	    spa_config_held(spa, SCL_STATE, RW_READER)));
2739e14bb325SJeff Bonwick 
2740e14bb325SJeff Bonwick 	ASSERT(list_link_active(&vd->vdev_state_dirty_node));
2741e14bb325SJeff Bonwick 	list_remove(&spa->spa_state_dirty_list, vd);
2742fa9e4066Sahrens }
2743fa9e4066Sahrens 
274432b87932Sek /*
274532b87932Sek  * Propagate vdev state up from children to parent.
274632b87932Sek  */
274744cd46caSbillm void
274844cd46caSbillm vdev_propagate_state(vdev_t *vd)
274944cd46caSbillm {
27508ad4d6ddSJeff Bonwick 	spa_t *spa = vd->vdev_spa;
27518ad4d6ddSJeff Bonwick 	vdev_t *rvd = spa->spa_root_vdev;
275244cd46caSbillm 	int degraded = 0, faulted = 0;
275344cd46caSbillm 	int corrupted = 0;
275444cd46caSbillm 	vdev_t *child;
275544cd46caSbillm 
27563d7072f8Seschrock 	if (vd->vdev_children > 0) {
2757573ca77eSGeorge Wilson 		for (int c = 0; c < vd->vdev_children; c++) {
27583d7072f8Seschrock 			child = vd->vdev_child[c];
275951ece835Seschrock 
276088ecc943SGeorge Wilson 			/*
276188ecc943SGeorge Wilson 			 * Don't factor holes into the decision.
276288ecc943SGeorge Wilson 			 */
276388ecc943SGeorge Wilson 			if (child->vdev_ishole)
276488ecc943SGeorge Wilson 				continue;
276588ecc943SGeorge Wilson 
2766e14bb325SJeff Bonwick 			if (!vdev_readable(child) ||
27678ad4d6ddSJeff Bonwick 			    (!vdev_writeable(child) && spa_writeable(spa))) {
276851ece835Seschrock 				/*
276951ece835Seschrock 				 * Root special: if there is a top-level log
277051ece835Seschrock 				 * device, treat the root vdev as if it were
277151ece835Seschrock 				 * degraded.
277251ece835Seschrock 				 */
277351ece835Seschrock 				if (child->vdev_islog && vd == rvd)
277451ece835Seschrock 					degraded++;
277551ece835Seschrock 				else
277651ece835Seschrock 					faulted++;
277751ece835Seschrock 			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
27783d7072f8Seschrock 				degraded++;
277951ece835Seschrock 			}
278044cd46caSbillm 
27813d7072f8Seschrock 			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
27823d7072f8Seschrock 				corrupted++;
27833d7072f8Seschrock 		}
278444cd46caSbillm 
27853d7072f8Seschrock 		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
27863d7072f8Seschrock 
27873d7072f8Seschrock 		/*
2788e14bb325SJeff Bonwick 		 * Root special: if there is a top-level vdev that cannot be
27893d7072f8Seschrock 		 * opened due to corrupted metadata, then propagate the root
27903d7072f8Seschrock 		 * vdev's aux state as 'corrupt' rather than 'insufficient
27913d7072f8Seschrock 		 * replicas'.
27923d7072f8Seschrock 		 */
27933d7072f8Seschrock 		if (corrupted && vd == rvd &&
27943d7072f8Seschrock 		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
27953d7072f8Seschrock 			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
27963d7072f8Seschrock 			    VDEV_AUX_CORRUPT_DATA);
27973d7072f8Seschrock 	}
27983d7072f8Seschrock 
279951ece835Seschrock 	if (vd->vdev_parent)
28003d7072f8Seschrock 		vdev_propagate_state(vd->vdev_parent);
280144cd46caSbillm }
280244cd46caSbillm 
2803fa9e4066Sahrens /*
2804ea8dc4b6Seschrock  * Set a vdev's state.  If this is during an open, we don't update the parent
2805ea8dc4b6Seschrock  * state, because we're in the process of opening children depth-first.
2806ea8dc4b6Seschrock  * Otherwise, we propagate the change to the parent.
2807ea8dc4b6Seschrock  *
2808ea8dc4b6Seschrock  * If this routine places a device in a faulted state, an appropriate ereport is
2809ea8dc4b6Seschrock  * generated.
2810fa9e4066Sahrens  */
2811fa9e4066Sahrens void
2812ea8dc4b6Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
2813fa9e4066Sahrens {
2814560e6e96Seschrock 	uint64_t save_state;
2815c5904d13Seschrock 	spa_t *spa = vd->vdev_spa;
2816ea8dc4b6Seschrock 
2817ea8dc4b6Seschrock 	if (state == vd->vdev_state) {
2818ea8dc4b6Seschrock 		vd->vdev_stat.vs_aux = aux;
2819fa9e4066Sahrens 		return;
2820ea8dc4b6Seschrock 	}
2821ea8dc4b6Seschrock 
2822560e6e96Seschrock 	save_state = vd->vdev_state;
2823fa9e4066Sahrens 
2824fa9e4066Sahrens 	vd->vdev_state = state;
2825fa9e4066Sahrens 	vd->vdev_stat.vs_aux = aux;
2826fa9e4066Sahrens 
28273d7072f8Seschrock 	/*
28283d7072f8Seschrock 	 * If we are setting the vdev state to anything but an open state, then
28293d7072f8Seschrock 	 * always close the underlying device.  Otherwise, we keep accessible
28303d7072f8Seschrock 	 * but invalid devices open forever.  We don't call vdev_close() itself,
28313d7072f8Seschrock 	 * because that implies some extra checks (offline, etc) that we don't
28323d7072f8Seschrock 	 * want here.  This is limited to leaf devices, because otherwise
28333d7072f8Seschrock 	 * closing the device will affect other children.
28343d7072f8Seschrock 	 */
2835cbd2b15eSJeff Bonwick 	if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf)
28363d7072f8Seschrock 		vd->vdev_ops->vdev_op_close(vd);
28373d7072f8Seschrock 
2838069f55e2SEric Schrock 	/*
2839069f55e2SEric Schrock 	 * If we have brought this vdev back into service, we need
2840069f55e2SEric Schrock 	 * to notify fmd so that it can gracefully repair any outstanding
2841069f55e2SEric Schrock 	 * cases due to a missing device.  We do this in all cases, even those
2842069f55e2SEric Schrock 	 * that probably don't correlate to a repaired fault.  This is sure to
2843069f55e2SEric Schrock 	 * catch all cases, and we let the zfs-retire agent sort it out.  If
2844069f55e2SEric Schrock 	 * this is a transient state it's OK, as the retire agent will
2845069f55e2SEric Schrock 	 * double-check the state of the vdev before repairing it.
2846069f55e2SEric Schrock 	 */
2847069f55e2SEric Schrock 	if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
2848069f55e2SEric Schrock 	    vd->vdev_prevstate != state)
2849069f55e2SEric Schrock 		zfs_post_state_change(spa, vd);
2850069f55e2SEric Schrock 
28513d7072f8Seschrock 	if (vd->vdev_removed &&
28523d7072f8Seschrock 	    state == VDEV_STATE_CANT_OPEN &&
28533d7072f8Seschrock 	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
28543d7072f8Seschrock 		/*
28553d7072f8Seschrock 		 * If the previous state is set to VDEV_STATE_REMOVED, then this
28563d7072f8Seschrock 		 * device was previously marked removed and someone attempted to
28573d7072f8Seschrock 		 * reopen it.  If this failed due to a nonexistent device, then
28583d7072f8Seschrock 		 * keep the device in the REMOVED state.  We also let this be if
28593d7072f8Seschrock 		 * it is one of our special test online cases, which is only
28603d7072f8Seschrock 		 * attempting to online the device and shouldn't generate an FMA
28613d7072f8Seschrock 		 * fault.
28623d7072f8Seschrock 		 */
28633d7072f8Seschrock 		vd->vdev_state = VDEV_STATE_REMOVED;
28643d7072f8Seschrock 		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
28653d7072f8Seschrock 	} else if (state == VDEV_STATE_REMOVED) {
28663d7072f8Seschrock 		vd->vdev_removed = B_TRUE;
28673d7072f8Seschrock 	} else if (state == VDEV_STATE_CANT_OPEN) {
2868ea8dc4b6Seschrock 		/*
2869ea8dc4b6Seschrock 		 * If we fail to open a vdev during an import, we mark it as
2870ea8dc4b6Seschrock 		 * "not available", which signifies that it was never there to
2871ea8dc4b6Seschrock 		 * begin with.  Failure to open such a device is not considered
2872ea8dc4b6Seschrock 		 * an error.
2873ea8dc4b6Seschrock 		 */
2874c5904d13Seschrock 		if (spa->spa_load_state == SPA_LOAD_IMPORT &&
2875560e6e96Seschrock 		    vd->vdev_ops->vdev_op_leaf)
2876560e6e96Seschrock 			vd->vdev_not_present = 1;
2877560e6e96Seschrock 
2878560e6e96Seschrock 		/*
2879560e6e96Seschrock 		 * Post the appropriate ereport.  If the 'prevstate' field is
2880560e6e96Seschrock 		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
2881560e6e96Seschrock 		 * that this is part of a vdev_reopen().  In this case, we don't
2882560e6e96Seschrock 		 * want to post the ereport if the device was already in the
2883560e6e96Seschrock 		 * CANT_OPEN state beforehand.
28843d7072f8Seschrock 		 *
28853d7072f8Seschrock 		 * If the 'checkremove' flag is set, then this is an attempt to
28863d7072f8Seschrock 		 * online the device in response to an insertion event.  If we
28873d7072f8Seschrock 		 * hit this case, then we have detected an insertion event for a
28883d7072f8Seschrock 		 * faulted or offline device that wasn't in the removed state.
28893d7072f8Seschrock 		 * In this scenario, we don't post an ereport because we are
28903d7072f8Seschrock 		 * about to replace the device, or attempt an online with
28913d7072f8Seschrock 		 * vdev_forcefault, which will generate the fault for us.
2892560e6e96Seschrock 		 */
28933d7072f8Seschrock 		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
28943d7072f8Seschrock 		    !vd->vdev_not_present && !vd->vdev_checkremove &&
2895c5904d13Seschrock 		    vd != spa->spa_root_vdev) {
2896ea8dc4b6Seschrock 			const char *class;
2897ea8dc4b6Seschrock 
2898ea8dc4b6Seschrock 			switch (aux) {
2899ea8dc4b6Seschrock 			case VDEV_AUX_OPEN_FAILED:
2900ea8dc4b6Seschrock 				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
2901ea8dc4b6Seschrock 				break;
2902ea8dc4b6Seschrock 			case VDEV_AUX_CORRUPT_DATA:
2903ea8dc4b6Seschrock 				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
2904ea8dc4b6Seschrock 				break;
2905ea8dc4b6Seschrock 			case VDEV_AUX_NO_REPLICAS:
2906ea8dc4b6Seschrock 				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
2907ea8dc4b6Seschrock 				break;
2908ea8dc4b6Seschrock 			case VDEV_AUX_BAD_GUID_SUM:
2909ea8dc4b6Seschrock 				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
2910ea8dc4b6Seschrock 				break;
2911ea8dc4b6Seschrock 			case VDEV_AUX_TOO_SMALL:
2912ea8dc4b6Seschrock 				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
2913ea8dc4b6Seschrock 				break;
2914ea8dc4b6Seschrock 			case VDEV_AUX_BAD_LABEL:
2915ea8dc4b6Seschrock 				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
2916ea8dc4b6Seschrock 				break;
2917e14bb325SJeff Bonwick 			case VDEV_AUX_IO_FAILURE:
2918e14bb325SJeff Bonwick 				class = FM_EREPORT_ZFS_IO_FAILURE;
2919e14bb325SJeff Bonwick 				break;
2920ea8dc4b6Seschrock 			default:
2921ea8dc4b6Seschrock 				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
2922ea8dc4b6Seschrock 			}
2923ea8dc4b6Seschrock 
2924c5904d13Seschrock 			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
2925ea8dc4b6Seschrock 		}
2926ea8dc4b6Seschrock 
29273d7072f8Seschrock 		/* Erase any notion of persistent removed state */
29283d7072f8Seschrock 		vd->vdev_removed = B_FALSE;
29293d7072f8Seschrock 	} else {
29303d7072f8Seschrock 		vd->vdev_removed = B_FALSE;
29313d7072f8Seschrock 	}
2932ea8dc4b6Seschrock 
29338b33d774STim Haley 	if (!isopen && vd->vdev_parent)
29348b33d774STim Haley 		vdev_propagate_state(vd->vdev_parent);
2935fa9e4066Sahrens }
293615e6edf1Sgw 
293715e6edf1Sgw /*
293815e6edf1Sgw  * Check the vdev configuration to ensure that it's capable of supporting
293915e6edf1Sgw  * a root pool. Currently, we do not support RAID-Z or partial configuration.
294015e6edf1Sgw  * In addition, only a single top-level vdev is allowed and none of the leaves
294115e6edf1Sgw  * can be wholedisks.
294215e6edf1Sgw  */
294315e6edf1Sgw boolean_t
294415e6edf1Sgw vdev_is_bootable(vdev_t *vd)
294515e6edf1Sgw {
294615e6edf1Sgw 	if (!vd->vdev_ops->vdev_op_leaf) {
294715e6edf1Sgw 		char *vdev_type = vd->vdev_ops->vdev_op_type;
294815e6edf1Sgw 
294915e6edf1Sgw 		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
295015e6edf1Sgw 		    vd->vdev_children > 1) {
295115e6edf1Sgw 			return (B_FALSE);
295215e6edf1Sgw 		} else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
295315e6edf1Sgw 		    strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
295415e6edf1Sgw 			return (B_FALSE);
295515e6edf1Sgw 		}
295615e6edf1Sgw 	} else if (vd->vdev_wholedisk == 1) {
295715e6edf1Sgw 		return (B_FALSE);
295815e6edf1Sgw 	}
295915e6edf1Sgw 
2960573ca77eSGeorge Wilson 	for (int c = 0; c < vd->vdev_children; c++) {
296115e6edf1Sgw 		if (!vdev_is_bootable(vd->vdev_child[c]))
296215e6edf1Sgw 			return (B_FALSE);
296315e6edf1Sgw 	}
296415e6edf1Sgw 	return (B_TRUE);
296515e6edf1Sgw }
2966e6ca193dSGeorge Wilson 
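/*
 * Editorial note -- an illustrative reading of the checks above, not part
 * of the original file: a pool with a single top-level mirror (or single
 * disk) built on slices passes, while a pool with more than one top-level
 * vdev, any raidz vdev, a missing vdev, or a leaf labeled as a whole disk
 * does not.
 */
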
296788ecc943SGeorge Wilson /*
296888ecc943SGeorge Wilson  * Load the state from the original vdev tree (ovd) which
296988ecc943SGeorge Wilson  * we've retrieved from the MOS config object. If the original
297088ecc943SGeorge Wilson  * vdev was offline then we transfer that state to the device
297188ecc943SGeorge Wilson  * in the current vdev tree (nvd).
297288ecc943SGeorge Wilson  */
2973e6ca193dSGeorge Wilson void
297488ecc943SGeorge Wilson vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
2975e6ca193dSGeorge Wilson {
297688ecc943SGeorge Wilson 	spa_t *spa = nvd->vdev_spa;
2977e6ca193dSGeorge Wilson 
297888ecc943SGeorge Wilson 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
297988ecc943SGeorge Wilson 	ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
2980e6ca193dSGeorge Wilson 
298188ecc943SGeorge Wilson 	for (int c = 0; c < nvd->vdev_children; c++)
298288ecc943SGeorge Wilson 		vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
2983e6ca193dSGeorge Wilson 
298488ecc943SGeorge Wilson 	if (nvd->vdev_ops->vdev_op_leaf && ovd->vdev_offline) {
2985e6ca193dSGeorge Wilson 		/*
2986e6ca193dSGeorge Wilson 		 * It would be nice to call vdev_offline()
2987e6ca193dSGeorge Wilson 		 * directly but the pool isn't fully loaded and
2988e6ca193dSGeorge Wilson 		 * the txg threads have not been started yet.
2989e6ca193dSGeorge Wilson 		 */
299088ecc943SGeorge Wilson 		nvd->vdev_offline = ovd->vdev_offline;
299188ecc943SGeorge Wilson 		vdev_reopen(nvd->vdev_top);
2992e6ca193dSGeorge Wilson 	}
2993e6ca193dSGeorge Wilson }
2994573ca77eSGeorge Wilson 
2995573ca77eSGeorge Wilson /*
2996573ca77eSGeorge Wilson  * Expand a vdev if possible.
2997573ca77eSGeorge Wilson  */
2998573ca77eSGeorge Wilson void
2999573ca77eSGeorge Wilson vdev_expand(vdev_t *vd, uint64_t txg)
3000573ca77eSGeorge Wilson {
3001573ca77eSGeorge Wilson 	ASSERT(vd->vdev_top == vd);
3002573ca77eSGeorge Wilson 	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3003573ca77eSGeorge Wilson 
3004573ca77eSGeorge Wilson 	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
3005573ca77eSGeorge Wilson 		VERIFY(vdev_metaslab_init(vd, txg) == 0);
3006573ca77eSGeorge Wilson 		vdev_config_dirty(vd);
3007573ca77eSGeorge Wilson 	}
3008573ca77eSGeorge Wilson }
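/*
 * Editorial note -- illustrative, not part of the original file: with each
 * metaslab spanning 1 << vdev_ms_shift bytes, growing an expandable LUN so
 * that (vdev_asize >> vdev_ms_shift) exceeds the current vdev_ms_count makes
 * vdev_metaslab_init() create the additional metaslabs and dirties the
 * config so the new size is written out in txg 'txg'.
 */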
3009