xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa.c (revision 9fa718d2f477620f14e3f2948dd03e3470add804)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
25  * Copyright (c) 2012 by Delphix. All rights reserved.
26  */
27 
28 /*
29  * This file contains all the routines used when modifying on-disk SPA state.
30  * This includes opening, importing, destroying, exporting a pool, and syncing a
31  * pool.
32  */
33 
34 #include <sys/zfs_context.h>
35 #include <sys/fm/fs/zfs.h>
36 #include <sys/spa_impl.h>
37 #include <sys/zio.h>
38 #include <sys/zio_checksum.h>
39 #include <sys/dmu.h>
40 #include <sys/dmu_tx.h>
41 #include <sys/zap.h>
42 #include <sys/zil.h>
43 #include <sys/ddt.h>
44 #include <sys/vdev_impl.h>
45 #include <sys/metaslab.h>
46 #include <sys/metaslab_impl.h>
47 #include <sys/uberblock_impl.h>
48 #include <sys/txg.h>
49 #include <sys/avl.h>
50 #include <sys/dmu_traverse.h>
51 #include <sys/dmu_objset.h>
52 #include <sys/unique.h>
53 #include <sys/dsl_pool.h>
54 #include <sys/dsl_dataset.h>
55 #include <sys/dsl_dir.h>
56 #include <sys/dsl_prop.h>
57 #include <sys/dsl_synctask.h>
58 #include <sys/fs/zfs.h>
59 #include <sys/arc.h>
60 #include <sys/callb.h>
61 #include <sys/systeminfo.h>
62 #include <sys/spa_boot.h>
63 #include <sys/zfs_ioctl.h>
64 #include <sys/dsl_scan.h>
65 #include <sys/zfeature.h>
66 
67 #ifdef	_KERNEL
68 #include <sys/bootprops.h>
69 #include <sys/callb.h>
70 #include <sys/cpupart.h>
71 #include <sys/pool.h>
72 #include <sys/sysdc.h>
73 #include <sys/zone.h>
74 #endif	/* _KERNEL */
75 
76 #include "zfs_prop.h"
77 #include "zfs_comutil.h"
78 
79 typedef enum zti_modes {
80 	zti_mode_fixed,			/* value is # of threads (min 1) */
81 	zti_mode_online_percent,	/* value is % of online CPUs */
82 	zti_mode_batch,			/* cpu-intensive; value is ignored */
83 	zti_mode_null,			/* don't create a taskq */
84 	zti_nmodes
85 } zti_modes_t;
86 
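/*
 * Shorthand zio_taskq_info_t initializers; e.g. ZTI_FIX(8) expands to
 * { zti_mode_fixed, 8 } (a taskq with exactly 8 threads), and ZTI_NULL
 * to { zti_mode_null, 0 } (no taskq at all).
 */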
87 #define	ZTI_FIX(n)	{ zti_mode_fixed, (n) }
88 #define	ZTI_PCT(n)	{ zti_mode_online_percent, (n) }
89 #define	ZTI_BATCH	{ zti_mode_batch, 0 }
90 #define	ZTI_NULL	{ zti_mode_null, 0 }
91 
92 #define	ZTI_ONE		ZTI_FIX(1)
93 
94 typedef struct zio_taskq_info {
95 	enum zti_modes zti_mode;
96 	uint_t zti_value;
97 } zio_taskq_info_t;
98 
99 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
100 	"issue", "issue_high", "intr", "intr_high"
101 };
102 
103 /*
104  * Define the taskq threads for the following I/O types:
105  * 	NULL, READ, WRITE, FREE, CLAIM, and IOCTL
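 *
 * For example, the READ row below creates an 8-thread "issue" taskq and
 * a batch (one thread per online CPU) "intr" taskq, while the WRITE row
 * creates a batch "issue" taskq plus small fixed high-priority taskqs.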
106  */
107 const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
108 	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
109 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
110 	{ ZTI_FIX(8),	ZTI_NULL,	ZTI_BATCH,	ZTI_NULL },
111 	{ ZTI_BATCH,	ZTI_FIX(5),	ZTI_FIX(8),	ZTI_FIX(5) },
112 	{ ZTI_FIX(100),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
113 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
114 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
115 };
116 
117 static dsl_syncfunc_t spa_sync_version;
118 static dsl_syncfunc_t spa_sync_props;
119 static boolean_t spa_has_active_shared_spare(spa_t *spa);
120 static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
121     spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
122     char **ereport);
123 static void spa_vdev_resilver_done(spa_t *spa);
124 
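/*
 * Tunables controlling how the zio taskqs are created and scheduled.
 * As module globals they can be overridden at boot time, e.g. via
 * /etc/system:
 *	set zfs:zio_taskq_batch_pct = 50
 */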
125 uint_t		zio_taskq_batch_pct = 100;	/* 1 thread per cpu in pset */
126 id_t		zio_taskq_psrset_bind = PS_NONE;
127 boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
128 uint_t		zio_taskq_basedc = 80;		/* base duty cycle */
129 
130 boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
131 
132 /*
133  * This (illegal) pool name is used when temporarily importing a spa_t in order
134  * to get the vdev stats associated with the imported devices.
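 * Since user pool names must begin with a letter, the leading '$'
 * guarantees this name can never collide with a real pool.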
135  */
136 #define	TRYIMPORT_NAME	"$import"
137 
138 /*
139  * ==========================================================================
140  * SPA properties routines
141  * ==========================================================================
142  */
143 
144 /*
145  * Add a (source=src, propname=propval) entry to an nvlist.
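 *
 * The added entry has the form
 *	propname -> { ZPROP_SOURCE = src, ZPROP_VALUE = strval or intval },
 * i.e. each property maps to a nested nvlist carrying both the value
 * and its source.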
146  */
147 static void
148 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
149     uint64_t intval, zprop_source_t src)
150 {
151 	const char *propname = zpool_prop_to_name(prop);
152 	nvlist_t *propval;
153 
154 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
155 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
156 
157 	if (strval != NULL)
158 		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
159 	else
160 		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
161 
162 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
163 	nvlist_free(propval);
164 }
165 
166 /*
167  * Get property values from the spa configuration.
168  */
169 static void
170 spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
171 {
172 	vdev_t *rvd = spa->spa_root_vdev;
173 	dsl_pool_t *pool = spa->spa_dsl_pool;
174 	uint64_t size;
175 	uint64_t alloc;
176 	uint64_t space;
177 	uint64_t cap, version;
178 	zprop_source_t src = ZPROP_SRC_NONE;
179 	spa_config_dirent_t *dp;
180 
181 	ASSERT(MUTEX_HELD(&spa->spa_props_lock));
182 
183 	if (rvd != NULL) {
184 		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
185 		size = metaslab_class_get_space(spa_normal_class(spa));
186 		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
187 		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
188 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
189 		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
190 		    size - alloc, src);
191 
192 		space = 0;
193 		for (int c = 0; c < rvd->vdev_children; c++) {
194 			vdev_t *tvd = rvd->vdev_child[c];
195 			space += tvd->vdev_max_asize - tvd->vdev_asize;
196 		}
197 		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
198 		    src);
199 
200 		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
201 		    (spa_mode(spa) == FREAD), src);
202 
203 		cap = (size == 0) ? 0 : (alloc * 100 / size);
204 		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
205 
206 		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
207 		    ddt_get_pool_dedup_ratio(spa), src);
208 
209 		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
210 		    rvd->vdev_state, src);
211 
212 		version = spa_version(spa);
213 		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
214 			src = ZPROP_SRC_DEFAULT;
215 		else
216 			src = ZPROP_SRC_LOCAL;
217 		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
218 	}
219 
220 	if (pool != NULL) {
221 		dsl_dir_t *freedir = pool->dp_free_dir;
222 
223 		/*
224 		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
225 		 * for pools created before that version, freedir will be NULL.
226 		 */
227 		if (freedir != NULL) {
228 			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
229 			    freedir->dd_phys->dd_used_bytes, src);
230 		} else {
231 			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
232 			    NULL, 0, src);
233 		}
234 	}
235 
236 	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
237 
238 	if (spa->spa_comment != NULL) {
239 		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
240 		    0, ZPROP_SRC_LOCAL);
241 	}
242 
243 	if (spa->spa_root != NULL)
244 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
245 		    0, ZPROP_SRC_LOCAL);
246 
247 	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
248 		if (dp->scd_path == NULL) {
249 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
250 			    "none", 0, ZPROP_SRC_LOCAL);
251 		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
252 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
253 			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
254 		}
255 	}
256 }
257 
258 /*
259  * Get zpool property values.
260  */
261 int
262 spa_prop_get(spa_t *spa, nvlist_t **nvp)
263 {
264 	objset_t *mos = spa->spa_meta_objset;
265 	zap_cursor_t zc;
266 	zap_attribute_t za;
267 	int err;
268 
269 	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
270 
271 	mutex_enter(&spa->spa_props_lock);
272 
273 	/*
274 	 * Get properties from the spa config.
275 	 */
276 	spa_prop_get_config(spa, nvp);
277 
278 	/* If no pool property object, no more props to get. */
279 	if (mos == NULL || spa->spa_pool_props_object == 0) {
280 		mutex_exit(&spa->spa_props_lock);
281 		return (0);
282 	}
283 
284 	/*
285 	 * Get properties from the MOS pool property object.
286 	 */
287 	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
288 	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
289 	    zap_cursor_advance(&zc)) {
290 		uint64_t intval = 0;
291 		char *strval = NULL;
292 		zprop_source_t src = ZPROP_SRC_DEFAULT;
293 		zpool_prop_t prop;
294 
295 		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
296 			continue;
297 
298 		switch (za.za_integer_length) {
299 		case 8:
300 			/* integer property */
301 			if (za.za_first_integer !=
302 			    zpool_prop_default_numeric(prop))
303 				src = ZPROP_SRC_LOCAL;
304 
305 			if (prop == ZPOOL_PROP_BOOTFS) {
306 				dsl_pool_t *dp;
307 				dsl_dataset_t *ds = NULL;
308 
309 				dp = spa_get_dsl(spa);
310 				rw_enter(&dp->dp_config_rwlock, RW_READER);
311 				if (err = dsl_dataset_hold_obj(dp,
312 				    za.za_first_integer, FTAG, &ds)) {
313 					rw_exit(&dp->dp_config_rwlock);
314 					break;
315 				}
316 
317 				strval = kmem_alloc(
318 				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
319 				    KM_SLEEP);
320 				dsl_dataset_name(ds, strval);
321 				dsl_dataset_rele(ds, FTAG);
322 				rw_exit(&dp->dp_config_rwlock);
323 			} else {
324 				strval = NULL;
325 				intval = za.za_first_integer;
326 			}
327 
328 			spa_prop_add_list(*nvp, prop, strval, intval, src);
329 
330 			if (strval != NULL)
331 				kmem_free(strval,
332 				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
333 
334 			break;
335 
336 		case 1:
337 			/* string property */
338 			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
339 			err = zap_lookup(mos, spa->spa_pool_props_object,
340 			    za.za_name, 1, za.za_num_integers, strval);
341 			if (err) {
342 				kmem_free(strval, za.za_num_integers);
343 				break;
344 			}
345 			spa_prop_add_list(*nvp, prop, strval, 0, src);
346 			kmem_free(strval, za.za_num_integers);
347 			break;
348 
349 		default:
350 			break;
351 		}
352 	}
353 	zap_cursor_fini(&zc);
354 	mutex_exit(&spa->spa_props_lock);
355 out:
356 	if (err && err != ENOENT) {
357 		nvlist_free(*nvp);
358 		*nvp = NULL;
359 		return (err);
360 	}
361 
362 	return (0);
363 }
364 
365 /*
366  * Validate the given pool properties nvlist and modify the list
367  * for the property values to be set.
368  */
369 static int
370 spa_prop_validate(spa_t *spa, nvlist_t *props)
371 {
372 	nvpair_t *elem;
373 	int error = 0, reset_bootfs = 0;
374 	uint64_t objnum;
375 	boolean_t has_feature = B_FALSE;
376 
377 	elem = NULL;
378 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
379 		uint64_t intval;
380 		char *strval, *slash, *check, *fname;
381 		const char *propname = nvpair_name(elem);
382 		zpool_prop_t prop = zpool_name_to_prop(propname);
383 
384 		switch (prop) {
385 		case ZPROP_INVAL:
386 			if (!zpool_prop_feature(propname)) {
387 				error = EINVAL;
388 				break;
389 			}
390 
391 			/*
392 			 * Sanitize the input.
393 			 */
394 			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
395 				error = EINVAL;
396 				break;
397 			}
398 
399 			if (nvpair_value_uint64(elem, &intval) != 0) {
400 				error = EINVAL;
401 				break;
402 			}
403 
404 			if (intval != 0) {
405 				error = EINVAL;
406 				break;
407 			}
408 
409 			fname = strchr(propname, '@') + 1;
410 			if (zfeature_lookup_name(fname, NULL) != 0) {
411 				error = EINVAL;
412 				break;
413 			}
414 
415 			has_feature = B_TRUE;
416 			break;
417 
418 		case ZPOOL_PROP_VERSION:
419 			error = nvpair_value_uint64(elem, &intval);
420 			if (!error &&
421 			    (intval < spa_version(spa) ||
422 			    intval > SPA_VERSION_BEFORE_FEATURES ||
423 			    has_feature))
424 				error = EINVAL;
425 			break;
426 
427 		case ZPOOL_PROP_DELEGATION:
428 		case ZPOOL_PROP_AUTOREPLACE:
429 		case ZPOOL_PROP_LISTSNAPS:
430 		case ZPOOL_PROP_AUTOEXPAND:
431 			error = nvpair_value_uint64(elem, &intval);
432 			if (!error && intval > 1)
433 				error = EINVAL;
434 			break;
435 
436 		case ZPOOL_PROP_BOOTFS:
437 			/*
438 			 * If the pool version is less than SPA_VERSION_BOOTFS,
439 			 * or the pool is still being created (version == 0),
440 			 * the bootfs property cannot be set.
441 			 */
442 			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
443 				error = ENOTSUP;
444 				break;
445 			}
446 
447 			/*
448 			 * Make sure the vdev config is bootable
449 			 */
450 			if (!vdev_is_bootable(spa->spa_root_vdev)) {
451 				error = ENOTSUP;
452 				break;
453 			}
454 
455 			reset_bootfs = 1;
456 
457 			error = nvpair_value_string(elem, &strval);
458 
459 			if (!error) {
460 				objset_t *os;
461 				uint64_t compress;
462 
463 				if (strval == NULL || strval[0] == '\0') {
464 					objnum = zpool_prop_default_numeric(
465 					    ZPOOL_PROP_BOOTFS);
466 					break;
467 				}
468 
469 				if (error = dmu_objset_hold(strval, FTAG, &os))
470 					break;
471 
472 				/* Must be ZPL and not gzip compressed. */
473 
474 				if (dmu_objset_type(os) != DMU_OST_ZFS) {
475 					error = ENOTSUP;
476 				} else if ((error = dsl_prop_get_integer(strval,
477 				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
478 				    &compress, NULL)) == 0 &&
479 				    !BOOTFS_COMPRESS_VALID(compress)) {
480 					error = ENOTSUP;
481 				} else {
482 					objnum = dmu_objset_id(os);
483 				}
484 				dmu_objset_rele(os, FTAG);
485 			}
486 			break;
487 
488 		case ZPOOL_PROP_FAILUREMODE:
489 			error = nvpair_value_uint64(elem, &intval);
490 			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
491 			    intval > ZIO_FAILURE_MODE_PANIC))
492 				error = EINVAL;
493 
494 			/*
495 			 * This is a special case which only occurs when
496 			 * the pool has completely failed. This allows
497 			 * the user to change the in-core failmode property
498 			 * without syncing it out to disk (I/Os might
499 			 * currently be blocked). We do this by returning
500 			 * EIO to the caller (spa_prop_set) to trick it
501 			 * into thinking we encountered a property validation
502 			 * error.
503 			 */
504 			if (!error && spa_suspended(spa)) {
505 				spa->spa_failmode = intval;
506 				error = EIO;
507 			}
508 			break;
509 
510 		case ZPOOL_PROP_CACHEFILE:
511 			if ((error = nvpair_value_string(elem, &strval)) != 0)
512 				break;
513 
514 			if (strval[0] == '\0')
515 				break;
516 
517 			if (strcmp(strval, "none") == 0)
518 				break;
519 
520 			if (strval[0] != '/') {
521 				error = EINVAL;
522 				break;
523 			}
524 
525 			slash = strrchr(strval, '/');
526 			ASSERT(slash != NULL);
527 
528 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
529 			    strcmp(slash, "/..") == 0)
530 				error = EINVAL;
531 			break;
532 
533 		case ZPOOL_PROP_COMMENT:
534 			if ((error = nvpair_value_string(elem, &strval)) != 0)
535 				break;
536 			for (check = strval; *check != '\0'; check++) {
537 				/*
538 				 * The kernel doesn't have an easy isprint()
539 				 * check.  For this kernel check, we merely
540 				 * check ASCII apart from DEL.  Fix this if
541 				 * there is an easy-to-use kernel isprint().
542 				 */
543 				if (*check >= 0x7f) {
544 					error = EINVAL;
545 					break;
546 				}
548 			}
549 			if (strlen(strval) > ZPROP_MAX_COMMENT)
550 				error = E2BIG;
551 			break;
552 
553 		case ZPOOL_PROP_DEDUPDITTO:
554 			if (spa_version(spa) < SPA_VERSION_DEDUP)
555 				error = ENOTSUP;
556 			else
557 				error = nvpair_value_uint64(elem, &intval);
558 			if (error == 0 &&
559 			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
560 				error = EINVAL;
561 			break;
562 		}
563 
564 		if (error)
565 			break;
566 	}
567 
568 	if (!error && reset_bootfs) {
569 		error = nvlist_remove(props,
570 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
571 
572 		if (!error) {
573 			error = nvlist_add_uint64(props,
574 			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
575 		}
576 	}
577 
578 	return (error);
579 }
580 
581 void
582 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
583 {
584 	char *cachefile;
585 	spa_config_dirent_t *dp;
586 
587 	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
588 	    &cachefile) != 0)
589 		return;
590 
591 	dp = kmem_alloc(sizeof (spa_config_dirent_t),
592 	    KM_SLEEP);
593 
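	/*
	 * Map the cachefile property onto the config dirent: the empty
	 * string selects the default cachefile, "none" means no cachefile
	 * at all, and anything else is taken as an explicit path.
	 */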
594 	if (cachefile[0] == '\0')
595 		dp->scd_path = spa_strdup(spa_config_path);
596 	else if (strcmp(cachefile, "none") == 0)
597 		dp->scd_path = NULL;
598 	else
599 		dp->scd_path = spa_strdup(cachefile);
600 
601 	list_insert_head(&spa->spa_config_list, dp);
602 	if (need_sync)
603 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
604 }
605 
606 int
607 spa_prop_set(spa_t *spa, nvlist_t *nvp)
608 {
609 	int error;
610 	nvpair_t *elem = NULL;
611 	boolean_t need_sync = B_FALSE;
612 
613 	if ((error = spa_prop_validate(spa, nvp)) != 0)
614 		return (error);
615 
616 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
617 		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
618 
619 		if (prop == ZPOOL_PROP_CACHEFILE ||
620 		    prop == ZPOOL_PROP_ALTROOT ||
621 		    prop == ZPOOL_PROP_READONLY)
622 			continue;
623 
624 		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
625 			uint64_t ver;
626 
627 			if (prop == ZPOOL_PROP_VERSION) {
628 				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
629 			} else {
630 				ASSERT(zpool_prop_feature(nvpair_name(elem)));
631 				ver = SPA_VERSION_FEATURES;
632 				need_sync = B_TRUE;
633 			}
634 
635 			/* Save time if the version is already set. */
636 			if (ver == spa_version(spa))
637 				continue;
638 
639 			/*
640 			 * In addition to the pool directory object, we might
641 			 * create the pool properties object, the features for
642 			 * read object, the features for write object, or the
643 			 * feature descriptions object.
644 			 */
645 			error = dsl_sync_task_do(spa_get_dsl(spa), NULL,
646 			    spa_sync_version, spa, &ver, 6);
647 			if (error)
648 				return (error);
649 			continue;
650 		}
651 
652 		need_sync = B_TRUE;
653 		break;
654 	}
655 
656 	if (need_sync) {
657 		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
658 		    spa, nvp, 6));
659 	}
660 
661 	return (0);
662 }
663 
664 /*
665  * If the bootfs property value is dsobj, clear it.
666  */
667 void
668 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
669 {
670 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
671 		VERIFY(zap_remove(spa->spa_meta_objset,
672 		    spa->spa_pool_props_object,
673 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
674 		spa->spa_bootfs = 0;
675 	}
676 }
677 
678 /*
679  * Change the GUID for the pool.  This is done so that we can later
680  * re-import a pool built from a clone of our own vdevs.  We will modify
681  * the root vdev's guid, our own pool guid, and then mark all of our
682  * vdevs dirty.  Note that we must make sure that all our vdevs are
683  * online when we do this, or else any vdevs that weren't present
684  * would be orphaned from our pool.  We are also going to issue a
685  * sysevent to update any watchers.
686  */
687 int
688 spa_change_guid(spa_t *spa)
689 {
690 	uint64_t	oldguid, newguid;
691 	uint64_t	txg;
692 
693 	if (!(spa_mode_global & FWRITE))
694 		return (EROFS);
695 
696 	txg = spa_vdev_enter(spa);
697 
698 	if (spa->spa_root_vdev->vdev_state != VDEV_STATE_HEALTHY)
699 		return (spa_vdev_exit(spa, NULL, txg, ENXIO));
700 
701 	oldguid = spa_guid(spa);
702 	newguid = spa_generate_guid(NULL);
703 	ASSERT3U(oldguid, !=, newguid);
704 
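	/*
	 * vdev_guid_sum is the sum of the guids of every vdev in the tree,
	 * so swapping in the new root guid shifts the sum by exactly
	 * (newguid - oldguid).
	 */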
705 	spa->spa_root_vdev->vdev_guid = newguid;
706 	spa->spa_root_vdev->vdev_guid_sum += (newguid - oldguid);
707 
708 	vdev_config_dirty(spa->spa_root_vdev);
709 
710 	spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
711 
712 	return (spa_vdev_exit(spa, NULL, txg, 0));
713 }
714 
715 /*
716  * ==========================================================================
717  * SPA state manipulation (open/create/destroy/import/export)
718  * ==========================================================================
719  */
720 
721 static int
722 spa_error_entry_compare(const void *a, const void *b)
723 {
724 	spa_error_entry_t *sa = (spa_error_entry_t *)a;
725 	spa_error_entry_t *sb = (spa_error_entry_t *)b;
726 	int ret;
727 
728 	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
729 	    sizeof (zbookmark_t));
730 
731 	if (ret < 0)
732 		return (-1);
733 	else if (ret > 0)
734 		return (1);
735 	else
736 		return (0);
737 }
738 
739 /*
740  * Utility function which retrieves copies of the current error lists and
741  * re-initializes them in the process.
742  */
743 void
744 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
745 {
746 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
747 
748 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
749 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
750 
751 	avl_create(&spa->spa_errlist_scrub,
752 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
753 	    offsetof(spa_error_entry_t, se_avl));
754 	avl_create(&spa->spa_errlist_last,
755 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
756 	    offsetof(spa_error_entry_t, se_avl));
757 }
758 
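/*
 * Create a single zio taskq from a (mode, value) pair in zio_taskqs[][].
 * For example, zti_mode_batch yields a TASKQ_THREADS_CPU_PCT taskq sized
 * by zio_taskq_batch_pct, which defaults to one thread per online CPU.
 */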
759 static taskq_t *
760 spa_taskq_create(spa_t *spa, const char *name, enum zti_modes mode,
761     uint_t value)
762 {
763 	uint_t flags = 0;
764 	boolean_t batch = B_FALSE;
765 
766 	switch (mode) {
767 	case zti_mode_null:
768 		return (NULL);		/* no taskq needed */
769 
770 	case zti_mode_fixed:
771 		ASSERT3U(value, >=, 1);
772 		value = MAX(value, 1);
773 		break;
774 
775 	case zti_mode_batch:
776 		batch = B_TRUE;
777 		flags |= TASKQ_THREADS_CPU_PCT;
778 		value = zio_taskq_batch_pct;
779 		break;
780 
781 	case zti_mode_online_percent:
782 		flags |= TASKQ_THREADS_CPU_PCT;
783 		break;
784 
785 	default:
786 		panic("unrecognized mode for %s taskq (%u:%u) in "
787 		    "spa_activate()",
788 		    name, mode, value);
789 		break;
790 	}
791 
792 	if (zio_taskq_sysdc && spa->spa_proc != &p0) {
793 		if (batch)
794 			flags |= TASKQ_DC_BATCH;
795 
796 		return (taskq_create_sysdc(name, value, 50, INT_MAX,
797 		    spa->spa_proc, zio_taskq_basedc, flags));
798 	}
799 	return (taskq_create_proc(name, value, maxclsyspri, 50, INT_MAX,
800 	    spa->spa_proc, flags));
801 }
802 
803 static void
804 spa_create_zio_taskqs(spa_t *spa)
805 {
806 	for (int t = 0; t < ZIO_TYPES; t++) {
807 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
808 			const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
809 			enum zti_modes mode = ztip->zti_mode;
810 			uint_t value = ztip->zti_value;
811 			char name[32];
812 
813 			(void) snprintf(name, sizeof (name),
814 			    "%s_%s", zio_type_name[t], zio_taskq_types[q]);
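			/* e.g. "zio_write_issue" or "zio_read_intr" */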
815 
816 			spa->spa_zio_taskq[t][q] =
817 			    spa_taskq_create(spa, name, mode, value);
818 		}
819 	}
820 }
821 
822 #ifdef _KERNEL
823 static void
824 spa_thread(void *arg)
825 {
826 	callb_cpr_t cprinfo;
827 
828 	spa_t *spa = arg;
829 	user_t *pu = PTOU(curproc);
830 
831 	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
832 	    spa->spa_name);
833 
834 	ASSERT(curproc != &p0);
835 	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
836 	    "zpool-%s", spa->spa_name);
837 	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
838 
839 	/* bind this thread to the requested psrset */
840 	if (zio_taskq_psrset_bind != PS_NONE) {
841 		pool_lock();
842 		mutex_enter(&cpu_lock);
843 		mutex_enter(&pidlock);
844 		mutex_enter(&curproc->p_lock);
845 
846 		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
847 		    0, NULL, NULL) == 0)  {
848 			curthread->t_bind_pset = zio_taskq_psrset_bind;
849 		} else {
850 			cmn_err(CE_WARN,
851 			    "Couldn't bind process for zfs pool \"%s\" to "
852 			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
853 		}
854 
855 		mutex_exit(&curproc->p_lock);
856 		mutex_exit(&pidlock);
857 		mutex_exit(&cpu_lock);
858 		pool_unlock();
859 	}
860 
861 	if (zio_taskq_sysdc) {
862 		sysdc_thread_enter(curthread, 100, 0);
863 	}
864 
865 	spa->spa_proc = curproc;
866 	spa->spa_did = curthread->t_did;
867 
868 	spa_create_zio_taskqs(spa);
869 
870 	mutex_enter(&spa->spa_proc_lock);
871 	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
872 
873 	spa->spa_proc_state = SPA_PROC_ACTIVE;
874 	cv_broadcast(&spa->spa_proc_cv);
875 
876 	CALLB_CPR_SAFE_BEGIN(&cprinfo);
877 	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
878 		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
879 	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
880 
881 	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
882 	spa->spa_proc_state = SPA_PROC_GONE;
883 	spa->spa_proc = &p0;
884 	cv_broadcast(&spa->spa_proc_cv);
885 	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */
886 
887 	mutex_enter(&curproc->p_lock);
888 	lwp_exit();
889 }
890 #endif
891 
892 /*
893  * Activate an uninitialized pool.
894  */
895 static void
896 spa_activate(spa_t *spa, int mode)
897 {
898 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
899 
900 	spa->spa_state = POOL_STATE_ACTIVE;
901 	spa->spa_mode = mode;
902 
903 	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
904 	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
905 
906 	/* Try to create a covering process */
907 	mutex_enter(&spa->spa_proc_lock);
908 	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
909 	ASSERT(spa->spa_proc == &p0);
910 	spa->spa_did = 0;
911 
912 	/* Only create a process if we're going to be around a while. */
913 	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
914 		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
915 		    NULL, 0) == 0) {
916 			spa->spa_proc_state = SPA_PROC_CREATED;
917 			while (spa->spa_proc_state == SPA_PROC_CREATED) {
918 				cv_wait(&spa->spa_proc_cv,
919 				    &spa->spa_proc_lock);
920 			}
921 			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
922 			ASSERT(spa->spa_proc != &p0);
923 			ASSERT(spa->spa_did != 0);
924 		} else {
925 #ifdef _KERNEL
926 			cmn_err(CE_WARN,
927 			    "Couldn't create process for zfs pool \"%s\"\n",
928 			    spa->spa_name);
929 #endif
930 		}
931 	}
932 	mutex_exit(&spa->spa_proc_lock);
933 
934 	/* If we didn't create a process, we need to create our taskqs. */
935 	if (spa->spa_proc == &p0) {
936 		spa_create_zio_taskqs(spa);
937 	}
938 
939 	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
940 	    offsetof(vdev_t, vdev_config_dirty_node));
941 	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
942 	    offsetof(vdev_t, vdev_state_dirty_node));
943 
944 	txg_list_create(&spa->spa_vdev_txg_list,
945 	    offsetof(struct vdev, vdev_txg_node));
946 
947 	avl_create(&spa->spa_errlist_scrub,
948 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
949 	    offsetof(spa_error_entry_t, se_avl));
950 	avl_create(&spa->spa_errlist_last,
951 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
952 	    offsetof(spa_error_entry_t, se_avl));
953 }
954 
955 /*
956  * Opposite of spa_activate().
957  */
958 static void
959 spa_deactivate(spa_t *spa)
960 {
961 	ASSERT(spa->spa_sync_on == B_FALSE);
962 	ASSERT(spa->spa_dsl_pool == NULL);
963 	ASSERT(spa->spa_root_vdev == NULL);
964 	ASSERT(spa->spa_async_zio_root == NULL);
965 	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
966 
967 	txg_list_destroy(&spa->spa_vdev_txg_list);
968 
969 	list_destroy(&spa->spa_config_dirty_list);
970 	list_destroy(&spa->spa_state_dirty_list);
971 
972 	for (int t = 0; t < ZIO_TYPES; t++) {
973 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
974 			if (spa->spa_zio_taskq[t][q] != NULL)
975 				taskq_destroy(spa->spa_zio_taskq[t][q]);
976 			spa->spa_zio_taskq[t][q] = NULL;
977 		}
978 	}
979 
980 	metaslab_class_destroy(spa->spa_normal_class);
981 	spa->spa_normal_class = NULL;
982 
983 	metaslab_class_destroy(spa->spa_log_class);
984 	spa->spa_log_class = NULL;
985 
986 	/*
987 	 * If this was part of an import or the open otherwise failed, we may
988 	 * still have errors left in the queues.  Empty them just in case.
989 	 */
990 	spa_errlog_drain(spa);
991 
992 	avl_destroy(&spa->spa_errlist_scrub);
993 	avl_destroy(&spa->spa_errlist_last);
994 
995 	spa->spa_state = POOL_STATE_UNINITIALIZED;
996 
997 	mutex_enter(&spa->spa_proc_lock);
998 	if (spa->spa_proc_state != SPA_PROC_NONE) {
999 		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1000 		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1001 		cv_broadcast(&spa->spa_proc_cv);
1002 		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1003 			ASSERT(spa->spa_proc != &p0);
1004 			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1005 		}
1006 		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1007 		spa->spa_proc_state = SPA_PROC_NONE;
1008 	}
1009 	ASSERT(spa->spa_proc == &p0);
1010 	mutex_exit(&spa->spa_proc_lock);
1011 
1012 	/*
1013 	 * We want to make sure spa_thread() has actually exited the ZFS
1014 	 * module, so that the module can't be unloaded out from underneath
1015 	 * it.
1016 	 */
1017 	if (spa->spa_did != 0) {
1018 		thread_join(spa->spa_did);
1019 		spa->spa_did = 0;
1020 	}
1021 }
1022 
1023 /*
1024  * Verify a pool configuration, and construct the vdev tree appropriately.  This
1025  * will create all the necessary vdevs in the appropriate layout, with each vdev
1026  * in the CLOSED state.  This will prep the pool before open/creation/import.
1027  * All vdev validation is done by the vdev_alloc() routine.
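 *
 * The config nvlist mirrors the vdev tree: interior vdevs carry a
 * ZPOOL_CONFIG_CHILDREN array of child nvlists that we recurse on below,
 * while leaf vdevs have no such array (the ENOENT case).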
1028  */
1029 static int
1030 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1031     uint_t id, int atype)
1032 {
1033 	nvlist_t **child;
1034 	uint_t children;
1035 	int error;
1036 
1037 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1038 		return (error);
1039 
1040 	if ((*vdp)->vdev_ops->vdev_op_leaf)
1041 		return (0);
1042 
1043 	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1044 	    &child, &children);
1045 
1046 	if (error == ENOENT)
1047 		return (0);
1048 
1049 	if (error) {
1050 		vdev_free(*vdp);
1051 		*vdp = NULL;
1052 		return (EINVAL);
1053 	}
1054 
1055 	for (int c = 0; c < children; c++) {
1056 		vdev_t *vd;
1057 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1058 		    atype)) != 0) {
1059 			vdev_free(*vdp);
1060 			*vdp = NULL;
1061 			return (error);
1062 		}
1063 	}
1064 
1065 	ASSERT(*vdp != NULL);
1066 
1067 	return (0);
1068 }
1069 
1070 /*
1071  * Opposite of spa_load().
1072  */
1073 static void
1074 spa_unload(spa_t *spa)
1075 {
1076 	int i;
1077 
1078 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1079 
1080 	/*
1081 	 * Stop async tasks.
1082 	 */
1083 	spa_async_suspend(spa);
1084 
1085 	/*
1086 	 * Stop syncing.
1087 	 */
1088 	if (spa->spa_sync_on) {
1089 		txg_sync_stop(spa->spa_dsl_pool);
1090 		spa->spa_sync_on = B_FALSE;
1091 	}
1092 
1093 	/*
1094 	 * Wait for any outstanding async I/O to complete.
1095 	 */
1096 	if (spa->spa_async_zio_root != NULL) {
1097 		(void) zio_wait(spa->spa_async_zio_root);
1098 		spa->spa_async_zio_root = NULL;
1099 	}
1100 
1101 	bpobj_close(&spa->spa_deferred_bpobj);
1102 
1103 	/*
1104 	 * Close the dsl pool.
1105 	 */
1106 	if (spa->spa_dsl_pool) {
1107 		dsl_pool_close(spa->spa_dsl_pool);
1108 		spa->spa_dsl_pool = NULL;
1109 		spa->spa_meta_objset = NULL;
1110 	}
1111 
1112 	ddt_unload(spa);
1113 
1114 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1115 
1116 	/*
1117 	 * Drop and purge level 2 cache
1118 	 */
1119 	spa_l2cache_drop(spa);
1120 
1121 	/*
1122 	 * Close all vdevs.
1123 	 */
1124 	if (spa->spa_root_vdev)
1125 		vdev_free(spa->spa_root_vdev);
1126 	ASSERT(spa->spa_root_vdev == NULL);
1127 
1128 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1129 		vdev_free(spa->spa_spares.sav_vdevs[i]);
1130 	if (spa->spa_spares.sav_vdevs) {
1131 		kmem_free(spa->spa_spares.sav_vdevs,
1132 		    spa->spa_spares.sav_count * sizeof (void *));
1133 		spa->spa_spares.sav_vdevs = NULL;
1134 	}
1135 	if (spa->spa_spares.sav_config) {
1136 		nvlist_free(spa->spa_spares.sav_config);
1137 		spa->spa_spares.sav_config = NULL;
1138 	}
1139 	spa->spa_spares.sav_count = 0;
1140 
1141 	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1142 		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1143 		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1144 	}
1145 	if (spa->spa_l2cache.sav_vdevs) {
1146 		kmem_free(spa->spa_l2cache.sav_vdevs,
1147 		    spa->spa_l2cache.sav_count * sizeof (void *));
1148 		spa->spa_l2cache.sav_vdevs = NULL;
1149 	}
1150 	if (spa->spa_l2cache.sav_config) {
1151 		nvlist_free(spa->spa_l2cache.sav_config);
1152 		spa->spa_l2cache.sav_config = NULL;
1153 	}
1154 	spa->spa_l2cache.sav_count = 0;
1155 
1156 	spa->spa_async_suspended = 0;
1157 
1158 	if (spa->spa_comment != NULL) {
1159 		spa_strfree(spa->spa_comment);
1160 		spa->spa_comment = NULL;
1161 	}
1162 
1163 	spa_config_exit(spa, SCL_ALL, FTAG);
1164 }
1165 
1166 /*
1167  * Load (or re-load) the current list of vdevs describing the active spares for
1168  * this pool.  When this is called, we have some form of basic information in
1169  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
1170  * then re-generate a more complete list including status information.
1171  */
1172 static void
1173 spa_load_spares(spa_t *spa)
1174 {
1175 	nvlist_t **spares;
1176 	uint_t nspares;
1177 	int i;
1178 	vdev_t *vd, *tvd;
1179 
1180 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1181 
1182 	/*
1183 	 * First, close and free any existing spare vdevs.
1184 	 */
1185 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
1186 		vd = spa->spa_spares.sav_vdevs[i];
1187 
1188 		/* Undo the call to spa_activate() below */
1189 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1190 		    B_FALSE)) != NULL && tvd->vdev_isspare)
1191 			spa_spare_remove(tvd);
1192 		vdev_close(vd);
1193 		vdev_free(vd);
1194 	}
1195 
1196 	if (spa->spa_spares.sav_vdevs)
1197 		kmem_free(spa->spa_spares.sav_vdevs,
1198 		    spa->spa_spares.sav_count * sizeof (void *));
1199 
1200 	if (spa->spa_spares.sav_config == NULL)
1201 		nspares = 0;
1202 	else
1203 		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1204 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1205 
1206 	spa->spa_spares.sav_count = (int)nspares;
1207 	spa->spa_spares.sav_vdevs = NULL;
1208 
1209 	if (nspares == 0)
1210 		return;
1211 
1212 	/*
1213 	 * Construct the array of vdevs, opening them to get status in the
1214 	 * process.  For each spare, there are potentially two different vdev_t
1215 	 * structures associated with it: one in the list of spares (used only
1216 	 * for basic validation purposes) and one in the active vdev
1217 	 * configuration (if it's spared in).  During this phase we open and
1218 	 * validate each vdev on the spare list.  If the vdev also exists in the
1219 	 * active configuration, then we also mark this vdev as an active spare.
1220 	 */
1221 	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1222 	    KM_SLEEP);
1223 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
1224 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1225 		    VDEV_ALLOC_SPARE) == 0);
1226 		ASSERT(vd != NULL);
1227 
1228 		spa->spa_spares.sav_vdevs[i] = vd;
1229 
1230 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1231 		    B_FALSE)) != NULL) {
1232 			if (!tvd->vdev_isspare)
1233 				spa_spare_add(tvd);
1234 
1235 			/*
1236 			 * We only mark the spare active if we were successfully
1237 			 * able to load the vdev.  Otherwise, importing a pool
1238 			 * with a bad active spare would result in strange
1239 			 * behavior, because multiple pools would think the spare
1240 			 * is actively in use.
1241 			 *
1242 			 * There is a vulnerability here to an equally bizarre
1243 			 * circumstance, where a dead active spare is later
1244 			 * brought back to life (onlined or otherwise).  Given
1245 			 * the rarity of this scenario, and the extra complexity
1246 			 * it adds, we ignore the possibility.
1247 			 */
1248 			if (!vdev_is_dead(tvd))
1249 				spa_spare_activate(tvd);
1250 		}
1251 
1252 		vd->vdev_top = vd;
1253 		vd->vdev_aux = &spa->spa_spares;
1254 
1255 		if (vdev_open(vd) != 0)
1256 			continue;
1257 
1258 		if (vdev_validate_aux(vd) == 0)
1259 			spa_spare_add(vd);
1260 	}
1261 
1262 	/*
1263 	 * Recompute the stashed list of spares, with status information
1264 	 * this time.
1265 	 */
1266 	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1267 	    DATA_TYPE_NVLIST_ARRAY) == 0);
1268 
1269 	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1270 	    KM_SLEEP);
1271 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1272 		spares[i] = vdev_config_generate(spa,
1273 		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1274 	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1275 	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1276 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1277 		nvlist_free(spares[i]);
1278 	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1279 }
1280 
1281 /*
1282  * Load (or re-load) the current list of vdevs describing the active l2cache for
1283  * this pool.  When this is called, we have some form of basic information in
1284  * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
1285  * then re-generate a more complete list including status information.
1286  * Devices which are already active have their details maintained, and are
1287  * not re-opened.
1288  */
1289 static void
1290 spa_load_l2cache(spa_t *spa)
1291 {
1292 	nvlist_t **l2cache;
1293 	uint_t nl2cache;
1294 	int i, j, oldnvdevs;
1295 	uint64_t guid;
1296 	vdev_t *vd, **oldvdevs, **newvdevs;
1297 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1298 
1299 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1300 
1301 	if (sav->sav_config != NULL) {
1302 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1303 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1304 		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1305 	} else {
1306 		nl2cache = 0;
1307 	}
1308 
1309 	oldvdevs = sav->sav_vdevs;
1310 	oldnvdevs = sav->sav_count;
1311 	sav->sav_vdevs = NULL;
1312 	sav->sav_count = 0;
1313 
1314 	/*
1315 	 * Process new nvlist of vdevs.
1316 	 */
1317 	for (i = 0; i < nl2cache; i++) {
1318 		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1319 		    &guid) == 0);
1320 
1321 		newvdevs[i] = NULL;
1322 		for (j = 0; j < oldnvdevs; j++) {
1323 			vd = oldvdevs[j];
1324 			if (vd != NULL && guid == vd->vdev_guid) {
1325 				/*
1326 				 * Retain previous vdev for add/remove ops.
1327 				 */
1328 				newvdevs[i] = vd;
1329 				oldvdevs[j] = NULL;
1330 				break;
1331 			}
1332 		}
1333 
1334 		if (newvdevs[i] == NULL) {
1335 			/*
1336 			 * Create new vdev
1337 			 */
1338 			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1339 			    VDEV_ALLOC_L2CACHE) == 0);
1340 			ASSERT(vd != NULL);
1341 			newvdevs[i] = vd;
1342 
1343 			/*
1344 			 * Commit this vdev as an l2cache device,
1345 			 * even if it fails to open.
1346 			 */
1347 			spa_l2cache_add(vd);
1348 
1349 			vd->vdev_top = vd;
1350 			vd->vdev_aux = sav;
1351 
1352 			spa_l2cache_activate(vd);
1353 
1354 			if (vdev_open(vd) != 0)
1355 				continue;
1356 
1357 			(void) vdev_validate_aux(vd);
1358 
1359 			if (!vdev_is_dead(vd))
1360 				l2arc_add_vdev(spa, vd);
1361 		}
1362 	}
1363 
1364 	/*
1365 	 * Purge vdevs that were dropped
1366 	 */
1367 	for (i = 0; i < oldnvdevs; i++) {
1368 		uint64_t pool;
1369 
1370 		vd = oldvdevs[i];
1371 		if (vd != NULL) {
1372 			ASSERT(vd->vdev_isl2cache);
1373 
1374 			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1375 			    pool != 0ULL && l2arc_vdev_present(vd))
1376 				l2arc_remove_vdev(vd);
1377 			vdev_clear_stats(vd);
1378 			vdev_free(vd);
1379 		}
1380 	}
1381 
1382 	if (oldvdevs)
1383 		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1384 
1385 	if (sav->sav_config == NULL)
1386 		goto out;
1387 
1388 	sav->sav_vdevs = newvdevs;
1389 	sav->sav_count = (int)nl2cache;
1390 
1391 	/*
1392 	 * Recompute the stashed list of l2cache devices, with status
1393 	 * information this time.
1394 	 */
1395 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1396 	    DATA_TYPE_NVLIST_ARRAY) == 0);
1397 
1398 	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1399 	for (i = 0; i < sav->sav_count; i++)
1400 		l2cache[i] = vdev_config_generate(spa,
1401 		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1402 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1403 	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1404 out:
1405 	for (i = 0; i < sav->sav_count; i++)
1406 		nvlist_free(l2cache[i]);
1407 	if (sav->sav_count)
1408 		kmem_free(l2cache, sav->sav_count * sizeof (void *));
1409 }
1410 
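/*
 * Read a packed nvlist out of the MOS: the object's bonus buffer holds
 * the packed size and the object data holds the packed bytes, which we
 * unpack into a freshly allocated nvlist.
 */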
1411 static int
1412 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1413 {
1414 	dmu_buf_t *db;
1415 	char *packed = NULL;
1416 	size_t nvsize = 0;
1417 	int error;
1418 	*value = NULL;
1419 
1420 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
1421 	nvsize = *(uint64_t *)db->db_data;
1422 	dmu_buf_rele(db, FTAG);
1423 
1424 	packed = kmem_alloc(nvsize, KM_SLEEP);
1425 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1426 	    DMU_READ_PREFETCH);
1427 	if (error == 0)
1428 		error = nvlist_unpack(packed, nvsize, value, 0);
1429 	kmem_free(packed, nvsize);
1430 
1431 	return (error);
1432 }
1433 
1434 /*
1435  * Checks to see if the given vdev could not be opened, in which case we post a
1436  * sysevent to notify the autoreplace code that the device has been removed.
1437  */
1438 static void
1439 spa_check_removed(vdev_t *vd)
1440 {
1441 	for (int c = 0; c < vd->vdev_children; c++)
1442 		spa_check_removed(vd->vdev_child[c]);
1443 
1444 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
1445 		zfs_post_autoreplace(vd->vdev_spa, vd);
1446 		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1447 	}
1448 }
1449 
1450 /*
1451  * Validate the current config against the MOS config
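 * (the config used to load the pool vs. the copy stored in the MOS);
 * where the MOS copy is more complete, patch its top-level vdevs in below.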
1452  */
1453 static boolean_t
1454 spa_config_valid(spa_t *spa, nvlist_t *config)
1455 {
1456 	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
1457 	nvlist_t *nv;
1458 
1459 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
1460 
1461 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1462 	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
1463 
1464 	ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
1465 
1466 	/*
1467 	 * If we're doing a normal import, then build up any additional
1468 	 * diagnostic information about missing devices in this config.
1469 	 * We'll pass this up to the user for further processing.
1470 	 */
1471 	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1472 		nvlist_t **child, *nv;
1473 		uint64_t idx = 0;
1474 
1475 		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1476 		    KM_SLEEP);
1477 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1478 
1479 		for (int c = 0; c < rvd->vdev_children; c++) {
1480 			vdev_t *tvd = rvd->vdev_child[c];
1481 			vdev_t *mtvd  = mrvd->vdev_child[c];
1482 
1483 			if (tvd->vdev_ops == &vdev_missing_ops &&
1484 			    mtvd->vdev_ops != &vdev_missing_ops &&
1485 			    mtvd->vdev_islog)
1486 				child[idx++] = vdev_config_generate(spa, mtvd,
1487 				    B_FALSE, 0);
1488 		}
1489 
1490 		if (idx) {
1491 			VERIFY(nvlist_add_nvlist_array(nv,
1492 			    ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
1493 			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
1494 			    ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
1495 
1496 			for (int i = 0; i < idx; i++)
1497 				nvlist_free(child[i]);
1498 		}
1499 		nvlist_free(nv);
1500 		kmem_free(child, rvd->vdev_children * sizeof (char **));
1501 	}
1502 
1503 	/*
1504 	 * Compare the root vdev tree with the information we have
1505 	 * from the MOS config (mrvd). Check each top-level vdev
1506 	 * with the corresponding MOS config top-level (mtvd).
1507 	 */
1508 	for (int c = 0; c < rvd->vdev_children; c++) {
1509 		vdev_t *tvd = rvd->vdev_child[c];
1510 		vdev_t *mtvd  = mrvd->vdev_child[c];
1511 
1512 		/*
1513 		 * Resolve any "missing" vdevs in the current configuration.
1514 		 * If we find that the MOS config has more accurate information
1515 		 * about the top-level vdev, then use that vdev instead.
1516 		 */
1517 		if (tvd->vdev_ops == &vdev_missing_ops &&
1518 		    mtvd->vdev_ops != &vdev_missing_ops) {
1519 
1520 			if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
1521 				continue;
1522 
1523 			/*
1524 			 * Device specific actions.
1525 			 */
1526 			if (mtvd->vdev_islog) {
1527 				spa_set_log_state(spa, SPA_LOG_CLEAR);
1528 			} else {
1529 				/*
1530 				 * XXX - once we have 'readonly' pool
1531 				 * support we should be able to handle
1532 				 * missing data devices by transitioning
1533 				 * the pool to readonly.
1534 				 */
1535 				continue;
1536 			}
1537 
1538 			/*
1539 			 * Swap the missing vdev with the data we were
1540 			 * able to obtain from the MOS config.
1541 			 */
1542 			vdev_remove_child(rvd, tvd);
1543 			vdev_remove_child(mrvd, mtvd);
1544 
1545 			vdev_add_child(rvd, mtvd);
1546 			vdev_add_child(mrvd, tvd);
1547 
1548 			spa_config_exit(spa, SCL_ALL, FTAG);
1549 			vdev_load(mtvd);
1550 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1551 
1552 			vdev_reopen(rvd);
1553 		} else if (mtvd->vdev_islog) {
1554 			/*
1555 			 * Load the slog device's state from the MOS config
1556 			 * since it's possible that the label does not
1557 			 * contain the most up-to-date information.
1558 			 */
1559 			vdev_load_log_state(tvd, mtvd);
1560 			vdev_reopen(tvd);
1561 		}
1562 	}
1563 	vdev_free(mrvd);
1564 	spa_config_exit(spa, SCL_ALL, FTAG);
1565 
1566 	/*
1567 	 * Ensure we were able to validate the config.
1568 	 */
1569 	return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1570 }
1571 
1572 /*
1573  * Check for missing log devices
1574  */
1575 static int
1576 spa_check_logs(spa_t *spa)
1577 {
1578 	switch (spa->spa_log_state) {
1579 	case SPA_LOG_MISSING:
1580 		/* need to recheck in case slog has been restored */
1581 	case SPA_LOG_UNKNOWN:
1582 		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
1583 		    DS_FIND_CHILDREN)) {
1584 			spa_set_log_state(spa, SPA_LOG_MISSING);
1585 			return (1);
1586 		}
1587 		break;
1588 	}
1589 	return (0);
1590 }
1591 
1592 static boolean_t
1593 spa_passivate_log(spa_t *spa)
1594 {
1595 	vdev_t *rvd = spa->spa_root_vdev;
1596 	boolean_t slog_found = B_FALSE;
1597 
1598 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1599 
1600 	if (!spa_has_slogs(spa))
1601 		return (B_FALSE);
1602 
1603 	for (int c = 0; c < rvd->vdev_children; c++) {
1604 		vdev_t *tvd = rvd->vdev_child[c];
1605 		metaslab_group_t *mg = tvd->vdev_mg;
1606 
1607 		if (tvd->vdev_islog) {
1608 			metaslab_group_passivate(mg);
1609 			slog_found = B_TRUE;
1610 		}
1611 	}
1612 
1613 	return (slog_found);
1614 }
1615 
1616 static void
1617 spa_activate_log(spa_t *spa)
1618 {
1619 	vdev_t *rvd = spa->spa_root_vdev;
1620 
1621 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1622 
1623 	for (int c = 0; c < rvd->vdev_children; c++) {
1624 		vdev_t *tvd = rvd->vdev_child[c];
1625 		metaslab_group_t *mg = tvd->vdev_mg;
1626 
1627 		if (tvd->vdev_islog)
1628 			metaslab_group_activate(mg);
1629 	}
1630 }
1631 
1632 int
1633 spa_offline_log(spa_t *spa)
1634 {
1635 	int error = 0;
1636 
1637 	if ((error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
1638 	    NULL, DS_FIND_CHILDREN)) == 0) {
1639 
1640 		/*
1641 		 * We successfully offlined the log device; sync out the
1642 		 * current txg so that the "stubby" block can be removed
1643 		 * by zil_sync().
1644 		 */
1645 		txg_wait_synced(spa->spa_dsl_pool, 0);
1646 	}
1647 	return (error);
1648 }
1649 
1650 static void
1651 spa_aux_check_removed(spa_aux_vdev_t *sav)
1652 {
1653 	for (int i = 0; i < sav->sav_count; i++)
1654 		spa_check_removed(sav->sav_vdevs[i]);
1655 }
1656 
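/*
 * Called from the ZIL claim path for each claimed log block; record the
 * highest block birth txg seen so that pool load can later wait for all
 * claims to sync (the txg_wait_synced() on spa_claim_max_txg in
 * spa_load_impl()).
 */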
1657 void
1658 spa_claim_notify(zio_t *zio)
1659 {
1660 	spa_t *spa = zio->io_spa;
1661 
1662 	if (zio->io_error)
1663 		return;
1664 
1665 	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
1666 	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1667 		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1668 	mutex_exit(&spa->spa_props_lock);
1669 }
1670 
1671 typedef struct spa_load_error {
1672 	uint64_t	sle_meta_count;
1673 	uint64_t	sle_data_count;
1674 } spa_load_error_t;
1675 
1676 static void
1677 spa_load_verify_done(zio_t *zio)
1678 {
1679 	blkptr_t *bp = zio->io_bp;
1680 	spa_load_error_t *sle = zio->io_private;
1681 	dmu_object_type_t type = BP_GET_TYPE(bp);
1682 	int error = zio->io_error;
1683 
1684 	if (error) {
1685 		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1686 		    type != DMU_OT_INTENT_LOG)
1687 			atomic_add_64(&sle->sle_meta_count, 1);
1688 		else
1689 			atomic_add_64(&sle->sle_data_count, 1);
1690 	}
1691 	zio_data_buf_free(zio->io_data, zio->io_size);
1692 }
1693 
1694 /*ARGSUSED*/
1695 static int
1696 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1697     arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1698 {
1699 	if (bp != NULL) {
1700 		zio_t *rio = arg;
1701 		size_t size = BP_GET_PSIZE(bp);
1702 		void *data = zio_data_buf_alloc(size);
1703 
1704 		zio_nowait(zio_read(rio, spa, bp, data, size,
1705 		    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
1706 		    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
1707 		    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
1708 	}
1709 	return (0);
1710 }
1711 
1712 static int
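/*
 * Walk every block in the pool (bounded by the rewind policy), issuing
 * speculative scrub reads and counting metadata vs. data errors so the
 * caller can decide whether the txg being loaded is acceptable.
 */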
1713 spa_load_verify(spa_t *spa)
1714 {
1715 	zio_t *rio;
1716 	spa_load_error_t sle = { 0 };
1717 	zpool_rewind_policy_t policy;
1718 	boolean_t verify_ok = B_FALSE;
1719 	int error;
1720 
1721 	zpool_get_rewind_policy(spa->spa_config, &policy);
1722 
1723 	if (policy.zrp_request & ZPOOL_NEVER_REWIND)
1724 		return (0);
1725 
1726 	rio = zio_root(spa, NULL, &sle,
1727 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
1728 
1729 	error = traverse_pool(spa, spa->spa_verify_min_txg,
1730 	    TRAVERSE_PRE | TRAVERSE_PREFETCH, spa_load_verify_cb, rio);
1731 
1732 	(void) zio_wait(rio);
1733 
1734 	spa->spa_load_meta_errors = sle.sle_meta_count;
1735 	spa->spa_load_data_errors = sle.sle_data_count;
1736 
1737 	if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
1738 	    sle.sle_data_count <= policy.zrp_maxdata) {
1739 		int64_t loss = 0;
1740 
1741 		verify_ok = B_TRUE;
1742 		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
1743 		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
1744 
1745 		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
1746 		VERIFY(nvlist_add_uint64(spa->spa_load_info,
1747 		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
1748 		VERIFY(nvlist_add_int64(spa->spa_load_info,
1749 		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
1750 		VERIFY(nvlist_add_uint64(spa->spa_load_info,
1751 		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
1752 	} else {
1753 		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
1754 	}
1755 
1756 	if (error) {
1757 		if (error != ENXIO && error != EIO)
1758 			error = EIO;
1759 		return (error);
1760 	}
1761 
1762 	return (verify_ok ? 0 : EIO);
1763 }
1764 
1765 /*
1766  * Find a value in the pool props object.
1767  */
1768 static void
1769 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
1770 {
1771 	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
1772 	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
1773 }
1774 
1775 /*
1776  * Find a value in the pool directory object.
1777  */
1778 static int
1779 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
1780 {
1781 	return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1782 	    name, sizeof (uint64_t), 1, val));
1783 }
1784 
1785 static int
1786 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
1787 {
1788 	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
1789 	return (err);
1790 }
1791 
1792 /*
1793  * Fix up config after a partly-completed split.  This is done with the
1794  * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
1795  * pool have that entry in their config, but only the splitting one contains
1796  * a list of all the guids of the vdevs that are being split off.
1797  *
1798  * This function determines what to do with that list: either rejoin
1799  * all the disks to the pool, or complete the splitting process.  To attempt
1800  * the rejoin, each disk that is offlined is marked online again, and
1801  * we do a reopen() call.  If the vdev label for every disk that was
1802  * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
1803  * then we call vdev_split() on each disk, and complete the split.
1804  *
1805  * Otherwise we leave the config alone, with all the vdevs in place in
1806  * the original pool.
1807  */
1808 static void
1809 spa_try_repair(spa_t *spa, nvlist_t *config)
1810 {
1811 	uint_t extracted;
1812 	uint64_t *glist;
1813 	uint_t i, gcount;
1814 	nvlist_t *nvl;
1815 	vdev_t **vd;
1816 	boolean_t attempt_reopen;
1817 
1818 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
1819 		return;
1820 
1821 	/* check that the config is complete */
1822 	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
1823 	    &glist, &gcount) != 0)
1824 		return;
1825 
1826 	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
1827 
1828 	/* attempt to online all the vdevs & validate */
1829 	attempt_reopen = B_TRUE;
1830 	for (i = 0; i < gcount; i++) {
1831 		if (glist[i] == 0)	/* vdev is hole */
1832 			continue;
1833 
1834 		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
1835 		if (vd[i] == NULL) {
1836 			/*
1837 			 * Don't bother attempting to reopen the disks;
1838 			 * just do the split.
1839 			 */
1840 			attempt_reopen = B_FALSE;
1841 		} else {
1842 			/* attempt to re-online it */
1843 			vd[i]->vdev_offline = B_FALSE;
1844 		}
1845 	}
1846 
1847 	if (attempt_reopen) {
1848 		vdev_reopen(spa->spa_root_vdev);
1849 
1850 		/* check each device to see what state it's in */
1851 		for (extracted = 0, i = 0; i < gcount; i++) {
1852 			if (vd[i] != NULL &&
1853 			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
1854 				break;
1855 			++extracted;
1856 		}
1857 	}
1858 
1859 	/*
1860 	 * If every disk has been moved to the new pool, or if we never
1861 	 * even attempted to look at them, then we split them off for
1862 	 * good.
1863 	 */
1864 	if (!attempt_reopen || gcount == extracted) {
1865 		for (i = 0; i < gcount; i++)
1866 			if (vd[i] != NULL)
1867 				vdev_split(vd[i]);
1868 		vdev_reopen(spa->spa_root_vdev);
1869 	}
1870 
1871 	kmem_free(vd, gcount * sizeof (vdev_t *));
1872 }
1873 
1874 static int
1875 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
1876     boolean_t mosconfig)
1877 {
1878 	nvlist_t *config = spa->spa_config;
1879 	char *ereport = FM_EREPORT_ZFS_POOL;
1880 	char *comment;
1881 	int error;
1882 	uint64_t pool_guid;
1883 	nvlist_t *nvl;
1884 
1885 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
1886 		return (EINVAL);
1887 
1888 	ASSERT(spa->spa_comment == NULL);
1889 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
1890 		spa->spa_comment = spa_strdup(comment);
1891 
1892 	/*
1893 	 * Versioning wasn't explicitly added to the label until later, so if
1894 	 * it's not present treat it as the initial version.
1895 	 */
1896 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1897 	    &spa->spa_ubsync.ub_version) != 0)
1898 		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
1899 
1900 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
1901 	    &spa->spa_config_txg);
1902 
1903 	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
1904 	    spa_guid_exists(pool_guid, 0)) {
1905 		error = EEXIST;
1906 	} else {
1907 		spa->spa_config_guid = pool_guid;
1908 
1909 		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
1910 		    &nvl) == 0) {
1911 			VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
1912 			    KM_SLEEP) == 0);
1913 		}
1914 
1915 		nvlist_free(spa->spa_load_info);
1916 		spa->spa_load_info = fnvlist_alloc();
1917 
1918 		gethrestime(&spa->spa_loaded_ts);
1919 		error = spa_load_impl(spa, pool_guid, config, state, type,
1920 		    mosconfig, &ereport);
1921 	}
1922 
1923 	spa->spa_minref = refcount_count(&spa->spa_refcount);
1924 	if (error) {
1925 		if (error != EEXIST) {
1926 			spa->spa_loaded_ts.tv_sec = 0;
1927 			spa->spa_loaded_ts.tv_nsec = 0;
1928 		}
1929 		if (error != EBADF) {
1930 			zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
1931 		}
1932 	}
1933 	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
1934 	spa->spa_ena = 0;
1935 
1936 	return (error);
1937 }
1938 
1939 /*
1940  * Load an existing storage pool, using the pool's builtin spa_config as a
1941  * source of configuration information.
1942  */
1943 static int
1944 spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
1945     spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
1946     char **ereport)
1947 {
1948 	int error = 0;
1949 	nvlist_t *nvroot = NULL;
1950 	nvlist_t *label;
1951 	vdev_t *rvd;
1952 	uberblock_t *ub = &spa->spa_uberblock;
1953 	uint64_t children, config_cache_txg = spa->spa_config_txg;
1954 	int orig_mode = spa->spa_mode;
1955 	int parse;
1956 	uint64_t obj;
1957 	boolean_t missing_feat_write = B_FALSE;
1958 
1959 	/*
1960 	 * If this is an untrusted config, access the pool in read-only mode.
1961 	 * This prevents things like resilvering recently removed devices.
1962 	 */
1963 	if (!mosconfig)
1964 		spa->spa_mode = FREAD;
1965 
1966 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1967 
1968 	spa->spa_load_state = state;
1969 
1970 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
1971 		return (EINVAL);
1972 
1973 	parse = (type == SPA_IMPORT_EXISTING ?
1974 	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
1975 
1976 	/*
1977 	 * Create "The Godfather" zio to hold all async IOs
1978 	 */
1979 	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
1980 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
1981 
1982 	/*
1983 	 * Parse the configuration into a vdev tree.  We explicitly set the
1984 	 * value that will be returned by spa_version() since parsing the
1985 	 * configuration requires knowing the version number.
1986 	 */
1987 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1988 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
1989 	spa_config_exit(spa, SCL_ALL, FTAG);
1990 
1991 	if (error != 0)
1992 		return (error);
1993 
1994 	ASSERT(spa->spa_root_vdev == rvd);
1995 
1996 	if (type != SPA_IMPORT_ASSEMBLE) {
1997 		ASSERT(spa_guid(spa) == pool_guid);
1998 	}
1999 
2000 	/*
2001 	 * Try to open all vdevs, loading each label in the process.
2002 	 */
2003 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2004 	error = vdev_open(rvd);
2005 	spa_config_exit(spa, SCL_ALL, FTAG);
2006 	if (error != 0)
2007 		return (error);
2008 
2009 	/*
2010 	 * We need to validate the vdev labels against the configuration that
2011 	 * we have in hand, which is dependent on the setting of mosconfig. If
2012 	 * mosconfig is true then we're validating the vdev labels based on
2013 	 * that config.  Otherwise, we're validating against the cached config
2014 	 * (zpool.cache) that was read when we loaded the zfs module, and then
2015 	 * later we will recursively call spa_load() and validate against
2016 	 * the vdev config.
2017 	 *
2018 	 * If we're assembling a new pool that's been split off from an
2019 	 * existing pool, the labels haven't yet been updated so we skip
2020 	 * validation for now.
2021 	 */
2022 	if (type != SPA_IMPORT_ASSEMBLE) {
2023 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2024 		error = vdev_validate(rvd, mosconfig);
2025 		spa_config_exit(spa, SCL_ALL, FTAG);
2026 
2027 		if (error != 0)
2028 			return (error);
2029 
2030 		if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2031 			return (ENXIO);
2032 	}
2033 
2034 	/*
2035 	 * Find the best uberblock.
2036 	 */
2037 	vdev_uberblock_load(rvd, ub, &label);
2038 
2039 	/*
2040 	 * If we weren't able to find a single valid uberblock, return failure.
2041 	 */
2042 	if (ub->ub_txg == 0) {
2043 		nvlist_free(label);
2044 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2045 	}
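
	/*
	 * Illustrative note (a plausible reading, not taken from this
	 * file): vdev_uberblock_load() effectively keeps the "best"
	 * uberblock via a comparison along these lines:
	 *
	 *	if (ub1->ub_txg != ub2->ub_txg)
	 *		best = (ub1->ub_txg > ub2->ub_txg) ? ub1 : ub2;
	 *	else
	 *		best = (ub1->ub_timestamp > ub2->ub_timestamp) ?
	 *		    ub1 : ub2;
	 */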
2046 
2047 	/*
2048 	 * If the pool has an unsupported version we can't open it.
2049 	 */
2050 	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2051 		nvlist_free(label);
2052 		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2053 	}
2054 
2055 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
2056 		nvlist_t *features;
2057 
2058 		/*
2059 		 * If we weren't able to find what's necessary for reading the
2060 		 * MOS in the label, return failure.
2061 		 */
2062 		if (label == NULL || nvlist_lookup_nvlist(label,
2063 		    ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
2064 			nvlist_free(label);
2065 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2066 			    ENXIO));
2067 		}
2068 
2069 		/*
2070 		 * Update our in-core representation with the definitive values
2071 		 * from the label.
2072 		 */
2073 		nvlist_free(spa->spa_label_features);
2074 		VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2075 	}
2076 
2077 	nvlist_free(label);
2078 
2079 	/*
2080 	 * Look through entries in the label nvlist's features_for_read. If
2081 	 * there is a feature listed there which we don't understand then we
2082 	 * cannot open a pool.
2083 	 */
2084 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
2085 		nvlist_t *unsup_feat;
2086 
2087 		VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2088 		    0);
2089 
2090 		for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2091 		    NULL); nvp != NULL;
2092 		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2093 			if (!zfeature_is_supported(nvpair_name(nvp))) {
2094 				VERIFY(nvlist_add_string(unsup_feat,
2095 				    nvpair_name(nvp), "") == 0);
2096 			}
2097 		}
2098 
2099 		if (!nvlist_empty(unsup_feat)) {
2100 			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2101 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2102 			nvlist_free(unsup_feat);
2103 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2104 			    ENOTSUP));
2105 		}
2106 
2107 		nvlist_free(unsup_feat);
2108 	}
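
	/*
	 * Example (hypothetical feature name and values): a label's
	 * features_for_read nvlist maps feature guids to empty strings,
	 * e.g.
	 *
	 *	features_for_read
	 *	  "com.delphix:async_destroy" -> ""
	 *
	 * Any name that zfeature_is_supported() rejects is copied into
	 * unsup_feat above and reported back through spa_load_info.
	 */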
2109 
2110 	/*
2111 	 * If the vdev guid sum doesn't match the uberblock, we have an
2112 	 * incomplete configuration.  We first check to see if the pool
2113 	 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
2114 	 * If it is, defer the vdev_guid_sum check till later so we
2115 	 * can handle missing vdevs.
2116 	 */
2117 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
2118 	    &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
2119 	    rvd->vdev_guid_sum != ub->ub_guid_sum)
2120 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2121 
2122 	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2123 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2124 		spa_try_repair(spa, config);
2125 		spa_config_exit(spa, SCL_ALL, FTAG);
2126 		nvlist_free(spa->spa_config_splitting);
2127 		spa->spa_config_splitting = NULL;
2128 	}
2129 
2130 	/*
2131 	 * Initialize internal SPA structures.
2132 	 */
2133 	spa->spa_state = POOL_STATE_ACTIVE;
2134 	spa->spa_ubsync = spa->spa_uberblock;
2135 	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2136 	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2137 	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2138 	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2139 	spa->spa_claim_max_txg = spa->spa_first_txg;
2140 	spa->spa_prev_software_version = ub->ub_software_version;
2141 
2142 	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2143 	if (error)
2144 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2145 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2146 
2147 	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
2148 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2149 
2150 	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2151 		boolean_t missing_feat_read = B_FALSE;
2152 		nvlist_t *unsup_feat, *enabled_feat;
2153 
2154 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2155 		    &spa->spa_feat_for_read_obj) != 0) {
2156 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2157 		}
2158 
2159 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2160 		    &spa->spa_feat_for_write_obj) != 0) {
2161 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2162 		}
2163 
2164 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2165 		    &spa->spa_feat_desc_obj) != 0) {
2166 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2167 		}
2168 
2169 		enabled_feat = fnvlist_alloc();
2170 		unsup_feat = fnvlist_alloc();
2171 
2172 		if (!feature_is_supported(spa->spa_meta_objset,
2173 		    spa->spa_feat_for_read_obj, spa->spa_feat_desc_obj,
2174 		    unsup_feat, enabled_feat))
2175 			missing_feat_read = B_TRUE;
2176 
2177 		if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
2178 			if (!feature_is_supported(spa->spa_meta_objset,
2179 			    spa->spa_feat_for_write_obj, spa->spa_feat_desc_obj,
2180 			    unsup_feat, enabled_feat)) {
2181 				missing_feat_write = B_TRUE;
2182 			}
2183 		}
2184 
2185 		fnvlist_add_nvlist(spa->spa_load_info,
2186 		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2187 
2188 		if (!nvlist_empty(unsup_feat)) {
2189 			fnvlist_add_nvlist(spa->spa_load_info,
2190 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2191 		}
2192 
2193 		fnvlist_free(enabled_feat);
2194 		fnvlist_free(unsup_feat);
2195 
2196 		if (!missing_feat_read) {
2197 			fnvlist_add_boolean(spa->spa_load_info,
2198 			    ZPOOL_CONFIG_CAN_RDONLY);
2199 		}
2200 
2201 		/*
2202 		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2203 		 * twofold: to determine whether the pool is available for
2204 		 * import in read-write mode and (if it is not) whether the
2205 		 * pool is available for import in read-only mode. If the pool
2206 		 * is available for import in read-write mode, it is displayed
2207 		 * as available in userland; if it is not available for import
2208 		 * in read-only mode, it is displayed as unavailable in
2209 		 * userland. If the pool is available for import in read-only
2210 		 * mode but not read-write mode, it is displayed as unavailable
2211 		 * in userland with a special note that the pool is actually
2212 		 * available for open in read-only mode.
2213 		 *
2214 		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2215 		 * missing a feature for write, we must first determine whether
2216 		 * the pool can be opened read-only before returning to
2217 		 * userland in order to know whether to display the
2218 		 * abovementioned note.
2219 		 */
2220 		if (missing_feat_read || (missing_feat_write &&
2221 		    spa_writeable(spa))) {
2222 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2223 			    ENOTSUP));
2224 		}
2225 	}
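
	/*
	 * Summary of the feature gate above, derived from this code for
	 * illustration:
	 *
	 *	missing_feat_read  missing_feat_write  writeable  result
	 *	-----------------  ------------------  ---------  -------
	 *	B_FALSE            B_FALSE             any        proceed
	 *	B_FALSE            B_TRUE              B_FALSE    proceed (*)
	 *	B_FALSE            B_TRUE              B_TRUE     ENOTSUP
	 *	B_TRUE             any                 any        ENOTSUP
	 *
	 * (*) Only reachable during tryimport; the load proceeds far
	 * enough to learn whether a read-only open would work, then
	 * fails later with ENOTSUP (see the missing_feat_write check
	 * below).
	 */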
2226 
2227 	spa->spa_is_initializing = B_TRUE;
2228 	error = dsl_pool_open(spa->spa_dsl_pool);
2229 	spa->spa_is_initializing = B_FALSE;
2230 	if (error != 0)
2231 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2232 
2233 	if (!mosconfig) {
2234 		uint64_t hostid;
2235 		nvlist_t *policy = NULL, *nvconfig;
2236 
2237 		if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2238 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2239 
2240 		if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
2241 		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2242 			char *hostname;
2243 			unsigned long myhostid = 0;
2244 
2245 			VERIFY(nvlist_lookup_string(nvconfig,
2246 			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
2247 
2248 #ifdef	_KERNEL
2249 			myhostid = zone_get_hostid(NULL);
2250 #else	/* _KERNEL */
2251 			/*
2252 			 * We're emulating the system's hostid in userland, so
2253 			 * we can't use zone_get_hostid().
2254 			 */
2255 			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
2256 #endif	/* _KERNEL */
2257 			if (hostid != 0 && myhostid != 0 &&
2258 			    hostid != myhostid) {
2259 				nvlist_free(nvconfig);
2260 				cmn_err(CE_WARN, "pool '%s' could not be "
2261 				    "loaded as it was last accessed by "
2262 				    "another system (host: %s hostid: 0x%lx). "
2263 				    "See: http://illumos.org/msg/ZFS-8000-EY",
2264 				    spa_name(spa), hostname,
2265 				    (unsigned long)hostid);
2266 				return (EBADF);
2267 			}
2268 		}
2269 		if (nvlist_lookup_nvlist(spa->spa_config,
2270 		    ZPOOL_REWIND_POLICY, &policy) == 0)
2271 			VERIFY(nvlist_add_nvlist(nvconfig,
2272 			    ZPOOL_REWIND_POLICY, policy) == 0);
2273 
2274 		spa_config_set(spa, nvconfig);
2275 		spa_unload(spa);
2276 		spa_deactivate(spa);
2277 		spa_activate(spa, orig_mode);
2278 
2279 		return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
2280 	}
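
	/*
	 * Sketch of the untrusted-config call flow handled above (for
	 * illustration; the code itself is authoritative):
	 *
	 *	spa_load(spa, ..., mosconfig = B_FALSE)
	 *	  spa_load_impl()		validate the cached config
	 *	    spa_config_set()		adopt the config in the MOS
	 *	    spa_load(spa, ..., mosconfig = B_TRUE)	reload, trusted
	 */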
2281 
2282 	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
2283 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2284 	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
2285 	if (error != 0)
2286 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2287 
2288 	/*
2289 	 * Load the bit that tells us to use the new accounting function
2290 	 * (raid-z deflation).  If we have an older pool, this will not
2291 	 * be present.
2292 	 */
2293 	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
2294 	if (error != 0 && error != ENOENT)
2295 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2296 
2297 	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
2298 	    &spa->spa_creation_version);
2299 	if (error != 0 && error != ENOENT)
2300 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2301 
2302 	/*
2303 	 * Load the persistent error log.  If we have an older pool, this will
2304 	 * not be present.
2305 	 */
2306 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
2307 	if (error != 0 && error != ENOENT)
2308 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2309 
2310 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
2311 	    &spa->spa_errlog_scrub);
2312 	if (error != 0 && error != ENOENT)
2313 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2314 
2315 	/*
2316 	 * Load the history object.  If we have an older pool, this
2317 	 * will not be present.
2318 	 */
2319 	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
2320 	if (error != 0 && error != ENOENT)
2321 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2322 
2323 	/*
2324 	 * If we're assembling the pool from the split-off vdevs of
2325 	 * an existing pool, we don't want to attach the spares & cache
2326 	 * devices.
2327 	 */
2328 
2329 	/*
2330 	 * Load any hot spares for this pool.
2331 	 */
2332 	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
2333 	if (error != 0 && error != ENOENT)
2334 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2335 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2336 		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
2337 		if (load_nvlist(spa, spa->spa_spares.sav_object,
2338 		    &spa->spa_spares.sav_config) != 0)
2339 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2340 
2341 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2342 		spa_load_spares(spa);
2343 		spa_config_exit(spa, SCL_ALL, FTAG);
2344 	} else if (error == 0) {
2345 		spa->spa_spares.sav_sync = B_TRUE;
2346 	}
2347 
2348 	/*
2349 	 * Load any level 2 ARC devices for this pool.
2350 	 */
2351 	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
2352 	    &spa->spa_l2cache.sav_object);
2353 	if (error != 0 && error != ENOENT)
2354 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2355 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
2356 		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
2357 		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
2358 		    &spa->spa_l2cache.sav_config) != 0)
2359 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2360 
2361 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2362 		spa_load_l2cache(spa);
2363 		spa_config_exit(spa, SCL_ALL, FTAG);
2364 	} else if (error == 0) {
2365 		spa->spa_l2cache.sav_sync = B_TRUE;
2366 	}
2367 
2368 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2369 
2370 	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
2371 	if (error && error != ENOENT)
2372 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2373 
2374 	if (error == 0) {
2375 		uint64_t autoreplace;
2376 
2377 		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
2378 		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
2379 		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
2380 		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
2381 		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
2382 		spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
2383 		    &spa->spa_dedup_ditto);
2384 
2385 		spa->spa_autoreplace = (autoreplace != 0);
2386 	}
2387 
2388 	/*
2389 	 * If the 'autoreplace' property is set, then post a resource notifying
2390 	 * the ZFS DE that it should not issue any faults for unopenable
2391 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
2392 	 * unopenable vdevs so that the normal autoreplace handler can take
2393 	 * over.
2394 	 */
2395 	if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
2396 		spa_check_removed(spa->spa_root_vdev);
2397 		/*
2398 		 * For the import case, this is done in spa_import(), because
2399 		 * at this point we're using the spare definitions from
2400 		 * the MOS config, not necessarily from the userland config.
2401 		 */
2402 		if (state != SPA_LOAD_IMPORT) {
2403 			spa_aux_check_removed(&spa->spa_spares);
2404 			spa_aux_check_removed(&spa->spa_l2cache);
2405 		}
2406 	}
2407 
2408 	/*
2409 	 * Load the vdev state for all toplevel vdevs.
2410 	 */
2411 	vdev_load(rvd);
2412 
2413 	/*
2414 	 * Propagate the leaf DTLs we just loaded all the way up the tree.
2415 	 */
2416 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2417 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
2418 	spa_config_exit(spa, SCL_ALL, FTAG);
2419 
2420 	/*
2421 	 * Load the DDTs (dedup tables).
2422 	 */
2423 	error = ddt_load(spa);
2424 	if (error != 0)
2425 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2426 
2427 	spa_update_dspace(spa);
2428 
2429 	/*
2430 	 * Validate the config, using the MOS config to fill in any
2431 	 * information which might be missing.  If we fail to validate
2432 	 * the config then declare the pool unfit for use. If we're
2433 	 * assembling a pool from a split, the log is not transferred
2434 	 * over.
2435 	 */
2436 	if (type != SPA_IMPORT_ASSEMBLE) {
2437 		nvlist_t *nvconfig;
2438 
2439 		if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
2440 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2441 
2442 		if (!spa_config_valid(spa, nvconfig)) {
2443 			nvlist_free(nvconfig);
2444 			return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2445 			    ENXIO));
2446 		}
2447 		nvlist_free(nvconfig);
2448 
2449 		/*
2450 		 * Now that we've validated the config, check the state of the
2451 		 * root vdev.  If it can't be opened, it indicates one or
2452 		 * more toplevel vdevs are faulted.
2453 		 */
2454 		if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
2455 			return (ENXIO);
2456 
2457 		if (spa_check_logs(spa)) {
2458 			*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
2459 			return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
2460 		}
2461 	}
2462 
2463 	if (missing_feat_write) {
2464 		ASSERT(state == SPA_LOAD_TRYIMPORT);
2465 
2466 		/*
2467 		 * At this point, we know that we can open the pool in
2468 		 * read-only mode but not read-write mode. We now have enough
2469 		 * information and can return to userland.
2470 		 */
2471 		return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
2472 	}
2473 
2474 	/*
2475 	 * We've successfully opened the pool, verify that we're ready
2476 	 * to start pushing transactions.
2477 	 */
2478 	if (state != SPA_LOAD_TRYIMPORT) {
2479 		if (error = spa_load_verify(spa))
2480 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2481 			    error));
2482 	}
2483 
2484 	if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2485 	    spa->spa_load_max_txg == UINT64_MAX)) {
2486 		dmu_tx_t *tx;
2487 		int need_update = B_FALSE;
2488 
2489 		ASSERT(state != SPA_LOAD_TRYIMPORT);
2490 
2491 		/*
2492 		 * Claim log blocks that haven't been committed yet.
2493 		 * This must all happen in a single txg.
2494 		 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2495 		 * invoked from zil_claim_log_block()'s i/o done callback.
2496 		 * Price of rollback is that we abandon the log.
2497 		 */
2498 		spa->spa_claiming = B_TRUE;
2499 
2500 		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2501 		    spa_first_txg(spa));
2502 		(void) dmu_objset_find(spa_name(spa),
2503 		    zil_claim, tx, DS_FIND_CHILDREN);
2504 		dmu_tx_commit(tx);
2505 
2506 		spa->spa_claiming = B_FALSE;
2507 
2508 		spa_set_log_state(spa, SPA_LOG_GOOD);
2509 		spa->spa_sync_on = B_TRUE;
2510 		txg_sync_start(spa->spa_dsl_pool);
2511 
2512 		/*
2513 		 * Wait for all claims to sync.  We sync up to the highest
2514 		 * claimed log block birth time so that claimed log blocks
2515 		 * don't appear to be from the future.  spa_claim_max_txg
2516 		 * will have been set for us by either zil_check_log_chain()
2517 		 * (invoked from spa_check_logs()) or zil_claim() above.
2518 		 */
2519 		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
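
		/*
		 * Timeline sketch of the claim sequence above (derived
		 * from this code, for illustration):
		 *
		 *	spa_claiming = B_TRUE
		 *	zil_claim() across all datasets (may raise
		 *	    spa_claim_max_txg via spa_claim_notify())
		 *	spa_claiming = B_FALSE
		 *	txg_sync_start()
		 *	txg_wait_synced(..., spa_claim_max_txg)
		 */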
2520 
2521 		/*
2522 		 * If the config cache is stale, or we have uninitialized
2523 		 * metaslabs (see spa_vdev_add()), then update the config.
2524 		 *
2525 		 * If this is a verbatim import, trust the current
2526 		 * in-core spa_config and update the disk labels.
2527 		 */
2528 		if (config_cache_txg != spa->spa_config_txg ||
2529 		    state == SPA_LOAD_IMPORT ||
2530 		    state == SPA_LOAD_RECOVER ||
2531 		    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
2532 			need_update = B_TRUE;
2533 
2534 		for (int c = 0; c < rvd->vdev_children; c++)
2535 			if (rvd->vdev_child[c]->vdev_ms_array == 0)
2536 				need_update = B_TRUE;
2537 
2538 		/*
2539 		 * Update the config cache asynchronously in case we're the
2540 		 * root pool, in which case the config cache isn't writable yet.
2541 		 */
2542 		if (need_update)
2543 			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
2544 
2545 		/*
2546 		 * Check all DTLs to see if anything needs resilvering.
2547 		 */
2548 		if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
2549 		    vdev_resilver_needed(rvd, NULL, NULL))
2550 			spa_async_request(spa, SPA_ASYNC_RESILVER);
2551 
2552 		/*
2553 		 * Log the fact that we booted up (so that we can detect if
2554 		 * we rebooted in the middle of an operation).
2555 		 */
2556 		spa_history_log_version(spa, "open");
2557 
2558 		/*
2559 		 * Delete any inconsistent datasets.
2560 		 */
2561 		(void) dmu_objset_find(spa_name(spa),
2562 		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
2563 
2564 		/*
2565 		 * Clean up any stale temporary dataset userrefs.
2566 		 */
2567 		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
2568 	}
2569 
2570 	return (0);
2571 }
2572 
2573 static int
2574 spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
2575 {
2576 	int mode = spa->spa_mode;
2577 
2578 	spa_unload(spa);
2579 	spa_deactivate(spa);
2580 
2581 	spa->spa_load_max_txg--;
2582 
2583 	spa_activate(spa, mode);
2584 	spa_async_suspend(spa);
2585 
2586 	return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
2587 }
2588 
2589 /*
2590  * If spa_load() fails this function will try loading prior txg's. If
2591  * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
2592  * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
2593  * function will not rewind the pool and will return the same error as
2594  * spa_load().
2595  */
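/*
 * Worked example (hypothetical txgs, and assuming TXG_DEFER_SIZE is 2): if
 * the newest uberblock is txg 100, then safe_rewind_txg is 98, and without
 * ZPOOL_EXTREME_REWIND the retry loop will not rewind past txg 98.
 */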
2596 static int
2597 spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
2598     uint64_t max_request, int rewind_flags)
2599 {
2600 	nvlist_t *loadinfo = NULL;
2601 	nvlist_t *config = NULL;
2602 	int load_error, rewind_error;
2603 	uint64_t safe_rewind_txg;
2604 	uint64_t min_txg;
2605 
2606 	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
2607 		spa->spa_load_max_txg = spa->spa_load_txg;
2608 		spa_set_log_state(spa, SPA_LOG_CLEAR);
2609 	} else {
2610 		spa->spa_load_max_txg = max_request;
2611 	}
2612 
2613 	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
2614 	    mosconfig);
2615 	if (load_error == 0)
2616 		return (0);
2617 
2618 	if (spa->spa_root_vdev != NULL)
2619 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2620 
2621 	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
2622 	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
2623 
2624 	if (rewind_flags & ZPOOL_NEVER_REWIND) {
2625 		nvlist_free(config);
2626 		return (load_error);
2627 	}
2628 
2629 	if (state == SPA_LOAD_RECOVER) {
2630 		/* Price of rolling back is discarding txgs, including log */
2631 		spa_set_log_state(spa, SPA_LOG_CLEAR);
2632 	} else {
2633 		/*
2634 		 * If we aren't rolling back save the load info from our first
2635 		 * import attempt so that we can restore it after attempting
2636 		 * to rewind.
2637 		 */
2638 		loadinfo = spa->spa_load_info;
2639 		spa->spa_load_info = fnvlist_alloc();
2640 	}
2641 
2642 	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
2643 	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
2644 	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
2645 	    TXG_INITIAL : safe_rewind_txg;
2646 
2647 	/*
2648 	 * Continue as long as we're finding errors, we're still within
2649 	 * the acceptable rewind range, and we're still finding uberblocks
2650 	 */
2651 	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
2652 	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
2653 		if (spa->spa_load_max_txg < safe_rewind_txg)
2654 			spa->spa_extreme_rewind = B_TRUE;
2655 		rewind_error = spa_load_retry(spa, state, mosconfig);
2656 	}
2657 
2658 	spa->spa_extreme_rewind = B_FALSE;
2659 	spa->spa_load_max_txg = UINT64_MAX;
2660 
2661 	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
2662 		spa_config_set(spa, config);
2663 
2664 	if (state == SPA_LOAD_RECOVER) {
2665 		ASSERT3P(loadinfo, ==, NULL);
2666 		return (rewind_error);
2667 	} else {
2668 		/* Store the rewind info as part of the initial load info */
2669 		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
2670 		    spa->spa_load_info);
2671 
2672 		/* Restore the initial load info */
2673 		fnvlist_free(spa->spa_load_info);
2674 		spa->spa_load_info = loadinfo;
2675 
2676 		return (load_error);
2677 	}
2678 }
2679 
2680 /*
2681  * Pool Open/Import
2682  *
2683  * The import case is identical to an open except that the configuration is sent
2684  * down from userland, instead of grabbed from the configuration cache.  For the
2685  * case of an open, the pool configuration will exist in the
2686  * POOL_STATE_UNINITIALIZED state.
2687  *
2688  * The stats information (gen/count/ustats) is used to gather vdev statistics at
2689  * the same time we open the pool, without having to keep around the spa_t in
2690  * some ambiguous state.
2691  */
2692 static int
2693 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2694     nvlist_t **config)
2695 {
2696 	spa_t *spa;
2697 	spa_load_state_t state = SPA_LOAD_OPEN;
2698 	int error;
2699 	int locked = B_FALSE;
2700 
2701 	*spapp = NULL;
2702 
2703 	/*
2704 	 * As disgusting as this is, we need to support recursive calls to this
2705 	 * function because dsl_dir_open() is called during spa_load(), and ends
2706 	 * up calling spa_open() again.  The real fix is to figure out how to
2707 	 * avoid dsl_dir_open() calling this in the first place.
2708 	 */
2709 	if (mutex_owner(&spa_namespace_lock) != curthread) {
2710 		mutex_enter(&spa_namespace_lock);
2711 		locked = B_TRUE;
2712 	}
2713 
2714 	if ((spa = spa_lookup(pool)) == NULL) {
2715 		if (locked)
2716 			mutex_exit(&spa_namespace_lock);
2717 		return (ENOENT);
2718 	}
2719 
2720 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
2721 		zpool_rewind_policy_t policy;
2722 
2723 		zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
2724 		    &policy);
2725 		if (policy.zrp_request & ZPOOL_DO_REWIND)
2726 			state = SPA_LOAD_RECOVER;
2727 
2728 		spa_activate(spa, spa_mode_global);
2729 
2730 		if (state != SPA_LOAD_RECOVER)
2731 			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
2732 
2733 		error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
2734 		    policy.zrp_request);
2735 
2736 		if (error == EBADF) {
2737 			/*
2738 			 * If vdev_validate() returns failure (indicated by
2739 			 * EBADF), it means that one of the vdevs reports that
2740 			 * the pool has been exported or destroyed.  If
2741 			 * this is the case, the config cache is out of sync and
2742 			 * we should remove the pool from the namespace.
2743 			 */
2744 			spa_unload(spa);
2745 			spa_deactivate(spa);
2746 			spa_config_sync(spa, B_TRUE, B_TRUE);
2747 			spa_remove(spa);
2748 			if (locked)
2749 				mutex_exit(&spa_namespace_lock);
2750 			return (ENOENT);
2751 		}
2752 
2753 		if (error) {
2754 			/*
2755 			 * We can't open the pool, but we still have useful
2756 			 * information: the state of each vdev after the
2757 			 * attempted vdev_open().  Return this to the user.
2758 			 */
2759 			if (config != NULL && spa->spa_config) {
2760 				VERIFY(nvlist_dup(spa->spa_config, config,
2761 				    KM_SLEEP) == 0);
2762 				VERIFY(nvlist_add_nvlist(*config,
2763 				    ZPOOL_CONFIG_LOAD_INFO,
2764 				    spa->spa_load_info) == 0);
2765 			}
2766 			spa_unload(spa);
2767 			spa_deactivate(spa);
2768 			spa->spa_last_open_failed = error;
2769 			if (locked)
2770 				mutex_exit(&spa_namespace_lock);
2771 			*spapp = NULL;
2772 			return (error);
2773 		}
2774 	}
2775 
2776 	spa_open_ref(spa, tag);
2777 
2778 	if (config != NULL)
2779 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2780 
2781 	/*
2782 	 * If we've recovered the pool, pass back any information we
2783 	 * gathered while doing the load.
2784 	 */
2785 	if (state == SPA_LOAD_RECOVER) {
2786 		VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
2787 		    spa->spa_load_info) == 0);
2788 	}
2789 
2790 	if (locked) {
2791 		spa->spa_last_open_failed = 0;
2792 		spa->spa_last_ubsync_txg = 0;
2793 		spa->spa_load_txg = 0;
2794 		mutex_exit(&spa_namespace_lock);
2795 	}
2796 
2797 	*spapp = spa;
2798 
2799 	return (0);
2800 }
2801 
2802 int
2803 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
2804     nvlist_t **config)
2805 {
2806 	return (spa_open_common(name, spapp, tag, policy, config));
2807 }
2808 
2809 int
2810 spa_open(const char *name, spa_t **spapp, void *tag)
2811 {
2812 	return (spa_open_common(name, spapp, tag, NULL, NULL));
2813 }
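
/*
 * Typical usage of spa_open()/spa_close() (an illustrative sketch, not
 * part of the original source):
 *
 *	spa_t *spa;
 *	int error;
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) != 0)
 *		return (error);
 *	... operate on the pool ...
 *	spa_close(spa, FTAG);
 */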
2814 
2815 /*
2816  * Look up the given spa_t, incrementing the inject count in the process,
2817  * preventing it from being exported or destroyed.
2818  */
2819 spa_t *
2820 spa_inject_addref(char *name)
2821 {
2822 	spa_t *spa;
2823 
2824 	mutex_enter(&spa_namespace_lock);
2825 	if ((spa = spa_lookup(name)) == NULL) {
2826 		mutex_exit(&spa_namespace_lock);
2827 		return (NULL);
2828 	}
2829 	spa->spa_inject_ref++;
2830 	mutex_exit(&spa_namespace_lock);
2831 
2832 	return (spa);
2833 }
2834 
2835 void
2836 spa_inject_delref(spa_t *spa)
2837 {
2838 	mutex_enter(&spa_namespace_lock);
2839 	spa->spa_inject_ref--;
2840 	mutex_exit(&spa_namespace_lock);
2841 }
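
/*
 * Illustrative pairing (the consumer code is hypothetical): injection
 * consumers bracket their work with the reference pair above:
 *
 *	spa_t *spa;
 *
 *	if ((spa = spa_inject_addref("tank")) != NULL) {
 *		... install or remove injection handlers ...
 *		spa_inject_delref(spa);
 *	}
 */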
2842 
2843 /*
2844  * Add spare device information to the nvlist.
2845  */
2846 static void
2847 spa_add_spares(spa_t *spa, nvlist_t *config)
2848 {
2849 	nvlist_t **spares;
2850 	uint_t i, nspares;
2851 	nvlist_t *nvroot;
2852 	uint64_t guid;
2853 	vdev_stat_t *vs;
2854 	uint_t vsc;
2855 	uint64_t pool;
2856 
2857 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2858 
2859 	if (spa->spa_spares.sav_count == 0)
2860 		return;
2861 
2862 	VERIFY(nvlist_lookup_nvlist(config,
2863 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2864 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
2865 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
2866 	if (nspares != 0) {
2867 		VERIFY(nvlist_add_nvlist_array(nvroot,
2868 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2869 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
2870 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
2871 
2872 		/*
2873 		 * Go through and find any spares which have since been
2874 		 * repurposed as an active spare.  If this is the case, update
2875 		 * their status appropriately.
2876 		 */
2877 		for (i = 0; i < nspares; i++) {
2878 			VERIFY(nvlist_lookup_uint64(spares[i],
2879 			    ZPOOL_CONFIG_GUID, &guid) == 0);
2880 			if (spa_spare_exists(guid, &pool, NULL) &&
2881 			    pool != 0ULL) {
2882 				VERIFY(nvlist_lookup_uint64_array(
2883 				    spares[i], ZPOOL_CONFIG_VDEV_STATS,
2884 				    (uint64_t **)&vs, &vsc) == 0);
2885 				vs->vs_state = VDEV_STATE_CANT_OPEN;
2886 				vs->vs_aux = VDEV_AUX_SPARED;
2887 			}
2888 		}
2889 	}
2890 }
2891 
2892 /*
2893  * Add l2cache device information to the nvlist, including vdev stats.
2894  */
2895 static void
2896 spa_add_l2cache(spa_t *spa, nvlist_t *config)
2897 {
2898 	nvlist_t **l2cache;
2899 	uint_t i, j, nl2cache;
2900 	nvlist_t *nvroot;
2901 	uint64_t guid;
2902 	vdev_t *vd;
2903 	vdev_stat_t *vs;
2904 	uint_t vsc;
2905 
2906 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2907 
2908 	if (spa->spa_l2cache.sav_count == 0)
2909 		return;
2910 
2911 	VERIFY(nvlist_lookup_nvlist(config,
2912 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2913 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
2914 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
2915 	if (nl2cache != 0) {
2916 		VERIFY(nvlist_add_nvlist_array(nvroot,
2917 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2918 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
2919 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
2920 
2921 		/*
2922 		 * Update level 2 cache device stats.
2923 		 */
2924 
2925 		for (i = 0; i < nl2cache; i++) {
2926 			VERIFY(nvlist_lookup_uint64(l2cache[i],
2927 			    ZPOOL_CONFIG_GUID, &guid) == 0);
2928 
2929 			vd = NULL;
2930 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
2931 				if (guid ==
2932 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
2933 					vd = spa->spa_l2cache.sav_vdevs[j];
2934 					break;
2935 				}
2936 			}
2937 			ASSERT(vd != NULL);
2938 
2939 			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
2940 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
2941 			    == 0);
2942 			vdev_get_stats(vd, vs);
2943 		}
2944 	}
2945 }
2946 
2947 static void
2948 spa_add_feature_stats(spa_t *spa, nvlist_t *config)
2949 {
2950 	nvlist_t *features;
2951 	zap_cursor_t zc;
2952 	zap_attribute_t za;
2953 
2954 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2955 	VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2956 
2957 	if (spa->spa_feat_for_read_obj != 0) {
2958 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
2959 		    spa->spa_feat_for_read_obj);
2960 		    zap_cursor_retrieve(&zc, &za) == 0;
2961 		    zap_cursor_advance(&zc)) {
2962 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
2963 			    za.za_num_integers == 1);
2964 			VERIFY0(nvlist_add_uint64(features, za.za_name,
2965 			    za.za_first_integer));
2966 		}
2967 		zap_cursor_fini(&zc);
2968 	}
2969 
2970 	if (spa->spa_feat_for_write_obj != 0) {
2971 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
2972 		    spa->spa_feat_for_write_obj);
2973 		    zap_cursor_retrieve(&zc, &za) == 0;
2974 		    zap_cursor_advance(&zc)) {
2975 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
2976 			    za.za_num_integers == 1);
2977 			VERIFY0(nvlist_add_uint64(features, za.za_name,
2978 			    za.za_first_integer));
2979 		}
2980 		zap_cursor_fini(&zc);
2981 	}
2982 
2983 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
2984 	    features) == 0);
2985 	nvlist_free(features);
2986 }
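
/*
 * Example of the resulting ZPOOL_CONFIG_FEATURE_STATS nvlist
 * (hypothetical feature name and refcount):
 *
 *	feature_stats
 *	  "com.delphix:async_destroy" -> 0
 *
 * Each entry maps a feature guid from the MOS ZAPs above to its
 * reference count (za_first_integer).
 */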
2987 
2988 int
2989 spa_get_stats(const char *name, nvlist_t **config,
2990     char *altroot, size_t buflen)
2991 {
2992 	int error;
2993 	spa_t *spa;
2994 
2995 	*config = NULL;
2996 	error = spa_open_common(name, &spa, FTAG, NULL, config);
2997 
2998 	if (spa != NULL) {
2999 		/*
3000 		 * This still leaves a window of inconsistency where the spares
3001 		 * or l2cache devices could change and the config would be
3002 		 * self-inconsistent.
3003 		 */
3004 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3005 
3006 		if (*config != NULL) {
3007 			uint64_t loadtimes[2];
3008 
3009 			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3010 			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3011 			VERIFY(nvlist_add_uint64_array(*config,
3012 			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3013 
3014 			VERIFY(nvlist_add_uint64(*config,
3015 			    ZPOOL_CONFIG_ERRCOUNT,
3016 			    spa_get_errlog_size(spa)) == 0);
3017 
3018 			if (spa_suspended(spa))
3019 				VERIFY(nvlist_add_uint64(*config,
3020 				    ZPOOL_CONFIG_SUSPENDED,
3021 				    spa->spa_failmode) == 0);
3022 
3023 			spa_add_spares(spa, *config);
3024 			spa_add_l2cache(spa, *config);
3025 			spa_add_feature_stats(spa, *config);
3026 		}
3027 	}
3028 
3029 	/*
3030 	 * We want to get the alternate root even for faulted pools, so we cheat
3031 	 * and call spa_lookup() directly.
3032 	 */
3033 	if (altroot) {
3034 		if (spa == NULL) {
3035 			mutex_enter(&spa_namespace_lock);
3036 			spa = spa_lookup(name);
3037 			if (spa)
3038 				spa_altroot(spa, altroot, buflen);
3039 			else
3040 				altroot[0] = '\0';
3041 			spa = NULL;
3042 			mutex_exit(&spa_namespace_lock);
3043 		} else {
3044 			spa_altroot(spa, altroot, buflen);
3045 		}
3046 	}
3047 
3048 	if (spa != NULL) {
3049 		spa_config_exit(spa, SCL_CONFIG, FTAG);
3050 		spa_close(spa, FTAG);
3051 	}
3052 
3053 	return (error);
3054 }
3055 
3056 /*
3057  * Validate that the auxiliary device array is well formed.  We must have an
3058  * array of nvlists, each of which describes a valid leaf vdev.  If this is an
3059  * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
3060  * specified, as long as they are well-formed.
3061  */
3062 static int
3063 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
3064     spa_aux_vdev_t *sav, const char *config, uint64_t version,
3065     vdev_labeltype_t label)
3066 {
3067 	nvlist_t **dev;
3068 	uint_t i, ndev;
3069 	vdev_t *vd;
3070 	int error;
3071 
3072 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3073 
3074 	/*
3075 	 * It's acceptable to have no devs specified.
3076 	 */
3077 	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
3078 		return (0);
3079 
3080 	if (ndev == 0)
3081 		return (EINVAL);
3082 
3083 	/*
3084 	 * Make sure the pool is formatted with a version that supports this
3085 	 * device type.
3086 	 */
3087 	if (spa_version(spa) < version)
3088 		return (ENOTSUP);
3089 
3090 	/*
3091 	 * Set the pending device list so we correctly handle device in-use
3092 	 * checking.
3093 	 */
3094 	sav->sav_pending = dev;
3095 	sav->sav_npending = ndev;
3096 
3097 	for (i = 0; i < ndev; i++) {
3098 		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
3099 		    mode)) != 0)
3100 			goto out;
3101 
3102 		if (!vd->vdev_ops->vdev_op_leaf) {
3103 			vdev_free(vd);
3104 			error = EINVAL;
3105 			goto out;
3106 		}
3107 
3108 		/*
3109 		 * The L2ARC currently only supports disk devices in
3110 		 * kernel context.  For user-level testing, we allow it.
3111 		 */
3112 #ifdef _KERNEL
3113 		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
3114 		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
3115 			error = ENOTBLK;
3116 			vdev_free(vd);
3117 			goto out;
3118 		}
3119 #endif
3120 		vd->vdev_top = vd;
3121 
3122 		if ((error = vdev_open(vd)) == 0 &&
3123 		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
3124 			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
3125 			    vd->vdev_guid) == 0);
3126 		}
3127 
3128 		vdev_free(vd);
3129 
3130 		if (error &&
3131 		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
3132 			goto out;
3133 		else
3134 			error = 0;
3135 	}
3136 
3137 out:
3138 	sav->sav_pending = NULL;
3139 	sav->sav_npending = 0;
3140 	return (error);
3141 }
3142 
3143 static int
3144 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
3145 {
3146 	int error;
3147 
3148 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
3149 
3150 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3151 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
3152 	    VDEV_LABEL_SPARE)) != 0) {
3153 		return (error);
3154 	}
3155 
3156 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
3157 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
3158 	    VDEV_LABEL_L2CACHE));
3159 }
3160 
3161 static void
3162 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
3163     const char *config)
3164 {
3165 	int i;
3166 
3167 	if (sav->sav_config != NULL) {
3168 		nvlist_t **olddevs;
3169 		uint_t oldndevs;
3170 		nvlist_t **newdevs;
3171 
3172 		/*
3173 		 * Generate a new dev list by concatenating with the
3174 		 * current dev list.
3175 		 */
3176 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
3177 		    &olddevs, &oldndevs) == 0);
3178 
3179 		newdevs = kmem_alloc(sizeof (void *) *
3180 		    (ndevs + oldndevs), KM_SLEEP);
3181 		for (i = 0; i < oldndevs; i++)
3182 			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
3183 			    KM_SLEEP) == 0);
3184 		for (i = 0; i < ndevs; i++)
3185 			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
3186 			    KM_SLEEP) == 0);
3187 
3188 		VERIFY(nvlist_remove(sav->sav_config, config,
3189 		    DATA_TYPE_NVLIST_ARRAY) == 0);
3190 
3191 		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
3192 		    config, newdevs, ndevs + oldndevs) == 0);
3193 		for (i = 0; i < oldndevs + ndevs; i++)
3194 			nvlist_free(newdevs[i]);
3195 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
3196 	} else {
3197 		/*
3198 		 * Generate a new dev list.
3199 		 */
3200 		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
3201 		    KM_SLEEP) == 0);
3202 		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
3203 		    devs, ndevs) == 0);
3204 	}
3205 }
3206 
3207 /*
3208  * Stop and drop level 2 ARC devices
3209  */
3210 void
3211 spa_l2cache_drop(spa_t *spa)
3212 {
3213 	vdev_t *vd;
3214 	int i;
3215 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
3216 
3217 	for (i = 0; i < sav->sav_count; i++) {
3218 		uint64_t pool;
3219 
3220 		vd = sav->sav_vdevs[i];
3221 		ASSERT(vd != NULL);
3222 
3223 		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
3224 		    pool != 0ULL && l2arc_vdev_present(vd))
3225 			l2arc_remove_vdev(vd);
3226 	}
3227 }
3228 
3229 /*
3230  * Pool Creation
3231  */
3232 int
3233 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
3234     nvlist_t *zplprops)
3235 {
3236 	spa_t *spa;
3237 	char *altroot = NULL;
3238 	vdev_t *rvd;
3239 	dsl_pool_t *dp;
3240 	dmu_tx_t *tx;
3241 	int error = 0;
3242 	uint64_t txg = TXG_INITIAL;
3243 	nvlist_t **spares, **l2cache;
3244 	uint_t nspares, nl2cache;
3245 	uint64_t version, obj;
3246 	boolean_t has_features;
3247 
3248 	/*
3249 	 * If this pool already exists, return failure.
3250 	 */
3251 	mutex_enter(&spa_namespace_lock);
3252 	if (spa_lookup(pool) != NULL) {
3253 		mutex_exit(&spa_namespace_lock);
3254 		return (EEXIST);
3255 	}
3256 
3257 	/*
3258 	 * Allocate a new spa_t structure.
3259 	 */
3260 	(void) nvlist_lookup_string(props,
3261 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3262 	spa = spa_add(pool, NULL, altroot);
3263 	spa_activate(spa, spa_mode_global);
3264 
3265 	if (props && (error = spa_prop_validate(spa, props))) {
3266 		spa_deactivate(spa);
3267 		spa_remove(spa);
3268 		mutex_exit(&spa_namespace_lock);
3269 		return (error);
3270 	}
3271 
3272 	has_features = B_FALSE;
3273 	for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
3274 	    elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
3275 		if (zpool_prop_feature(nvpair_name(elem)))
3276 			has_features = B_TRUE;
3277 	}
3278 
3279 	if (has_features || nvlist_lookup_uint64(props,
3280 	    zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
3281 		version = SPA_VERSION;
3282 	}
3283 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
3284 
3285 	spa->spa_first_txg = txg;
3286 	spa->spa_uberblock.ub_txg = txg - 1;
3287 	spa->spa_uberblock.ub_version = version;
3288 	spa->spa_ubsync = spa->spa_uberblock;
3289 
3290 	/*
3291 	 * Create "The Godfather" zio to hold all async IOs
3292 	 */
3293 	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
3294 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
3295 
3296 	/*
3297 	 * Create the root vdev.
3298 	 */
3299 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3300 
3301 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
3302 
3303 	ASSERT(error != 0 || rvd != NULL);
3304 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
3305 
3306 	if (error == 0 && !zfs_allocatable_devs(nvroot))
3307 		error = EINVAL;
3308 
3309 	if (error == 0 &&
3310 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
3311 	    (error = spa_validate_aux(spa, nvroot, txg,
3312 	    VDEV_ALLOC_ADD)) == 0) {
3313 		for (int c = 0; c < rvd->vdev_children; c++) {
3314 			vdev_metaslab_set_size(rvd->vdev_child[c]);
3315 			vdev_expand(rvd->vdev_child[c], txg);
3316 		}
3317 	}
3318 
3319 	spa_config_exit(spa, SCL_ALL, FTAG);
3320 
3321 	if (error != 0) {
3322 		spa_unload(spa);
3323 		spa_deactivate(spa);
3324 		spa_remove(spa);
3325 		mutex_exit(&spa_namespace_lock);
3326 		return (error);
3327 	}
3328 
3329 	/*
3330 	 * Get the list of spares, if specified.
3331 	 */
3332 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3333 	    &spares, &nspares) == 0) {
3334 		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
3335 		    KM_SLEEP) == 0);
3336 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3337 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3338 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3339 		spa_load_spares(spa);
3340 		spa_config_exit(spa, SCL_ALL, FTAG);
3341 		spa->spa_spares.sav_sync = B_TRUE;
3342 	}
3343 
3344 	/*
3345 	 * Get the list of level 2 cache devices, if specified.
3346 	 */
3347 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3348 	    &l2cache, &nl2cache) == 0) {
3349 		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3350 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
3351 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3352 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3353 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3354 		spa_load_l2cache(spa);
3355 		spa_config_exit(spa, SCL_ALL, FTAG);
3356 		spa->spa_l2cache.sav_sync = B_TRUE;
3357 	}
3358 
3359 	spa->spa_is_initializing = B_TRUE;
3360 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
3361 	spa->spa_meta_objset = dp->dp_meta_objset;
3362 	spa->spa_is_initializing = B_FALSE;
3363 
3364 	/*
3365 	 * Create DDTs (dedup tables).
3366 	 */
3367 	ddt_create(spa);
3368 
3369 	spa_update_dspace(spa);
3370 
3371 	tx = dmu_tx_create_assigned(dp, txg);
3372 
3373 	/*
3374 	 * Create the pool config object.
3375 	 */
3376 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
3377 	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
3378 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
3379 
3380 	if (zap_add(spa->spa_meta_objset,
3381 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
3382 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
3383 		cmn_err(CE_PANIC, "failed to add pool config");
3384 	}
3385 
3386 	if (spa_version(spa) >= SPA_VERSION_FEATURES)
3387 		spa_feature_create_zap_objects(spa, tx);
3388 
3389 	if (zap_add(spa->spa_meta_objset,
3390 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
3391 	    sizeof (uint64_t), 1, &version, tx) != 0) {
3392 		cmn_err(CE_PANIC, "failed to add pool version");
3393 	}
3394 
3395 	/* Newly created pools with the right version are always deflated. */
3396 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
3397 		spa->spa_deflate = TRUE;
3398 		if (zap_add(spa->spa_meta_objset,
3399 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3400 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
3401 			cmn_err(CE_PANIC, "failed to add deflate");
3402 		}
3403 	}
3404 
3405 	/*
3406 	 * Create the deferred-free bpobj.  Turn off compression
3407 	 * because sync-to-convergence takes longer if the blocksize
3408 	 * keeps changing.
3409 	 */
3410 	obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
3411 	dmu_object_set_compress(spa->spa_meta_objset, obj,
3412 	    ZIO_COMPRESS_OFF, tx);
3413 	if (zap_add(spa->spa_meta_objset,
3414 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
3415 	    sizeof (uint64_t), 1, &obj, tx) != 0) {
3416 		cmn_err(CE_PANIC, "failed to add bpobj");
3417 	}
3418 	VERIFY0(bpobj_open(&spa->spa_deferred_bpobj,
3419 	    spa->spa_meta_objset, obj));
3420 
3421 	/*
3422 	 * Create the pool's history object.
3423 	 */
3424 	if (version >= SPA_VERSION_ZPOOL_HISTORY)
3425 		spa_history_create_obj(spa, tx);
3426 
3427 	/*
3428 	 * Set pool properties.
3429 	 */
3430 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
3431 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3432 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
3433 	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
3434 
3435 	if (props != NULL) {
3436 		spa_configfile_set(spa, props, B_FALSE);
3437 		spa_sync_props(spa, props, tx);
3438 	}
3439 
3440 	dmu_tx_commit(tx);
3441 
3442 	spa->spa_sync_on = B_TRUE;
3443 	txg_sync_start(spa->spa_dsl_pool);
3444 
3445 	/*
3446 	 * We explicitly wait for the first transaction to complete so that our
3447 	 * bean counters are appropriately updated.
3448 	 */
3449 	txg_wait_synced(spa->spa_dsl_pool, txg);
3450 
3451 	spa_config_sync(spa, B_FALSE, B_TRUE);
3452 
3453 	spa_history_log_version(spa, "create");
3454 
3455 	spa->spa_minref = refcount_count(&spa->spa_refcount);
3456 
3457 	mutex_exit(&spa_namespace_lock);
3458 
3459 	return (0);
3460 }
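
/*
 * Illustrative nvroot shape accepted by spa_create() (an assumed
 * example, not taken from this source):
 *
 *	vdev_tree
 *	  type: "root"
 *	  children: [ { type: "mirror", children: [ disk, disk ] } ]
 *	  spares:   [ disk ]
 *	  l2cache:  [ disk ]
 */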
3461 
3462 #ifdef _KERNEL
3463 /*
3464  * Get the root pool information from the root disk, then import the root pool
3465  * at system boot time.
3466  */
3467 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
3468 
3469 static nvlist_t *
3470 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
3471 {
3472 	nvlist_t *config;
3473 	nvlist_t *nvtop, *nvroot;
3474 	uint64_t pgid;
3475 
3476 	if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
3477 		return (NULL);
3478 
3479 	/*
3480 	 * Add this top-level vdev to the child array.
3481 	 */
3482 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3483 	    &nvtop) == 0);
3484 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3485 	    &pgid) == 0);
3486 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
3487 
3488 	/*
3489 	 * Put this pool's top-level vdevs into a root vdev.
3490 	 */
3491 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3492 	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
3493 	    VDEV_TYPE_ROOT) == 0);
3494 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
3495 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
3496 	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3497 	    &nvtop, 1) == 0);
3498 
3499 	/*
3500 	 * Replace the existing vdev_tree with the new root vdev in
3501 	 * this pool's configuration (remove the old, add the new).
3502 	 */
3503 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
3504 	nvlist_free(nvroot);
3505 	return (config);
3506 }
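
/*
 * Resulting config shape (a sketch derived from the code above): the
 * label's single top-level vdev is re-wrapped in a fresh root vdev:
 *
 *	vdev_tree
 *	  type: "root", id: 0, guid: <pool guid>
 *	  children: [ <the label's top-level vdev> ]
 */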
3507 
3508 /*
3509  * Walk the vdev tree and see if we can find a device with "better"
3510  * configuration. A configuration is "better" if the label on that
3511  * device has a more recent txg.
3512  */
3513 static void
3514 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
3515 {
3516 	for (int c = 0; c < vd->vdev_children; c++)
3517 		spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
3518 
3519 	if (vd->vdev_ops->vdev_op_leaf) {
3520 		nvlist_t *label;
3521 		uint64_t label_txg;
3522 
3523 		if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
3524 		    &label) != 0)
3525 			return;
3526 
3527 		VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
3528 		    &label_txg) == 0);
3529 
3530 		/*
3531 		 * Do we have a better boot device?
3532 		 */
3533 		if (label_txg > *txg) {
3534 			*txg = label_txg;
3535 			*avd = vd;
3536 		}
3537 		nvlist_free(label);
3538 	}
3539 }
3540 
3541 /*
3542  * Import a root pool.
3543  *
3544  * For x86, devpath_list will consist of the devid and/or physpath name of
3545  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
3546  * The GRUB "findroot" command will return the vdev we should boot.
3547  *
3548  * For SPARC, devpath_list consists of the physpath name of the booting
3549  * device, whether the root pool is a single-device pool or a mirrored pool,
3550  * e.g.
3551  *	"/pci@1f,0/ide@d/disk@0,0:a"
3552  */
3553 int
3554 spa_import_rootpool(char *devpath, char *devid)
3555 {
3556 	spa_t *spa;
3557 	vdev_t *rvd, *bvd, *avd = NULL;
3558 	nvlist_t *config, *nvtop;
3559 	uint64_t guid, txg;
3560 	char *pname;
3561 	int error;
3562 
3563 	/*
3564 	 * Read the label from the boot device and generate a configuration.
3565 	 */
3566 	config = spa_generate_rootconf(devpath, devid, &guid);
3567 #if defined(_OBP) && defined(_KERNEL)
3568 	if (config == NULL) {
3569 		if (strstr(devpath, "/iscsi/ssd") != NULL) {
3570 			/* iscsi boot */
3571 			get_iscsi_bootpath_phy(devpath);
3572 			config = spa_generate_rootconf(devpath, devid, &guid);
3573 		}
3574 	}
3575 #endif
3576 	if (config == NULL) {
3577 		cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
3578 		    devpath);
3579 		return (EIO);
3580 	}
3581 
3582 	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3583 	    &pname) == 0);
3584 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
3585 
3586 	mutex_enter(&spa_namespace_lock);
3587 	if ((spa = spa_lookup(pname)) != NULL) {
3588 		/*
3589 		 * Remove the existing root pool from the namespace so that we
3590 		 * can replace it with the correct config we just read in.
3591 		 */
3592 		spa_remove(spa);
3593 	}
3594 
3595 	spa = spa_add(pname, config, NULL);
3596 	spa->spa_is_root = B_TRUE;
3597 	spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
3598 
3599 	/*
3600 	 * Build up a vdev tree based on the boot device's label config.
3601 	 */
3602 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3603 	    &nvtop) == 0);
3604 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3605 	error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
3606 	    VDEV_ALLOC_ROOTPOOL);
3607 	spa_config_exit(spa, SCL_ALL, FTAG);
3608 	if (error) {
3609 		mutex_exit(&spa_namespace_lock);
3610 		nvlist_free(config);
3611 		cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
3612 		    pname);
3613 		return (error);
3614 	}
3615 
3616 	/*
3617 	 * Get the boot vdev.
3618 	 */
3619 	if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
3620 		cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
3621 		    (u_longlong_t)guid);
3622 		error = ENOENT;
3623 		goto out;
3624 	}
3625 
3626 	/*
3627 	 * Determine if there is a better boot device.
3628 	 */
3629 	avd = bvd;
3630 	spa_alt_rootvdev(rvd, &avd, &txg);
3631 	if (avd != bvd) {
3632 		cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
3633 		    "try booting from '%s'", avd->vdev_path);
3634 		error = EINVAL;
3635 		goto out;
3636 	}
3637 
3638 	/*
3639 	 * If the boot device is part of a spare vdev then ensure that
3640 	 * we're booting off the active spare.
3641 	 */
3642 	if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3643 	    !bvd->vdev_isspare) {
3644 		cmn_err(CE_NOTE, "The boot device is currently spared. Please "
3645 		    "try booting from '%s'",
3646 		    bvd->vdev_parent->
3647 		    vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
3648 		error = EINVAL;
3649 		goto out;
3650 	}
3651 
3652 	error = 0;
3653 out:
3654 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3655 	vdev_free(rvd);
3656 	spa_config_exit(spa, SCL_ALL, FTAG);
3657 	mutex_exit(&spa_namespace_lock);
3658 
3659 	nvlist_free(config);
3660 	return (error);
3661 }
3662 
3663 #endif
3664 
3665 /*
3666  * Import a non-root pool into the system.
3667  */
3668 int
3669 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
3670 {
3671 	spa_t *spa;
3672 	char *altroot = NULL;
3673 	spa_load_state_t state = SPA_LOAD_IMPORT;
3674 	zpool_rewind_policy_t policy;
3675 	uint64_t mode = spa_mode_global;
3676 	uint64_t readonly = B_FALSE;
3677 	int error;
3678 	nvlist_t *nvroot;
3679 	nvlist_t **spares, **l2cache;
3680 	uint_t nspares, nl2cache;
3681 
3682 	/*
3683 	 * If a pool with this name exists, return failure.
3684 	 */
3685 	mutex_enter(&spa_namespace_lock);
3686 	if (spa_lookup(pool) != NULL) {
3687 		mutex_exit(&spa_namespace_lock);
3688 		return (EEXIST);
3689 	}
3690 
3691 	/*
3692 	 * Create and initialize the spa structure.
3693 	 */
3694 	(void) nvlist_lookup_string(props,
3695 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
3696 	(void) nvlist_lookup_uint64(props,
3697 	    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3698 	if (readonly)
3699 		mode = FREAD;
3700 	spa = spa_add(pool, config, altroot);
3701 	spa->spa_import_flags = flags;
3702 
3703 	/*
3704 	 * Verbatim import - Take a pool and insert it into the namespace
3705 	 * as if it had been loaded at boot.
3706 	 */
3707 	if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
3708 		if (props != NULL)
3709 			spa_configfile_set(spa, props, B_FALSE);
3710 
3711 		spa_config_sync(spa, B_FALSE, B_TRUE);
3712 
3713 		mutex_exit(&spa_namespace_lock);
3714 		spa_history_log_version(spa, "import");
3715 
3716 		return (0);
3717 	}
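
	/*
	 * Illustrative sketch (hypothetical caller): the verbatim path
	 * above is what a caller requests with, e.g.:
	 *
	 *	(void) spa_import(name, config, NULL, ZFS_IMPORT_VERBATIM);
	 */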
3718 
3719 	spa_activate(spa, mode);
3720 
3721 	/*
3722 	 * Don't start async tasks until we know everything is healthy.
3723 	 */
3724 	spa_async_suspend(spa);
3725 
3726 	zpool_get_rewind_policy(config, &policy);
3727 	if (policy.zrp_request & ZPOOL_DO_REWIND)
3728 		state = SPA_LOAD_RECOVER;
3729 
3730 	/*
3731 	 * Pass off the heavy lifting to spa_load().  Pass TRUE for mosconfig
3732 	 * because the user-supplied config is actually the one to trust when
3733 	 * doing an import.
3734 	 */
3735 	if (state != SPA_LOAD_RECOVER)
3736 		spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
3737 
3738 	error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
3739 	    policy.zrp_request);
3740 
3741 	/*
3742 	 * Propagate anything learned while loading the pool and pass it
3743 	 * back to caller (i.e. rewind info, missing devices, etc).
3744 	 */
3745 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
3746 	    spa->spa_load_info) == 0);
3747 
3748 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3749 	/*
3750 	 * Toss any existing sparelist, as it is no longer valid and
3751 	 * conflicts with spa_has_spare().
3752 	 */
3753 	if (spa->spa_spares.sav_config) {
3754 		nvlist_free(spa->spa_spares.sav_config);
3755 		spa->spa_spares.sav_config = NULL;
3756 		spa_load_spares(spa);
3757 	}
3758 	if (spa->spa_l2cache.sav_config) {
3759 		nvlist_free(spa->spa_l2cache.sav_config);
3760 		spa->spa_l2cache.sav_config = NULL;
3761 		spa_load_l2cache(spa);
3762 	}
3763 
3764 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3765 	    &nvroot) == 0);
3766 	if (error == 0)
3767 		error = spa_validate_aux(spa, nvroot, -1ULL,
3768 		    VDEV_ALLOC_SPARE);
3769 	if (error == 0)
3770 		error = spa_validate_aux(spa, nvroot, -1ULL,
3771 		    VDEV_ALLOC_L2CACHE);
3772 	spa_config_exit(spa, SCL_ALL, FTAG);
3773 
3774 	if (props != NULL)
3775 		spa_configfile_set(spa, props, B_FALSE);
3776 
3777 	if (error != 0 || (props && spa_writeable(spa) &&
3778 	    (error = spa_prop_set(spa, props)))) {
3779 		spa_unload(spa);
3780 		spa_deactivate(spa);
3781 		spa_remove(spa);
3782 		mutex_exit(&spa_namespace_lock);
3783 		return (error);
3784 	}
3785 
3786 	spa_async_resume(spa);
3787 
3788 	/*
3789 	 * Override any spares and level 2 cache devices as specified by
3790 	 * the user, as these may have correct device names/devids, etc.
3791 	 */
3792 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
3793 	    &spares, &nspares) == 0) {
3794 		if (spa->spa_spares.sav_config)
3795 			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
3796 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
3797 		else
3798 			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
3799 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
3800 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
3801 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
3802 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3803 		spa_load_spares(spa);
3804 		spa_config_exit(spa, SCL_ALL, FTAG);
3805 		spa->spa_spares.sav_sync = B_TRUE;
3806 	}
3807 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
3808 	    &l2cache, &nl2cache) == 0) {
3809 		if (spa->spa_l2cache.sav_config)
3810 			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
3811 			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
3812 		else
3813 			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
3814 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
3815 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
3816 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
3817 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3818 		spa_load_l2cache(spa);
3819 		spa_config_exit(spa, SCL_ALL, FTAG);
3820 		spa->spa_l2cache.sav_sync = B_TRUE;
3821 	}
3822 
3823 	/*
3824 	 * Check for any removed devices.
3825 	 */
3826 	if (spa->spa_autoreplace) {
3827 		spa_aux_check_removed(&spa->spa_spares);
3828 		spa_aux_check_removed(&spa->spa_l2cache);
3829 	}
3830 
3831 	if (spa_writeable(spa)) {
3832 		/*
3833 		 * Update the config cache to include the newly-imported pool.
3834 		 */
3835 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3836 	}
3837 
3838 	/*
3839 	 * It's possible that the pool was expanded while it was exported.
3840 	 * We kick off an async task to handle this for us.
3841 	 */
3842 	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
3843 
3844 	mutex_exit(&spa_namespace_lock);
3845 	spa_history_log_version(spa, "import");
3846 
3847 	return (0);
3848 }
3849 
3850 nvlist_t *
3851 spa_tryimport(nvlist_t *tryconfig)
3852 {
3853 	nvlist_t *config = NULL;
3854 	char *poolname;
3855 	spa_t *spa;
3856 	uint64_t state;
3857 	int error;
3858 
3859 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
3860 		return (NULL);
3861 
3862 	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
3863 		return (NULL);
3864 
3865 	/*
3866 	 * Create and initialize the spa structure.
3867 	 */
3868 	mutex_enter(&spa_namespace_lock);
3869 	spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
3870 	spa_activate(spa, FREAD);
3871 
3872 	/*
3873 	 * Pass off the heavy lifting to spa_load().
3874 	 * Pass TRUE for mosconfig because the user-supplied config
3875 	 * is actually the one to trust when doing an import.
3876 	 */
3877 	error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
3878 
3879 	/*
3880 	 * If 'tryconfig' was at least parsable, return the current config.
3881 	 */
3882 	if (spa->spa_root_vdev != NULL) {
3883 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3884 		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
3885 		    poolname) == 0);
3886 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3887 		    state) == 0);
3888 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
3889 		    spa->spa_uberblock.ub_timestamp) == 0);
3890 		VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
3891 		    spa->spa_load_info) == 0);
3892 
3893 		/*
3894 		 * If the bootfs property exists on this pool then we
3895 		 * copy it out so that external consumers can tell which
3896 		 * pools are bootable.
3897 		 */
3898 		if ((!error || error == EEXIST) && spa->spa_bootfs) {
3899 			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3900 
3901 			/*
3902 			 * We have to play games with the name since the
3903 			 * pool was opened as TRYIMPORT_NAME.
3904 			 */
3905 			if (dsl_dsobj_to_dsname(spa_name(spa),
3906 			    spa->spa_bootfs, tmpname) == 0) {
3907 				char *cp;
3908 				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3909 
3910 				cp = strchr(tmpname, '/');
3911 				if (cp == NULL) {
3912 					(void) strlcpy(dsname, tmpname,
3913 					    MAXPATHLEN);
3914 				} else {
3915 					(void) snprintf(dsname, MAXPATHLEN,
3916 					    "%s/%s", poolname, ++cp);
3917 				}
3918 				VERIFY(nvlist_add_string(config,
3919 				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
3920 				kmem_free(dsname, MAXPATHLEN);
3921 			}
3922 			kmem_free(tmpname, MAXPATHLEN);
3923 		}
3924 
3925 		/*
3926 		 * Add the list of hot spares and level 2 cache devices.
3927 		 */
3928 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3929 		spa_add_spares(spa, config);
3930 		spa_add_l2cache(spa, config);
3931 		spa_config_exit(spa, SCL_CONFIG, FTAG);
3932 	}
3933 
3934 	spa_unload(spa);
3935 	spa_deactivate(spa);
3936 	spa_remove(spa);
3937 	mutex_exit(&spa_namespace_lock);
3938 
3939 	return (config);
3940 }
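
/*
 * Illustrative usage of spa_tryimport() (a sketch; the caller shown is
 * hypothetical):
 *
 *	nvlist_t *result = spa_tryimport(tryconfig);
 *	if (result != NULL) {
 *		char *name;
 *		VERIFY(nvlist_lookup_string(result, ZPOOL_CONFIG_POOL_NAME,
 *		    &name) == 0);
 *		... hand 'result' back to the caller ...
 *		nvlist_free(result);
 *	}
 */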
3941 
3942 /*
3943  * Pool export/destroy
3944  *
3945  * The act of destroying or exporting a pool is very simple.  We make sure there
3946  * is no more pending I/O and any references to the pool are gone.  Then, we
3947  * update the pool state and sync all the labels to disk, removing the
3948  * configuration from the cache afterwards. If the 'hardforce' flag is set, then
3949  * we don't sync the labels or remove the configuration cache.
3950  */
3951 static int
3952 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
3953     boolean_t force, boolean_t hardforce)
3954 {
3955 	spa_t *spa;
3956 
3957 	if (oldconfig)
3958 		*oldconfig = NULL;
3959 
3960 	if (!(spa_mode_global & FWRITE))
3961 		return (EROFS);
3962 
3963 	mutex_enter(&spa_namespace_lock);
3964 	if ((spa = spa_lookup(pool)) == NULL) {
3965 		mutex_exit(&spa_namespace_lock);
3966 		return (ENOENT);
3967 	}
3968 
3969 	/*
3970 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
3971 	 * reacquire the namespace lock, and see if we can export.
3972 	 */
3973 	spa_open_ref(spa, FTAG);
3974 	mutex_exit(&spa_namespace_lock);
3975 	spa_async_suspend(spa);
3976 	mutex_enter(&spa_namespace_lock);
3977 	spa_close(spa, FTAG);
3978 
3979 	/*
3980 	 * The pool will be in core if it's openable,
3981 	 * in which case we can modify its state.
3982 	 */
3983 	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
3984 		/*
3985 		 * Objsets may be open only because they're dirty, so we
3986 		 * have to force the pool to sync before checking spa_refcnt.
3987 		 */
3988 		txg_wait_synced(spa->spa_dsl_pool, 0);
3989 
3990 		/*
3991 		 * A pool cannot be exported or destroyed if there are active
3992 		 * references.  If we are resetting a pool, allow references by
3993 		 * fault injection handlers.
3994 		 */
3995 		if (!spa_refcount_zero(spa) ||
3996 		    (spa->spa_inject_ref != 0 &&
3997 		    new_state != POOL_STATE_UNINITIALIZED)) {
3998 			spa_async_resume(spa);
3999 			mutex_exit(&spa_namespace_lock);
4000 			return (EBUSY);
4001 		}
4002 
4003 		/*
4004 		 * A pool cannot be exported if it has an active shared spare.
4005 		 * This is to prevent other pools from stealing the active spare
4006 		 * from an exported pool.  The user can still force the export,
4007 		 * at their own risk.
4008 		 */
4009 		if (!force && new_state == POOL_STATE_EXPORTED &&
4010 		    spa_has_active_shared_spare(spa)) {
4011 			spa_async_resume(spa);
4012 			mutex_exit(&spa_namespace_lock);
4013 			return (EXDEV);
4014 		}
4015 
4016 		/*
4017 		 * We want this to be reflected on every label,
4018 		 * so mark them all dirty.  spa_unload() will do the
4019 		 * final sync that pushes these changes out.
4020 		 */
4021 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
4022 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4023 			spa->spa_state = new_state;
4024 			spa->spa_final_txg = spa_last_synced_txg(spa) +
4025 			    TXG_DEFER_SIZE + 1;
4026 			vdev_config_dirty(spa->spa_root_vdev);
4027 			spa_config_exit(spa, SCL_ALL, FTAG);
4028 		}
4029 	}
4030 
4031 	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
4032 
4033 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4034 		spa_unload(spa);
4035 		spa_deactivate(spa);
4036 	}
4037 
4038 	if (oldconfig && spa->spa_config)
4039 		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
4040 
4041 	if (new_state != POOL_STATE_UNINITIALIZED) {
4042 		if (!hardforce)
4043 			spa_config_sync(spa, B_TRUE, B_TRUE);
4044 		spa_remove(spa);
4045 	}
4046 	mutex_exit(&spa_namespace_lock);
4047 
4048 	return (0);
4049 }
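
/*
 * Summary of how the wrappers below drive spa_export_common()
 * (illustrative):
 *
 *	destroy:	new_state = POOL_STATE_DESTROYED
 *	export:		new_state = POOL_STATE_EXPORTED
 *	reset:		new_state = POOL_STATE_UNINITIALIZED
 *	force:		skip the active-shared-spare check
 *	hardforce:	also skip the final label sync and cachefile update
 */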
4050 
4051 /*
4052  * Destroy a storage pool.
4053  */
4054 int
4055 spa_destroy(char *pool)
4056 {
4057 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
4058 	    B_FALSE, B_FALSE));
4059 }
4060 
4061 /*
4062  * Export a storage pool.
4063  */
4064 int
4065 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
4066     boolean_t hardforce)
4067 {
4068 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
4069 	    force, hardforce));
4070 }
4071 
4072 /*
4073  * Similar to spa_export(), this unloads the spa_t without actually removing it
4074  * from the namespace in any way.
4075  */
4076 int
4077 spa_reset(char *pool)
4078 {
4079 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
4080 	    B_FALSE, B_FALSE));
4081 }
4082 
4083 /*
4084  * ==========================================================================
4085  * Device manipulation
4086  * ==========================================================================
4087  */
4088 
4089 /*
4090  * Add a device to a storage pool.
4091  */
4092 int
4093 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
4094 {
4095 	uint64_t txg, id;
4096 	int error;
4097 	vdev_t *rvd = spa->spa_root_vdev;
4098 	vdev_t *vd, *tvd;
4099 	nvlist_t **spares, **l2cache;
4100 	uint_t nspares, nl2cache;
4101 
4102 	ASSERT(spa_writeable(spa));
4103 
4104 	txg = spa_vdev_enter(spa);
4105 
4106 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
4107 	    VDEV_ALLOC_ADD)) != 0)
4108 		return (spa_vdev_exit(spa, NULL, txg, error));
4109 
4110 	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
4111 
4112 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
4113 	    &nspares) != 0)
4114 		nspares = 0;
4115 
4116 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
4117 	    &nl2cache) != 0)
4118 		nl2cache = 0;
4119 
4120 	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
4121 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
4122 
4123 	if (vd->vdev_children != 0 &&
4124 	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
4125 		return (spa_vdev_exit(spa, vd, txg, error));
4126 
4127 	/*
4128 	 * We must validate the spares and l2cache devices after checking the
4129 	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
4130 	 */
4131 	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
4132 		return (spa_vdev_exit(spa, vd, txg, error));
4133 
4134 	/*
4135 	 * Transfer each new top-level vdev from vd to rvd.
4136 	 */
4137 	for (int c = 0; c < vd->vdev_children; c++) {
4138 
4139 		/*
4140 		 * Set the vdev id to the first hole, if one exists.
4141 		 */
4142 		for (id = 0; id < rvd->vdev_children; id++) {
4143 			if (rvd->vdev_child[id]->vdev_ishole) {
4144 				vdev_free(rvd->vdev_child[id]);
4145 				break;
4146 			}
4147 		}
4148 		tvd = vd->vdev_child[c];
4149 		vdev_remove_child(vd, tvd);
4150 		tvd->vdev_id = id;
4151 		vdev_add_child(rvd, tvd);
4152 		vdev_config_dirty(tvd);
4153 	}
4154 
4155 	if (nspares != 0) {
4156 		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
4157 		    ZPOOL_CONFIG_SPARES);
4158 		spa_load_spares(spa);
4159 		spa->spa_spares.sav_sync = B_TRUE;
4160 	}
4161 
4162 	if (nl2cache != 0) {
4163 		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
4164 		    ZPOOL_CONFIG_L2CACHE);
4165 		spa_load_l2cache(spa);
4166 		spa->spa_l2cache.sav_sync = B_TRUE;
4167 	}
4168 
4169 	/*
4170 	 * We have to be careful when adding new vdevs to an existing pool.
4171 	 * If other threads start allocating from these vdevs before we
4172 	 * sync the config cache, and we lose power, then upon reboot we may
4173 	 * fail to open the pool because there are DVAs that the config cache
4174 	 * can't translate.  Therefore, we first add the vdevs without
4175 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
4176 	 * and then let spa_config_update() initialize the new metaslabs.
4177 	 *
4178 	 * spa_load() checks for added-but-not-initialized vdevs, so that
4179 	 * if we lose power at any point in this sequence, the remaining
4180 	 * steps will be completed the next time we load the pool.
4181 	 */
4182 	(void) spa_vdev_exit(spa, vd, txg, 0);
4183 
4184 	mutex_enter(&spa_namespace_lock);
4185 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
4186 	mutex_exit(&spa_namespace_lock);
4187 
4188 	return (0);
4189 }
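
/*
 * Illustrative sketch (assumed shape; the real nvroot is constructed in
 * userland): adding a single new disk as a top-level vdev amounts to
 * handing spa_vdev_add() a root vdev nvlist with one child, e.g.:
 *
 *	nvlist_t *root, *child;
 *	VERIFY(nvlist_alloc(&child, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(child, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	VERIFY(nvlist_add_string(child, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c2t0d0s0") == 0);
 *	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	VERIFY(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &child, 1) == 0);
 *	error = spa_vdev_add(spa, root);
 *
 * The device path above is only an example.
 */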
4190 
4191 /*
4192  * Attach a device to a mirror.  The arguments are the path to any device
4193  * in the mirror, and the nvroot for the new device.  If the path specifies
4194  * a device that is not mirrored, we automatically insert the mirror vdev.
4195  *
4196  * If 'replacing' is specified, the new device is intended to replace the
4197  * existing device; in this case the two devices are made into their own
4198  * mirror using the 'replacing' vdev, which is functionally identical to
4199  * the mirror vdev (it actually reuses all the same ops) but has a few
4200  * extra rules: you can't attach to it after it's been created, and upon
4201  * completion of resilvering, the first disk (the one being replaced)
4202  * is automatically detached.
4203  */
4204 int
4205 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
4206 {
4207 	uint64_t txg, dtl_max_txg;
4208 	vdev_t *rvd = spa->spa_root_vdev;
4209 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
4210 	vdev_ops_t *pvops;
4211 	char *oldvdpath, *newvdpath;
4212 	int newvd_isspare;
4213 	int error;
4214 
4215 	ASSERT(spa_writeable(spa));
4216 
4217 	txg = spa_vdev_enter(spa);
4218 
4219 	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
4220 
4221 	if (oldvd == NULL)
4222 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4223 
4224 	if (!oldvd->vdev_ops->vdev_op_leaf)
4225 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4226 
4227 	pvd = oldvd->vdev_parent;
4228 
4229 	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
4230 	    VDEV_ALLOC_ATTACH)) != 0)
4231 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4232 
4233 	if (newrootvd->vdev_children != 1)
4234 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4235 
4236 	newvd = newrootvd->vdev_child[0];
4237 
4238 	if (!newvd->vdev_ops->vdev_op_leaf)
4239 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
4240 
4241 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
4242 		return (spa_vdev_exit(spa, newrootvd, txg, error));
4243 
4244 	/*
4245 	 * Spares can't replace logs
4246 	 */
4247 	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
4248 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4249 
4250 	if (!replacing) {
4251 		/*
4252 		 * For attach, the only allowable parent is a mirror or the root
4253 		 * vdev.
4254 		 */
4255 		if (pvd->vdev_ops != &vdev_mirror_ops &&
4256 		    pvd->vdev_ops != &vdev_root_ops)
4257 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4258 
4259 		pvops = &vdev_mirror_ops;
4260 	} else {
4261 		/*
4262 		 * Active hot spares can only be replaced by inactive hot
4263 		 * spares.
4264 		 */
4265 		if (pvd->vdev_ops == &vdev_spare_ops &&
4266 		    oldvd->vdev_isspare &&
4267 		    !spa_has_spare(spa, newvd->vdev_guid))
4268 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4269 
4270 		/*
4271 		 * If the source is a hot spare, and the parent isn't already a
4272 		 * spare, then we want to create a new hot spare.  Otherwise, we
4273 		 * want to create a replacing vdev.  The user is not allowed to
4274 		 * attach to a spared vdev child unless the 'isspare' state is
4275 		 * the same (spare replaces spare, non-spare replaces
4276 		 * non-spare).
4277 		 */
4278 		if (pvd->vdev_ops == &vdev_replacing_ops &&
4279 		    spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
4280 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4281 		} else if (pvd->vdev_ops == &vdev_spare_ops &&
4282 		    newvd->vdev_isspare != oldvd->vdev_isspare) {
4283 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
4284 		}
4285 
4286 		if (newvd->vdev_isspare)
4287 			pvops = &vdev_spare_ops;
4288 		else
4289 			pvops = &vdev_replacing_ops;
4290 	}
4291 
4292 	/*
4293 	 * Make sure the new device is big enough.
4294 	 */
4295 	if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
4296 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
4297 
4298 	/*
4299 	 * The new device cannot have a higher alignment requirement
4300 	 * than the top-level vdev.
4301 	 */
4302 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
4303 		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
4304 
4305 	/*
4306 	 * If this is an in-place replacement, update oldvd's path and devid
4307 	 * to make it distinguishable from newvd, and unopenable from now on.
4308 	 */
4309 	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
4310 		spa_strfree(oldvd->vdev_path);
4311 		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
4312 		    KM_SLEEP);
4313 		(void) sprintf(oldvd->vdev_path, "%s/%s",
4314 		    newvd->vdev_path, "old");
4315 		if (oldvd->vdev_devid != NULL) {
4316 			spa_strfree(oldvd->vdev_devid);
4317 			oldvd->vdev_devid = NULL;
4318 		}
4319 	}
4320 
4321 	/* mark the device being resilvered */
4322 	newvd->vdev_resilvering = B_TRUE;
4323 
4324 	/*
4325 	 * If the parent is not a mirror, or if we're replacing, insert the new
4326 	 * mirror/replacing/spare vdev above oldvd.
4327 	 */
4328 	if (pvd->vdev_ops != pvops)
4329 		pvd = vdev_add_parent(oldvd, pvops);
4330 
4331 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
4332 	ASSERT(pvd->vdev_ops == pvops);
4333 	ASSERT(oldvd->vdev_parent == pvd);
4334 
4335 	/*
4336 	 * Extract the new device from its root and add it to pvd.
4337 	 */
4338 	vdev_remove_child(newrootvd, newvd);
4339 	newvd->vdev_id = pvd->vdev_children;
4340 	newvd->vdev_crtxg = oldvd->vdev_crtxg;
4341 	vdev_add_child(pvd, newvd);
4342 
4343 	tvd = newvd->vdev_top;
4344 	ASSERT(pvd->vdev_top == tvd);
4345 	ASSERT(tvd->vdev_parent == rvd);
4346 
4347 	vdev_config_dirty(tvd);
4348 
4349 	/*
4350 	 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
4351 	 * for any dmu_sync-ed blocks.  It will propagate upward when
4352 	 * spa_vdev_exit() calls vdev_dtl_reassess().
4353 	 */
4354 	dtl_max_txg = txg + TXG_CONCURRENT_STATES;
4355 
4356 	vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4357 	    dtl_max_txg - TXG_INITIAL);
4358 
4359 	if (newvd->vdev_isspare) {
4360 		spa_spare_activate(newvd);
4361 		spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
4362 	}
4363 
4364 	oldvdpath = spa_strdup(oldvd->vdev_path);
4365 	newvdpath = spa_strdup(newvd->vdev_path);
4366 	newvd_isspare = newvd->vdev_isspare;
4367 
4368 	/*
4369 	 * Mark newvd's DTL dirty in this txg.
4370 	 */
4371 	vdev_dirty(tvd, VDD_DTL, newvd, txg);
4372 
4373 	/*
4374 	 * Restart the resilver
4375 	 */
4376 	dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4377 
4378 	/*
4379 	 * Commit the config
4380 	 */
4381 	(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
4382 
4383 	spa_history_log_internal(spa, "vdev attach", NULL,
4384 	    "%s vdev=%s %s vdev=%s",
4385 	    replacing && newvd_isspare ? "spare in" :
4386 	    replacing ? "replace" : "attach", newvdpath,
4387 	    replacing ? "for" : "to", oldvdpath);
4388 
4389 	spa_strfree(oldvdpath);
4390 	spa_strfree(newvdpath);
4391 
4392 	if (spa->spa_bootfs)
4393 		spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
4394 
4395 	return (0);
4396 }
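
/*
 * Illustrative summary of the tree transformations performed by
 * spa_vdev_attach() (a sketch):
 *
 *	attach:		A            -> mirror(A, new)
 *			mirror(A, B) -> mirror(A, B, new)
 *	replace:	A            -> replacing(A, new); A is detached
 *				        automatically once new finishes
 *				        resilvering
 *	spare in:	A            -> spare(A, sparedev)
 */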
4397 
4398 /*
4399  * Detach a device from a mirror or replacing vdev.
4400  * If 'replace_done' is specified, only detach if the parent
4401  * is a replacing vdev.
4402  */
4403 int
4404 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
4405 {
4406 	uint64_t txg;
4407 	int error;
4408 	vdev_t *rvd = spa->spa_root_vdev;
4409 	vdev_t *vd, *pvd, *cvd, *tvd;
4410 	boolean_t unspare = B_FALSE;
4411 	uint64_t unspare_guid;
4412 	char *vdpath;
4413 
4414 	ASSERT(spa_writeable(spa));
4415 
4416 	txg = spa_vdev_enter(spa);
4417 
4418 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
4419 
4420 	if (vd == NULL)
4421 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
4422 
4423 	if (!vd->vdev_ops->vdev_op_leaf)
4424 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4425 
4426 	pvd = vd->vdev_parent;
4427 
4428 	/*
4429 	 * If the parent/child relationship is not as expected, don't do it.
4430 	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
4431 	 * vdev that's replacing B with C.  The user's intent in replacing
4432 	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
4433 	 * the replace by detaching C, the expected behavior is to end up
4434 	 * M(A,B).  But suppose that right after deciding to detach C,
4435 	 * the replacement of B completes.  We would have M(A,C), and then
4436 	 * ask to detach C, which would leave us with just A -- not what
4437 	 * the user wanted.  To prevent this, we make sure that the
4438 	 * parent/child relationship hasn't changed -- in this example,
4439 	 * that C's parent is still the replacing vdev R.
4440 	 */
4441 	if (pvd->vdev_guid != pguid && pguid != 0)
4442 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4443 
4444 	/*
4445 	 * Only 'replacing' or 'spare' vdevs can be replaced.
4446 	 */
4447 	if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
4448 	    pvd->vdev_ops != &vdev_spare_ops)
4449 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4450 
4451 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
4452 	    spa_version(spa) >= SPA_VERSION_SPARES);
4453 
4454 	/*
4455 	 * Only mirror, replacing, and spare vdevs support detach.
4456 	 */
4457 	if (pvd->vdev_ops != &vdev_replacing_ops &&
4458 	    pvd->vdev_ops != &vdev_mirror_ops &&
4459 	    pvd->vdev_ops != &vdev_spare_ops)
4460 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
4461 
4462 	/*
4463 	 * If this device has the only valid copy of some data,
4464 	 * we cannot safely detach it.
4465 	 */
4466 	if (vdev_dtl_required(vd))
4467 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
4468 
4469 	ASSERT(pvd->vdev_children >= 2);
4470 
4471 	/*
4472 	 * If we are detaching the second disk from a replacing vdev, then
4473 	 * check to see if we changed the original vdev's path to have "/old"
4474 	 * at the end in spa_vdev_attach().  If so, undo that change now.
4475 	 */
4476 	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
4477 	    vd->vdev_path != NULL) {
4478 		size_t len = strlen(vd->vdev_path);
4479 
4480 		for (int c = 0; c < pvd->vdev_children; c++) {
4481 			cvd = pvd->vdev_child[c];
4482 
4483 			if (cvd == vd || cvd->vdev_path == NULL)
4484 				continue;
4485 
4486 			if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
4487 			    strcmp(cvd->vdev_path + len, "/old") == 0) {
4488 				spa_strfree(cvd->vdev_path);
4489 				cvd->vdev_path = spa_strdup(vd->vdev_path);
4490 				break;
4491 			}
4492 		}
4493 	}
4494 
4495 	/*
4496 	 * If we are detaching the original disk from a spare, then it implies
4497 	 * that the spare should become a real disk, and be removed from the
4498 	 * active spare list for the pool.
4499 	 */
4500 	if (pvd->vdev_ops == &vdev_spare_ops &&
4501 	    vd->vdev_id == 0 &&
4502 	    pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
4503 		unspare = B_TRUE;
4504 
4505 	/*
4506 	 * Erase the disk labels so the disk can be used for other things.
4507 	 * This must be done after all other error cases are handled,
4508 	 * but before we disembowel vd (so we can still do I/O to it).
4509 	 * But if we can't do it, don't treat the error as fatal --
4510 	 * it may be that the unwritability of the disk is the reason
4511 	 * it's being detached!
4512 	 */
4513 	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
4514 
4515 	/*
4516 	 * Remove vd from its parent and compact the parent's children.
4517 	 */
4518 	vdev_remove_child(pvd, vd);
4519 	vdev_compact_children(pvd);
4520 
4521 	/*
4522 	 * Remember one of the remaining children so we can get tvd below.
4523 	 */
4524 	cvd = pvd->vdev_child[pvd->vdev_children - 1];
4525 
4526 	/*
4527 	 * If we need to remove the remaining child from the list of hot spares,
4528 	 * do it now, marking the vdev as no longer a spare in the process.
4529 	 * We must do this before vdev_remove_parent(), because that can
4530 	 * change the GUID if it creates a new toplevel GUID.  For a similar
4531 	 * reason, we must remove the spare now, in the same txg as the detach;
4532 	 * otherwise someone could attach a new sibling, change the GUID, and
4533 	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
4534 	 */
4535 	if (unspare) {
4536 		ASSERT(cvd->vdev_isspare);
4537 		spa_spare_remove(cvd);
4538 		unspare_guid = cvd->vdev_guid;
4539 		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
4540 		cvd->vdev_unspare = B_TRUE;
4541 	}
4542 
4543 	/*
4544 	 * If the parent mirror/replacing vdev only has one child,
4545 	 * the parent is no longer needed.  Remove it from the tree.
4546 	 */
4547 	if (pvd->vdev_children == 1) {
4548 		if (pvd->vdev_ops == &vdev_spare_ops)
4549 			cvd->vdev_unspare = B_FALSE;
4550 		vdev_remove_parent(cvd);
4551 		cvd->vdev_resilvering = B_FALSE;
4552 	}
4553 
4555 	/*
4556 	 * We don't set tvd until now because the parent we just removed
4557 	 * may have been the previous top-level vdev.
4558 	 */
4559 	tvd = cvd->vdev_top;
4560 	ASSERT(tvd->vdev_parent == rvd);
4561 
4562 	/*
4563 	 * Reevaluate the parent vdev state.
4564 	 */
4565 	vdev_propagate_state(cvd);
4566 
4567 	/*
4568 	 * If the 'autoexpand' property is set on the pool then automatically
4569 	 * try to expand the size of the pool.  For example, if the device we
4570 	 * just detached was smaller than the others, it may be possible to
4571 	 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
4572 	 * first so that we can obtain the updated sizes of the leaf vdevs.
4573 	 */
4574 	if (spa->spa_autoexpand) {
4575 		vdev_reopen(tvd);
4576 		vdev_expand(tvd, txg);
4577 	}
4578 
4579 	vdev_config_dirty(tvd);
4580 
4581 	/*
4582 	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
4583 	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
4584 	 * But first make sure we're not on any *other* txg's DTL list, to
4585 	 * prevent vd from being accessed after it's freed.
4586 	 */
4587 	vdpath = spa_strdup(vd->vdev_path);
4588 	for (int t = 0; t < TXG_SIZE; t++)
4589 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
4590 	vd->vdev_detached = B_TRUE;
4591 	vdev_dirty(tvd, VDD_DTL, vd, txg);
4592 
4593 	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
4594 
4595 	/* hang on to the spa before we release the lock */
4596 	spa_open_ref(spa, FTAG);
4597 
4598 	error = spa_vdev_exit(spa, vd, txg, 0);
4599 
4600 	spa_history_log_internal(spa, "detach", NULL,
4601 	    "vdev=%s", vdpath);
4602 	spa_strfree(vdpath);
4603 
4604 	/*
4605 	 * If this was the removal of the original device in a hot spare vdev,
4606 	 * then we want to go through and remove the device from the hot spare
4607 	 * list of every other pool.
4608 	 */
4609 	if (unspare) {
4610 		spa_t *altspa = NULL;
4611 
4612 		mutex_enter(&spa_namespace_lock);
4613 		while ((altspa = spa_next(altspa)) != NULL) {
4614 			if (altspa->spa_state != POOL_STATE_ACTIVE ||
4615 			    altspa == spa)
4616 				continue;
4617 
4618 			spa_open_ref(altspa, FTAG);
4619 			mutex_exit(&spa_namespace_lock);
4620 			(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
4621 			mutex_enter(&spa_namespace_lock);
4622 			spa_close(altspa, FTAG);
4623 		}
4624 		mutex_exit(&spa_namespace_lock);
4625 
4626 		/* search the rest of the vdevs for spares to remove */
4627 		spa_vdev_resilver_done(spa);
4628 	}
4629 
4630 	/* all done with the spa; OK to release */
4631 	mutex_enter(&spa_namespace_lock);
4632 	spa_close(spa, FTAG);
4633 	mutex_exit(&spa_namespace_lock);
4634 
4635 	return (error);
4636 }
4637 
4638 /*
4639  * Split a set of devices from their mirrors, and create a new pool from them.
4640  */
4641 int
4642 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
4643     nvlist_t *props, boolean_t exp)
4644 {
4645 	int error = 0;
4646 	uint64_t txg, *glist;
4647 	spa_t *newspa;
4648 	uint_t c, children, lastlog;
4649 	nvlist_t **child, *nvl, *tmp;
4650 	dmu_tx_t *tx;
4651 	char *altroot = NULL;
4652 	vdev_t *rvd, **vml = NULL;			/* vdev modify list */
4653 	boolean_t activate_slog;
4654 
4655 	ASSERT(spa_writeable(spa));
4656 
4657 	txg = spa_vdev_enter(spa);
4658 
4659 	/* clear the log and flush everything up to now */
4660 	activate_slog = spa_passivate_log(spa);
4661 	(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4662 	error = spa_offline_log(spa);
4663 	txg = spa_vdev_config_enter(spa);
4664 
4665 	if (activate_slog)
4666 		spa_activate_log(spa);
4667 
4668 	if (error != 0)
4669 		return (spa_vdev_exit(spa, NULL, txg, error));
4670 
4671 	/* check new spa name before going any further */
4672 	if (spa_lookup(newname) != NULL)
4673 		return (spa_vdev_exit(spa, NULL, txg, EEXIST));
4674 
4675 	/*
4676 	 * scan through all the children to ensure they're all mirrors
4677 	 */
4678 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
4679 	    nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
4680 	    &children) != 0)
4681 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4682 
4683 	/* first, check to ensure we've got the right child count */
4684 	rvd = spa->spa_root_vdev;
4685 	lastlog = 0;
4686 	for (c = 0; c < rvd->vdev_children; c++) {
4687 		vdev_t *vd = rvd->vdev_child[c];
4688 
4689 		/* don't count the holes & logs as children */
4690 		if (vd->vdev_islog || vd->vdev_ishole) {
4691 			if (lastlog == 0)
4692 				lastlog = c;
4693 			continue;
4694 		}
4695 
4696 		lastlog = 0;
4697 	}
4698 	if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
4699 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4700 
4701 	/* next, ensure no spare or cache devices are part of the split */
4702 	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
4703 	    nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
4704 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
4705 
4706 	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
4707 	glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
4708 
4709 	/* then, loop over each vdev and validate it */
4710 	for (c = 0; c < children; c++) {
4711 		uint64_t is_hole = 0;
4712 
4713 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4714 		    &is_hole);
4715 
4716 		if (is_hole != 0) {
4717 			if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
4718 			    spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
4719 				continue;
4720 			} else {
4721 				error = EINVAL;
4722 				break;
4723 			}
4724 		}
4725 
4726 		/* which disk is going to be split? */
4727 		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
4728 		    &glist[c]) != 0) {
4729 			error = EINVAL;
4730 			break;
4731 		}
4732 
4733 		/* look it up in the spa */
4734 		vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
4735 		if (vml[c] == NULL) {
4736 			error = ENODEV;
4737 			break;
4738 		}
4739 
4740 		/* make sure there's nothing stopping the split */
4741 		if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
4742 		    vml[c]->vdev_islog ||
4743 		    vml[c]->vdev_ishole ||
4744 		    vml[c]->vdev_isspare ||
4745 		    vml[c]->vdev_isl2cache ||
4746 		    !vdev_writeable(vml[c]) ||
4747 		    vml[c]->vdev_children != 0 ||
4748 		    vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
4749 		    c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
4750 			error = EINVAL;
4751 			break;
4752 		}
4753 
4754 		if (vdev_dtl_required(vml[c])) {
4755 			error = EBUSY;
4756 			break;
4757 		}
4758 
4759 		/* we need certain info from the top level */
4760 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
4761 		    vml[c]->vdev_top->vdev_ms_array) == 0);
4762 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
4763 		    vml[c]->vdev_top->vdev_ms_shift) == 0);
4764 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
4765 		    vml[c]->vdev_top->vdev_asize) == 0);
4766 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
4767 		    vml[c]->vdev_top->vdev_ashift) == 0);
4768 	}
4769 
4770 	if (error != 0) {
4771 		kmem_free(vml, children * sizeof (vdev_t *));
4772 		kmem_free(glist, children * sizeof (uint64_t));
4773 		return (spa_vdev_exit(spa, NULL, txg, error));
4774 	}
4775 
4776 	/* stop writers from using the disks */
4777 	for (c = 0; c < children; c++) {
4778 		if (vml[c] != NULL)
4779 			vml[c]->vdev_offline = B_TRUE;
4780 	}
4781 	vdev_reopen(spa->spa_root_vdev);
4782 
4783 	/*
4784 	 * Temporarily record the splitting vdevs in the spa config.  This
4785 	 * will disappear once the config is regenerated.
4786 	 */
4787 	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4788 	VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
4789 	    glist, children) == 0);
4790 	kmem_free(glist, children * sizeof (uint64_t));
4791 
4792 	mutex_enter(&spa->spa_props_lock);
4793 	VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
4794 	    nvl) == 0);
4795 	mutex_exit(&spa->spa_props_lock);
4796 	spa->spa_config_splitting = nvl;
4797 	vdev_config_dirty(spa->spa_root_vdev);
4798 
4799 	/* configure and create the new pool */
4800 	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
4801 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
4802 	    exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
4803 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
4804 	    spa_version(spa)) == 0);
4805 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
4806 	    spa->spa_config_txg) == 0);
4807 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4808 	    spa_generate_guid(NULL)) == 0);
4809 	(void) nvlist_lookup_string(props,
4810 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4811 
4812 	/* add the new pool to the namespace */
4813 	newspa = spa_add(newname, config, altroot);
4814 	newspa->spa_config_txg = spa->spa_config_txg;
4815 	spa_set_log_state(newspa, SPA_LOG_CLEAR);
4816 
4817 	/* release the spa config lock, retaining the namespace lock */
4818 	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4819 
4820 	if (zio_injection_enabled)
4821 		zio_handle_panic_injection(spa, FTAG, 1);
4822 
4823 	spa_activate(newspa, spa_mode_global);
4824 	spa_async_suspend(newspa);
4825 
4826 	/* create the new pool from the disks of the original pool */
4827 	error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
4828 	if (error)
4829 		goto out;
4830 
4831 	/* if that worked, generate a real config for the new pool */
4832 	if (newspa->spa_root_vdev != NULL) {
4833 		VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
4834 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
4835 		VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
4836 		    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
4837 		spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
4838 		    B_TRUE));
4839 	}
4840 
4841 	/* set the props */
4842 	if (props != NULL) {
4843 		spa_configfile_set(newspa, props, B_FALSE);
4844 		error = spa_prop_set(newspa, props);
4845 		if (error)
4846 			goto out;
4847 	}
4848 
4849 	/* flush everything */
4850 	txg = spa_vdev_config_enter(newspa);
4851 	vdev_config_dirty(newspa->spa_root_vdev);
4852 	(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
4853 
4854 	if (zio_injection_enabled)
4855 		zio_handle_panic_injection(spa, FTAG, 2);
4856 
4857 	spa_async_resume(newspa);
4858 
4859 	/* finally, update the original pool's config */
4860 	txg = spa_vdev_config_enter(spa);
4861 	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
4862 	error = dmu_tx_assign(tx, TXG_WAIT);
4863 	if (error != 0)
4864 		dmu_tx_abort(tx);
4865 	for (c = 0; c < children; c++) {
4866 		if (vml[c] != NULL) {
4867 			vdev_split(vml[c]);
4868 			if (error == 0)
4869 				spa_history_log_internal(spa, "detach", tx,
4870 				    "vdev=%s", vml[c]->vdev_path);
4871 			vdev_free(vml[c]);
4872 		}
4873 	}
4874 	vdev_config_dirty(spa->spa_root_vdev);
4875 	spa->spa_config_splitting = NULL;
4876 	nvlist_free(nvl);
4877 	if (error == 0)
4878 		dmu_tx_commit(tx);
4879 	(void) spa_vdev_exit(spa, NULL, txg, 0);
4880 
4881 	if (zio_injection_enabled)
4882 		zio_handle_panic_injection(spa, FTAG, 3);
4883 
4884 	/* split is complete; log a history record */
4885 	spa_history_log_internal(newspa, "split", NULL,
4886 	    "from pool %s", spa_name(spa));
4887 
4888 	kmem_free(vml, children * sizeof (vdev_t *));
4889 
4890 	/* if we're not going to mount the filesystems in userland, export */
4891 	if (exp)
4892 		error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
4893 		    B_FALSE, B_FALSE);
4894 
4895 	return (error);
4896 
4897 out:
4898 	spa_unload(newspa);
4899 	spa_deactivate(newspa);
4900 	spa_remove(newspa);
4901 
4902 	txg = spa_vdev_config_enter(spa);
4903 
4904 	/* re-online all offlined disks */
4905 	for (c = 0; c < children; c++) {
4906 		if (vml[c] != NULL)
4907 			vml[c]->vdev_offline = B_FALSE;
4908 	}
4909 	vdev_reopen(spa->spa_root_vdev);
4910 
4911 	nvlist_free(spa->spa_config_splitting);
4912 	spa->spa_config_splitting = NULL;
4913 	(void) spa_vdev_exit(spa, NULL, txg, error);
4914 
4915 	kmem_free(vml, children * sizeof (vdev_t *));
4916 	return (error);
4917 }
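
/*
 * Illustrative usage of spa_vdev_split_mirror() (a sketch; "newpool" is a
 * hypothetical name): 'config' carries a ZPOOL_CONFIG_VDEV_TREE whose
 * children identify, by ZPOOL_CONFIG_GUID, one healthy leaf per top-level
 * mirror to split off:
 *
 *	error = spa_vdev_split_mirror(spa, "newpool", config, NULL, B_TRUE);
 *
 * Passing exp == B_TRUE leaves the new pool exported rather than active.
 */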
4918 
4919 static nvlist_t *
4920 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
4921 {
4922 	for (int i = 0; i < count; i++) {
4923 		uint64_t guid;
4924 
4925 		VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
4926 		    &guid) == 0);
4927 
4928 		if (guid == target_guid)
4929 			return (nvpp[i]);
4930 	}
4931 
4932 	return (NULL);
4933 }
4934 
4935 static void
4936 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
4937 	nvlist_t *dev_to_remove)
4938 {
4939 	nvlist_t **newdev = NULL;
4940 
4941 	if (count > 1)
4942 		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
4943 
4944 	for (int i = 0, j = 0; i < count; i++) {
4945 		if (dev[i] == dev_to_remove)
4946 			continue;
4947 		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
4948 	}
4949 
4950 	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
4951 	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
4952 
4953 	for (int i = 0; i < count - 1; i++)
4954 		nvlist_free(newdev[i]);
4955 
4956 	if (count > 1)
4957 		kmem_free(newdev, (count - 1) * sizeof (void *));
4958 }
4959 
4960 /*
4961  * Evacuate the device.
4962  */
4963 static int
4964 spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
4965 {
4966 	uint64_t txg;
4967 	int error = 0;
4968 
4969 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4970 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
4971 	ASSERT(vd == vd->vdev_top);
4972 
4973 	/*
4974 	 * Evacuate the device.  We don't hold the config lock as writer
4975 	 * since we need to do I/O but we do keep the
4976 	 * spa_namespace_lock held.  Once this completes the device
4977 	 * should no longer have any blocks allocated on it.
4978 	 */
4979 	if (vd->vdev_islog) {
4980 		if (vd->vdev_stat.vs_alloc != 0)
4981 			error = spa_offline_log(spa);
4982 	} else {
4983 		error = ENOTSUP;
4984 	}
4985 
4986 	if (error)
4987 		return (error);
4988 
4989 	/*
4990 	 * The evacuation succeeded.  Remove any remaining MOS metadata
4991 	 * associated with this vdev, and wait for these changes to sync.
4992 	 */
4993 	ASSERT0(vd->vdev_stat.vs_alloc);
4994 	txg = spa_vdev_config_enter(spa);
4995 	vd->vdev_removing = B_TRUE;
4996 	vdev_dirty(vd, 0, NULL, txg);
4997 	vdev_config_dirty(vd);
4998 	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
4999 
5000 	return (0);
5001 }
5002 
5003 /*
5004  * Complete the removal by cleaning up the namespace.
5005  */
5006 static void
5007 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5008 {
5009 	vdev_t *rvd = spa->spa_root_vdev;
5010 	uint64_t id = vd->vdev_id;
5011 	boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5012 
5013 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5014 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5015 	ASSERT(vd == vd->vdev_top);
5016 
5017 	/*
5018 	 * Only remove devices that are empty.
5019 	 */
5020 	if (vd->vdev_stat.vs_alloc != 0)
5021 		return;
5022 
5023 	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5024 
5025 	if (list_link_active(&vd->vdev_state_dirty_node))
5026 		vdev_state_clean(vd);
5027 	if (list_link_active(&vd->vdev_config_dirty_node))
5028 		vdev_config_clean(vd);
5029 
5030 	vdev_free(vd);
5031 
5032 	if (last_vdev) {
5033 		vdev_compact_children(rvd);
5034 	} else {
5035 		vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
5036 		vdev_add_child(rvd, vd);
5037 	}
5038 	vdev_config_dirty(rvd);
5039 
5040 	/*
5041 	 * Reassess the health of our root vdev.
5042 	 */
5043 	vdev_reopen(rvd);
5044 }
5045 
5046 /*
5047  * Remove a device from the pool -
5048  *
5049  * Removing a device from the vdev namespace requires several steps
5050  * and can take a significant amount of time.  As a result we use
5051  * the spa_vdev_config_[enter/exit] functions which allow us to
5052  * grab and release the spa_config_lock while still holding the namespace
5053  * lock.  During each step the configuration is synced out.
5054  */
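
/*
 * Illustrative sketch of that enter/exit pattern (see the log-device
 * branch of spa_vdev_remove() below for the real sequence):
 *
 *	txg = spa_vdev_config_enter(spa);
 *	... quiesce allocations and mark the vdev ...
 *	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
 *	... do I/O with only spa_namespace_lock held ...
 *	txg = spa_vdev_config_enter(spa);
 *	... clean up the namespace ...
 *	return (spa_vdev_exit(spa, NULL, txg, error));
 */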
5055 
5056 /*
5057  * Remove a device from the pool.  Currently, this supports removing only hot
5058  * spares, slogs, and level 2 ARC devices.
5059  */
5060 int
5061 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
5062 {
5063 	vdev_t *vd;
5064 	metaslab_group_t *mg;
5065 	nvlist_t **spares, **l2cache, *nv;
5066 	uint64_t txg = 0;
5067 	uint_t nspares, nl2cache;
5068 	int error = 0;
5069 	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
5070 
5071 	ASSERT(spa_writeable(spa));
5072 
5073 	if (!locked)
5074 		txg = spa_vdev_enter(spa);
5075 
5076 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5077 
5078 	if (spa->spa_spares.sav_vdevs != NULL &&
5079 	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
5080 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
5081 	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
5082 		/*
5083 		 * Only remove the hot spare if it's not currently in use
5084 		 * in this pool.
5085 		 */
5086 		if (vd == NULL || unspare) {
5087 			spa_vdev_remove_aux(spa->spa_spares.sav_config,
5088 			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
5089 			spa_load_spares(spa);
5090 			spa->spa_spares.sav_sync = B_TRUE;
5091 		} else {
5092 			error = EBUSY;
5093 		}
5094 	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
5095 	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
5096 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
5097 	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
5098 		/*
5099 		 * Cache devices can always be removed.
5100 		 */
5101 		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
5102 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
5103 		spa_load_l2cache(spa);
5104 		spa->spa_l2cache.sav_sync = B_TRUE;
5105 	} else if (vd != NULL && vd->vdev_islog) {
5106 		ASSERT(!locked);
5107 		ASSERT(vd == vd->vdev_top);
5108 
5109 		/*
5110 		 * XXX - Once we have bp-rewrite this should
5111 		 * become the common case.
5112 		 */
5113 
5114 		mg = vd->vdev_mg;
5115 
5116 		/*
5117 		 * Stop allocating from this vdev.
5118 		 */
5119 		metaslab_group_passivate(mg);
5120 
5121 		/*
5122 		 * Wait for the youngest allocations and frees to sync,
5123 		 * and then wait for the deferral of those frees to finish.
5124 		 */
5125 		spa_vdev_config_exit(spa, NULL,
5126 		    txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
5127 
5128 		/*
5129 		 * Attempt to evacuate the vdev.
5130 		 */
5131 		error = spa_vdev_remove_evacuate(spa, vd);
5132 
5133 		txg = spa_vdev_config_enter(spa);
5134 
5135 		/*
5136 		 * If we couldn't evacuate the vdev, unwind.
5137 		 */
5138 		if (error) {
5139 			metaslab_group_activate(mg);
5140 			return (spa_vdev_exit(spa, NULL, txg, error));
5141 		}
5142 
5143 		/*
5144 		 * Clean up the vdev namespace.
5145 		 */
5146 		spa_vdev_remove_from_namespace(spa, vd);
5147 
5148 	} else if (vd != NULL) {
5149 		/*
5150 		 * Normal vdevs cannot be removed (yet).
5151 		 */
5152 		error = ENOTSUP;
5153 	} else {
5154 		/*
5155 		 * There is no vdev of any kind with the specified guid.
5156 		 */
5157 		error = ENOENT;
5158 	}
5159 
5160 	if (!locked)
5161 		return (spa_vdev_exit(spa, NULL, txg, error));
5162 
5163 	return (error);
5164 }
5165 
5166 /*
5167  * Find any device that's done replacing, or a vdev marked 'unspare' that's
5168  * currently spared, so we can detach it.
5169  */
5170 static vdev_t *
5171 spa_vdev_resilver_done_hunt(vdev_t *vd)
5172 {
5173 	vdev_t *newvd, *oldvd;
5174 
5175 	for (int c = 0; c < vd->vdev_children; c++) {
5176 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
5177 		if (oldvd != NULL)
5178 			return (oldvd);
5179 	}
5180 
5181 	/*
5182 	 * Check for a completed replacement.  We always consider the first
5183 	 * vdev in the list to be the oldest vdev, and the last one to be
5184 	 * the newest (see spa_vdev_attach() for how that works).  In
5185 	 * the case where the newest vdev is faulted, we will not automatically
5186 	 * remove it after a resilver completes.  This is OK as it will require
5187 	 * user intervention to determine which disk the admin wishes to keep.
5188 	 */
5189 	if (vd->vdev_ops == &vdev_replacing_ops) {
5190 		ASSERT(vd->vdev_children > 1);
5191 
5192 		newvd = vd->vdev_child[vd->vdev_children - 1];
5193 		oldvd = vd->vdev_child[0];
5194 
5195 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
5196 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5197 		    !vdev_dtl_required(oldvd))
5198 			return (oldvd);
5199 	}
5200 
5201 	/*
5202 	 * Check for a completed resilver with the 'unspare' flag set.
5203 	 */
5204 	if (vd->vdev_ops == &vdev_spare_ops) {
5205 		vdev_t *first = vd->vdev_child[0];
5206 		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
5207 
5208 		if (last->vdev_unspare) {
5209 			oldvd = first;
5210 			newvd = last;
5211 		} else if (first->vdev_unspare) {
5212 			oldvd = last;
5213 			newvd = first;
5214 		} else {
5215 			oldvd = NULL;
5216 		}
5217 
5218 		if (oldvd != NULL &&
5219 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
5220 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
5221 		    !vdev_dtl_required(oldvd))
5222 			return (oldvd);
5223 
5224 		/*
5225 		 * If there are more than two spares attached to a disk,
5226 		 * and those spares are not required, then we want to
5227 		 * attempt to free them up now so that they can be used
5228 		 * by other pools.  Once we're back down to a single
5229 		 * disk+spare, we stop removing them.
5230 		 */
5231 		if (vd->vdev_children > 2) {
5232 			newvd = vd->vdev_child[1];
5233 
5234 			if (newvd->vdev_isspare && last->vdev_isspare &&
5235 			    vdev_dtl_empty(last, DTL_MISSING) &&
5236 			    vdev_dtl_empty(last, DTL_OUTAGE) &&
5237 			    !vdev_dtl_required(newvd))
5238 				return (newvd);
5239 		}
5240 	}
5241 
5242 	return (NULL);
5243 }
5244 
5245 static void
5246 spa_vdev_resilver_done(spa_t *spa)
5247 {
5248 	vdev_t *vd, *pvd, *ppvd;
5249 	uint64_t guid, sguid, pguid, ppguid;
5250 
5251 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5252 
5253 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
5254 		pvd = vd->vdev_parent;
5255 		ppvd = pvd->vdev_parent;
5256 		guid = vd->vdev_guid;
5257 		pguid = pvd->vdev_guid;
5258 		ppguid = ppvd->vdev_guid;
5259 		sguid = 0;
5260 		/*
5261 		 * If we have just finished replacing a hot spared device, then
5262 		 * we need to detach the parent's first child (the original hot
5263 		 * spare) as well.
5264 		 */
5265 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
5266 		    ppvd->vdev_children == 2) {
5267 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
5268 			sguid = ppvd->vdev_child[1]->vdev_guid;
5269 		}
5270 		spa_config_exit(spa, SCL_ALL, FTAG);
5271 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
5272 			return;
5273 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
5274 			return;
5275 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5276 	}
5277 
5278 	spa_config_exit(spa, SCL_ALL, FTAG);
5279 }
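
/*
 * Illustrative tree for the hot-spare case handled above (a sketch):
 *
 *	spare			<- ppvd (ppguid)
 *	    replacing		<- pvd  (pguid)
 *		old disk	<- vd   (guid); detached first
 *		new disk
 *	    spare device	<- sguid; detached second
 */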
5280 
5281 /*
5282  * Update the stored path or FRU for this vdev.
5283  */
5284 int
5285 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
5286     boolean_t ispath)
5287 {
5288 	vdev_t *vd;
5289 	boolean_t sync = B_FALSE;
5290 
5291 	ASSERT(spa_writeable(spa));
5292 
5293 	spa_vdev_state_enter(spa, SCL_ALL);
5294 
5295 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
5296 		return (spa_vdev_state_exit(spa, NULL, ENOENT));
5297 
5298 	if (!vd->vdev_ops->vdev_op_leaf)
5299 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
5300 
5301 	if (ispath) {
5302 		if (strcmp(value, vd->vdev_path) != 0) {
5303 			spa_strfree(vd->vdev_path);
5304 			vd->vdev_path = spa_strdup(value);
5305 			sync = B_TRUE;
5306 		}
5307 	} else {
5308 		if (vd->vdev_fru == NULL) {
5309 			vd->vdev_fru = spa_strdup(value);
5310 			sync = B_TRUE;
5311 		} else if (strcmp(value, vd->vdev_fru) != 0) {
5312 			spa_strfree(vd->vdev_fru);
5313 			vd->vdev_fru = spa_strdup(value);
5314 			sync = B_TRUE;
5315 		}
5316 	}
5317 
5318 	return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
5319 }
5320 
5321 int
5322 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
5323 {
5324 	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
5325 }
5326 
5327 int
5328 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
5329 {
5330 	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
5331 }
5332 
5333 /*
5334  * ==========================================================================
5335  * SPA Scanning
5336  * ==========================================================================
5337  */
5338 
5339 int
5340 spa_scan_stop(spa_t *spa)
5341 {
5342 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5343 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
5344 		return (EBUSY);
5345 	return (dsl_scan_cancel(spa->spa_dsl_pool));
5346 }
5347 
5348 int
5349 spa_scan(spa_t *spa, pool_scan_func_t func)
5350 {
5351 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
5352 
5353 	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
5354 		return (ENOTSUP);
5355 
5356 	/*
5357 	 * If a resilver was requested, but there is no DTL on a
5358 	 * writeable leaf device, we have nothing to do.
5359 	 */
5360 	if (func == POOL_SCAN_RESILVER &&
5361 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
5362 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
5363 		return (0);
5364 	}
5365 
5366 	return (dsl_scan(spa->spa_dsl_pool, func));
5367 }
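
/*
 * Illustrative usage (hypothetical caller): start a scrub, then cancel it:
 *
 *	error = spa_scan(spa, POOL_SCAN_SCRUB);
 *	...
 *	error = spa_scan_stop(spa);
 */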
5368 
5369 /*
5370  * ==========================================================================
5371  * SPA async task processing
5372  * ==========================================================================
5373  */
5374 
5375 static void
5376 spa_async_remove(spa_t *spa, vdev_t *vd)
5377 {
5378 	if (vd->vdev_remove_wanted) {
5379 		vd->vdev_remove_wanted = B_FALSE;
5380 		vd->vdev_delayed_close = B_FALSE;
5381 		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
5382 
5383 		/*
5384 		 * We want to clear the stats, but we don't want to do a full
5385 		 * vdev_clear() as that will cause us to throw away
5386 		 * degraded/faulted state as well as attempt to reopen the
5387 		 * device, all of which is a waste.
5388 		 */
5389 		vd->vdev_stat.vs_read_errors = 0;
5390 		vd->vdev_stat.vs_write_errors = 0;
5391 		vd->vdev_stat.vs_checksum_errors = 0;
5392 
5393 		vdev_state_dirty(vd->vdev_top);
5394 	}
5395 
5396 	for (int c = 0; c < vd->vdev_children; c++)
5397 		spa_async_remove(spa, vd->vdev_child[c]);
5398 }
5399 
5400 static void
5401 spa_async_probe(spa_t *spa, vdev_t *vd)
5402 {
5403 	if (vd->vdev_probe_wanted) {
5404 		vd->vdev_probe_wanted = B_FALSE;
5405 		vdev_reopen(vd);	/* vdev_open() does the actual probe */
5406 	}
5407 
5408 	for (int c = 0; c < vd->vdev_children; c++)
5409 		spa_async_probe(spa, vd->vdev_child[c]);
5410 }
5411 
5412 static void
5413 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
5414 {
5415 	sysevent_id_t eid;
5416 	nvlist_t *attr;
5417 	char *physpath;
5418 
5419 	if (!spa->spa_autoexpand)
5420 		return;
5421 
5422 	for (int c = 0; c < vd->vdev_children; c++) {
5423 		vdev_t *cvd = vd->vdev_child[c];
5424 		spa_async_autoexpand(spa, cvd);
5425 	}
5426 
5427 	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
5428 		return;
5429 
5430 	physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5431 	(void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
5432 
5433 	VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5434 	VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
5435 
5436 	(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
5437 	    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
5438 
5439 	nvlist_free(attr);
5440 	kmem_free(physpath, MAXPATHLEN);
5441 }
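
/*
 * ESC_DEV_DLE is the dynamic LUN expansion sysevent; posting it with
 * the /devices path of the leaf vdev lets the rest of the system
 * re-evaluate the device's size so an autoexpand-enabled pool can
 * grow into the new capacity.
 */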
5442 
5443 static void
5444 spa_async_thread(spa_t *spa)
5445 {
5446 	int tasks;
5447 
5448 	ASSERT(spa->spa_sync_on);
5449 
5450 	mutex_enter(&spa->spa_async_lock);
5451 	tasks = spa->spa_async_tasks;
5452 	spa->spa_async_tasks = 0;
5453 	mutex_exit(&spa->spa_async_lock);
5454 
5455 	/*
5456 	 * See if the config needs to be updated.
5457 	 */
5458 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
5459 		uint64_t old_space, new_space;
5460 
5461 		mutex_enter(&spa_namespace_lock);
5462 		old_space = metaslab_class_get_space(spa_normal_class(spa));
5463 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
5464 		new_space = metaslab_class_get_space(spa_normal_class(spa));
5465 		mutex_exit(&spa_namespace_lock);
5466 
5467 		/*
5468 		 * If the pool grew as a result of the config update,
5469 		 * then log an internal history event.
5470 		 */
5471 		if (new_space != old_space) {
5472 			spa_history_log_internal(spa, "vdev online", NULL,
5473 			    "pool '%s' size: %llu(+%llu)",
5474 			    spa_name(spa), new_space, new_space - old_space);
5475 		}
5476 	}
5477 
5478 	/*
5479 	 * See if any devices need to be marked REMOVED.
5480 	 */
5481 	if (tasks & SPA_ASYNC_REMOVE) {
5482 		spa_vdev_state_enter(spa, SCL_NONE);
5483 		spa_async_remove(spa, spa->spa_root_vdev);
5484 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
5485 			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
5486 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
5487 			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
5488 		(void) spa_vdev_state_exit(spa, NULL, 0);
5489 	}
5490 
5491 	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
5492 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5493 		spa_async_autoexpand(spa, spa->spa_root_vdev);
5494 		spa_config_exit(spa, SCL_CONFIG, FTAG);
5495 	}
5496 
5497 	/*
5498 	 * See if any devices need to be probed.
5499 	 */
5500 	if (tasks & SPA_ASYNC_PROBE) {
5501 		spa_vdev_state_enter(spa, SCL_NONE);
5502 		spa_async_probe(spa, spa->spa_root_vdev);
5503 		(void) spa_vdev_state_exit(spa, NULL, 0);
5504 	}
5505 
5506 	/*
5507 	 * If any devices are done replacing, detach them.
5508 	 */
5509 	if (tasks & SPA_ASYNC_RESILVER_DONE)
5510 		spa_vdev_resilver_done(spa);
5511 
5512 	/*
5513 	 * Kick off a resilver.
5514 	 */
5515 	if (tasks & SPA_ASYNC_RESILVER)
5516 		dsl_resilver_restart(spa->spa_dsl_pool, 0);
5517 
5518 	/*
5519 	 * Let the world know that we're done.
5520 	 */
5521 	mutex_enter(&spa->spa_async_lock);
5522 	spa->spa_async_thread = NULL;
5523 	cv_broadcast(&spa->spa_async_cv);
5524 	mutex_exit(&spa->spa_async_lock);
5525 	thread_exit();
5526 }
5527 
5528 void
5529 spa_async_suspend(spa_t *spa)
5530 {
5531 	mutex_enter(&spa->spa_async_lock);
5532 	spa->spa_async_suspended++;
5533 	while (spa->spa_async_thread != NULL)
5534 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
5535 	mutex_exit(&spa->spa_async_lock);
5536 }
5537 
5538 void
5539 spa_async_resume(spa_t *spa)
5540 {
5541 	mutex_enter(&spa->spa_async_lock);
5542 	ASSERT(spa->spa_async_suspended != 0);
5543 	spa->spa_async_suspended--;
5544 	mutex_exit(&spa->spa_async_lock);
5545 }
5546 
5547 static void
5548 spa_async_dispatch(spa_t *spa)
5549 {
5550 	mutex_enter(&spa->spa_async_lock);
5551 	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
5552 	    spa->spa_async_thread == NULL &&
5553 	    rootdir != NULL && !vn_is_readonly(rootdir))
5554 		spa->spa_async_thread = thread_create(NULL, 0,
5555 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
5556 	mutex_exit(&spa->spa_async_lock);
5557 }
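
/*
 * The rootdir check above defers async work while the root filesystem
 * is still read-only (early in boot), presumably because async tasks
 * such as config updates may need to write the config cache.
 */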
5558 
5559 void
5560 spa_async_request(spa_t *spa, int task)
5561 {
5562 	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
5563 	mutex_enter(&spa->spa_async_lock);
5564 	spa->spa_async_tasks |= task;
5565 	mutex_exit(&spa->spa_async_lock);
5566 }
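
/*
 * Typical usage: set one or more SPA_ASYNC_* bits with
 * spa_async_request() and they will be acted on the next time
 * spa_sync() calls spa_async_dispatch().  For example, spa_scan()
 * above requests SPA_ASYNC_RESILVER_DONE when a requested resilver
 * turns out to be a no-op.
 */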
5567 
5568 /*
5569  * ==========================================================================
5570  * SPA syncing routines
5571  * ==========================================================================
5572  */
5573 
5574 static int
5575 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5576 {
5577 	bpobj_t *bpo = arg;
5578 	bpobj_enqueue(bpo, bp, tx);
5579 	return (0);
5580 }
5581 
5582 static int
5583 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
5584 {
5585 	zio_t *zio = arg;
5586 
5587 	zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
5588 	    zio->io_flags));
5589 	return (0);
5590 }
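
/*
 * spa_free_sync_cb() is used with a root zio: the caller creates the
 * root with zio_root(), passes it as 'arg' while iterating a bplist
 * or bpobj, and then zio_wait()s the root so that every free issued
 * above has completed (see the bplist_iterate() calls in spa_sync()
 * below).
 */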
5591 
5592 static void
5593 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
5594 {
5595 	char *packed = NULL;
5596 	size_t bufsize;
5597 	size_t nvsize = 0;
5598 	dmu_buf_t *db;
5599 
5600 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
5601 
5602 	/*
5603 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
5604 	 * information.  This avoids the dbuf_will_dirty() path and
5605 	 * saves us a pre-read to get data we don't actually care about.
5606 	 */
5607 	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
5608 	packed = kmem_alloc(bufsize, KM_SLEEP);
5609 
5610 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
5611 	    KM_SLEEP) == 0);
5612 	bzero(packed + nvsize, bufsize - nvsize);
5613 
5614 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
5615 
5616 	kmem_free(packed, bufsize);
5617 
5618 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
5619 	dmu_buf_will_dirty(db, tx);
5620 	*(uint64_t *)db->db_data = nvsize;
5621 	dmu_buf_rele(db, FTAG);
5622 }
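
/*
 * The layout written above is the inverse of load_nvlist() earlier in
 * this file: the bonus buffer holds the packed size (nvsize) and the
 * object data holds the XDR-encoded nvlist, zero-padded out to a
 * SPA_CONFIG_BLOCKSIZE multiple.
 */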
5623 
5624 static void
5625 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
5626     const char *config, const char *entry)
5627 {
5628 	nvlist_t *nvroot;
5629 	nvlist_t **list;
5630 	int i;
5631 
5632 	if (!sav->sav_sync)
5633 		return;
5634 
5635 	/*
5636 	 * Update the MOS nvlist describing the list of available devices.
5637 	 * spa_validate_aux() will have already made sure this nvlist is
5638 	 * valid and the vdevs are labeled appropriately.
5639 	 */
5640 	if (sav->sav_object == 0) {
5641 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
5642 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
5643 		    sizeof (uint64_t), tx);
5644 		VERIFY(zap_update(spa->spa_meta_objset,
5645 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
5646 		    &sav->sav_object, tx) == 0);
5647 	}
5648 
5649 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5650 	if (sav->sav_count == 0) {
5651 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
5652 	} else {
5653 		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
5654 		for (i = 0; i < sav->sav_count; i++)
5655 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
5656 			    B_FALSE, VDEV_CONFIG_L2CACHE);
5657 		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
5658 		    sav->sav_count) == 0);
5659 		for (i = 0; i < sav->sav_count; i++)
5660 			nvlist_free(list[i]);
5661 		kmem_free(list, sav->sav_count * sizeof (void *));
5662 	}
5663 
5664 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
5665 	nvlist_free(nvroot);
5666 
5667 	sav->sav_sync = B_FALSE;
5668 }
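
/*
 * spa_sync_aux_dev() serves both aux vdev lists: spa_sync() below
 * calls it once with (ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES) and once
 * with (ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE).
 */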
5669 
5670 static void
5671 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
5672 {
5673 	nvlist_t *config;
5674 
5675 	if (list_is_empty(&spa->spa_config_dirty_list))
5676 		return;
5677 
5678 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5679 
5680 	config = spa_config_generate(spa, spa->spa_root_vdev,
5681 	    dmu_tx_get_txg(tx), B_FALSE);
5682 
5683 	spa_config_exit(spa, SCL_STATE, FTAG);
5684 
5685 	if (spa->spa_config_syncing)
5686 		nvlist_free(spa->spa_config_syncing);
5687 	spa->spa_config_syncing = config;
5688 
5689 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5690 }
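
/*
 * The nvlist stashed in spa_config_syncing above is consumed near the
 * end of spa_sync(): once the txg commits, spa_config_set() makes it
 * the pool's visible configuration and the pointer is cleared.
 */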
5691 
5692 static void
5693 spa_sync_version(void *arg1, void *arg2, dmu_tx_t *tx)
5694 {
5695 	spa_t *spa = arg1;
5696 	uint64_t version = *(uint64_t *)arg2;
5697 
5698 	/*
5699 	 * Setting the version is special-cased when first creating the pool.
5700 	 */
5701 	ASSERT(tx->tx_txg != TXG_INITIAL);
5702 
5703 	ASSERT(version <= SPA_VERSION);
5704 	ASSERT(version >= spa_version(spa));
5705 
5706 	spa->spa_uberblock.ub_version = version;
5707 	vdev_config_dirty(spa->spa_root_vdev);
5708 	spa_history_log_internal(spa, "set", tx, "version=%lld", version);
5709 }
5710 
5711 /*
5712  * Set zpool properties.
5713  */
5714 static void
5715 spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
5716 {
5717 	spa_t *spa = arg1;
5718 	objset_t *mos = spa->spa_meta_objset;
5719 	nvlist_t *nvp = arg2;
5720 	nvpair_t *elem = NULL;
5721 
5722 	mutex_enter(&spa->spa_props_lock);
5723 
5724 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
5725 		uint64_t intval;
5726 		char *strval, *fname;
5727 		zpool_prop_t prop;
5728 		const char *propname;
5729 		zprop_type_t proptype;
5730 		zfeature_info_t *feature;
5731 
5732 		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
5733 		case ZPROP_INVAL:
5734 			/*
5735 			 * We checked this earlier in spa_prop_validate().
5736 			 */
5737 			ASSERT(zpool_prop_feature(nvpair_name(elem)));
5738 
5739 			fname = strchr(nvpair_name(elem), '@') + 1;
5740 			VERIFY0(zfeature_lookup_name(fname, &feature));
5741 
5742 			spa_feature_enable(spa, feature, tx);
5743 			spa_history_log_internal(spa, "set", tx,
5744 			    "%s=enabled", nvpair_name(elem));
5745 			break;
5746 
5747 		case ZPOOL_PROP_VERSION:
5748 			VERIFY(nvpair_value_uint64(elem, &intval) == 0);
5749 			/*
5750 			 * The version is synced separately before other
5751 			 * properties and should be correct by now.
5752 			 */
5753 			ASSERT3U(spa_version(spa), >=, intval);
5754 			break;
5755 
5756 		case ZPOOL_PROP_ALTROOT:
5757 			/*
5758 			 * 'altroot' is a non-persistent property. It should
5759 			 * have been set temporarily at creation or import time.
5760 			 */
5761 			ASSERT(spa->spa_root != NULL);
5762 			break;
5763 
5764 		case ZPOOL_PROP_READONLY:
5765 		case ZPOOL_PROP_CACHEFILE:
5766 			/*
5767 			 * 'readonly' and 'cachefile' are also non-persistent
5768 			 * properties.
5769 			 */
5770 			break;
5771 		case ZPOOL_PROP_COMMENT:
5772 			VERIFY(nvpair_value_string(elem, &strval) == 0);
5773 			if (spa->spa_comment != NULL)
5774 				spa_strfree(spa->spa_comment);
5775 			spa->spa_comment = spa_strdup(strval);
5776 			/*
5777 			 * We need to dirty the configuration on all the vdevs
5778 			 * so that their labels get updated.  It's unnecessary
5779 			 * to do this for pool creation since the vdev's
5780 			 * configuration has already been dirtied.
5781 			 */
5782 			if (tx->tx_txg != TXG_INITIAL)
5783 				vdev_config_dirty(spa->spa_root_vdev);
5784 			spa_history_log_internal(spa, "set", tx,
5785 			    "%s=%s", nvpair_name(elem), strval);
5786 			break;
5787 		default:
5788 			/*
5789 			 * Set pool property values in the poolprops MOS object.
5790 			 */
5791 			if (spa->spa_pool_props_object == 0) {
5792 				spa->spa_pool_props_object =
5793 				    zap_create_link(mos, DMU_OT_POOL_PROPS,
5794 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
5795 				    tx);
5796 			}
5797 
5798 			/* normalize the property name */
5799 			propname = zpool_prop_to_name(prop);
5800 			proptype = zpool_prop_get_type(prop);
5801 
5802 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
5803 				ASSERT(proptype == PROP_TYPE_STRING);
5804 				VERIFY(nvpair_value_string(elem, &strval) == 0);
5805 				VERIFY(zap_update(mos,
5806 				    spa->spa_pool_props_object, propname,
5807 				    1, strlen(strval) + 1, strval, tx) == 0);
5808 				spa_history_log_internal(spa, "set", tx,
5809 				    "%s=%s", nvpair_name(elem), strval);
5810 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
5811 				VERIFY(nvpair_value_uint64(elem, &intval) == 0);
5812 
5813 				if (proptype == PROP_TYPE_INDEX) {
5814 					const char *unused;
5815 					VERIFY(zpool_prop_index_to_string(
5816 					    prop, intval, &unused) == 0);
5817 				}
5818 				VERIFY(zap_update(mos,
5819 				    spa->spa_pool_props_object, propname,
5820 				    8, 1, &intval, tx) == 0);
5821 				spa_history_log_internal(spa, "set", tx,
5822 				    "%s=%lld", nvpair_name(elem), intval);
5823 			} else {
5824 				ASSERT(0); /* not allowed */
5825 			}
5826 
5827 			switch (prop) {
5828 			case ZPOOL_PROP_DELEGATION:
5829 				spa->spa_delegation = intval;
5830 				break;
5831 			case ZPOOL_PROP_BOOTFS:
5832 				spa->spa_bootfs = intval;
5833 				break;
5834 			case ZPOOL_PROP_FAILUREMODE:
5835 				spa->spa_failmode = intval;
5836 				break;
5837 			case ZPOOL_PROP_AUTOEXPAND:
5838 				spa->spa_autoexpand = intval;
5839 				if (tx->tx_txg != TXG_INITIAL)
5840 					spa_async_request(spa,
5841 					    SPA_ASYNC_AUTOEXPAND);
5842 				break;
5843 			case ZPOOL_PROP_DEDUPDITTO:
5844 				spa->spa_dedup_ditto = intval;
5845 				break;
5846 			default:
5847 				break;
5848 			}
5849 		}
5850 
5851 	}
5852 
5853 	mutex_exit(&spa->spa_props_lock);
5854 }
5855 
5856 /*
5857  * Perform one-time on-disk upgrade changes.  spa_version() does not
5858  * reflect the new version this txg, so there must be no changes this
5859  * txg to anything that the upgrade code depends on after it executes.
5860  * Therefore this must be called after dsl_pool_sync() does the sync
5861  * tasks.
5862  */
5863 static void
5864 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
5865 {
5866 	dsl_pool_t *dp = spa->spa_dsl_pool;
5867 
5868 	ASSERT(spa->spa_sync_pass == 1);
5869 
5870 	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
5871 	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
5872 		dsl_pool_create_origin(dp, tx);
5873 
5874 		/* Keeping the origin open increases spa_minref */
5875 		spa->spa_minref += 3;
5876 	}
5877 
5878 	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
5879 	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
5880 		dsl_pool_upgrade_clones(dp, tx);
5881 	}
5882 
5883 	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
5884 	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
5885 		dsl_pool_upgrade_dir_clones(dp, tx);
5886 
5887 		/* Keeping the freedir open increases spa_minref */
5888 		spa->spa_minref += 3;
5889 	}
5890 
5891 	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
5892 	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
5893 		spa_feature_create_zap_objects(spa, tx);
5894 	}
5895 }
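
/*
 * Each guard above compares the last-synced version (spa_ubsync)
 * against the in-flight version (spa_uberblock), so a given upgrade
 * step runs exactly once: in the txg where the pool version first
 * crosses the corresponding SPA_VERSION_* threshold.
 */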
5896 
5897 /*
5898  * Sync the specified transaction group.  New blocks may be dirtied as
5899  * part of the process, so we iterate until it converges.
5900  */
5901 void
5902 spa_sync(spa_t *spa, uint64_t txg)
5903 {
5904 	dsl_pool_t *dp = spa->spa_dsl_pool;
5905 	objset_t *mos = spa->spa_meta_objset;
5906 	bpobj_t *defer_bpo = &spa->spa_deferred_bpobj;
5907 	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
5908 	vdev_t *rvd = spa->spa_root_vdev;
5909 	vdev_t *vd;
5910 	dmu_tx_t *tx;
5911 	int error;
5912 
5913 	VERIFY(spa_writeable(spa));
5914 
5915 	/*
5916 	 * Lock out configuration changes.
5917 	 */
5918 	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5919 
5920 	spa->spa_syncing_txg = txg;
5921 	spa->spa_sync_pass = 0;
5922 
5923 	/*
5924 	 * If there are any pending vdev state changes, convert them
5925 	 * into config changes that go out with this transaction group.
5926 	 */
5927 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
5928 	while (list_head(&spa->spa_state_dirty_list) != NULL) {
5929 		/*
5930 		 * We need the write lock here because, for aux vdevs,
5931 		 * calling vdev_config_dirty() modifies sav_config.
5932 		 * This is ugly and will become unnecessary when we
5933 		 * eliminate the aux vdev wart by integrating all vdevs
5934 		 * into the root vdev tree.
5935 		 */
5936 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
5937 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
5938 		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
5939 			vdev_state_clean(vd);
5940 			vdev_config_dirty(vd);
5941 		}
5942 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
5943 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
5944 	}
5945 	spa_config_exit(spa, SCL_STATE, FTAG);
5946 
5947 	tx = dmu_tx_create_assigned(dp, txg);
5948 
5949 	/*
5950 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
5951 	 * set spa_deflate if we have no raid-z vdevs.
5952 	 */
5953 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
5954 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
5955 		int i;
5956 
5957 		for (i = 0; i < rvd->vdev_children; i++) {
5958 			vd = rvd->vdev_child[i];
5959 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
5960 				break;
5961 		}
5962 		if (i == rvd->vdev_children) {
5963 			spa->spa_deflate = TRUE;
5964 			VERIFY(0 == zap_add(spa->spa_meta_objset,
5965 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
5966 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
5967 		}
5968 	}
5969 
5970 	/*
5971 	 * If anything has changed in this txg, or if someone is waiting
5972 	 * for this txg to sync (eg, spa_vdev_remove()), push the
5973 	 * deferred frees from the previous txg.  If not, leave them
5974 	 * alone so that we don't generate work on an otherwise idle
5975 	 * system.
5976 	 */
5977 	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
5978 	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
5979 	    !txg_list_empty(&dp->dp_sync_tasks, txg) ||
5980 	    ((dsl_scan_active(dp->dp_scan) ||
5981 	    txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
5982 		zio_t *zio = zio_root(spa, NULL, NULL, 0);
5983 		VERIFY3U(bpobj_iterate(defer_bpo,
5984 		    spa_free_sync_cb, zio, tx), ==, 0);
5985 		VERIFY0(zio_wait(zio));
5986 	}
5987 
5988 	/*
5989 	 * Iterate to convergence.
5990 	 */
5991 	do {
5992 		int pass = ++spa->spa_sync_pass;
5993 
5994 		spa_sync_config_object(spa, tx);
5995 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
5996 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
5997 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
5998 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
5999 		spa_errlog_sync(spa, txg);
6000 		dsl_pool_sync(dp, txg);
6001 
6002 		if (pass <= SYNC_PASS_DEFERRED_FREE) {
6003 			zio_t *zio = zio_root(spa, NULL, NULL, 0);
6004 			bplist_iterate(free_bpl, spa_free_sync_cb,
6005 			    zio, tx);
6006 			VERIFY(zio_wait(zio) == 0);
6007 		} else {
6008 			bplist_iterate(free_bpl, bpobj_enqueue_cb,
6009 			    defer_bpo, tx);
6010 		}
6011 
6012 		ddt_sync(spa, txg);
6013 		dsl_scan_sync(dp, tx);
6014 
6015 		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) != NULL)
6016 			vdev_sync(vd, txg);
6017 
6018 		if (pass == 1)
6019 			spa_sync_upgrades(spa, tx);
6020 
6021 	} while (dmu_objset_is_dirty(mos, txg));
6022 
6023 	/*
6024 	 * Rewrite the vdev configuration (which includes the uberblock)
6025 	 * to commit the transaction group.
6026 	 *
6027 	 * If there are no dirty vdevs, we sync the uberblock to a few
6028 	 * random top-level vdevs that are known to be visible in the
6029 	 * config cache (see spa_vdev_add() for a complete description).
6030 	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
6031 	 */
6032 	for (;;) {
6033 		/*
6034 		 * We hold SCL_STATE to prevent vdev open/close/etc.
6035 		 * while we're attempting to write the vdev labels.
6036 		 */
6037 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6038 
6039 		if (list_is_empty(&spa->spa_config_dirty_list)) {
6040 			vdev_t *svd[SPA_DVAS_PER_BP];
6041 			int svdcount = 0;
6042 			int children = rvd->vdev_children;
6043 			int c0 = spa_get_random(children);
6044 
6045 			for (int c = 0; c < children; c++) {
6046 				vd = rvd->vdev_child[(c0 + c) % children];
6047 				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
6048 					continue;
6049 				svd[svdcount++] = vd;
6050 				if (svdcount == SPA_DVAS_PER_BP)
6051 					break;
6052 			}
6053 			error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
6054 			if (error != 0)
6055 				error = vdev_config_sync(svd, svdcount, txg,
6056 				    B_TRUE);
6057 		} else {
6058 			error = vdev_config_sync(rvd->vdev_child,
6059 			    rvd->vdev_children, txg, B_FALSE);
6060 			if (error != 0)
6061 				error = vdev_config_sync(rvd->vdev_child,
6062 				    rvd->vdev_children, txg, B_TRUE);
6063 		}
6064 
6065 		spa_config_exit(spa, SCL_STATE, FTAG);
6066 
6067 		if (error == 0)
6068 			break;
6069 		zio_suspend(spa, NULL);
6070 		zio_resume_wait(spa);
6071 	}
6072 	dmu_tx_commit(tx);
6073 
6074 	/*
6075 	 * Clear the dirty config list.
6076 	 */
6077 	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
6078 		vdev_config_clean(vd);
6079 
6080 	/*
6081 	 * Now that the new config has synced transactionally,
6082 	 * let it become visible to the config cache.
6083 	 */
6084 	if (spa->spa_config_syncing != NULL) {
6085 		spa_config_set(spa, spa->spa_config_syncing);
6086 		spa->spa_config_txg = txg;
6087 		spa->spa_config_syncing = NULL;
6088 	}
6089 
6090 	spa->spa_ubsync = spa->spa_uberblock;
6091 
6092 	dsl_pool_sync_done(dp, txg);
6093 
6094 	/*
6095 	 * Update usable space statistics.
6096 	 */
6097 	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) != NULL)
6098 		vdev_sync_done(vd, txg);
6099 
6100 	spa_update_dspace(spa);
6101 
6102 	/*
6103 	 * It had better be the case that we didn't dirty anything
6104 	 * since vdev_config_sync().
6105 	 */
6106 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
6107 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
6108 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
6109 
6110 	spa->spa_sync_pass = 0;
6111 
6112 	spa_config_exit(spa, SCL_CONFIG, FTAG);
6113 
6114 	spa_handle_ignored_writes(spa);
6115 
6116 	/*
6117 	 * If any async tasks have been requested, kick them off.
6118 	 */
6119 	spa_async_dispatch(spa);
6120 }
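
/*
 * spa_sync() is driven by the txg machinery: txg_sync_thread() in
 * txg.c calls it once for each transaction group to be synced.
 */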
6121 
6122 /*
6123  * Sync all pools.  We don't want to hold the namespace lock across these
6124  * operations, so we take a reference on the spa_t and drop the lock during the
6125  * sync.
6126  */
6127 void
6128 spa_sync_allpools(void)
6129 {
6130 	spa_t *spa = NULL;
6131 	mutex_enter(&spa_namespace_lock);
6132 	while ((spa = spa_next(spa)) != NULL) {
6133 		if (spa_state(spa) != POOL_STATE_ACTIVE ||
6134 		    !spa_writeable(spa) || spa_suspended(spa))
6135 			continue;
6136 		spa_open_ref(spa, FTAG);
6137 		mutex_exit(&spa_namespace_lock);
6138 		txg_wait_synced(spa_get_dsl(spa), 0);
6139 		mutex_enter(&spa_namespace_lock);
6140 		spa_close(spa, FTAG);
6141 	}
6142 	mutex_exit(&spa_namespace_lock);
6143 }
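
/*
 * spa_sync_allpools() backs the filesystem-independent sync path
 * (e.g. zfs_sync() in zfs_vfsops.c with no specific filesystem), so a
 * system-wide sync(2) flushes every active, writeable, non-suspended
 * pool.
 */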
6144 
6145 /*
6146  * ==========================================================================
6147  * Miscellaneous routines
6148  * ==========================================================================
6149  */
6150 
6151 /*
6152  * Remove all pools in the system.
6153  */
6154 void
6155 spa_evict_all(void)
6156 {
6157 	spa_t *spa;
6158 
6159 	/*
6160 	 * Remove all cached state.  All pools should be closed now,
6161 	 * so every spa in the AVL tree should be unreferenced.
6162 	 */
6163 	mutex_enter(&spa_namespace_lock);
6164 	while ((spa = spa_next(NULL)) != NULL) {
6165 		/*
6166 		 * Stop async tasks.  The async thread may need to detach
6167 		 * a device that's been replaced, which requires grabbing
6168 		 * spa_namespace_lock, so we must drop it here.
6169 		 */
6170 		spa_open_ref(spa, FTAG);
6171 		mutex_exit(&spa_namespace_lock);
6172 		spa_async_suspend(spa);
6173 		mutex_enter(&spa_namespace_lock);
6174 		spa_close(spa, FTAG);
6175 
6176 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
6177 			spa_unload(spa);
6178 			spa_deactivate(spa);
6179 		}
6180 		spa_remove(spa);
6181 	}
6182 	mutex_exit(&spa_namespace_lock);
6183 }
6184 
6185 vdev_t *
6186 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
6187 {
6188 	vdev_t *vd;
6189 	int i;
6190 
6191 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
6192 		return (vd);
6193 
6194 	if (aux) {
6195 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
6196 			vd = spa->spa_l2cache.sav_vdevs[i];
6197 			if (vd->vdev_guid == guid)
6198 				return (vd);
6199 		}
6200 
6201 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
6202 			vd = spa->spa_spares.sav_vdevs[i];
6203 			if (vd->vdev_guid == guid)
6204 				return (vd);
6205 		}
6206 	}
6207 
6208 	return (NULL);
6209 }
6210 
6211 void
6212 spa_upgrade(spa_t *spa, uint64_t version)
6213 {
6214 	ASSERT(spa_writeable(spa));
6215 
6216 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6217 
6218 	/*
6219 	 * This should only be called for a non-faulted pool, and since a pool
6220 	 * with a future (unsupported) version could never have been opened,
6221 	 * ub_version should not exceed SPA_VERSION here.
6222 	 */
6223 	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
6224 	ASSERT(version >= spa->spa_uberblock.ub_version);
6225 
6226 	spa->spa_uberblock.ub_version = version;
6227 	vdev_config_dirty(spa->spa_root_vdev);
6228 
6229 	spa_config_exit(spa, SCL_ALL, FTAG);
6230 
6231 	txg_wait_synced(spa_get_dsl(spa), 0);
6232 }
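
/*
 * Note that spa_upgrade() only records the new version and dirties
 * the config; the one-time on-disk changes keyed to the new version
 * are applied by spa_sync_upgrades() when the txg we wait on here
 * syncs out.
 */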
6233 
6234 boolean_t
6235 spa_has_spare(spa_t *spa, uint64_t guid)
6236 {
6237 	int i;
6238 	uint64_t spareguid;
6239 	spa_aux_vdev_t *sav = &spa->spa_spares;
6240 
6241 	for (i = 0; i < sav->sav_count; i++)
6242 		if (sav->sav_vdevs[i]->vdev_guid == guid)
6243 			return (B_TRUE);
6244 
6245 	for (i = 0; i < sav->sav_npending; i++) {
6246 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
6247 		    &spareguid) == 0 && spareguid == guid)
6248 			return (B_TRUE);
6249 	}
6250 
6251 	return (B_FALSE);
6252 }
6253 
6254 /*
6255  * Check if a pool has an active shared spare device.
6256  * Note: an active spare's reference count is 2 (as a spare and a replacement).
6257  */
6258 static boolean_t
6259 spa_has_active_shared_spare(spa_t *spa)
6260 {
6261 	int i, refcnt;
6262 	uint64_t pool;
6263 	spa_aux_vdev_t *sav = &spa->spa_spares;
6264 
6265 	for (i = 0; i < sav->sav_count; i++) {
6266 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
6267 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
6268 		    refcnt > 2)
6269 			return (B_TRUE);
6270 	}
6271 
6272 	return (B_FALSE);
6273 }
6274 
6275 /*
6276  * Post a sysevent corresponding to the given event.  The 'name' must be one of
6277  * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
6278  * filled in from the spa and (optionally) the vdev.  This doesn't do anything
6279  * in the userland libzpool, as we don't want consumers to misinterpret ztest
6280  * or zdb as real changes.
6281  */
6282 void
6283 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
6284 {
6285 #ifdef _KERNEL
6286 	sysevent_t		*ev;
6287 	sysevent_attr_list_t	*attr = NULL;
6288 	sysevent_value_t	value;
6289 	sysevent_id_t		eid;
6290 
6291 	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
6292 	    SE_SLEEP);
6293 
6294 	value.value_type = SE_DATA_TYPE_STRING;
6295 	value.value.sv_string = spa_name(spa);
6296 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
6297 		goto done;
6298 
6299 	value.value_type = SE_DATA_TYPE_UINT64;
6300 	value.value.sv_uint64 = spa_guid(spa);
6301 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
6302 		goto done;
6303 
6304 	if (vd) {
6305 		value.value_type = SE_DATA_TYPE_UINT64;
6306 		value.value.sv_uint64 = vd->vdev_guid;
6307 		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
6308 		    SE_SLEEP) != 0)
6309 			goto done;
6310 
6311 		if (vd->vdev_path) {
6312 			value.value_type = SE_DATA_TYPE_STRING;
6313 			value.value.sv_string = vd->vdev_path;
6314 			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
6315 			    &value, SE_SLEEP) != 0)
6316 				goto done;
6317 		}
6318 	}
6319 
6320 	if (sysevent_attach_attributes(ev, attr) != 0)
6321 		goto done;
6322 	attr = NULL;
6323 
6324 	(void) log_sysevent(ev, SE_SLEEP, &eid);
6325 
6326 done:
6327 	if (attr)
6328 		sysevent_free_attr(attr);
6329 	sysevent_free(ev);
6330 #endif
6331 }
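
/*
 * Example: callers post events such as
 *
 *	spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH);
 *
 * using the ESC_ZFS_* definitions from sys/sysevent/eventdefs.h.
 */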
6332