xref: /illumos-gate/usr/src/uts/common/fs/zfs/spa.c (revision 86714001)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
25  * Copyright (c) 2015, Nexenta Systems, Inc.  All rights reserved.
26  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27  * Copyright 2013 Saso Kiselkov. All rights reserved.
28  * Copyright (c) 2014 Integros [integros.com]
29  * Copyright 2016 Toomas Soome <tsoome@me.com>
30  * Copyright 2017 Joyent, Inc.
31  * Copyright (c) 2017 Datto Inc.
32  * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
33  */
34 
35 /*
36  * SPA: Storage Pool Allocator
37  *
38  * This file contains all the routines used when modifying on-disk SPA state.
39  * This includes opening, importing, destroying, exporting a pool, and syncing a
40  * pool.
41  */
42 
43 #include <sys/zfs_context.h>
44 #include <sys/fm/fs/zfs.h>
45 #include <sys/spa_impl.h>
46 #include <sys/zio.h>
47 #include <sys/zio_checksum.h>
48 #include <sys/dmu.h>
49 #include <sys/dmu_tx.h>
50 #include <sys/zap.h>
51 #include <sys/zil.h>
52 #include <sys/ddt.h>
53 #include <sys/vdev_impl.h>
54 #include <sys/vdev_removal.h>
55 #include <sys/vdev_indirect_mapping.h>
56 #include <sys/vdev_indirect_births.h>
57 #include <sys/metaslab.h>
58 #include <sys/metaslab_impl.h>
59 #include <sys/uberblock_impl.h>
60 #include <sys/txg.h>
61 #include <sys/avl.h>
62 #include <sys/bpobj.h>
63 #include <sys/dmu_traverse.h>
64 #include <sys/dmu_objset.h>
65 #include <sys/unique.h>
66 #include <sys/dsl_pool.h>
67 #include <sys/dsl_dataset.h>
68 #include <sys/dsl_dir.h>
69 #include <sys/dsl_prop.h>
70 #include <sys/dsl_synctask.h>
71 #include <sys/fs/zfs.h>
72 #include <sys/arc.h>
73 #include <sys/callb.h>
74 #include <sys/systeminfo.h>
75 #include <sys/spa_boot.h>
76 #include <sys/zfs_ioctl.h>
77 #include <sys/dsl_scan.h>
78 #include <sys/zfeature.h>
79 #include <sys/dsl_destroy.h>
80 #include <sys/abd.h>
81 
82 #ifdef	_KERNEL
83 #include <sys/bootprops.h>
84 #include <sys/callb.h>
85 #include <sys/cpupart.h>
86 #include <sys/pool.h>
87 #include <sys/sysdc.h>
88 #include <sys/zone.h>
89 #endif	/* _KERNEL */
90 
91 #include "zfs_prop.h"
92 #include "zfs_comutil.h"
93 
94 /*
95  * The interval, in seconds, at which failed configuration cache file writes
96  * should be retried.
97  */
98 int zfs_ccw_retry_interval = 300;
99 
100 typedef enum zti_modes {
101 	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
102 	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
103 	ZTI_MODE_NULL,			/* don't create a taskq */
104 	ZTI_NMODES
105 } zti_modes_t;
106 
107 #define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
108 #define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
109 #define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }
110 
111 #define	ZTI_N(n)	ZTI_P(n, 1)
112 #define	ZTI_ONE		ZTI_N(1)
113 
114 typedef struct zio_taskq_info {
115 	zti_modes_t zti_mode;
116 	uint_t zti_value;
117 	uint_t zti_count;
118 } zio_taskq_info_t;
119 
120 static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
121 	"issue", "issue_high", "intr", "intr_high"
122 };
123 
124 /*
125  * This table defines the taskq settings for each ZFS I/O type. When
126  * initializing a pool, we use this table to create an appropriately sized
127  * taskq. Some operations are low volume and therefore have a small, static
128  * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
129  * macros. Other operations process a large amount of data; the ZTI_BATCH
130  * macro causes us to create a taskq oriented for throughput. Some operations
131  * are so high frequency and short-lived that the taskq itself can become a
132  * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
133  * additional degree of parallelism specified by the number of threads per-
134  * taskq and the number of taskqs; when dispatching an event in this case, the
135  * particular taskq is chosen at random.
136  *
137  * The different taskq priorities are to handle the different contexts (issue
138  * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
139  * need to be handled with minimum delay.
140  */
141 const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
142 	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
143 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
144 	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
145 	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
146 	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
147 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
148 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
149 };
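
/*
 * For illustration only (this expands the macros above; it is not additional
 * code): the READ row of the table describes one 8-thread "issue" taskq and
 * eight 12-thread "intr" taskqs, so interrupt-side dispatches are spread
 * across eight queues to reduce lock contention on any single taskq:
 *
 *	ZTI_N(8)	== { ZTI_MODE_FIXED, 8, 1 }
 *	ZTI_P(12, 8)	== { ZTI_MODE_FIXED, 12, 8 }
 *	ZTI_BATCH	== { ZTI_MODE_BATCH, 0, 1 }	(WRITE issue taskq)
 */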
150 
151 static void spa_sync_version(void *arg, dmu_tx_t *tx);
152 static void spa_sync_props(void *arg, dmu_tx_t *tx);
153 static boolean_t spa_has_active_shared_spare(spa_t *spa);
154 static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
155 static void spa_vdev_resilver_done(spa_t *spa);
156 
157 uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
158 id_t		zio_taskq_psrset_bind = PS_NONE;
159 boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
160 uint_t		zio_taskq_basedc = 80;		/* base duty cycle */
161 
162 boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
163 extern int	zfs_sync_pass_deferred_free;
164 
165 /*
166  * Report any spa_load_verify errors found, but do not fail spa_load.
167  * This is used by zdb to analyze non-idle pools.
168  */
169 boolean_t	spa_load_verify_dryrun = B_FALSE;
170 
171 /*
172  * This (illegal) pool name is used when temporarily importing a spa_t in order
173  * to get the vdev stats associated with the imported devices.
174  */
175 #define	TRYIMPORT_NAME	"$import"
176 
177 /*
178  * For debugging purposes: print out vdev tree during pool import.
179  */
180 boolean_t	spa_load_print_vdev_tree = B_FALSE;
181 
182 /*
183  * A non-zero value for zfs_max_missing_tvds means that we allow importing
184  * pools with missing top-level vdevs. This is strictly intended for advanced
185  * pool recovery cases since missing data is almost inevitable. Pools with
186  * missing devices can only be imported read-only for safety reasons, and their
187  * fail-mode will be automatically set to "continue".
188  *
189  * With 1 missing vdev we should be able to import the pool and mount all
190  * datasets. User data that was not modified after the missing device has been
191  * added should be recoverable. This means that snapshots created prior to the
192  * addition of that device should be completely intact.
193  *
194  * With 2 missing vdevs, some datasets may fail to mount since there are
195  * dataset statistics that are stored as regular metadata. Some data might be
196  * recoverable if those vdevs were added recently.
197  *
198  * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
199  * may be missing entirely. Chances of data recovery are very low. Note that
200  * there are also risks of performing an inadvertent rewind as we might be
201  * missing all the vdevs with the latest uberblocks.
202  */
203 uint64_t	zfs_max_missing_tvds = 0;
204 
205 /*
206  * The parameters below are similar to zfs_max_missing_tvds but are only
207  * intended for a preliminary open of the pool with an untrusted config which
208  * might be incomplete or out-dated.
209  *
210  * We are more tolerant for pools opened from a cachefile since we could have
211  * an out-dated cachefile where a device removal was not registered.
212  * We could have set the limit arbitrarily high but in the case where devices
213  * are really missing we would want to return the proper error codes; we chose
214  * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
215  * and we get a chance to retrieve the trusted config.
216  */
217 uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
218 
219 /*
220  * In the case where config was assembled by scanning device paths (/dev/dsk
221  * by default) we are less tolerant since all the existing devices should have
222  * been detected and we want spa_load to return the right error codes.
223  */
224 uint64_t	zfs_max_missing_tvds_scan = 0;
225 
226 /*
227  * Debugging aid that pauses spa_sync() towards the end.
228  */
229 boolean_t	zfs_pause_spa_sync = B_FALSE;
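
/*
 * Example (an assumption about typical illumos usage, not something this file
 * defines): the tunables above can be set from /etc/system before the zfs
 * module is loaded, e.g.
 *
 *	set zfs:zfs_max_missing_tvds = 1
 *	set zfs:spa_load_print_vdev_tree = 1
 *
 * or patched at runtime with mdb -kw when debugging a failed import.
 */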
230 
231 /*
232  * ==========================================================================
233  * SPA properties routines
234  * ==========================================================================
235  */
236 
237 /*
238  * Add a (source=src, propname=propval) list to an nvlist.
239  */
240 static void
241 spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
242     uint64_t intval, zprop_source_t src)
243 {
244 	const char *propname = zpool_prop_to_name(prop);
245 	nvlist_t *propval;
246 
247 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
248 	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
249 
250 	if (strval != NULL)
251 		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
252 	else
253 		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
254 
255 	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
256 	nvlist_free(propval);
257 }
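
/*
 * For illustration, a call such as
 *
 *	spa_prop_add_list(nvl, ZPOOL_PROP_CAPACITY, NULL, 42, ZPROP_SRC_NONE);
 *
 * leaves nvl with a nested nvlist of the form
 *
 *	"capacity" -> { ZPROP_SOURCE = ZPROP_SRC_NONE, ZPROP_VALUE = 42 }
 *
 * i.e. the property name maps to an nvlist holding the source and either the
 * string value or the integer value.
 */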
258 
259 /*
260  * Get property values from the spa configuration.
261  */
262 static void
263 spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
264 {
265 	vdev_t *rvd = spa->spa_root_vdev;
266 	dsl_pool_t *pool = spa->spa_dsl_pool;
267 	uint64_t size, alloc, cap, version;
268 	zprop_source_t src = ZPROP_SRC_NONE;
269 	spa_config_dirent_t *dp;
270 	metaslab_class_t *mc = spa_normal_class(spa);
271 
272 	ASSERT(MUTEX_HELD(&spa->spa_props_lock));
273 
274 	if (rvd != NULL) {
275 		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
276 		size = metaslab_class_get_space(spa_normal_class(spa));
277 		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
278 		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
279 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
280 		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
281 		    size - alloc, src);
282 		spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
283 		    spa->spa_checkpoint_info.sci_dspace, src);
284 
285 		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
286 		    metaslab_class_fragmentation(mc), src);
287 		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
288 		    metaslab_class_expandable_space(mc), src);
289 		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
290 		    (spa_mode(spa) == FREAD), src);
291 
292 		cap = (size == 0) ? 0 : (alloc * 100 / size);
293 		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
294 
295 		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
296 		    ddt_get_pool_dedup_ratio(spa), src);
297 
298 		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
299 		    rvd->vdev_state, src);
300 
301 		version = spa_version(spa);
302 		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
303 			src = ZPROP_SRC_DEFAULT;
304 		else
305 			src = ZPROP_SRC_LOCAL;
306 		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
307 	}
308 
309 	if (pool != NULL) {
310 		/*
311 		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
312 		 * freedir will be NULL when opening pools created before then.
313 		 */
314 		if (pool->dp_free_dir != NULL) {
315 			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
316 			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
317 			    src);
318 		} else {
319 			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
320 			    NULL, 0, src);
321 		}
322 
323 		if (pool->dp_leak_dir != NULL) {
324 			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
325 			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
326 			    src);
327 		} else {
328 			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
329 			    NULL, 0, src);
330 		}
331 	}
332 
333 	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
334 
335 	if (spa->spa_comment != NULL) {
336 		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
337 		    0, ZPROP_SRC_LOCAL);
338 	}
339 
340 	if (spa->spa_root != NULL)
341 		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
342 		    0, ZPROP_SRC_LOCAL);
343 
344 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
345 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
346 		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
347 	} else {
348 		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
349 		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
350 	}
351 
352 	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
353 		if (dp->scd_path == NULL) {
354 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
355 			    "none", 0, ZPROP_SRC_LOCAL);
356 		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
357 			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
358 			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
359 		}
360 	}
361 }
362 
363 /*
364  * Get zpool property values.
365  */
366 int
367 spa_prop_get(spa_t *spa, nvlist_t **nvp)
368 {
369 	objset_t *mos = spa->spa_meta_objset;
370 	zap_cursor_t zc;
371 	zap_attribute_t za;
372 	int err;
373 
374 	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
375 
376 	mutex_enter(&spa->spa_props_lock);
377 
378 	/*
379 	 * Get properties from the spa config.
380 	 */
381 	spa_prop_get_config(spa, nvp);
382 
383 	/* If no pool property object, no more prop to get. */
384 	/* If no pool property object, there are no more props to get. */
385 		mutex_exit(&spa->spa_props_lock);
386 		return (0);
387 	}
388 
389 	/*
390 	 * Get properties from the MOS pool property object.
391 	 */
392 	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
393 	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
394 	    zap_cursor_advance(&zc)) {
395 		uint64_t intval = 0;
396 		char *strval = NULL;
397 		zprop_source_t src = ZPROP_SRC_DEFAULT;
398 		zpool_prop_t prop;
399 
400 		if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
401 			continue;
402 
403 		switch (za.za_integer_length) {
404 		case 8:
405 			/* integer property */
406 			if (za.za_first_integer !=
407 			    zpool_prop_default_numeric(prop))
408 				src = ZPROP_SRC_LOCAL;
409 
410 			if (prop == ZPOOL_PROP_BOOTFS) {
411 				dsl_pool_t *dp;
412 				dsl_dataset_t *ds = NULL;
413 
414 				dp = spa_get_dsl(spa);
415 				dsl_pool_config_enter(dp, FTAG);
416 				if (err = dsl_dataset_hold_obj(dp,
417 				    za.za_first_integer, FTAG, &ds)) {
418 					dsl_pool_config_exit(dp, FTAG);
419 					break;
420 				}
421 
422 				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
423 				    KM_SLEEP);
424 				dsl_dataset_name(ds, strval);
425 				dsl_dataset_rele(ds, FTAG);
426 				dsl_pool_config_exit(dp, FTAG);
427 			} else {
428 				strval = NULL;
429 				intval = za.za_first_integer;
430 			}
431 
432 			spa_prop_add_list(*nvp, prop, strval, intval, src);
433 
434 			if (strval != NULL)
435 				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
436 
437 			break;
438 
439 		case 1:
440 			/* string property */
441 			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
442 			err = zap_lookup(mos, spa->spa_pool_props_object,
443 			    za.za_name, 1, za.za_num_integers, strval);
444 			if (err) {
445 				kmem_free(strval, za.za_num_integers);
446 				break;
447 			}
448 			spa_prop_add_list(*nvp, prop, strval, 0, src);
449 			kmem_free(strval, za.za_num_integers);
450 			break;
451 
452 		default:
453 			break;
454 		}
455 	}
456 	zap_cursor_fini(&zc);
457 	mutex_exit(&spa->spa_props_lock);
458 out:
459 	if (err && err != ENOENT) {
460 		nvlist_free(*nvp);
461 		*nvp = NULL;
462 		return (err);
463 	}
464 
465 	return (0);
466 }
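
/*
 * Illustrative (hypothetical) caller, showing the contract implied above: on
 * success *nvp is allocated here and the caller is responsible for freeing it.
 *
 *	nvlist_t *props = NULL;
 *	if (spa_prop_get(spa, &props) == 0) {
 *		... walk props with nvlist_next_nvpair() ...
 *		nvlist_free(props);
 *	}
 */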
467 
468 /*
469  * Validate the given pool properties nvlist and modify the list
470  * for the property values to be set.
471  */
472 static int
473 spa_prop_validate(spa_t *spa, nvlist_t *props)
474 {
475 	nvpair_t *elem;
476 	int error = 0, reset_bootfs = 0;
477 	uint64_t objnum = 0;
478 	boolean_t has_feature = B_FALSE;
479 
480 	elem = NULL;
481 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
482 		uint64_t intval;
483 		char *strval, *slash, *check, *fname;
484 		const char *propname = nvpair_name(elem);
485 		zpool_prop_t prop = zpool_name_to_prop(propname);
486 
487 		switch (prop) {
488 		case ZPOOL_PROP_INVAL:
489 			if (!zpool_prop_feature(propname)) {
490 				error = SET_ERROR(EINVAL);
491 				break;
492 			}
493 
494 			/*
495 			 * Sanitize the input.
496 			 */
497 			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
498 				error = SET_ERROR(EINVAL);
499 				break;
500 			}
501 
502 			if (nvpair_value_uint64(elem, &intval) != 0) {
503 				error = SET_ERROR(EINVAL);
504 				break;
505 			}
506 
507 			if (intval != 0) {
508 				error = SET_ERROR(EINVAL);
509 				break;
510 			}
511 
512 			fname = strchr(propname, '@') + 1;
513 			if (zfeature_lookup_name(fname, NULL) != 0) {
514 				error = SET_ERROR(EINVAL);
515 				break;
516 			}
517 
518 			has_feature = B_TRUE;
519 			break;
520 
521 		case ZPOOL_PROP_VERSION:
522 			error = nvpair_value_uint64(elem, &intval);
523 			if (!error &&
524 			    (intval < spa_version(spa) ||
525 			    intval > SPA_VERSION_BEFORE_FEATURES ||
526 			    has_feature))
527 				error = SET_ERROR(EINVAL);
528 			break;
529 
530 		case ZPOOL_PROP_DELEGATION:
531 		case ZPOOL_PROP_AUTOREPLACE:
532 		case ZPOOL_PROP_LISTSNAPS:
533 		case ZPOOL_PROP_AUTOEXPAND:
534 			error = nvpair_value_uint64(elem, &intval);
535 			if (!error && intval > 1)
536 				error = SET_ERROR(EINVAL);
537 			break;
538 
539 		case ZPOOL_PROP_BOOTFS:
540 			/*
541 			 * If the pool version is less than SPA_VERSION_BOOTFS,
542 			 * or the pool is still being created (version == 0),
543 			 * the bootfs property cannot be set.
544 			 */
545 			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
546 				error = SET_ERROR(ENOTSUP);
547 				break;
548 			}
549 
550 			/*
551 			 * Make sure the vdev config is bootable
552 			 */
553 			if (!vdev_is_bootable(spa->spa_root_vdev)) {
554 				error = SET_ERROR(ENOTSUP);
555 				break;
556 			}
557 
558 			reset_bootfs = 1;
559 
560 			error = nvpair_value_string(elem, &strval);
561 
562 			if (!error) {
563 				objset_t *os;
564 				uint64_t propval;
565 
566 				if (strval == NULL || strval[0] == '\0') {
567 					objnum = zpool_prop_default_numeric(
568 					    ZPOOL_PROP_BOOTFS);
569 					break;
570 				}
571 
572 				if (error = dmu_objset_hold(strval, FTAG, &os))
573 					break;
574 
575 				/*
576 				 * Must be ZPL, and its property settings
577 				 * must be supported by GRUB (compression
578 				 * is not gzip, and large blocks are not used).
579 				 */
580 
581 				if (dmu_objset_type(os) != DMU_OST_ZFS) {
582 					error = SET_ERROR(ENOTSUP);
583 				} else if ((error =
584 				    dsl_prop_get_int_ds(dmu_objset_ds(os),
585 				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
586 				    &propval)) == 0 &&
587 				    !BOOTFS_COMPRESS_VALID(propval)) {
588 					error = SET_ERROR(ENOTSUP);
589 				} else {
590 					objnum = dmu_objset_id(os);
591 				}
592 				dmu_objset_rele(os, FTAG);
593 			}
594 			break;
595 
596 		case ZPOOL_PROP_FAILUREMODE:
597 			error = nvpair_value_uint64(elem, &intval);
598 			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
599 			    intval > ZIO_FAILURE_MODE_PANIC))
600 				error = SET_ERROR(EINVAL);
601 
602 			/*
603 			 * This is a special case which only occurs when
604 			 * the pool has completely failed. This allows
605 			 * the user to change the in-core failmode property
606 			 * without syncing it out to disk (I/Os might
607 			 * currently be blocked). We do this by returning
608 			 * EIO to the caller (spa_prop_set) to trick it
609 			 * into thinking we encountered a property validation
610 			 * error.
611 			 */
612 			if (!error && spa_suspended(spa)) {
613 				spa->spa_failmode = intval;
614 				error = SET_ERROR(EIO);
615 			}
616 			break;
617 
618 		case ZPOOL_PROP_CACHEFILE:
619 			if ((error = nvpair_value_string(elem, &strval)) != 0)
620 				break;
621 
622 			if (strval[0] == '\0')
623 				break;
624 
625 			if (strcmp(strval, "none") == 0)
626 				break;
627 
628 			if (strval[0] != '/') {
629 				error = SET_ERROR(EINVAL);
630 				break;
631 			}
632 
633 			slash = strrchr(strval, '/');
634 			ASSERT(slash != NULL);
635 
636 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
637 			    strcmp(slash, "/..") == 0)
638 				error = SET_ERROR(EINVAL);
639 			break;
640 
641 		case ZPOOL_PROP_COMMENT:
642 			if ((error = nvpair_value_string(elem, &strval)) != 0)
643 				break;
644 			for (check = strval; *check != '\0'; check++) {
645 				/*
646 				 * The kernel doesn't have an easy isprint()
647 				 * check.  For this kernel check, we merely
648 				 * check ASCII apart from DEL.  Fix this if
649 				 * there is an easy-to-use kernel isprint().
650 				 */
651 				if (*check >= 0x7f) {
652 					error = SET_ERROR(EINVAL);
653 					break;
654 				}
655 			}
656 			if (strlen(strval) > ZPROP_MAX_COMMENT)
657 				error = E2BIG;
658 			break;
659 
660 		case ZPOOL_PROP_DEDUPDITTO:
661 			if (spa_version(spa) < SPA_VERSION_DEDUP)
662 				error = SET_ERROR(ENOTSUP);
663 			else
664 				error = nvpair_value_uint64(elem, &intval);
665 			if (error == 0 &&
666 			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
667 				error = SET_ERROR(EINVAL);
668 			break;
669 		}
670 
671 		if (error)
672 			break;
673 	}
674 
675 	if (!error && reset_bootfs) {
676 		error = nvlist_remove(props,
677 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
678 
679 		if (!error) {
680 			error = nvlist_add_uint64(props,
681 			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
682 		}
683 	}
684 
685 	return (error);
686 }
687 
688 void
689 spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
690 {
691 	char *cachefile;
692 	spa_config_dirent_t *dp;
693 
694 	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
695 	    &cachefile) != 0)
696 		return;
697 
698 	dp = kmem_alloc(sizeof (spa_config_dirent_t),
699 	    KM_SLEEP);
700 
701 	if (cachefile[0] == '\0')
702 		dp->scd_path = spa_strdup(spa_config_path);
703 	else if (strcmp(cachefile, "none") == 0)
704 		dp->scd_path = NULL;
705 	else
706 		dp->scd_path = spa_strdup(cachefile);
707 
708 	list_insert_head(&spa->spa_config_list, dp);
709 	if (need_sync)
710 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
711 }
712 
713 int
714 spa_prop_set(spa_t *spa, nvlist_t *nvp)
715 {
716 	int error;
717 	nvpair_t *elem = NULL;
718 	boolean_t need_sync = B_FALSE;
719 
720 	if ((error = spa_prop_validate(spa, nvp)) != 0)
721 		return (error);
722 
723 	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
724 		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
725 
726 		if (prop == ZPOOL_PROP_CACHEFILE ||
727 		    prop == ZPOOL_PROP_ALTROOT ||
728 		    prop == ZPOOL_PROP_READONLY)
729 			continue;
730 
731 		if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
732 			uint64_t ver;
733 
734 			if (prop == ZPOOL_PROP_VERSION) {
735 				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
736 			} else {
737 				ASSERT(zpool_prop_feature(nvpair_name(elem)));
738 				ver = SPA_VERSION_FEATURES;
739 				need_sync = B_TRUE;
740 			}
741 
742 			/* Save time if the version is already set. */
743 			if (ver == spa_version(spa))
744 				continue;
745 
746 			/*
747 			 * In addition to the pool directory object, we might
748 			 * create the pool properties object, the features for
749 			 * read object, the features for write object, or the
750 			 * feature descriptions object.
751 			 */
752 			error = dsl_sync_task(spa->spa_name, NULL,
753 			    spa_sync_version, &ver,
754 			    6, ZFS_SPACE_CHECK_RESERVED);
755 			if (error)
756 				return (error);
757 			continue;
758 		}
759 
760 		need_sync = B_TRUE;
761 		break;
762 	}
763 
764 	if (need_sync) {
765 		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
766 		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
767 	}
768 
769 	return (0);
770 }
771 
772 /*
773  * If the bootfs property value is dsobj, clear it.
774  */
775 void
776 spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
777 {
778 	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
779 		VERIFY(zap_remove(spa->spa_meta_objset,
780 		    spa->spa_pool_props_object,
781 		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
782 		spa->spa_bootfs = 0;
783 	}
784 }
785 
786 /*ARGSUSED*/
787 static int
788 spa_change_guid_check(void *arg, dmu_tx_t *tx)
789 {
790 	uint64_t *newguid = arg;
791 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
792 	vdev_t *rvd = spa->spa_root_vdev;
793 	uint64_t vdev_state;
794 
795 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
796 		int error = (spa_has_checkpoint(spa)) ?
797 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
798 		return (SET_ERROR(error));
799 	}
800 
801 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
802 	vdev_state = rvd->vdev_state;
803 	spa_config_exit(spa, SCL_STATE, FTAG);
804 
805 	if (vdev_state != VDEV_STATE_HEALTHY)
806 		return (SET_ERROR(ENXIO));
807 
808 	ASSERT3U(spa_guid(spa), !=, *newguid);
809 
810 	return (0);
811 }
812 
813 static void
814 spa_change_guid_sync(void *arg, dmu_tx_t *tx)
815 {
816 	uint64_t *newguid = arg;
817 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
818 	uint64_t oldguid;
819 	vdev_t *rvd = spa->spa_root_vdev;
820 
821 	oldguid = spa_guid(spa);
822 
823 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
824 	rvd->vdev_guid = *newguid;
825 	rvd->vdev_guid_sum += (*newguid - oldguid);
826 	vdev_config_dirty(rvd);
827 	spa_config_exit(spa, SCL_STATE, FTAG);
828 
829 	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
830 	    oldguid, *newguid);
831 }
832 
833 /*
834  * Change the GUID for the pool.  This is done so that we can later
835  * re-import a pool built from a clone of our own vdevs.  We will modify
836  * the root vdev's guid, our own pool guid, and then mark all of our
837  * vdevs dirty.  Note that we must make sure that all our vdevs are
838  * online when we do this, or else any vdevs that weren't present
839  * would be orphaned from our pool.  We are also going to issue a
840  * sysevent to update any watchers.
841  */
842 int
843 spa_change_guid(spa_t *spa)
844 {
845 	int error;
846 	uint64_t guid;
847 
848 	mutex_enter(&spa->spa_vdev_top_lock);
849 	mutex_enter(&spa_namespace_lock);
850 	guid = spa_generate_guid(NULL);
851 
852 	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
853 	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
854 
855 	if (error == 0) {
856 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
857 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
858 	}
859 
860 	mutex_exit(&spa_namespace_lock);
861 	mutex_exit(&spa->spa_vdev_top_lock);
862 
863 	return (error);
864 }
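
/*
 * For reference (an assumption about the caller, which lives outside this
 * file): this path is normally reached from userland via "zpool reguid",
 * which triggers the GUID change and config cache rewrite above.
 */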
865 
866 /*
867  * ==========================================================================
868  * SPA state manipulation (open/create/destroy/import/export)
869  * ==========================================================================
870  */
871 
872 static int
873 spa_error_entry_compare(const void *a, const void *b)
874 {
875 	spa_error_entry_t *sa = (spa_error_entry_t *)a;
876 	spa_error_entry_t *sb = (spa_error_entry_t *)b;
877 	int ret;
878 
879 	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
880 	    sizeof (zbookmark_phys_t));
881 
882 	if (ret < 0)
883 		return (-1);
884 	else if (ret > 0)
885 		return (1);
886 	else
887 		return (0);
888 }
889 
890 /*
891  * Utility function which retrieves copies of the current logs and
892  * re-initializes them in the process.
893  */
894 void
895 spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
896 {
897 	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
898 
899 	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
900 	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
901 
902 	avl_create(&spa->spa_errlist_scrub,
903 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
904 	    offsetof(spa_error_entry_t, se_avl));
905 	avl_create(&spa->spa_errlist_last,
906 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
907 	    offsetof(spa_error_entry_t, se_avl));
908 }
909 
910 static void
911 spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
912 {
913 	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
914 	enum zti_modes mode = ztip->zti_mode;
915 	uint_t value = ztip->zti_value;
916 	uint_t count = ztip->zti_count;
917 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
918 	char name[32];
919 	uint_t flags = 0;
920 	boolean_t batch = B_FALSE;
921 
922 	if (mode == ZTI_MODE_NULL) {
923 		tqs->stqs_count = 0;
924 		tqs->stqs_taskq = NULL;
925 		return;
926 	}
927 
928 	ASSERT3U(count, >, 0);
929 
930 	tqs->stqs_count = count;
931 	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
932 
933 	switch (mode) {
934 	case ZTI_MODE_FIXED:
935 		ASSERT3U(value, >=, 1);
936 		value = MAX(value, 1);
937 		break;
938 
939 	case ZTI_MODE_BATCH:
940 		batch = B_TRUE;
941 		flags |= TASKQ_THREADS_CPU_PCT;
942 		value = zio_taskq_batch_pct;
943 		break;
944 
945 	default:
946 		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
947 		    "spa_activate()",
948 		    zio_type_name[t], zio_taskq_types[q], mode, value);
949 		break;
950 	}
951 
952 	for (uint_t i = 0; i < count; i++) {
953 		taskq_t *tq;
954 
955 		if (count > 1) {
956 			(void) snprintf(name, sizeof (name), "%s_%s_%u",
957 			    zio_type_name[t], zio_taskq_types[q], i);
958 		} else {
959 			(void) snprintf(name, sizeof (name), "%s_%s",
960 			    zio_type_name[t], zio_taskq_types[q]);
961 		}
962 
963 		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
964 			if (batch)
965 				flags |= TASKQ_DC_BATCH;
966 
967 			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
968 			    spa->spa_proc, zio_taskq_basedc, flags);
969 		} else {
970 			pri_t pri = maxclsyspri;
971 			/*
972 			 * The write issue taskq can be extremely CPU
973 			 * intensive.  Run it at slightly lower priority
974 			 * than the other taskqs.
975 			 */
976 			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
977 				pri--;
978 
979 			tq = taskq_create_proc(name, value, pri, 50,
980 			    INT_MAX, spa->spa_proc, flags);
981 		}
982 
983 		tqs->stqs_taskq[i] = tq;
984 	}
985 }
986 
987 static void
988 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
989 {
990 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
991 
992 	if (tqs->stqs_taskq == NULL) {
993 		ASSERT0(tqs->stqs_count);
994 		return;
995 	}
996 
997 	for (uint_t i = 0; i < tqs->stqs_count; i++) {
998 		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
999 		taskq_destroy(tqs->stqs_taskq[i]);
1000 	}
1001 
1002 	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
1003 	tqs->stqs_taskq = NULL;
1004 }
1005 
1006 /*
1007  * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
1008  * Note that a type may have multiple discrete taskqs to avoid lock contention
1009  * on the taskq itself. In that case we choose which taskq at random by using
1010  * the low bits of gethrtime().
1011  */
1012 void
1013 spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
1014     task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
1015 {
1016 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1017 	taskq_t *tq;
1018 
1019 	ASSERT3P(tqs->stqs_taskq, !=, NULL);
1020 	ASSERT3U(tqs->stqs_count, !=, 0);
1021 
1022 	if (tqs->stqs_count == 1) {
1023 		tq = tqs->stqs_taskq[0];
1024 	} else {
1025 		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
1026 	}
1027 
1028 	taskq_dispatch_ent(tq, func, arg, flags, ent);
1029 }
1030 
1031 static void
1032 spa_create_zio_taskqs(spa_t *spa)
1033 {
1034 	for (int t = 0; t < ZIO_TYPES; t++) {
1035 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1036 			spa_taskqs_init(spa, t, q);
1037 		}
1038 	}
1039 }
1040 
1041 #ifdef _KERNEL
1042 static void
1043 spa_thread(void *arg)
1044 {
1045 	callb_cpr_t cprinfo;
1046 
1047 	spa_t *spa = arg;
1048 	user_t *pu = PTOU(curproc);
1049 
1050 	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
1051 	    spa->spa_name);
1052 
1053 	ASSERT(curproc != &p0);
1054 	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
1055 	    "zpool-%s", spa->spa_name);
1056 	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
1057 
1058 	/* bind this thread to the requested psrset */
1059 	if (zio_taskq_psrset_bind != PS_NONE) {
1060 		pool_lock();
1061 		mutex_enter(&cpu_lock);
1062 		mutex_enter(&pidlock);
1063 		mutex_enter(&curproc->p_lock);
1064 
1065 		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
1066 		    0, NULL, NULL) == 0)  {
1067 			curthread->t_bind_pset = zio_taskq_psrset_bind;
1068 		} else {
1069 			cmn_err(CE_WARN,
1070 			    "Couldn't bind process for zfs pool \"%s\" to "
1071 			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
1072 		}
1073 
1074 		mutex_exit(&curproc->p_lock);
1075 		mutex_exit(&pidlock);
1076 		mutex_exit(&cpu_lock);
1077 		pool_unlock();
1078 	}
1079 
1080 	if (zio_taskq_sysdc) {
1081 		sysdc_thread_enter(curthread, 100, 0);
1082 	}
1083 
1084 	spa->spa_proc = curproc;
1085 	spa->spa_did = curthread->t_did;
1086 
1087 	spa_create_zio_taskqs(spa);
1088 
1089 	mutex_enter(&spa->spa_proc_lock);
1090 	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
1091 
1092 	spa->spa_proc_state = SPA_PROC_ACTIVE;
1093 	cv_broadcast(&spa->spa_proc_cv);
1094 
1095 	CALLB_CPR_SAFE_BEGIN(&cprinfo);
1096 	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
1097 		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1098 	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
1099 
1100 	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
1101 	spa->spa_proc_state = SPA_PROC_GONE;
1102 	spa->spa_proc = &p0;
1103 	cv_broadcast(&spa->spa_proc_cv);
1104 	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */
1105 
1106 	mutex_enter(&curproc->p_lock);
1107 	lwp_exit();
1108 }
1109 #endif
1110 
1111 /*
1112  * Activate an uninitialized pool.
1113  */
1114 static void
1115 spa_activate(spa_t *spa, int mode)
1116 {
1117 	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
1118 
1119 	spa->spa_state = POOL_STATE_ACTIVE;
1120 	spa->spa_mode = mode;
1121 
1122 	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
1123 	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
1124 
1125 	/* Try to create a covering process */
1126 	mutex_enter(&spa->spa_proc_lock);
1127 	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
1128 	ASSERT(spa->spa_proc == &p0);
1129 	spa->spa_did = 0;
1130 
1131 	/* Only create a process if we're going to be around a while. */
1132 	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
1133 		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
1134 		    NULL, 0) == 0) {
1135 			spa->spa_proc_state = SPA_PROC_CREATED;
1136 			while (spa->spa_proc_state == SPA_PROC_CREATED) {
1137 				cv_wait(&spa->spa_proc_cv,
1138 				    &spa->spa_proc_lock);
1139 			}
1140 			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1141 			ASSERT(spa->spa_proc != &p0);
1142 			ASSERT(spa->spa_did != 0);
1143 		} else {
1144 #ifdef _KERNEL
1145 			cmn_err(CE_WARN,
1146 			    "Couldn't create process for zfs pool \"%s\"\n",
1147 			    spa->spa_name);
1148 #endif
1149 		}
1150 	}
1151 	mutex_exit(&spa->spa_proc_lock);
1152 
1153 	/* If we didn't create a process, we need to create our taskqs. */
1154 	if (spa->spa_proc == &p0) {
1155 		spa_create_zio_taskqs(spa);
1156 	}
1157 
1158 	for (size_t i = 0; i < TXG_SIZE; i++)
1159 		spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 0);
1160 
1161 	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
1162 	    offsetof(vdev_t, vdev_config_dirty_node));
1163 	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
1164 	    offsetof(objset_t, os_evicting_node));
1165 	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
1166 	    offsetof(vdev_t, vdev_state_dirty_node));
1167 
1168 	txg_list_create(&spa->spa_vdev_txg_list, spa,
1169 	    offsetof(struct vdev, vdev_txg_node));
1170 
1171 	avl_create(&spa->spa_errlist_scrub,
1172 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1173 	    offsetof(spa_error_entry_t, se_avl));
1174 	avl_create(&spa->spa_errlist_last,
1175 	    spa_error_entry_compare, sizeof (spa_error_entry_t),
1176 	    offsetof(spa_error_entry_t, se_avl));
1177 }
1178 
1179 /*
1180  * Opposite of spa_activate().
1181  */
1182 static void
1183 spa_deactivate(spa_t *spa)
1184 {
1185 	ASSERT(spa->spa_sync_on == B_FALSE);
1186 	ASSERT(spa->spa_dsl_pool == NULL);
1187 	ASSERT(spa->spa_root_vdev == NULL);
1188 	ASSERT(spa->spa_async_zio_root == NULL);
1189 	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
1190 
1191 	spa_evicting_os_wait(spa);
1192 
1193 	txg_list_destroy(&spa->spa_vdev_txg_list);
1194 
1195 	list_destroy(&spa->spa_config_dirty_list);
1196 	list_destroy(&spa->spa_evicting_os_list);
1197 	list_destroy(&spa->spa_state_dirty_list);
1198 
1199 	for (int t = 0; t < ZIO_TYPES; t++) {
1200 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
1201 			spa_taskqs_fini(spa, t, q);
1202 		}
1203 	}
1204 
1205 	for (size_t i = 0; i < TXG_SIZE; i++) {
1206 		ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
1207 		VERIFY0(zio_wait(spa->spa_txg_zio[i]));
1208 		spa->spa_txg_zio[i] = NULL;
1209 	}
1210 
1211 	metaslab_class_destroy(spa->spa_normal_class);
1212 	spa->spa_normal_class = NULL;
1213 
1214 	metaslab_class_destroy(spa->spa_log_class);
1215 	spa->spa_log_class = NULL;
1216 
1217 	/*
1218 	 * If this was part of an import or the open otherwise failed, we may
1219 	 * still have errors left in the queues.  Empty them just in case.
1220 	 */
1221 	spa_errlog_drain(spa);
1222 
1223 	avl_destroy(&spa->spa_errlist_scrub);
1224 	avl_destroy(&spa->spa_errlist_last);
1225 
1226 	spa->spa_state = POOL_STATE_UNINITIALIZED;
1227 
1228 	mutex_enter(&spa->spa_proc_lock);
1229 	if (spa->spa_proc_state != SPA_PROC_NONE) {
1230 		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
1231 		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
1232 		cv_broadcast(&spa->spa_proc_cv);
1233 		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
1234 			ASSERT(spa->spa_proc != &p0);
1235 			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
1236 		}
1237 		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
1238 		spa->spa_proc_state = SPA_PROC_NONE;
1239 	}
1240 	ASSERT(spa->spa_proc == &p0);
1241 	mutex_exit(&spa->spa_proc_lock);
1242 
1243 	/*
1244 	 * We want to make sure spa_thread() has actually exited the ZFS
1245 	 * module, so that the module can't be unloaded out from underneath
1246 	 * it.
1247 	 */
1248 	if (spa->spa_did != 0) {
1249 		thread_join(spa->spa_did);
1250 		spa->spa_did = 0;
1251 	}
1252 }
1253 
1254 /*
1255  * Verify a pool configuration, and construct the vdev tree appropriately.  This
1256  * will create all the necessary vdevs in the appropriate layout, with each vdev
1257  * in the CLOSED state.  This will prep the pool before open/creation/import.
1258  * All vdev validation is done by the vdev_alloc() routine.
1259  */
1260 static int
1261 spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
1262     uint_t id, int atype)
1263 {
1264 	nvlist_t **child;
1265 	uint_t children;
1266 	int error;
1267 
1268 	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
1269 		return (error);
1270 
1271 	if ((*vdp)->vdev_ops->vdev_op_leaf)
1272 		return (0);
1273 
1274 	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1275 	    &child, &children);
1276 
1277 	if (error == ENOENT)
1278 		return (0);
1279 
1280 	if (error) {
1281 		vdev_free(*vdp);
1282 		*vdp = NULL;
1283 		return (SET_ERROR(EINVAL));
1284 	}
1285 
1286 	for (int c = 0; c < children; c++) {
1287 		vdev_t *vd;
1288 		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
1289 		    atype)) != 0) {
1290 			vdev_free(*vdp);
1291 			*vdp = NULL;
1292 			return (error);
1293 		}
1294 	}
1295 
1296 	ASSERT(*vdp != NULL);
1297 
1298 	return (0);
1299 }
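
/*
 * A sketch of the config nvlist shape this routine consumes (illustrative
 * values only): every node carries a "type" (ZPOOL_CONFIG_TYPE) and interior
 * nodes carry a "children" (ZPOOL_CONFIG_CHILDREN) nvlist array, e.g.
 *
 *	type="root"
 *	    children[0]: type="mirror"
 *		children[0]: type="disk" path="/dev/dsk/c0t0d0s0"
 *		children[1]: type="disk" path="/dev/dsk/c0t1d0s0"
 *
 * spa_config_parse() walks this tree recursively, allocating a CLOSED vdev_t
 * for every node via vdev_alloc().
 */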
1300 
1301 /*
1302  * Opposite of spa_load().
1303  */
1304 static void
1305 spa_unload(spa_t *spa)
1306 {
1307 	int i;
1308 
1309 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1310 
1311 	spa_load_note(spa, "UNLOADING");
1312 
1313 	/*
1314 	 * Stop async tasks.
1315 	 */
1316 	spa_async_suspend(spa);
1317 
1318 	/*
1319 	 * Stop syncing.
1320 	 */
1321 	if (spa->spa_sync_on) {
1322 		txg_sync_stop(spa->spa_dsl_pool);
1323 		spa->spa_sync_on = B_FALSE;
1324 	}
1325 
1326 	/*
1327 	 * Even though vdev_free() also calls vdev_metaslab_fini, we need
1328 	 * to call it earlier, before we wait for async i/o to complete.
1329 	 * This ensures that there is no async metaslab prefetching, by
1330 	 * calling taskq_wait(mg_taskq).
1331 	 */
1332 	if (spa->spa_root_vdev != NULL) {
1333 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1334 		for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
1335 			vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
1336 		spa_config_exit(spa, SCL_ALL, FTAG);
1337 	}
1338 
1339 	/*
1340 	 * Wait for any outstanding async I/O to complete.
1341 	 */
1342 	if (spa->spa_async_zio_root != NULL) {
1343 		for (int i = 0; i < max_ncpus; i++)
1344 			(void) zio_wait(spa->spa_async_zio_root[i]);
1345 		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
1346 		spa->spa_async_zio_root = NULL;
1347 	}
1348 
1349 	if (spa->spa_vdev_removal != NULL) {
1350 		spa_vdev_removal_destroy(spa->spa_vdev_removal);
1351 		spa->spa_vdev_removal = NULL;
1352 	}
1353 
1354 	if (spa->spa_condense_zthr != NULL) {
1355 		ASSERT(!zthr_isrunning(spa->spa_condense_zthr));
1356 		zthr_destroy(spa->spa_condense_zthr);
1357 		spa->spa_condense_zthr = NULL;
1358 	}
1359 
1360 	if (spa->spa_checkpoint_discard_zthr != NULL) {
1361 		ASSERT(!zthr_isrunning(spa->spa_checkpoint_discard_zthr));
1362 		zthr_destroy(spa->spa_checkpoint_discard_zthr);
1363 		spa->spa_checkpoint_discard_zthr = NULL;
1364 	}
1365 
1366 	spa_condense_fini(spa);
1367 
1368 	bpobj_close(&spa->spa_deferred_bpobj);
1369 
1370 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1371 
1372 	/*
1373 	 * Close all vdevs.
1374 	 */
1375 	if (spa->spa_root_vdev)
1376 		vdev_free(spa->spa_root_vdev);
1377 	ASSERT(spa->spa_root_vdev == NULL);
1378 
1379 	/*
1380 	 * Close the dsl pool.
1381 	 */
1382 	if (spa->spa_dsl_pool) {
1383 		dsl_pool_close(spa->spa_dsl_pool);
1384 		spa->spa_dsl_pool = NULL;
1385 		spa->spa_meta_objset = NULL;
1386 	}
1387 
1388 	ddt_unload(spa);
1389 
1390 	/*
1391 	 * Drop and purge level 2 cache
1392 	 */
1393 	spa_l2cache_drop(spa);
1394 
1395 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1396 		vdev_free(spa->spa_spares.sav_vdevs[i]);
1397 	if (spa->spa_spares.sav_vdevs) {
1398 		kmem_free(spa->spa_spares.sav_vdevs,
1399 		    spa->spa_spares.sav_count * sizeof (void *));
1400 		spa->spa_spares.sav_vdevs = NULL;
1401 	}
1402 	if (spa->spa_spares.sav_config) {
1403 		nvlist_free(spa->spa_spares.sav_config);
1404 		spa->spa_spares.sav_config = NULL;
1405 	}
1406 	spa->spa_spares.sav_count = 0;
1407 
1408 	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1409 		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1410 		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1411 	}
1412 	if (spa->spa_l2cache.sav_vdevs) {
1413 		kmem_free(spa->spa_l2cache.sav_vdevs,
1414 		    spa->spa_l2cache.sav_count * sizeof (void *));
1415 		spa->spa_l2cache.sav_vdevs = NULL;
1416 	}
1417 	if (spa->spa_l2cache.sav_config) {
1418 		nvlist_free(spa->spa_l2cache.sav_config);
1419 		spa->spa_l2cache.sav_config = NULL;
1420 	}
1421 	spa->spa_l2cache.sav_count = 0;
1422 
1423 	spa->spa_async_suspended = 0;
1424 
1425 	spa->spa_indirect_vdevs_loaded = B_FALSE;
1426 
1427 	if (spa->spa_comment != NULL) {
1428 		spa_strfree(spa->spa_comment);
1429 		spa->spa_comment = NULL;
1430 	}
1431 
1432 	spa_config_exit(spa, SCL_ALL, FTAG);
1433 }
1434 
1435 /*
1436  * Load (or re-load) the current list of vdevs describing the active spares for
1437  * this pool.  When this is called, we have some form of basic information in
1438  * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
1439  * then re-generate a more complete list including status information.
1440  */
1441 void
1442 spa_load_spares(spa_t *spa)
1443 {
1444 	nvlist_t **spares;
1445 	uint_t nspares;
1446 	int i;
1447 	vdev_t *vd, *tvd;
1448 
1449 #ifndef _KERNEL
1450 	/*
1451 	 * zdb opens both the current state of the pool and the
1452 	 * checkpointed state (if present), with a different spa_t.
1453 	 *
1454 	 * As spare vdevs are shared among open pools, we skip loading
1455 	 * them when we load the checkpointed state of the pool.
1456 	 */
1457 	if (!spa_writeable(spa))
1458 		return;
1459 #endif
1460 
1461 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1462 
1463 	/*
1464 	 * First, close and free any existing spare vdevs.
1465 	 */
1466 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
1467 		vd = spa->spa_spares.sav_vdevs[i];
1468 
1469 		/* Undo the call to spa_activate() below */
1470 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1471 		    B_FALSE)) != NULL && tvd->vdev_isspare)
1472 			spa_spare_remove(tvd);
1473 		vdev_close(vd);
1474 		vdev_free(vd);
1475 	}
1476 
1477 	if (spa->spa_spares.sav_vdevs)
1478 		kmem_free(spa->spa_spares.sav_vdevs,
1479 		    spa->spa_spares.sav_count * sizeof (void *));
1480 
1481 	if (spa->spa_spares.sav_config == NULL)
1482 		nspares = 0;
1483 	else
1484 		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1485 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1486 
1487 	spa->spa_spares.sav_count = (int)nspares;
1488 	spa->spa_spares.sav_vdevs = NULL;
1489 
1490 	if (nspares == 0)
1491 		return;
1492 
1493 	/*
1494 	 * Construct the array of vdevs, opening them to get status in the
1495 	 * process.  For each spare, there are potentially two different vdev_t
1496 	 * structures associated with it: one in the list of spares (used only
1497 	 * for basic validation purposes) and one in the active vdev
1498 	 * configuration (if it's spared in).  During this phase we open and
1499 	 * validate each vdev on the spare list.  If the vdev also exists in the
1500 	 * active configuration, then we also mark this vdev as an active spare.
1501 	 */
1502 	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
1503 	    KM_SLEEP);
1504 	for (i = 0; i < spa->spa_spares.sav_count; i++) {
1505 		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
1506 		    VDEV_ALLOC_SPARE) == 0);
1507 		ASSERT(vd != NULL);
1508 
1509 		spa->spa_spares.sav_vdevs[i] = vd;
1510 
1511 		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
1512 		    B_FALSE)) != NULL) {
1513 			if (!tvd->vdev_isspare)
1514 				spa_spare_add(tvd);
1515 
1516 			/*
1517 			 * We only mark the spare active if we were successfully
1518 			 * able to load the vdev.  Otherwise, importing a pool
1519 			 * with a bad active spare would result in strange
1520 			 * behavior, because multiple pools would think the spare
1521 			 * is actively in use.
1522 			 *
1523 			 * There is a vulnerability here to an equally bizarre
1524 			 * circumstance, where a dead active spare is later
1525 			 * brought back to life (onlined or otherwise).  Given
1526 			 * the rarity of this scenario, and the extra complexity
1527 			 * it adds, we ignore the possibility.
1528 			 */
1529 			if (!vdev_is_dead(tvd))
1530 				spa_spare_activate(tvd);
1531 		}
1532 
1533 		vd->vdev_top = vd;
1534 		vd->vdev_aux = &spa->spa_spares;
1535 
1536 		if (vdev_open(vd) != 0)
1537 			continue;
1538 
1539 		if (vdev_validate_aux(vd) == 0)
1540 			spa_spare_add(vd);
1541 	}
1542 
1543 	/*
1544 	 * Recompute the stashed list of spares, with status information
1545 	 * this time.
1546 	 */
1547 	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1548 	    DATA_TYPE_NVLIST_ARRAY) == 0);
1549 
1550 	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1551 	    KM_SLEEP);
1552 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1553 		spares[i] = vdev_config_generate(spa,
1554 		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1555 	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1556 	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1557 	for (i = 0; i < spa->spa_spares.sav_count; i++)
1558 		nvlist_free(spares[i]);
1559 	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1560 }
1561 
1562 /*
1563  * Load (or re-load) the current list of vdevs describing the active l2cache for
1564  * this pool.  When this is called, we have some form of basic information in
1565  * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
1566  * then re-generate a more complete list including status information.
1567  * Devices which are already active have their details maintained, and are
1568  * not re-opened.
1569  */
1570 void
1571 spa_load_l2cache(spa_t *spa)
1572 {
1573 	nvlist_t **l2cache;
1574 	uint_t nl2cache;
1575 	int i, j, oldnvdevs;
1576 	uint64_t guid;
1577 	vdev_t *vd, **oldvdevs, **newvdevs;
1578 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1579 
1580 #ifndef _KERNEL
1581 	/*
1582 	 * zdb opens both the current state of the pool and the
1583 	 * checkpointed state (if present), with a different spa_t.
1584 	 *
1585 	 * As L2 caches are part of the ARC which is shared among open
1586 	 * pools, we skip loading them when we load the checkpointed
1587 	 * state of the pool.
1588 	 */
1589 	if (!spa_writeable(spa))
1590 		return;
1591 #endif
1592 
1593 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1594 
1595 	if (sav->sav_config != NULL) {
1596 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1597 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1598 		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1599 	} else {
1600 		nl2cache = 0;
1601 		newvdevs = NULL;
1602 	}
1603 
1604 	oldvdevs = sav->sav_vdevs;
1605 	oldnvdevs = sav->sav_count;
1606 	sav->sav_vdevs = NULL;
1607 	sav->sav_count = 0;
1608 
1609 	/*
1610 	 * Process new nvlist of vdevs.
1611 	 */
1612 	for (i = 0; i < nl2cache; i++) {
1613 		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1614 		    &guid) == 0);
1615 
1616 		newvdevs[i] = NULL;
1617 		for (j = 0; j < oldnvdevs; j++) {
1618 			vd = oldvdevs[j];
1619 			if (vd != NULL && guid == vd->vdev_guid) {
1620 				/*
1621 				 * Retain previous vdev for add/remove ops.
1622 				 */
1623 				newvdevs[i] = vd;
1624 				oldvdevs[j] = NULL;
1625 				break;
1626 			}
1627 		}
1628 
1629 		if (newvdevs[i] == NULL) {
1630 			/*
1631 			 * Create new vdev
1632 			 */
1633 			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1634 			    VDEV_ALLOC_L2CACHE) == 0);
1635 			ASSERT(vd != NULL);
1636 			newvdevs[i] = vd;
1637 
1638 			/*
1639 			 * Commit this vdev as an l2cache device,
1640 			 * even if it fails to open.
1641 			 */
1642 			spa_l2cache_add(vd);
1643 
1644 			vd->vdev_top = vd;
1645 			vd->vdev_aux = sav;
1646 
1647 			spa_l2cache_activate(vd);
1648 
1649 			if (vdev_open(vd) != 0)
1650 				continue;
1651 
1652 			(void) vdev_validate_aux(vd);
1653 
1654 			if (!vdev_is_dead(vd))
1655 				l2arc_add_vdev(spa, vd);
1656 		}
1657 	}
1658 
1659 	/*
1660 	 * Purge vdevs that were dropped
1661 	 */
1662 	for (i = 0; i < oldnvdevs; i++) {
1663 		uint64_t pool;
1664 
1665 		vd = oldvdevs[i];
1666 		if (vd != NULL) {
1667 			ASSERT(vd->vdev_isl2cache);
1668 
1669 			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1670 			    pool != 0ULL && l2arc_vdev_present(vd))
1671 				l2arc_remove_vdev(vd);
1672 			vdev_clear_stats(vd);
1673 			vdev_free(vd);
1674 		}
1675 	}
1676 
1677 	if (oldvdevs)
1678 		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1679 
1680 	if (sav->sav_config == NULL)
1681 		goto out;
1682 
1683 	sav->sav_vdevs = newvdevs;
1684 	sav->sav_count = (int)nl2cache;
1685 
1686 	/*
1687 	 * Recompute the stashed list of l2cache devices, with status
1688 	 * information this time.
1689 	 */
1690 	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1691 	    DATA_TYPE_NVLIST_ARRAY) == 0);
1692 
1693 	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1694 	for (i = 0; i < sav->sav_count; i++)
1695 		l2cache[i] = vdev_config_generate(spa,
1696 		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1697 	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1698 	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1699 out:
1700 	for (i = 0; i < sav->sav_count; i++)
1701 		nvlist_free(l2cache[i]);
1702 	if (sav->sav_count)
1703 		kmem_free(l2cache, sav->sav_count * sizeof (void *));
1704 }
1705 
1706 static int
1707 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1708 {
1709 	dmu_buf_t *db;
1710 	char *packed = NULL;
1711 	size_t nvsize = 0;
1712 	int error;
1713 	*value = NULL;
1714 
1715 	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
1716 	if (error != 0)
1717 		return (error);
1718 
1719 	nvsize = *(uint64_t *)db->db_data;
1720 	dmu_buf_rele(db, FTAG);
1721 
1722 	packed = kmem_alloc(nvsize, KM_SLEEP);
1723 	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1724 	    DMU_READ_PREFETCH);
1725 	if (error == 0)
1726 		error = nvlist_unpack(packed, nvsize, value, 0);
1727 	kmem_free(packed, nvsize);
1728 
1729 	return (error);
1730 }
1731 
1732 /*
1733  * Concrete top-level vdevs that are not missing and are not logs. At every
1734  * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
1735  */
1736 static uint64_t
1737 spa_healthy_core_tvds(spa_t *spa)
1738 {
1739 	vdev_t *rvd = spa->spa_root_vdev;
1740 	uint64_t tvds = 0;
1741 
1742 	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
1743 		vdev_t *vd = rvd->vdev_child[i];
1744 		if (vd->vdev_islog)
1745 			continue;
1746 		if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
1747 			tvds++;
1748 	}
1749 
1750 	return (tvds);
1751 }
1752 
1753 /*
1754  * Checks to see if the given vdev could not be opened, in which case we post a
1755  * sysevent to notify the autoreplace code that the device has been removed.
1756  */
1757 static void
1758 spa_check_removed(vdev_t *vd)
1759 {
1760 	for (uint64_t c = 0; c < vd->vdev_children; c++)
1761 		spa_check_removed(vd->vdev_child[c]);
1762 
1763 	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
1764 	    vdev_is_concrete(vd)) {
1765 		zfs_post_autoreplace(vd->vdev_spa, vd);
1766 		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
1767 	}
1768 }
1769 
1770 static int
1771 spa_check_for_missing_logs(spa_t *spa)
1772 {
1773 	vdev_t *rvd = spa->spa_root_vdev;
1774 
1775 	/*
1776 	 * If we're doing a normal import, then build up any additional
1777 	 * diagnostic information about missing log devices.
1778 	 * We'll pass this up to the user for further processing.
1779 	 */
1780 	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
1781 		nvlist_t **child, *nv;
1782 		uint64_t idx = 0;
1783 
1784 		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
1785 		    KM_SLEEP);
1786 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1787 
1788 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
1789 			vdev_t *tvd = rvd->vdev_child[c];
1790 
1791 			/*
1792 			 * We consider a device as missing only if it failed
1793 			 * to open (i.e. offline or faulted is not considered
1794 			 * as missing).
1795 			 */
1796 			if (tvd->vdev_islog &&
1797 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
1798 				child[idx++] = vdev_config_generate(spa, tvd,
1799 				    B_FALSE, VDEV_CONFIG_MISSING);
1800 			}
1801 		}
1802 
1803 		if (idx > 0) {
1804 			fnvlist_add_nvlist_array(nv,
1805 			    ZPOOL_CONFIG_CHILDREN, child, idx);
1806 			fnvlist_add_nvlist(spa->spa_load_info,
1807 			    ZPOOL_CONFIG_MISSING_DEVICES, nv);
1808 
1809 			for (uint64_t i = 0; i < idx; i++)
1810 				nvlist_free(child[i]);
1811 		}
1812 		nvlist_free(nv);
1813 		kmem_free(child, rvd->vdev_children * sizeof (nvlist_t **));
1814 
1815 		if (idx > 0) {
1816 			spa_load_failed(spa, "some log devices are missing");
1817 			return (SET_ERROR(ENXIO));
1818 		}
1819 	} else {
1820 		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
1821 			vdev_t *tvd = rvd->vdev_child[c];
1822 
1823 			if (tvd->vdev_islog &&
1824 			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
1825 				spa_set_log_state(spa, SPA_LOG_CLEAR);
1826 				spa_load_note(spa, "some log devices are "
1827 				    "missing, ZIL is dropped.");
1828 				break;
1829 			}
1830 		}
1831 	}
1832 
1833 	return (0);
1834 }
1835 
1836 /*
1837  * Check for missing log devices
1838  */
1839 static boolean_t
1840 spa_check_logs(spa_t *spa)
1841 {
1842 	boolean_t rv = B_FALSE;
1843 	dsl_pool_t *dp = spa_get_dsl(spa);
1844 
1845 	switch (spa->spa_log_state) {
1846 	case SPA_LOG_MISSING:
1847 		/* need to recheck in case slog has been restored */
1848 	case SPA_LOG_UNKNOWN:
1849 		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
1850 		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
1851 		if (rv)
1852 			spa_set_log_state(spa, SPA_LOG_MISSING);
1853 		break;
1854 	}
1855 	return (rv);
1856 }
1857 
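/*
 * Passivate the metaslab groups of all log (slog) top-level vdevs so that
 * no further allocations are made from them.  Returns B_TRUE if any slog
 * device was found.
 */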
1858 static boolean_t
1859 spa_passivate_log(spa_t *spa)
1860 {
1861 	vdev_t *rvd = spa->spa_root_vdev;
1862 	boolean_t slog_found = B_FALSE;
1863 
1864 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1865 
1866 	if (!spa_has_slogs(spa))
1867 		return (B_FALSE);
1868 
1869 	for (int c = 0; c < rvd->vdev_children; c++) {
1870 		vdev_t *tvd = rvd->vdev_child[c];
1871 		metaslab_group_t *mg = tvd->vdev_mg;
1872 
1873 		if (tvd->vdev_islog) {
1874 			metaslab_group_passivate(mg);
1875 			slog_found = B_TRUE;
1876 		}
1877 	}
1878 
1879 	return (slog_found);
1880 }
1881 
1882 static void
1883 spa_activate_log(spa_t *spa)
1884 {
1885 	vdev_t *rvd = spa->spa_root_vdev;
1886 
1887 	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1888 
1889 	for (int c = 0; c < rvd->vdev_children; c++) {
1890 		vdev_t *tvd = rvd->vdev_child[c];
1891 		metaslab_group_t *mg = tvd->vdev_mg;
1892 
1893 		if (tvd->vdev_islog)
1894 			metaslab_group_activate(mg);
1895 	}
1896 }
1897 
1898 int
1899 spa_reset_logs(spa_t *spa)
1900 {
1901 	int error;
1902 
1903 	error = dmu_objset_find(spa_name(spa), zil_reset,
1904 	    NULL, DS_FIND_CHILDREN);
1905 	if (error == 0) {
1906 		/*
1907 		 * We successfully offlined the log device, sync out the
1908 		 * current txg so that the "stubby" block can be removed
1909 		 * by zil_sync().
1910 		 */
1911 		txg_wait_synced(spa->spa_dsl_pool, 0);
1912 	}
1913 	return (error);
1914 }
1915 
1916 static void
1917 spa_aux_check_removed(spa_aux_vdev_t *sav)
1918 {
1919 	for (int i = 0; i < sav->sav_count; i++)
1920 		spa_check_removed(sav->sav_vdevs[i]);
1921 }
1922 
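/*
 * Called when a ZIL claim i/o completes; record the highest block birth
 * txg seen so far in spa_claim_max_txg.
 */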
1923 void
1924 spa_claim_notify(zio_t *zio)
1925 {
1926 	spa_t *spa = zio->io_spa;
1927 
1928 	if (zio->io_error)
1929 		return;
1930 
1931 	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
1932 	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
1933 		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
1934 	mutex_exit(&spa->spa_props_lock);
1935 }
1936 
1937 typedef struct spa_load_error {
1938 	uint64_t	sle_meta_count;
1939 	uint64_t	sle_data_count;
1940 } spa_load_error_t;
1941 
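/*
 * Completion callback for the reads issued by spa_load_verify_cb():
 * classify any i/o error as a metadata or data error and release the
 * in-flight slot.
 */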
1942 static void
1943 spa_load_verify_done(zio_t *zio)
1944 {
1945 	blkptr_t *bp = zio->io_bp;
1946 	spa_load_error_t *sle = zio->io_private;
1947 	dmu_object_type_t type = BP_GET_TYPE(bp);
1948 	int error = zio->io_error;
1949 	spa_t *spa = zio->io_spa;
1950 
1951 	abd_free(zio->io_abd);
1952 	if (error) {
1953 		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
1954 		    type != DMU_OT_INTENT_LOG)
1955 			atomic_inc_64(&sle->sle_meta_count);
1956 		else
1957 			atomic_inc_64(&sle->sle_data_count);
1958 	}
1959 
1960 	mutex_enter(&spa->spa_scrub_lock);
1961 	spa->spa_scrub_inflight--;
1962 	cv_broadcast(&spa->spa_scrub_io_cv);
1963 	mutex_exit(&spa->spa_scrub_lock);
1964 }
1965 
1966 /*
1967  * Maximum number of concurrent scrub i/os to create while verifying
1968  * a pool during import.
1969  */
1970 int spa_load_verify_maxinflight = 10000;
1971 boolean_t spa_load_verify_metadata = B_TRUE;
1972 boolean_t spa_load_verify_data = B_TRUE;
1973 
1974 /*ARGSUSED*/
1975 static int
1976 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1977     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1978 {
1979 	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
1980 		return (0);
1981 	/*
1982 	 * Note: normally this routine will not be called if
1983 	 * spa_load_verify_metadata is not set.  However, it may be useful
1984 	 * to manually set the flag after the traversal has begun.
1985 	 */
1986 	if (!spa_load_verify_metadata)
1987 		return (0);
1988 	if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
1989 		return (0);
1990 
1991 	zio_t *rio = arg;
1992 	size_t size = BP_GET_PSIZE(bp);
1993 
1994 	mutex_enter(&spa->spa_scrub_lock);
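	/*
	 * Throttle the number of in-flight verification reads to
	 * spa_load_verify_maxinflight.
	 */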
1995 	while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
1996 		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
1997 	spa->spa_scrub_inflight++;
1998 	mutex_exit(&spa->spa_scrub_lock);
1999 
2000 	zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
2001 	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
2002 	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
2003 	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
2004 	return (0);
2005 }
2006 
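/*
 * dmu_objset_find_dp() callback used by spa_load_verify(): fail the load
 * if any dataset name is too long.
 */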
2007 /* ARGSUSED */
2008 int
2009 verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2010 {
2011 	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
2012 		return (SET_ERROR(ENAMETOOLONG));
2013 
2014 	return (0);
2015 }
2016 
2017 static int
2018 spa_load_verify(spa_t *spa)
2019 {
2020 	zio_t *rio;
2021 	spa_load_error_t sle = { 0 };
2022 	zpool_rewind_policy_t policy;
2023 	boolean_t verify_ok = B_FALSE;
2024 	int error = 0;
2025 
2026 	zpool_get_rewind_policy(spa->spa_config, &policy);
2027 
2028 	if (policy.zrp_request & ZPOOL_NEVER_REWIND)
2029 		return (0);
2030 
2031 	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
2032 	error = dmu_objset_find_dp(spa->spa_dsl_pool,
2033 	    spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
2034 	    DS_FIND_CHILDREN);
2035 	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
2036 	if (error != 0)
2037 		return (error);
2038 
2039 	rio = zio_root(spa, NULL, &sle,
2040 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
2041 
2042 	if (spa_load_verify_metadata) {
2043 		if (spa->spa_extreme_rewind) {
2044 			spa_load_note(spa, "performing a complete scan of the "
2045 			    "pool since extreme rewind is on. This may take "
2046 			    "a very long time.\n  (spa_load_verify_data=%u, "
2047 			    "spa_load_verify_metadata=%u)",
2048 			    spa_load_verify_data, spa_load_verify_metadata);
2049 		}
2050 		error = traverse_pool(spa, spa->spa_verify_min_txg,
2051 		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
2052 		    spa_load_verify_cb, rio);
2053 	}
2054 
2055 	(void) zio_wait(rio);
2056 
2057 	spa->spa_load_meta_errors = sle.sle_meta_count;
2058 	spa->spa_load_data_errors = sle.sle_data_count;
2059 
2060 	if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
2061 		spa_load_note(spa, "spa_load_verify found %llu metadata errors "
2062 		    "and %llu data errors", (u_longlong_t)sle.sle_meta_count,
2063 		    (u_longlong_t)sle.sle_data_count);
2064 	}
2065 
2066 	if (spa_load_verify_dryrun ||
2067 	    (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
2068 	    sle.sle_data_count <= policy.zrp_maxdata)) {
2069 		int64_t loss = 0;
2070 
2071 		verify_ok = B_TRUE;
2072 		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
2073 		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
2074 
2075 		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
2076 		VERIFY(nvlist_add_uint64(spa->spa_load_info,
2077 		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
2078 		VERIFY(nvlist_add_int64(spa->spa_load_info,
2079 		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
2080 		VERIFY(nvlist_add_uint64(spa->spa_load_info,
2081 		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
2082 	} else {
2083 		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
2084 	}
2085 
2086 	if (spa_load_verify_dryrun)
2087 		return (0);
2088 
2089 	if (error) {
2090 		if (error != ENXIO && error != EIO)
2091 			error = SET_ERROR(EIO);
2092 		return (error);
2093 	}
2094 
2095 	return (verify_ok ? 0 : EIO);
2096 }
2097 
2098 /*
2099  * Find a value in the pool props object.
2100  */
2101 static void
2102 spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
2103 {
2104 	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
2105 	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
2106 }
2107 
2108 /*
2109  * Find a value in the pool directory object.
2110  */
2111 static int
2112 spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
2113 {
2114 	int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
2115 	    name, sizeof (uint64_t), 1, val);
2116 
2117 	if (error != 0 && (error != ENOENT || log_enoent)) {
2118 		spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
2119 		    "[error=%d]", name, error);
2120 	}
2121 
2122 	return (error);
2123 }
2124 
2125 static int
2126 spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
2127 {
2128 	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
2129 	return (SET_ERROR(err));
2130 }
2131 
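/*
 * Start the auxiliary threads that operate on a writeable pool: the
 * indirect-vdev condensing thread and the checkpoint discard zthr.
 */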
2132 static void
2133 spa_spawn_aux_threads(spa_t *spa)
2134 {
2135 	ASSERT(spa_writeable(spa));
2136 
2137 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
2138 
2139 	spa_start_indirect_condensing_thread(spa);
2140 
2141 	ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
2142 	spa->spa_checkpoint_discard_zthr =
2143 	    zthr_create(spa_checkpoint_discard_thread_check,
2144 	    spa_checkpoint_discard_thread, spa);
2145 }
2146 
2147 /*
2148  * Fix up config after a partly-completed split.  This is done with the
2149  * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
2150  * pool have that entry in their config, but only the splitting one contains
2151  * a list of all the guids of the vdevs that are being split off.
2152  *
2153  * This function determines what to do with that list: either rejoin
2154  * all the disks to the pool, or complete the splitting process.  To attempt
2155  * the rejoin, each disk that is offlined is marked online again, and
2156  * we do a reopen() call.  If the vdev label for every disk that was
2157  * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
2158  * then we call vdev_split() on each disk, and complete the split.
2159  *
2160  * Otherwise we leave the config alone, with all the vdevs in place in
2161  * the original pool.
2162  */
2163 static void
2164 spa_try_repair(spa_t *spa, nvlist_t *config)
2165 {
2166 	uint_t extracted;
2167 	uint64_t *glist;
2168 	uint_t i, gcount;
2169 	nvlist_t *nvl;
2170 	vdev_t **vd;
2171 	boolean_t attempt_reopen;
2172 
2173 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
2174 		return;
2175 
2176 	/* check that the config is complete */
2177 	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
2178 	    &glist, &gcount) != 0)
2179 		return;
2180 
2181 	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
2182 
2183 	/* attempt to online all the vdevs & validate */
2184 	attempt_reopen = B_TRUE;
2185 	for (i = 0; i < gcount; i++) {
2186 		if (glist[i] == 0)	/* vdev is hole */
2187 			continue;
2188 
2189 		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
2190 		if (vd[i] == NULL) {
2191 			/*
2192 			 * Don't bother attempting to reopen the disks;
2193 			 * just do the split.
2194 			 */
2195 			attempt_reopen = B_FALSE;
2196 		} else {
2197 			/* attempt to re-online it */
2198 			vd[i]->vdev_offline = B_FALSE;
2199 		}
2200 	}
2201 
2202 	if (attempt_reopen) {
2203 		vdev_reopen(spa->spa_root_vdev);
2204 
2205 		/* check each device to see what state it's in */
2206 		for (extracted = 0, i = 0; i < gcount; i++) {
2207 			if (vd[i] != NULL &&
2208 			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
2209 				break;
2210 			++extracted;
2211 		}
2212 	}
2213 
2214 	/*
2215 	 * If every disk has been moved to the new pool, or if we never
2216 	 * even attempted to look at them, then we split them off for
2217 	 * good.
2218 	 */
2219 	if (!attempt_reopen || gcount == extracted) {
2220 		for (i = 0; i < gcount; i++)
2221 			if (vd[i] != NULL)
2222 				vdev_split(vd[i]);
2223 		vdev_reopen(spa->spa_root_vdev);
2224 	}
2225 
2226 	kmem_free(vd, gcount * sizeof (vdev_t *));
2227 }
2228 
2229 static int
2230 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
2231 {
2232 	char *ereport = FM_EREPORT_ZFS_POOL;
2233 	int error;
2234 
2235 	spa->spa_load_state = state;
2236 
2237 	gethrestime(&spa->spa_loaded_ts);
2238 	error = spa_load_impl(spa, type, &ereport);
2239 
2240 	/*
2241 	 * Don't count references from objsets that are already closed
2242 	 * and are making their way through the eviction process.
2243 	 */
2244 	spa_evicting_os_wait(spa);
2245 	spa->spa_minref = refcount_count(&spa->spa_refcount);
2246 	if (error) {
2247 		if (error != EEXIST) {
2248 			spa->spa_loaded_ts.tv_sec = 0;
2249 			spa->spa_loaded_ts.tv_nsec = 0;
2250 		}
2251 		if (error != EBADF) {
2252 			zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
2253 		}
2254 	}
2255 	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
2256 	spa->spa_ena = 0;
2257 
2258 	return (error);
2259 }
2260 
2261 /*
2262  * Count the number of per-vdev ZAPs associated with all of the vdevs in the
2263  * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
2264  * spa's per-vdev ZAP list.
2265  */
2266 static uint64_t
2267 vdev_count_verify_zaps(vdev_t *vd)
2268 {
2269 	spa_t *spa = vd->vdev_spa;
2270 	uint64_t total = 0;
2271 	if (vd->vdev_top_zap != 0) {
2272 		total++;
2273 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2274 		    spa->spa_all_vdev_zaps, vd->vdev_top_zap));
2275 	}
2276 	if (vd->vdev_leaf_zap != 0) {
2277 		total++;
2278 		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
2279 		    spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
2280 	}
2281 
2282 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
2283 		total += vdev_count_verify_zaps(vd->vdev_child[i]);
2284 	}
2285 
2286 	return (total);
2287 }
2288 
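/*
 * Verify that the pool was not last written by another system: compare the
 * hostid stored in the MOS config against ours and fail the load on a
 * mismatch (see ZFS-8000-EY).
 */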
2289 static int
2290 spa_verify_host(spa_t *spa, nvlist_t *mos_config)
2291 {
2292 	uint64_t hostid;
2293 	char *hostname;
2294 	uint64_t myhostid = 0;
2295 
2296 	if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
2297 	    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
2298 		hostname = fnvlist_lookup_string(mos_config,
2299 		    ZPOOL_CONFIG_HOSTNAME);
2300 
2301 		myhostid = zone_get_hostid(NULL);
2302 
2303 		if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
2304 			cmn_err(CE_WARN, "pool '%s' could not be "
2305 			    "loaded as it was last accessed by "
2306 			    "another system (host: %s hostid: 0x%llx). "
2307 			    "See: http://illumos.org/msg/ZFS-8000-EY",
2308 			    spa_name(spa), hostname, (u_longlong_t)hostid);
2309 			spa_load_failed(spa, "hostid verification failed: pool "
2310 			    "last accessed by host: %s (hostid: 0x%llx)",
2311 			    hostname, (u_longlong_t)hostid);
2312 			return (SET_ERROR(EBADF));
2313 		}
2314 	}
2315 
2316 	return (0);
2317 }
2318 
2319 static int
2320 spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
2321 {
2322 	int error = 0;
2323 	nvlist_t *nvtree, *nvl, *config = spa->spa_config;
2324 	int parse;
2325 	vdev_t *rvd;
2326 	uint64_t pool_guid;
2327 	char *comment;
2328 
2329 	/*
2330 	 * Versioning wasn't explicitly added to the label until later, so if
2331 	 * it's not present treat it as the initial version.
2332 	 */
2333 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2334 	    &spa->spa_ubsync.ub_version) != 0)
2335 		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
2336 
2337 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
2338 		spa_load_failed(spa, "invalid config provided: '%s' missing",
2339 		    ZPOOL_CONFIG_POOL_GUID);
2340 		return (SET_ERROR(EINVAL));
2341 	}
2342 
2343 	/*
2344 	 * If we are doing an import, ensure that the pool is not already
2345 	 * imported by checking if its pool guid already exists in the
2346 	 * spa namespace.
2347 	 *
2348 	 * The only case in which we allow an already-imported pool to be
2349 	 * imported again is when the pool is checkpointed and we want to
2350 	 * look at its checkpointed state from userland tools like zdb.
2351 	 */
2352 #ifdef _KERNEL
2353 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
2354 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
2355 	    spa_guid_exists(pool_guid, 0)) {
2356 #else
2357 	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
2358 	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
2359 	    spa_guid_exists(pool_guid, 0) &&
2360 	    !spa_importing_readonly_checkpoint(spa)) {
2361 #endif
2362 		spa_load_failed(spa, "a pool with guid %llu is already open",
2363 		    (u_longlong_t)pool_guid);
2364 		return (SET_ERROR(EEXIST));
2365 	}
2366 
2367 	spa->spa_config_guid = pool_guid;
2368 
2369 	nvlist_free(spa->spa_load_info);
2370 	spa->spa_load_info = fnvlist_alloc();
2371 
2372 	ASSERT(spa->spa_comment == NULL);
2373 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
2374 		spa->spa_comment = spa_strdup(comment);
2375 
2376 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
2377 	    &spa->spa_config_txg);
2378 
2379 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
2380 		spa->spa_config_splitting = fnvlist_dup(nvl);
2381 
2382 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
2383 		spa_load_failed(spa, "invalid config provided: '%s' missing",
2384 		    ZPOOL_CONFIG_VDEV_TREE);
2385 		return (SET_ERROR(EINVAL));
2386 	}
2387 
2388 	/*
2389 	 * Create "The Godfather" zio to hold all async IOs
2390 	 */
2391 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
2392 	    KM_SLEEP);
2393 	for (int i = 0; i < max_ncpus; i++) {
2394 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
2395 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2396 		    ZIO_FLAG_GODFATHER);
2397 	}
2398 
2399 	/*
2400 	 * Parse the configuration into a vdev tree.  We explicitly set the
2401 	 * value that will be returned by spa_version() since parsing the
2402 	 * configuration requires knowing the version number.
2403 	 */
2404 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2405 	parse = (type == SPA_IMPORT_EXISTING ?
2406 	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
2407 	error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
2408 	spa_config_exit(spa, SCL_ALL, FTAG);
2409 
2410 	if (error != 0) {
2411 		spa_load_failed(spa, "unable to parse config [error=%d]",
2412 		    error);
2413 		return (error);
2414 	}
2415 
2416 	ASSERT(spa->spa_root_vdev == rvd);
2417 	ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
2418 	ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
2419 
2420 	if (type != SPA_IMPORT_ASSEMBLE) {
2421 		ASSERT(spa_guid(spa) == pool_guid);
2422 	}
2423 
2424 	return (0);
2425 }
2426 
2427 /*
2428  * Recursively open all vdevs in the vdev tree. This function is called twice:
2429  * first with the untrusted config, then with the trusted config.
2430  */
2431 static int
2432 spa_ld_open_vdevs(spa_t *spa)
2433 {
2434 	int error = 0;
2435 
2436 	/*
2437 	 * spa_missing_tvds_allowed defines how many top-level vdevs can be
2438 	 * missing or unopenable for the root vdev to still be considered openable.
2439 	 */
2440 	if (spa->spa_trust_config) {
2441 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
2442 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
2443 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
2444 	} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
2445 		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
2446 	} else {
2447 		spa->spa_missing_tvds_allowed = 0;
2448 	}
2449 
2450 	spa->spa_missing_tvds_allowed =
2451 	    MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
2452 
2453 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2454 	error = vdev_open(spa->spa_root_vdev);
2455 	spa_config_exit(spa, SCL_ALL, FTAG);
2456 
2457 	if (spa->spa_missing_tvds != 0) {
2458 		spa_load_note(spa, "vdev tree has %lld missing top-level "
2459 		    "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
2460 		if (spa->spa_trust_config && (spa->spa_mode & FWRITE)) {
2461 			/*
2462 			 * Although theoretically we could allow users to open
2463 			 * incomplete pools in RW mode, we'd need to add a lot
2464 			 * of extra logic (e.g. adjust pool space to account
2465 			 * for missing vdevs).
2466 			 * This limitation also prevents users from accidentally
2467 			 * opening the pool in RW mode during data recovery and
2468 			 * damaging it further.
2469 			 */
2470 			spa_load_note(spa, "pools with missing top-level "
2471 			    "vdevs can only be opened in read-only mode.");
2472 			error = SET_ERROR(ENXIO);
2473 		} else {
2474 			spa_load_note(spa, "current settings allow for maximum "
2475 			    "%lld missing top-level vdevs at this stage.",
2476 			    (u_longlong_t)spa->spa_missing_tvds_allowed);
2477 		}
2478 	}
2479 	if (error != 0) {
2480 		spa_load_failed(spa, "unable to open vdev tree [error=%d]",
2481 		    error);
2482 	}
2483 	if (spa->spa_missing_tvds != 0 || error != 0)
2484 		vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
2485 
2486 	return (error);
2487 }
2488 
2489 /*
2490  * We need to validate the vdev labels against the configuration that
2491  * we have in hand. This function is called twice: first with an untrusted
2492  * config, then with a trusted config. The validation is more strict when the
2493  * config is trusted.
2494  */
2495 static int
2496 spa_ld_validate_vdevs(spa_t *spa)
2497 {
2498 	int error = 0;
2499 	vdev_t *rvd = spa->spa_root_vdev;
2500 
2501 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2502 	error = vdev_validate(rvd);
2503 	spa_config_exit(spa, SCL_ALL, FTAG);
2504 
2505 	if (error != 0) {
2506 		spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
2507 		return (error);
2508 	}
2509 
2510 	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
2511 		spa_load_failed(spa, "cannot open vdev tree after invalidating "
2512 		    "some vdevs");
2513 		vdev_dbgmsg_print_tree(rvd, 2);
2514 		return (SET_ERROR(ENXIO));
2515 	}
2516 
2517 	return (0);
2518 }
2519 
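/*
 * Record the selected uberblock in the in-core spa state and derive the
 * txg values used for verification, claiming, and the first sync.
 */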
2520 static void
2521 spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
2522 {
2523 	spa->spa_state = POOL_STATE_ACTIVE;
2524 	spa->spa_ubsync = spa->spa_uberblock;
2525 	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
2526 	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
2527 	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2528 	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
2529 	spa->spa_claim_max_txg = spa->spa_first_txg;
2530 	spa->spa_prev_software_version = ub->ub_software_version;
2531 }
2532 
2533 static int
2534 spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
2535 {
2536 	vdev_t *rvd = spa->spa_root_vdev;
2537 	nvlist_t *label;
2538 	uberblock_t *ub = &spa->spa_uberblock;
2539 
2540 	/*
2541 	 * If we are opening the checkpointed state of the pool by
2542 	 * rewinding to it, at this point we will have written the
2543 	 * checkpointed uberblock to the vdev labels, so searching
2544 	 * the labels will find the right uberblock.  However, if
2545 	 * we are opening the checkpointed state read-only, we have
2546 	 * not modified the labels. Therefore, we must ignore the
2547 	 * labels and continue using the spa_uberblock that was set
2548 	 * by spa_ld_checkpoint_rewind.
2549 	 *
2550 	 * Note that it would be fine to ignore the labels when
2551 	 * rewinding (opening writeable) as well. However, if we
2552 	 * crash just after writing the labels, we will end up
2553 	 * searching the labels. Doing so in the common case means
2554 	 * that this code path gets exercised normally, rather than
2555 	 * just in the edge case.
2556 	 */
2557 	if (ub->ub_checkpoint_txg != 0 &&
2558 	    spa_importing_readonly_checkpoint(spa)) {
2559 		spa_ld_select_uberblock_done(spa, ub);
2560 		return (0);
2561 	}
2562 
2563 	/*
2564 	 * Find the best uberblock.
2565 	 */
2566 	vdev_uberblock_load(rvd, ub, &label);
2567 
2568 	/*
2569 	 * If we weren't able to find a single valid uberblock, return failure.
2570 	 */
2571 	if (ub->ub_txg == 0) {
2572 		nvlist_free(label);
2573 		spa_load_failed(spa, "no valid uberblock found");
2574 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
2575 	}
2576 
2577 	spa_load_note(spa, "using uberblock with txg=%llu",
2578 	    (u_longlong_t)ub->ub_txg);
2579 
2580 	/*
2581 	 * If the pool has an unsupported version we can't open it.
2582 	 */
2583 	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
2584 		nvlist_free(label);
2585 		spa_load_failed(spa, "version %llu is not supported",
2586 		    (u_longlong_t)ub->ub_version);
2587 		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
2588 	}
2589 
2590 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
2591 		nvlist_t *features;
2592 
2593 		/*
2594 		 * If we weren't able to find what's necessary for reading the
2595 		 * MOS in the label, return failure.
2596 		 */
2597 		if (label == NULL) {
2598 			spa_load_failed(spa, "label config unavailable");
2599 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2600 			    ENXIO));
2601 		}
2602 
2603 		if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
2604 		    &features) != 0) {
2605 			nvlist_free(label);
2606 			spa_load_failed(spa, "invalid label: '%s' missing",
2607 			    ZPOOL_CONFIG_FEATURES_FOR_READ);
2608 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2609 			    ENXIO));
2610 		}
2611 
2612 		/*
2613 		 * Update our in-core representation with the definitive values
2614 		 * from the label.
2615 		 */
2616 		nvlist_free(spa->spa_label_features);
2617 		VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
2618 	}
2619 
2620 	nvlist_free(label);
2621 
2622 	/*
2623 	 * Look through entries in the label nvlist's features_for_read. If
2624 	 * there is a feature listed there that we don't understand, then we
2625 	 * cannot open the pool.
2626 	 */
2627 	if (ub->ub_version >= SPA_VERSION_FEATURES) {
2628 		nvlist_t *unsup_feat;
2629 
2630 		VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
2631 		    0);
2632 
2633 		for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
2634 		    NULL); nvp != NULL;
2635 		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
2636 			if (!zfeature_is_supported(nvpair_name(nvp))) {
2637 				VERIFY(nvlist_add_string(unsup_feat,
2638 				    nvpair_name(nvp), "") == 0);
2639 			}
2640 		}
2641 
2642 		if (!nvlist_empty(unsup_feat)) {
2643 			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
2644 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
2645 			nvlist_free(unsup_feat);
2646 			spa_load_failed(spa, "some features are unsupported");
2647 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2648 			    ENOTSUP));
2649 		}
2650 
2651 		nvlist_free(unsup_feat);
2652 	}
2653 
2654 	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
2655 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2656 		spa_try_repair(spa, spa->spa_config);
2657 		spa_config_exit(spa, SCL_ALL, FTAG);
2658 		nvlist_free(spa->spa_config_splitting);
2659 		spa->spa_config_splitting = NULL;
2660 	}
2661 
2662 	/*
2663 	 * Initialize internal SPA structures.
2664 	 */
2665 	spa_ld_select_uberblock_done(spa, ub);
2666 
2667 	return (0);
2668 }
2669 
2670 static int
2671 spa_ld_open_rootbp(spa_t *spa)
2672 {
2673 	int error = 0;
2674 	vdev_t *rvd = spa->spa_root_vdev;
2675 
2676 	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
2677 	if (error != 0) {
2678 		spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
2679 		    "[error=%d]", error);
2680 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2681 	}
2682 	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
2683 
2684 	return (0);
2685 }
2686 
2687 static int
2688 spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
2689     boolean_t reloading)
2690 {
2691 	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
2692 	nvlist_t *nv, *mos_config, *policy;
2693 	int error = 0, copy_error;
2694 	uint64_t healthy_tvds, healthy_tvds_mos;
2695 	uint64_t mos_config_txg;
2696 
2697 	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
2698 	    != 0)
2699 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2700 
2701 	/*
2702 	 * If we're assembling a pool from a split, the config provided is
2703 	 * already trusted so there is nothing to do.
2704 	 */
2705 	if (type == SPA_IMPORT_ASSEMBLE)
2706 		return (0);
2707 
2708 	healthy_tvds = spa_healthy_core_tvds(spa);
2709 
2710 	if (load_nvlist(spa, spa->spa_config_object, &mos_config)
2711 	    != 0) {
2712 		spa_load_failed(spa, "unable to retrieve MOS config");
2713 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2714 	}
2715 
2716 	/*
2717 	 * If we are doing an open, the pool owner hasn't been verified yet,
2718 	 * so do the verification here.
2719 	 */
2720 	if (spa->spa_load_state == SPA_LOAD_OPEN) {
2721 		error = spa_verify_host(spa, mos_config);
2722 		if (error != 0) {
2723 			nvlist_free(mos_config);
2724 			return (error);
2725 		}
2726 	}
2727 
2728 	nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
2729 
2730 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2731 
2732 	/*
2733 	 * Build a new vdev tree from the trusted config
2734 	 */
2735 	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
2736 
2737 	/*
2738 	 * Vdev paths in the MOS may be obsolete. If the untrusted config was
2739 	 * obtained by scanning /dev/dsk, then it will have the right vdev
2740 	 * paths. We update the trusted MOS config with this information.
2741 	 * We first try to copy the paths with vdev_copy_path_strict, which
2742 	 * succeeds only when both configs have exactly the same vdev tree.
2743 	 * If that fails, we fall back to a more flexible method with a
2744 	 * best-effort policy.
2745 	 */
2746 	copy_error = vdev_copy_path_strict(rvd, mrvd);
2747 	if (copy_error != 0 || spa_load_print_vdev_tree) {
2748 		spa_load_note(spa, "provided vdev tree:");
2749 		vdev_dbgmsg_print_tree(rvd, 2);
2750 		spa_load_note(spa, "MOS vdev tree:");
2751 		vdev_dbgmsg_print_tree(mrvd, 2);
2752 	}
2753 	if (copy_error != 0) {
2754 		spa_load_note(spa, "vdev_copy_path_strict failed, falling "
2755 		    "back to vdev_copy_path_relaxed");
2756 		vdev_copy_path_relaxed(rvd, mrvd);
2757 	}
2758 
2759 	vdev_close(rvd);
2760 	vdev_free(rvd);
2761 	spa->spa_root_vdev = mrvd;
2762 	rvd = mrvd;
2763 	spa_config_exit(spa, SCL_ALL, FTAG);
2764 
2765 	/*
2766 	 * We will use spa_config if we decide to reload the spa or if spa_load
2767 	 * fails and we rewind. We must thus regenerate the config using the
2768 	 * MOS information with the updated paths. Rewind policy is an import
2769 	 * setting and is not in the MOS. We copy it over to our new, trusted
2770 	 * config.
2771 	 */
2772 	mos_config_txg = fnvlist_lookup_uint64(mos_config,
2773 	    ZPOOL_CONFIG_POOL_TXG);
2774 	nvlist_free(mos_config);
2775 	mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
2776 	if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_REWIND_POLICY,
2777 	    &policy) == 0)
2778 		fnvlist_add_nvlist(mos_config, ZPOOL_REWIND_POLICY, policy);
2779 	spa_config_set(spa, mos_config);
2780 	spa->spa_config_source = SPA_CONFIG_SRC_MOS;
2781 
2782 	/*
2783 	 * Now that we have the config from the MOS, we can be stricter
2784 	 * when checking blkptrs and can make assumptions about the consistency
2785 	 * of the vdev tree. spa_trust_config must be set to true before opening
2786 	 * vdevs in order for them to be writeable.
2787 	 */
2788 	spa->spa_trust_config = B_TRUE;
2789 
2790 	/*
2791 	 * Open and validate the new vdev tree
2792 	 */
2793 	error = spa_ld_open_vdevs(spa);
2794 	if (error != 0)
2795 		return (error);
2796 
2797 	error = spa_ld_validate_vdevs(spa);
2798 	if (error != 0)
2799 		return (error);
2800 
2801 	if (copy_error != 0 || spa_load_print_vdev_tree) {
2802 		spa_load_note(spa, "final vdev tree:");
2803 		vdev_dbgmsg_print_tree(rvd, 2);
2804 	}
2805 
2806 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
2807 	    !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
2808 		/*
2809 		 * Sanity check to make sure that we are indeed loading the
2810 		 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
2811 		 * in the config provided and they happened to be the only ones
2812 		 * to have the latest uberblock, we could involuntarily perform
2813 		 * an extreme rewind.
2814 		 */
2815 		healthy_tvds_mos = spa_healthy_core_tvds(spa);
2816 		if (healthy_tvds_mos - healthy_tvds >=
2817 		    SPA_SYNC_MIN_VDEVS) {
2818 			spa_load_note(spa, "config provided misses too many "
2819 			    "top-level vdevs compared to MOS (%lld vs %lld). ",
2820 			    (u_longlong_t)healthy_tvds,
2821 			    (u_longlong_t)healthy_tvds_mos);
2822 			spa_load_note(spa, "vdev tree:");
2823 			vdev_dbgmsg_print_tree(rvd, 2);
2824 			if (reloading) {
2825 				spa_load_failed(spa, "config was already "
2826 				    "provided from MOS. Aborting.");
2827 				return (spa_vdev_err(rvd,
2828 				    VDEV_AUX_CORRUPT_DATA, EIO));
2829 			}
2830 			spa_load_note(spa, "spa must be reloaded using MOS "
2831 			    "config");
2832 			return (SET_ERROR(EAGAIN));
2833 		}
2834 	}
2835 
2836 	error = spa_check_for_missing_logs(spa);
2837 	if (error != 0)
2838 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
2839 
2840 	if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
2841 		spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
2842 		    "guid sum (%llu != %llu)",
2843 		    (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
2844 		    (u_longlong_t)rvd->vdev_guid_sum);
2845 		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
2846 		    ENXIO));
2847 	}
2848 
2849 	return (0);
2850 }
2851 
2852 static int
2853 spa_ld_open_indirect_vdev_metadata(spa_t *spa)
2854 {
2855 	int error = 0;
2856 	vdev_t *rvd = spa->spa_root_vdev;
2857 
2858 	/*
2859 	 * Everything that we read before spa_remove_init() must be stored
2860 	 * on concrete vdevs.  Therefore we do this as early as possible.
2861 	 */
2862 	error = spa_remove_init(spa);
2863 	if (error != 0) {
2864 		spa_load_failed(spa, "spa_remove_init failed [error=%d]",
2865 		    error);
2866 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2867 	}
2868 
2869 	/*
2870 	 * Retrieve information needed to condense indirect vdev mappings.
2871 	 */
2872 	error = spa_condense_init(spa);
2873 	if (error != 0) {
2874 		spa_load_failed(spa, "spa_condense_init failed [error=%d]",
2875 		    error);
2876 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
2877 	}
2878 
2879 	return (0);
2880 }
2881 
2882 static int
2883 spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
2884 {
2885 	int error = 0;
2886 	vdev_t *rvd = spa->spa_root_vdev;
2887 
2888 	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
2889 		boolean_t missing_feat_read = B_FALSE;
2890 		nvlist_t *unsup_feat, *enabled_feat;
2891 
2892 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
2893 		    &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
2894 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2895 		}
2896 
2897 		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
2898 		    &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
2899 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2900 		}
2901 
2902 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
2903 		    &spa->spa_feat_desc_obj, B_TRUE) != 0) {
2904 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2905 		}
2906 
2907 		enabled_feat = fnvlist_alloc();
2908 		unsup_feat = fnvlist_alloc();
2909 
2910 		if (!spa_features_check(spa, B_FALSE,
2911 		    unsup_feat, enabled_feat))
2912 			missing_feat_read = B_TRUE;
2913 
2914 		if (spa_writeable(spa) ||
2915 		    spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
2916 			if (!spa_features_check(spa, B_TRUE,
2917 			    unsup_feat, enabled_feat)) {
2918 				*missing_feat_writep = B_TRUE;
2919 			}
2920 		}
2921 
2922 		fnvlist_add_nvlist(spa->spa_load_info,
2923 		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
2924 
2925 		if (!nvlist_empty(unsup_feat)) {
2926 			fnvlist_add_nvlist(spa->spa_load_info,
2927 			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
2928 		}
2929 
2930 		fnvlist_free(enabled_feat);
2931 		fnvlist_free(unsup_feat);
2932 
2933 		if (!missing_feat_read) {
2934 			fnvlist_add_boolean(spa->spa_load_info,
2935 			    ZPOOL_CONFIG_CAN_RDONLY);
2936 		}
2937 
2938 		/*
2939 		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
2940 		 * twofold: to determine whether the pool is available for
2941 		 * import in read-write mode and (if it is not) whether the
2942 		 * pool is available for import in read-only mode. If the pool
2943 		 * is available for import in read-write mode, it is displayed
2944 		 * as available in userland; if it is not available for import
2945 		 * in read-only mode, it is displayed as unavailable in
2946 		 * userland. If the pool is available for import in read-only
2947 		 * mode but not read-write mode, it is displayed as unavailable
2948 		 * in userland with a special note that the pool is actually
2949 		 * available for open in read-only mode.
2950 		 *
2951 		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
2952 		 * missing a feature for write, we must first determine whether
2953 		 * the pool can be opened read-only before returning to
2954 		 * userland in order to know whether to display the
2955 		 * abovementioned note.
2956 		 */
2957 		if (missing_feat_read || (*missing_feat_writep &&
2958 		    spa_writeable(spa))) {
2959 			spa_load_failed(spa, "pool uses unsupported features");
2960 			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
2961 			    ENOTSUP));
2962 		}
2963 
2964 		/*
2965 		 * Load refcounts for ZFS features from disk into an in-memory
2966 		 * cache during SPA initialization.
2967 		 */
2968 		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2969 			uint64_t refcount;
2970 
2971 			error = feature_get_refcount_from_disk(spa,
2972 			    &spa_feature_table[i], &refcount);
2973 			if (error == 0) {
2974 				spa->spa_feat_refcount_cache[i] = refcount;
2975 			} else if (error == ENOTSUP) {
2976 				spa->spa_feat_refcount_cache[i] =
2977 				    SPA_FEATURE_DISABLED;
2978 			} else {
2979 				spa_load_failed(spa, "error getting refcount "
2980 				    "for feature %s [error=%d]",
2981 				    spa_feature_table[i].fi_guid, error);
2982 				return (spa_vdev_err(rvd,
2983 				    VDEV_AUX_CORRUPT_DATA, EIO));
2984 			}
2985 		}
2986 	}
2987 
2988 	if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
2989 		if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
2990 		    &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
2991 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
2992 	}
2993 
2994 	return (0);
2995 }
2996 
2997 static int
2998 spa_ld_load_special_directories(spa_t *spa)
2999 {
3000 	int error = 0;
3001 	vdev_t *rvd = spa->spa_root_vdev;
3002 
3003 	spa->spa_is_initializing = B_TRUE;
3004 	error = dsl_pool_open(spa->spa_dsl_pool);
3005 	spa->spa_is_initializing = B_FALSE;
3006 	if (error != 0) {
3007 		spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
3008 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3009 	}
3010 
3011 	return (0);
3012 }
3013 
3014 static int
3015 spa_ld_get_props(spa_t *spa)
3016 {
3017 	int error = 0;
3018 	uint64_t obj;
3019 	vdev_t *rvd = spa->spa_root_vdev;
3020 
3021 	/* Grab the secret checksum salt from the MOS. */
3022 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3023 	    DMU_POOL_CHECKSUM_SALT, 1,
3024 	    sizeof (spa->spa_cksum_salt.zcs_bytes),
3025 	    spa->spa_cksum_salt.zcs_bytes);
3026 	if (error == ENOENT) {
3027 		/* Generate a new salt for subsequent use */
3028 		(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
3029 		    sizeof (spa->spa_cksum_salt.zcs_bytes));
3030 	} else if (error != 0) {
3031 		spa_load_failed(spa, "unable to retrieve checksum salt from "
3032 		    "MOS [error=%d]", error);
3033 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3034 	}
3035 
3036 	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
3037 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3038 	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
3039 	if (error != 0) {
3040 		spa_load_failed(spa, "error opening deferred-frees bpobj "
3041 		    "[error=%d]", error);
3042 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3043 	}
3044 
3045 	/*
3046 	 * Load the bit that tells us to use the new accounting function
3047 	 * (raid-z deflation).  If we have an older pool, this will not
3048 	 * be present.
3049 	 */
3050 	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
3051 	if (error != 0 && error != ENOENT)
3052 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3053 
3054 	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
3055 	    &spa->spa_creation_version, B_FALSE);
3056 	if (error != 0 && error != ENOENT)
3057 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3058 
3059 	/*
3060 	 * Load the persistent error log.  If we have an older pool, this will
3061 	 * not be present.
3062 	 */
3063 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
3064 	    B_FALSE);
3065 	if (error != 0 && error != ENOENT)
3066 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3067 
3068 	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
3069 	    &spa->spa_errlog_scrub, B_FALSE);
3070 	if (error != 0 && error != ENOENT)
3071 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3072 
3073 	/*
3074 	 * Load the history object.  If we have an older pool, this
3075 	 * will not be present.
3076 	 */
3077 	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
3078 	if (error != 0 && error != ENOENT)
3079 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3080 
3081 	/*
3082 	 * Load the per-vdev ZAP map. If we have an older pool, this will not
3083 	 * be present; in this case, defer its creation to a later time to
3084 	 * avoid dirtying the MOS this early (i.e. out of sync context). See
3085 	 * spa_sync_config_object.
3086 	 */
3087 
3088 	/* The sentinel is only available in the MOS config. */
3089 	nvlist_t *mos_config;
3090 	if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
3091 		spa_load_failed(spa, "unable to retrieve MOS config");
3092 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3093 	}
3094 
3095 	error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
3096 	    &spa->spa_all_vdev_zaps, B_FALSE);
3097 
3098 	if (error == ENOENT) {
3099 		VERIFY(!nvlist_exists(mos_config,
3100 		    ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
3101 		spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
3102 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
3103 	} else if (error != 0) {
3104 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3105 	} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
3106 		/*
3107 		 * An older version of ZFS overwrote the sentinel value, so
3108 		 * we have orphaned per-vdev ZAPs in the MOS. Defer their
3109 		 * destruction to later; see spa_sync_config_object.
3110 		 */
3111 		spa->spa_avz_action = AVZ_ACTION_DESTROY;
3112 		/*
3113 		 * We're assuming that no vdevs have had their ZAPs created
3114 		 * before this. Better be sure of it.
3115 		 */
3116 		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
3117 	}
3118 	nvlist_free(mos_config);
3119 
3120 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
3121 
3122 	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
3123 	    B_FALSE);
3124 	if (error && error != ENOENT)
3125 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3126 
3127 	if (error == 0) {
3128 		uint64_t autoreplace;
3129 
3130 		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
3131 		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
3132 		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
3133 		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
3134 		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
3135 		spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
3136 		    &spa->spa_dedup_ditto);
3137 
3138 		spa->spa_autoreplace = (autoreplace != 0);
3139 	}
3140 
3141 	/*
3142 	 * If we are importing a pool with missing top-level vdevs,
3143 	 * we enforce that the pool doesn't panic or get suspended on
3144 	 * error since the likelihood of missing data is extremely high.
3145 	 */
3146 	if (spa->spa_missing_tvds > 0 &&
3147 	    spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
3148 	    spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
3149 		spa_load_note(spa, "forcing failmode to 'continue' "
3150 		    "as some top level vdevs are missing");
3151 		spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
3152 	}
3153 
3154 	return (0);
3155 }
3156 
3157 static int
3158 spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
3159 {
3160 	int error = 0;
3161 	vdev_t *rvd = spa->spa_root_vdev;
3162 
3163 	/*
3164 	 * If we're assembling the pool from the split-off vdevs of
3165 	 * an existing pool, we don't want to attach the spares & cache
3166 	 * devices.
3167 	 */
3168 
3169 	/*
3170 	 * Load any hot spares for this pool.
3171 	 */
3172 	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
3173 	    B_FALSE);
3174 	if (error != 0 && error != ENOENT)
3175 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3176 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
3177 		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
3178 		if (load_nvlist(spa, spa->spa_spares.sav_object,
3179 		    &spa->spa_spares.sav_config) != 0) {
3180 			spa_load_failed(spa, "error loading spares nvlist");
3181 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3182 		}
3183 
3184 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3185 		spa_load_spares(spa);
3186 		spa_config_exit(spa, SCL_ALL, FTAG);
3187 	} else if (error == 0) {
3188 		spa->spa_spares.sav_sync = B_TRUE;
3189 	}
3190 
3191 	/*
3192 	 * Load any level 2 ARC devices for this pool.
3193 	 */
3194 	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
3195 	    &spa->spa_l2cache.sav_object, B_FALSE);
3196 	if (error != 0 && error != ENOENT)
3197 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3198 	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
3199 		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
3200 		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
3201 		    &spa->spa_l2cache.sav_config) != 0) {
3202 			spa_load_failed(spa, "error loading l2cache nvlist");
3203 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3204 		}
3205 
3206 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3207 		spa_load_l2cache(spa);
3208 		spa_config_exit(spa, SCL_ALL, FTAG);
3209 	} else if (error == 0) {
3210 		spa->spa_l2cache.sav_sync = B_TRUE;
3211 	}
3212 
3213 	return (0);
3214 }
3215 
3216 static int
3217 spa_ld_load_vdev_metadata(spa_t *spa)
3218 {
3219 	int error = 0;
3220 	vdev_t *rvd = spa->spa_root_vdev;
3221 
3222 	/*
3223 	 * If the 'autoreplace' property is set, then post a resource notifying
3224 	 * the ZFS DE that it should not issue any faults for unopenable
3225 	 * devices.  We also iterate over the vdevs, and post a sysevent for any
3226 	 * unopenable vdevs so that the normal autoreplace handler can take
3227 	 * over.
3228 	 */
3229 	if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
3230 		spa_check_removed(spa->spa_root_vdev);
3231 		/*
3232 		 * For the import case, this is done in spa_import(), because
3233 		 * at this point we're using the spare definitions from
3234 		 * the MOS config, not necessarily from the userland config.
3235 		 */
3236 		if (spa->spa_load_state != SPA_LOAD_IMPORT) {
3237 			spa_aux_check_removed(&spa->spa_spares);
3238 			spa_aux_check_removed(&spa->spa_l2cache);
3239 		}
3240 	}
3241 
3242 	/*
3243 	 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
3244 	 */
3245 	error = vdev_load(rvd);
3246 	if (error != 0) {
3247 		spa_load_failed(spa, "vdev_load failed [error=%d]", error);
3248 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
3249 	}
3250 
3251 	/*
3252 	 * Propagate the leaf DTLs we just loaded all the way up the vdev tree.
3253 	 */
3254 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3255 	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
3256 	spa_config_exit(spa, SCL_ALL, FTAG);
3257 
3258 	return (0);
3259 }
3260 
3261 static int
3262 spa_ld_load_dedup_tables(spa_t *spa)
3263 {
3264 	int error = 0;
3265 	vdev_t *rvd = spa->spa_root_vdev;
3266 
3267 	error = ddt_load(spa);
3268 	if (error != 0) {
3269 		spa_load_failed(spa, "ddt_load failed [error=%d]", error);
3270 		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3271 	}
3272 
3273 	return (0);
3274 }
3275 
3276 static int
3277 spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
3278 {
3279 	vdev_t *rvd = spa->spa_root_vdev;
3280 
3281 	if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
3282 		boolean_t missing = spa_check_logs(spa);
3283 		if (missing) {
3284 			if (spa->spa_missing_tvds != 0) {
3285 				spa_load_note(spa, "spa_check_logs failed "
3286 				    "so dropping the logs");
3287 			} else {
3288 				*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
3289 				spa_load_failed(spa, "spa_check_logs failed");
3290 				return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
3291 				    ENXIO));
3292 			}
3293 		}
3294 	}
3295 
3296 	return (0);
3297 }
3298 
3299 static int
3300 spa_ld_verify_pool_data(spa_t *spa)
3301 {
3302 	int error = 0;
3303 	vdev_t *rvd = spa->spa_root_vdev;
3304 
3305 	/*
3306 	 * We've successfully opened the pool, verify that we're ready
3307 	 * to start pushing transactions.
3308 	 */
3309 	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
3310 		error = spa_load_verify(spa);
3311 		if (error != 0) {
3312 			spa_load_failed(spa, "spa_load_verify failed "
3313 			    "[error=%d]", error);
3314 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3315 			    error));
3316 		}
3317 	}
3318 
3319 	return (0);
3320 }
3321 
3322 static void
3323 spa_ld_claim_log_blocks(spa_t *spa)
3324 {
3325 	dmu_tx_t *tx;
3326 	dsl_pool_t *dp = spa_get_dsl(spa);
3327 
3328 	/*
3329 	 * Claim log blocks that haven't been committed yet.
3330 	 * This must all happen in a single txg.
3331 	 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
3332 	 * invoked from zil_claim_log_block()'s i/o done callback.
3333 	 * Price of rollback is that we abandon the log.
3334 	 */
3335 	spa->spa_claiming = B_TRUE;
3336 
3337 	tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
3338 	(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
3339 	    zil_claim, tx, DS_FIND_CHILDREN);
3340 	dmu_tx_commit(tx);
3341 
3342 	spa->spa_claiming = B_FALSE;
3343 
3344 	spa_set_log_state(spa, SPA_LOG_GOOD);
3345 }
3346 
3347 static void
3348 spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
3349     boolean_t update_config_cache)
3350 {
3351 	vdev_t *rvd = spa->spa_root_vdev;
3352 	int need_update = B_FALSE;
3353 
3354 	/*
3355 	 * If the config cache is stale, or we have uninitialized
3356 	 * metaslabs (see spa_vdev_add()), then update the config.
3357 	 *
3358 	 * If this is a verbatim import, trust the current
3359 	 * in-core spa_config and update the disk labels.
3360 	 */
3361 	if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
3362 	    spa->spa_load_state == SPA_LOAD_IMPORT ||
3363 	    spa->spa_load_state == SPA_LOAD_RECOVER ||
3364 	    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
3365 		need_update = B_TRUE;
3366 
3367 	for (int c = 0; c < rvd->vdev_children; c++)
3368 		if (rvd->vdev_child[c]->vdev_ms_array == 0)
3369 			need_update = B_TRUE;
3370 
3371 	/*
3372 	 * Update the config cache asynchronously in case we're the
3373 	 * root pool, in which case the config cache isn't writable yet.
3374 	 */
3375 	if (need_update)
3376 		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3377 }
3378 
3379 static void
3380 spa_ld_prepare_for_reload(spa_t *spa)
3381 {
3382 	int mode = spa->spa_mode;
3383 	int async_suspended = spa->spa_async_suspended;
3384 
3385 	spa_unload(spa);
3386 	spa_deactivate(spa);
3387 	spa_activate(spa, mode);
3388 
3389 	/*
3390 	 * We save the value of spa_async_suspended as it gets reset to 0 by
3391 	 * spa_unload(). We want to restore it to its original value before
3392 	 * returning, as we might call spa_async_resume() later.
3393 	 */
3394 	spa->spa_async_suspended = async_suspended;
3395 }
3396 
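/*
 * Read the checkpointed uberblock, if any, from the MOS and record its txg
 * and timestamp in the spa.  ENOENT simply means no checkpoint exists.
 */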
3397 static int
3398 spa_ld_read_checkpoint_txg(spa_t *spa)
3399 {
3400 	uberblock_t checkpoint;
3401 	int error = 0;
3402 
3403 	ASSERT0(spa->spa_checkpoint_txg);
3404 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3405 
3406 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3407 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
3408 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
3409 
3410 	if (error == ENOENT)
3411 		return (0);
3412 
3413 	if (error != 0)
3414 		return (error);
3415 
3416 	ASSERT3U(checkpoint.ub_txg, !=, 0);
3417 	ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
3418 	ASSERT3U(checkpoint.ub_timestamp, !=, 0);
3419 	spa->spa_checkpoint_txg = checkpoint.ub_txg;
3420 	spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
3421 
3422 	return (0);
3423 }
3424 
3425 static int
3426 spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
3427 {
3428 	int error = 0;
3429 
3430 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3431 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
3432 
3433 	/*
3434 	 * Never trust the config that is provided unless we are assembling
3435 	 * a pool following a split.
3436 	 * This means don't trust blkptrs and the vdev tree in general. This
3437 	 * also effectively puts the spa in read-only mode since
3438 	 * spa_writeable() checks for spa_trust_config to be true.
3439 	 * We will later load a trusted config from the MOS.
3440 	 */
3441 	if (type != SPA_IMPORT_ASSEMBLE)
3442 		spa->spa_trust_config = B_FALSE;
3443 
3444 	/*
3445 	 * Parse the config provided to create a vdev tree.
3446 	 */
3447 	error = spa_ld_parse_config(spa, type);
3448 	if (error != 0)
3449 		return (error);
3450 
3451 	/*
3452 	 * Now that we have the vdev tree, try to open each vdev. This involves
3453 	 * opening the underlying physical device, retrieving its geometry and
3454 	 * probing the vdev with a dummy I/O. The state of each vdev will be set
3455 	 * based on the success of those operations. After this we'll be ready
3456 	 * to read from the vdevs.
3457 	 */
3458 	error = spa_ld_open_vdevs(spa);
3459 	if (error != 0)
3460 		return (error);
3461 
3462 	/*
3463 	 * Read the label of each vdev and make sure that the GUIDs stored
3464 	 * there match the GUIDs in the config provided.
3465 	 * If we're assembling a new pool that's been split off from an
3466 	 * existing pool, the labels haven't yet been updated so we skip
3467 	 * validation for now.
3468 	 */
3469 	if (type != SPA_IMPORT_ASSEMBLE) {
3470 		error = spa_ld_validate_vdevs(spa);
3471 		if (error != 0)
3472 			return (error);
3473 	}
3474 
3475 	/*
3476 	 * Read all vdev labels to find the best uberblock (i.e. latest,
3477 	 * unless spa_load_max_txg is set) and store it in spa_uberblock. We
3478 	 * get the list of features required to read blkptrs in the MOS from
3479 	 * the vdev label with the best uberblock and verify that our version
3480 	 * of zfs supports them all.
3481 	 */
3482 	error = spa_ld_select_uberblock(spa, type);
3483 	if (error != 0)
3484 		return (error);
3485 
3486 	/*
3487 	 * Pass that uberblock to the dsl_pool layer which will open the root
3488 	 * blkptr. This blkptr points to the latest version of the MOS and will
3489 	 * allow us to read its contents.
3490 	 */
3491 	error = spa_ld_open_rootbp(spa);
3492 	if (error != 0)
3493 		return (error);
3494 
3495 	return (0);
3496 }
3497 
3498 static int
3499 spa_ld_checkpoint_rewind(spa_t *spa)
3500 {
3501 	uberblock_t checkpoint;
3502 	int error = 0;
3503 
3504 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3505 	ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
3506 
3507 	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3508 	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
3509 	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
3510 
3511 	if (error != 0) {
3512 		spa_load_failed(spa, "unable to retrieve checkpointed "
3513 		    "uberblock from the MOS config [error=%d]", error);
3514 
3515 		if (error == ENOENT)
3516 			error = ZFS_ERR_NO_CHECKPOINT;
3517 
3518 		return (error);
3519 	}
3520 
3521 	ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
3522 	ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
3523 
3524 	/*
3525 	 * We need to update the txg and timestamp of the checkpointed
3526 	 * uberblock to be higher than the latest one. This ensures that
3527 	 * the checkpointed uberblock is selected if we were to close and
3528 	 * reopen the pool right after we've written it in the vdev labels.
3529 	 * (also see block comment in vdev_uberblock_compare)
3530 	 */
3531 	checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
3532 	checkpoint.ub_timestamp = gethrestime_sec();
3533 
3534 	/*
3535 	 * Set current uberblock to be the checkpointed uberblock.
3536 	 */
3537 	spa->spa_uberblock = checkpoint;
3538 
3539 	/*
3540 	 * If we are doing a normal rewind, then the pool is open for
3541 	 * writing and we sync the "updated" checkpointed uberblock to
3542 	 * disk. Once this is done, we've basically rewound the whole
3543 	 * pool and there is no way back.
3544 	 *
3545 	 * There are cases when we don't want to attempt to sync the
3546 	 * checkpointed uberblock to disk because we are opening the
3547 	 * pool as read-only: specifically, verifying the checkpointed
3548 	 * state with zdb, or importing the checkpointed state to get
3549 	 * a "preview" of its content.
3550 	 */
3551 	if (spa_writeable(spa)) {
3552 		vdev_t *rvd = spa->spa_root_vdev;
3553 
3554 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3555 		vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
3556 		int svdcount = 0;
3557 		int children = rvd->vdev_children;
3558 		int c0 = spa_get_random(children);
3559 
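		/*
		 * Pick up to SPA_SYNC_MIN_VDEVS top-level vdevs, starting
		 * from a random child, to receive the rewritten uberblock;
		 * log vdevs, non-concrete vdevs, and vdevs without a
		 * metaslab array are skipped.
		 */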
3560 		for (int c = 0; c < children; c++) {
3561 			vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
3562 
3563 			/* Stop when revisiting the first vdev */
3564 			if (c > 0 && svd[0] == vd)
3565 				break;
3566 
3567 			if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
3568 			    !vdev_is_concrete(vd))
3569 				continue;
3570 
3571 			svd[svdcount++] = vd;
3572 			if (svdcount == SPA_SYNC_MIN_VDEVS)
3573 				break;
3574 		}
3575 		error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
3576 		if (error == 0)
3577 			spa->spa_last_synced_guid = rvd->vdev_guid;
3578 		spa_config_exit(spa, SCL_ALL, FTAG);
3579 
3580 		if (error != 0) {
3581 			spa_load_failed(spa, "failed to write checkpointed "
3582 			    "uberblock to the vdev labels [error=%d]", error);
3583 			return (error);
3584 		}
3585 	}
3586 
3587 	return (0);
3588 }
3589 
3590 static int
3591 spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
3592     boolean_t *update_config_cache)
3593 {
3594 	int error;
3595 
3596 	/*
3597 	 * Parse the config for pool, open and validate vdevs,
3598 	 * select an uberblock, and use that uberblock to open
3599 	 * the MOS.
3600 	 */
3601 	error = spa_ld_mos_init(spa, type);
3602 	if (error != 0)
3603 		return (error);
3604 
3605 	/*
3606 	 * Retrieve the trusted config stored in the MOS and use it to create
3607 	 * a new, exact version of the vdev tree, then reopen all vdevs.
3608 	 */
3609 	error = spa_ld_trusted_config(spa, type, B_FALSE);
3610 	if (error == EAGAIN) {
3611 		if (update_config_cache != NULL)
3612 			*update_config_cache = B_TRUE;
3613 
3614 		/*
3615 		 * Redo the loading process with the trusted config if it is
3616 		 * too different from the untrusted config.
3617 		 */
3618 		spa_ld_prepare_for_reload(spa);
3619 		spa_load_note(spa, "RELOADING");
3620 		error = spa_ld_mos_init(spa, type);
3621 		if (error != 0)
3622 			return (error);
3623 
3624 		error = spa_ld_trusted_config(spa, type, B_TRUE);
3625 		if (error != 0)
3626 			return (error);
3627 
3628 	} else if (error != 0) {
3629 		return (error);
3630 	}
3631 
3632 	return (0);
3633 }
3634 
3635 /*
3636  * Load an existing storage pool, using the config provided. This config
3637  * describes which vdevs are part of the pool and is later validated against
3638  * partial configs present in each vdev's label and an entire copy of the
3639  * config stored in the MOS.
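 *
 * The load proceeds in phases: parse the supplied config into a vdev tree,
 * open and validate the vdevs, select the best uberblock, and open the MOS
 * (spa_ld_mos_with_trusted_config()); optionally rewind to the checkpointed
 * uberblock; then load the indirect vdev mappings, feature flags, pool
 * properties, auxiliary devices, per-vdev metadata, dedup tables, and the
 * logs before the pool is declared LOADED.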
3640  */
3641 static int
3642 spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
3643 {
3644 	int error = 0;
3645 	boolean_t missing_feat_write = B_FALSE;
3646 	boolean_t checkpoint_rewind =
3647 	    (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
3648 	boolean_t update_config_cache = B_FALSE;
3649 
3650 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3651 	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
3652 
3653 	spa_load_note(spa, "LOADING");
3654 
3655 	error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
3656 	if (error != 0)
3657 		return (error);
3658 
3659 	/*
3660 	 * If we are rewinding to the checkpoint then we need to repeat
3661 	 * everything we've done so far in this function but this time
3662 	 * selecting the checkpointed uberblock and using that to open
3663 	 * the MOS.
3664 	 */
3665 	if (checkpoint_rewind) {
3666 		/*
3667 		 * If we are rewinding to the checkpoint, update the config
3668 		 * cache anyway.
3669 		 */
3670 		update_config_cache = B_TRUE;
3671 
3672 		/*
3673 		 * Extract the checkpointed uberblock from the current MOS
3674 		 * and use this as the pool's uberblock from now on. If the
3675 		 * pool is imported as writeable we also write the checkpoint
3676 		 * uberblock to the labels, making the rewind permanent.
3677 		 */
3678 		error = spa_ld_checkpoint_rewind(spa);
3679 		if (error != 0)
3680 			return (error);
3681 
3682 		/*
3683 		 * Redo the loading process again with the
3684 		 * checkpointed uberblock.
3685 		 */
3686 		spa_ld_prepare_for_reload(spa);
3687 		spa_load_note(spa, "LOADING checkpointed uberblock");
3688 		error = spa_ld_mos_with_trusted_config(spa, type, NULL);
3689 		if (error != 0)
3690 			return (error);
3691 	}
3692 
3693 	/*
3694 	 * Retrieve the checkpoint txg if the pool has a checkpoint.
3695 	 */
3696 	error = spa_ld_read_checkpoint_txg(spa);
3697 	if (error != 0)
3698 		return (error);
3699 
3700 	/*
3701 	 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
3702 	 * from the pool and their contents were re-mapped to other vdevs. Note
3703 	 * that everything that we read before this step must have been
3704 	 * rewritten on concrete vdevs after the last device removal was
3705 	 * initiated. Otherwise we could be reading from indirect vdevs before
3706 	 * we have loaded their mappings.
3707 	 */
3708 	error = spa_ld_open_indirect_vdev_metadata(spa);
3709 	if (error != 0)
3710 		return (error);
3711 
3712 	/*
3713 	 * Retrieve the full list of active features from the MOS and check if
3714 	 * they are all supported.
3715 	 */
3716 	error = spa_ld_check_features(spa, &missing_feat_write);
3717 	if (error != 0)
3718 		return (error);
3719 
3720 	/*
3721 	 * Load several special directories from the MOS needed by the dsl_pool
3722 	 * layer.
3723 	 */
3724 	error = spa_ld_load_special_directories(spa);
3725 	if (error != 0)
3726 		return (error);
3727 
3728 	/*
3729 	 * Retrieve pool properties from the MOS.
3730 	 */
3731 	error = spa_ld_get_props(spa);
3732 	if (error != 0)
3733 		return (error);
3734 
3735 	/*
3736 	 * Retrieve the list of auxiliary devices - cache devices and spares -
3737 	 * and open them.
3738 	 */
3739 	error = spa_ld_open_aux_vdevs(spa, type);
3740 	if (error != 0)
3741 		return (error);
3742 
3743 	/*
3744 	 * Load the metadata for all vdevs. Also check if unopenable devices
3745 	 * should be autoreplaced.
3746 	 */
3747 	error = spa_ld_load_vdev_metadata(spa);
3748 	if (error != 0)
3749 		return (error);
3750 
3751 	error = spa_ld_load_dedup_tables(spa);
3752 	if (error != 0)
3753 		return (error);
3754 
3755 	/*
3756 	 * Verify the logs now to make sure we don't have any unexpected errors
3757 	 * when we claim log blocks later.
3758 	 */
3759 	error = spa_ld_verify_logs(spa, type, ereport);
3760 	if (error != 0)
3761 		return (error);
3762 
3763 	if (missing_feat_write) {
3764 		ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
3765 
3766 		/*
3767 		 * At this point, we know that we can open the pool in
3768 		 * read-only mode but not read-write mode. We now have enough
3769 		 * information and can return to userland.
3770 		 */
3771 		return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
3772 		    ENOTSUP));
3773 	}
3774 
3775 	/*
3776 	 * Traverse the last txgs to make sure the pool was left off in a safe
3777 	 * state. When performing an extreme rewind, we verify the whole pool,
3778 	 * which can take a very long time.
3779 	 */
3780 	error = spa_ld_verify_pool_data(spa);
3781 	if (error != 0)
3782 		return (error);
3783 
3784 	/*
3785 	 * Calculate the deflated space for the pool. This must be done before
3786 	 * we write anything to the pool because we'd need to update the space
3787 	 * accounting using the deflated sizes.
3788 	 */
3789 	spa_update_dspace(spa);
3790 
3791 	/*
3792 	 * We have now retrieved all the information we needed to open the
3793 	 * pool. If we are importing the pool in read-write mode, a few
3794 	 * additional steps must be performed to finish the import.
3795 	 */
3796 	if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
3797 	    spa->spa_load_max_txg == UINT64_MAX)) {
3798 		uint64_t config_cache_txg = spa->spa_config_txg;
3799 
3800 		ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
3801 
3802 		/*
3803 		 * In case of a checkpoint rewind, log the original txg
3804 		 * of the checkpointed uberblock.
3805 		 */
3806 		if (checkpoint_rewind) {
3807 			spa_history_log_internal(spa, "checkpoint rewind",
3808 			    NULL, "rewound state to txg=%llu",
3809 			    (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
3810 		}
3811 
3812 		/*
3813 		 * Traverse the ZIL and claim all blocks.
3814 		 */
3815 		spa_ld_claim_log_blocks(spa);
3816 
3817 		/*
3818 		 * Kick-off the syncing thread.
3819 		 */
3820 		spa->spa_sync_on = B_TRUE;
3821 		txg_sync_start(spa->spa_dsl_pool);
3822 
3823 		/*
3824 		 * Wait for all claims to sync.  We sync up to the highest
3825 		 * claimed log block birth time so that claimed log blocks
3826 		 * don't appear to be from the future.  spa_claim_max_txg
3827 		 * will have been set for us by ZIL traversal operations
3828 		 * performed above.
3829 		 */
3830 		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
3831 
3832 		/*
3833 		 * Check if we need to request an update of the config. On the
3834 		 * next sync, we would update the config stored in vdev labels
3835 		 * and the cachefile (by default /etc/zfs/zpool.cache).
3836 		 */
3837 		spa_ld_check_for_config_update(spa, config_cache_txg,
3838 		    update_config_cache);
3839 
3840 		/*
3841 		 * Check all DTLs to see if anything needs resilvering.
3842 		 */
3843 		if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
3844 		    vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
3845 			spa_async_request(spa, SPA_ASYNC_RESILVER);
3846 
3847 		/*
3848 		 * Log the fact that we booted up (so that we can detect if
3849 		 * we rebooted in the middle of an operation).
3850 		 */
3851 		spa_history_log_version(spa, "open");
3852 
3853 		/*
3854 		 * Delete any inconsistent datasets.
3855 		 */
3856 		(void) dmu_objset_find(spa_name(spa),
3857 		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
3858 
3859 		/*
3860 		 * Clean up any stale temporary dataset userrefs.
3861 		 */
3862 		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
3863 
3864 		spa_restart_removal(spa);
3865 
3866 		spa_spawn_aux_threads(spa);
3867 	}
3868 
3869 	spa_load_note(spa, "LOADED");
3870 
3871 	return (0);
3872 }
3873 
3874 static int
3875 spa_load_retry(spa_t *spa, spa_load_state_t state)
3876 {
3877 	int mode = spa->spa_mode;
3878 
3879 	spa_unload(spa);
3880 	spa_deactivate(spa);
3881 
3882 	spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
3883 
3884 	spa_activate(spa, mode);
3885 	spa_async_suspend(spa);
3886 
3887 	spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
3888 	    (u_longlong_t)spa->spa_load_max_txg);
3889 
3890 	return (spa_load(spa, state, SPA_IMPORT_EXISTING));
3891 }
3892 
3893 /*
3894  * If spa_load() fails, this function will try loading prior txgs. If
3895  * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
3896  * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
3897  * function will not rewind the pool and will return the same error as
3898  * spa_load().
3899  */
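 *
 * The rewind itself works by lowering spa_load_max_txg and retrying
 * spa_load() against successively older uberblocks until a load succeeds or
 * the txg falls outside the acceptable range: TXG_DEFER_SIZE txgs behind the
 * last synced uberblock, or all the way back to TXG_INITIAL when an extreme
 * rewind is requested.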
3900 static int
3901 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
3902     int rewind_flags)
3903 {
3904 	nvlist_t *loadinfo = NULL;
3905 	nvlist_t *config = NULL;
3906 	int load_error, rewind_error;
3907 	uint64_t safe_rewind_txg;
3908 	uint64_t min_txg;
3909 
3910 	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
3911 		spa->spa_load_max_txg = spa->spa_load_txg;
3912 		spa_set_log_state(spa, SPA_LOG_CLEAR);
3913 	} else {
3914 		spa->spa_load_max_txg = max_request;
3915 		if (max_request != UINT64_MAX)
3916 			spa->spa_extreme_rewind = B_TRUE;
3917 	}
3918 
3919 	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
3920 	if (load_error == 0)
3921 		return (0);
3922 	if (load_error == ZFS_ERR_NO_CHECKPOINT) {
3923 		/*
3924 		 * When attempting checkpoint-rewind on a pool with no
3925 		 * checkpoint, we should not attempt to load uberblocks
3926 		 * from previous txgs when spa_load fails.
3927 		 */
3928 		ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
3929 		return (load_error);
3930 	}
3931 
3932 	if (spa->spa_root_vdev != NULL)
3933 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
3934 
3935 	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
3936 	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
3937 
3938 	if (rewind_flags & ZPOOL_NEVER_REWIND) {
3939 		nvlist_free(config);
3940 		return (load_error);
3941 	}
3942 
3943 	if (state == SPA_LOAD_RECOVER) {
3944 		/* Price of rolling back is discarding txgs, including log */
3945 		spa_set_log_state(spa, SPA_LOG_CLEAR);
3946 	} else {
3947 		/*
3948 		 * If we aren't rolling back save the load info from our first
3949 		 * import attempt so that we can restore it after attempting
3950 		 * to rewind.
3951 		 */
3952 		loadinfo = spa->spa_load_info;
3953 		spa->spa_load_info = fnvlist_alloc();
3954 	}
3955 
3956 	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
3957 	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
3958 	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
3959 	    TXG_INITIAL : safe_rewind_txg;
3960 
3961 	/*
3962 	 * Continue as long as we're finding errors, we're still within
3963 	 * the acceptable rewind range, and we're still finding uberblocks
3964 	 */
3965 	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
3966 	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
3967 		if (spa->spa_load_max_txg < safe_rewind_txg)
3968 			spa->spa_extreme_rewind = B_TRUE;
3969 		rewind_error = spa_load_retry(spa, state);
3970 	}
3971 
3972 	spa->spa_extreme_rewind = B_FALSE;
3973 	spa->spa_load_max_txg = UINT64_MAX;
3974 
3975 	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
3976 		spa_config_set(spa, config);
3977 	else
3978 		nvlist_free(config);
3979 
3980 	if (state == SPA_LOAD_RECOVER) {
3981 		ASSERT3P(loadinfo, ==, NULL);
3982 		return (rewind_error);
3983 	} else {
3984 		/* Store the rewind info as part of the initial load info */
3985 		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
3986 		    spa->spa_load_info);
3987 
3988 		/* Restore the initial load info */
3989 		fnvlist_free(spa->spa_load_info);
3990 		spa->spa_load_info = loadinfo;
3991 
3992 		return (load_error);
3993 	}
3994 }
3995 
3996 /*
3997  * Pool Open/Import
3998  *
3999  * The import case is identical to an open except that the configuration is sent
4000  * down from userland, instead of grabbed from the configuration cache.  For the
4001  * case of an open, the pool configuration will exist in the
4002  * POOL_STATE_UNINITIALIZED state.
4003  *
4004  * The stats information (gen/count/ustats) is used to gather vdev statistics at
4005  * the same time we open the pool, without having to keep the spa_t around in
4006  * some ambiguous state.
4007  */
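/*
 * Illustrative usage (the pool name here is hypothetical): a typical
 * consumer opens a pool, uses it, and then drops its reference:
 *
 *	spa_t *spa;
 *	int error = spa_open("tank", &spa, FTAG);
 *	if (error == 0) {
 *		... use spa ...
 *		spa_close(spa, FTAG);
 *	}
 *
 * spa_open_rewind() behaves the same but also accepts a rewind policy
 * nvlist, which spa_open_common() honors when the pool must be recovered.
 */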
4008 static int
4009 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
4010     nvlist_t **config)
4011 {
4012 	spa_t *spa;
4013 	spa_load_state_t state = SPA_LOAD_OPEN;
4014 	int error;
4015 	int locked = B_FALSE;
4016 
4017 	*spapp = NULL;
4018 
4019 	/*
4020 	 * As disgusting as this is, we need to support recursive calls to this
4021 	 * function because dsl_dir_open() is called during spa_load(), and ends
4022 	 * up calling spa_open() again.  The real fix is to figure out how to
4023 	 * avoid dsl_dir_open() calling this in the first place.
4024 	 */
4025 	if (mutex_owner(&spa_namespace_lock) != curthread) {
4026 		mutex_enter(&spa_namespace_lock);
4027 		locked = B_TRUE;
4028 	}
4029 
4030 	if ((spa = spa_lookup(pool)) == NULL) {
4031 		if (locked)
4032 			mutex_exit(&spa_namespace_lock);
4033 		return (SET_ERROR(ENOENT));
4034 	}
4035 
4036 	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
4037 		zpool_rewind_policy_t policy;
4038 
4039 		zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
4040 		    &policy);
4041 		if (policy.zrp_request & ZPOOL_DO_REWIND)
4042 			state = SPA_LOAD_RECOVER;
4043 
4044 		spa_activate(spa, spa_mode_global);
4045 
4046 		if (state != SPA_LOAD_RECOVER)
4047 			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
4048 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
4049 
4050 		zfs_dbgmsg("spa_open_common: opening %s", pool);
4051 		error = spa_load_best(spa, state, policy.zrp_txg,
4052 		    policy.zrp_request);
4053 
4054 		if (error == EBADF) {
4055 			/*
4056 			 * If vdev_validate() returns failure (indicated by
4057 			 * EBADF), it means that one of the vdevs indicates
4058 			 * that the pool has been exported or destroyed.  If
4059 			 * this is the case, the config cache is out of sync and
4060 			 * we should remove the pool from the namespace.
4061 			 */
4062 			spa_unload(spa);
4063 			spa_deactivate(spa);
4064 			spa_write_cachefile(spa, B_TRUE, B_TRUE);
4065 			spa_remove(spa);
4066 			if (locked)
4067 				mutex_exit(&spa_namespace_lock);
4068 			return (SET_ERROR(ENOENT));
4069 		}
4070 
4071 		if (error) {
4072 			/*
4073 			 * We can't open the pool, but we still have useful
4074 			 * information: the state of each vdev after the
4075 			 * attempted vdev_open().  Return this to the user.
4076 			 */
4077 			if (config != NULL && spa->spa_config) {
4078 				VERIFY(nvlist_dup(spa->spa_config, config,
4079 				    KM_SLEEP) == 0);
4080 				VERIFY(nvlist_add_nvlist(*config,
4081 				    ZPOOL_CONFIG_LOAD_INFO,
4082 				    spa->spa_load_info) == 0);
4083 			}
4084 			spa_unload(spa);
4085 			spa_deactivate(spa);
4086 			spa->spa_last_open_failed = error;
4087 			if (locked)
4088 				mutex_exit(&spa_namespace_lock);
4089 			*spapp = NULL;
4090 			return (error);
4091 		}
4092 	}
4093 
4094 	spa_open_ref(spa, tag);
4095 
4096 	if (config != NULL)
4097 		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4098 
4099 	/*
4100 	 * If we've recovered the pool, pass back any information we
4101 	 * gathered while doing the load.
4102 	 */
4103 	if (state == SPA_LOAD_RECOVER) {
4104 		VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
4105 		    spa->spa_load_info) == 0);
4106 	}
4107 
4108 	if (locked) {
4109 		spa->spa_last_open_failed = 0;
4110 		spa->spa_last_ubsync_txg = 0;
4111 		spa->spa_load_txg = 0;
4112 		mutex_exit(&spa_namespace_lock);
4113 	}
4114 
4115 	*spapp = spa;
4116 
4117 	return (0);
4118 }
4119 
4120 int
4121 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
4122     nvlist_t **config)
4123 {
4124 	return (spa_open_common(name, spapp, tag, policy, config));
4125 }
4126 
4127 int
4128 spa_open(const char *name, spa_t **spapp, void *tag)
4129 {
4130 	return (spa_open_common(name, spapp, tag, NULL, NULL));
4131 }
4132 
4133 /*
4134  * Look up the given spa_t, incrementing the inject count in the process,
4135  * preventing it from being exported or destroyed.
4136  */
4137 spa_t *
4138 spa_inject_addref(char *name)
4139 {
4140 	spa_t *spa;
4141 
4142 	mutex_enter(&spa_namespace_lock);
4143 	if ((spa = spa_lookup(name)) == NULL) {
4144 		mutex_exit(&spa_namespace_lock);
4145 		return (NULL);
4146 	}
4147 	spa->spa_inject_ref++;
4148 	mutex_exit(&spa_namespace_lock);
4149 
4150 	return (spa);
4151 }
4152 
4153 void
4154 spa_inject_delref(spa_t *spa)
4155 {
4156 	mutex_enter(&spa_namespace_lock);
4157 	spa->spa_inject_ref--;
4158 	mutex_exit(&spa_namespace_lock);
4159 }
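/*
 * Illustrative pairing (error handling elided): a fault-injection consumer
 * holds the pool like this so that it cannot be exported or destroyed while
 * the handler is registered:
 *
 *	spa_t *spa = spa_inject_addref(name);
 *	if (spa == NULL)
 *		return (SET_ERROR(ENOENT));
 *	... register the handler ...
 *	spa_inject_delref(spa);
 */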
4160 
4161 /*
4162  * Add spare device information to the nvlist.
4163  */
4164 static void
4165 spa_add_spares(spa_t *spa, nvlist_t *config)
4166 {
4167 	nvlist_t **spares;
4168 	uint_t i, nspares;
4169 	nvlist_t *nvroot;
4170 	uint64_t guid;
4171 	vdev_stat_t *vs;
4172 	uint_t vsc;
4173 	uint64_t pool;
4174 
4175 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4176 
4177 	if (spa->spa_spares.sav_count == 0)
4178 		return;
4179 
4180 	VERIFY(nvlist_lookup_nvlist(config,
4181 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4182 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
4183 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
4184 	if (nspares != 0) {
4185 		VERIFY(nvlist_add_nvlist_array(nvroot,
4186 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4187 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
4188 		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
4189 
4190 		/*
4191 		 * Go through and find any spares which have since been
4192 		 * repurposed as an active spare.  If this is the case, update
4193 		 * their status appropriately.
4194 		 */
4195 		for (i = 0; i < nspares; i++) {
4196 			VERIFY(nvlist_lookup_uint64(spares[i],
4197 			    ZPOOL_CONFIG_GUID, &guid) == 0);
4198 			if (spa_spare_exists(guid, &pool, NULL) &&
4199 			    pool != 0ULL) {
4200 				VERIFY(nvlist_lookup_uint64_array(
4201 				    spares[i], ZPOOL_CONFIG_VDEV_STATS,
4202 				    (uint64_t **)&vs, &vsc) == 0);
4203 				vs->vs_state = VDEV_STATE_CANT_OPEN;
4204 				vs->vs_aux = VDEV_AUX_SPARED;
4205 			}
4206 		}
4207 	}
4208 }
4209 
4210 /*
4211  * Add l2cache device information to the nvlist, including vdev stats.
4212  */
4213 static void
4214 spa_add_l2cache(spa_t *spa, nvlist_t *config)
4215 {
4216 	nvlist_t **l2cache;
4217 	uint_t i, j, nl2cache;
4218 	nvlist_t *nvroot;
4219 	uint64_t guid;
4220 	vdev_t *vd;
4221 	vdev_stat_t *vs;
4222 	uint_t vsc;
4223 
4224 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4225 
4226 	if (spa->spa_l2cache.sav_count == 0)
4227 		return;
4228 
4229 	VERIFY(nvlist_lookup_nvlist(config,
4230 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4231 	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
4232 	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
4233 	if (nl2cache != 0) {
4234 		VERIFY(nvlist_add_nvlist_array(nvroot,
4235 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4236 		VERIFY(nvlist_lookup_nvlist_array(nvroot,
4237 		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
4238 
4239 		/*
4240 		 * Update level 2 cache device stats.
4241 		 */
4242 
4243 		for (i = 0; i < nl2cache; i++) {
4244 			VERIFY(nvlist_lookup_uint64(l2cache[i],
4245 			    ZPOOL_CONFIG_GUID, &guid) == 0);
4246 
4247 			vd = NULL;
4248 			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
4249 				if (guid ==
4250 				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
4251 					vd = spa->spa_l2cache.sav_vdevs[j];
4252 					break;
4253 				}
4254 			}
4255 			ASSERT(vd != NULL);
4256 
4257 			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
4258 			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
4259 			    == 0);
4260 			vdev_get_stats(vd, vs);
4261 		}
4262 	}
4263 }
4264 
4265 static void
4266 spa_add_feature_stats(spa_t *spa, nvlist_t *config)
4267 {
4268 	nvlist_t *features;
4269 	zap_cursor_t zc;
4270 	zap_attribute_t za;
4271 
4272 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4273 	VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4274 
4275 	if (spa->spa_feat_for_read_obj != 0) {
4276 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
4277 		    spa->spa_feat_for_read_obj);
4278 		    zap_cursor_retrieve(&zc, &za) == 0;
4279 		    zap_cursor_advance(&zc)) {
4280 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
4281 			    za.za_num_integers == 1);
4282 			VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
4283 			    za.za_first_integer));
4284 		}
4285 		zap_cursor_fini(&zc);
4286 	}
4287 
4288 	if (spa->spa_feat_for_write_obj != 0) {
4289 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
4290 		    spa->spa_feat_for_write_obj);
4291 		    zap_cursor_retrieve(&zc, &za) == 0;
4292 		    zap_cursor_advance(&zc)) {
4293 			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
4294 			    za.za_num_integers == 1);
4295 			VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
4296 			    za.za_first_integer));
4297 		}
4298 		zap_cursor_fini(&zc);
4299 	}
4300 
4301 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
4302 	    features) == 0);
4303 	nvlist_free(features);
4304 }
4305 
4306 int
4307 spa_get_stats(const char *name, nvlist_t **config,
4308     char *altroot, size_t buflen)
4309 {
4310 	int error;
4311 	spa_t *spa;
4312 
4313 	*config = NULL;
4314 	error = spa_open_common(name, &spa, FTAG, NULL, config);
4315 
4316 	if (spa != NULL) {
4317 		/*
4318 		 * This still leaves a window of inconsistency where the spares
4319 		 * or l2cache devices could change and the config would be
4320 		 * self-inconsistent.
4321 		 */
4322 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4323 
4324 		if (*config != NULL) {
4325 			uint64_t loadtimes[2];
4326 
4327 			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
4328 			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
4329 			VERIFY(nvlist_add_uint64_array(*config,
4330 			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
4331 
4332 			VERIFY(nvlist_add_uint64(*config,
4333 			    ZPOOL_CONFIG_ERRCOUNT,
4334 			    spa_get_errlog_size(spa)) == 0);
4335 
4336 			if (spa_suspended(spa))
4337 				VERIFY(nvlist_add_uint64(*config,
4338 				    ZPOOL_CONFIG_SUSPENDED,
4339 				    spa->spa_failmode) == 0);
4340 
4341 			spa_add_spares(spa, *config);
4342 			spa_add_l2cache(spa, *config);
4343 			spa_add_feature_stats(spa, *config);
4344 		}
4345 	}
4346 
4347 	/*
4348 	 * We want to get the alternate root even for faulted pools, so we cheat
4349 	 * and call spa_lookup() directly.
4350 	 */
4351 	if (altroot) {
4352 		if (spa == NULL) {
4353 			mutex_enter(&spa_namespace_lock);
4354 			spa = spa_lookup(name);
4355 			if (spa)
4356 				spa_altroot(spa, altroot, buflen);
4357 			else
4358 				altroot[0] = '\0';
4359 			spa = NULL;
4360 			mutex_exit(&spa_namespace_lock);
4361 		} else {
4362 			spa_altroot(spa, altroot, buflen);
4363 		}
4364 	}
4365 
4366 	if (spa != NULL) {
4367 		spa_config_exit(spa, SCL_CONFIG, FTAG);
4368 		spa_close(spa, FTAG);
4369 	}
4370 
4371 	return (error);
4372 }
4373 
4374 /*
4375  * Validate that the auxiliary device array is well formed.  We must have an
4376  * array of nvlists, each of which describes a valid leaf vdev.  If this is an
4377  * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
4378  * specified, as long as they are well-formed.
4379  */
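/*
 * For reference, the array looked up under 'config' (e.g. ZPOOL_CONFIG_SPARES
 * or ZPOOL_CONFIG_L2CACHE) contains one nvlist per leaf vdev, roughly of the
 * form (values illustrative):
 *
 *	type="disk" path="/dev/dsk/c1t2d0s0" [devid=...]
 *
 * spa_config_parse() turns each element into a vdev_t, and on a successful
 * vdev_open()/vdev_label_init() the vdev's GUID is written back into the
 * element under ZPOOL_CONFIG_GUID.
 */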
4380 static int
4381 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
4382     spa_aux_vdev_t *sav, const char *config, uint64_t version,
4383     vdev_labeltype_t label)
4384 {
4385 	nvlist_t **dev;
4386 	uint_t i, ndev;
4387 	vdev_t *vd;
4388 	int error;
4389 
4390 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4391 
4392 	/*
4393 	 * It's acceptable to have no devs specified.
4394 	 */
4395 	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
4396 		return (0);
4397 
4398 	if (ndev == 0)
4399 		return (SET_ERROR(EINVAL));
4400 
4401 	/*
4402 	 * Make sure the pool is formatted with a version that supports this
4403 	 * device type.
4404 	 */
4405 	if (spa_version(spa) < version)
4406 		return (SET_ERROR(ENOTSUP));
4407 
4408 	/*
4409 	 * Set the pending device list so we correctly handle device in-use
4410 	 * checking.
4411 	 */
4412 	sav->sav_pending = dev;
4413 	sav->sav_npending = ndev;
4414 
4415 	for (i = 0; i < ndev; i++) {
4416 		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
4417 		    mode)) != 0)
4418 			goto out;
4419 
4420 		if (!vd->vdev_ops->vdev_op_leaf) {
4421 			vdev_free(vd);
4422 			error = SET_ERROR(EINVAL);
4423 			goto out;
4424 		}
4425 
4426 		/*
4427 		 * The L2ARC currently only supports disk devices in
4428 		 * kernel context.  For user-level testing, we allow it.
4429 		 */
4430 #ifdef _KERNEL
4431 		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
4432 		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
4433 			error = SET_ERROR(ENOTBLK);
4434 			vdev_free(vd);
4435 			goto out;
4436 		}
4437 #endif
4438 		vd->vdev_top = vd;
4439 
4440 		if ((error = vdev_open(vd)) == 0 &&
4441 		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
4442 			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
4443 			    vd->vdev_guid) == 0);
4444 		}
4445 
4446 		vdev_free(vd);
4447 
4448 		if (error &&
4449 		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
4450 			goto out;
4451 		else
4452 			error = 0;
4453 	}
4454 
4455 out:
4456 	sav->sav_pending = NULL;
4457 	sav->sav_npending = 0;
4458 	return (error);
4459 }
4460 
4461 static int
4462 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
4463 {
4464 	int error;
4465 
4466 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4467 
4468 	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
4469 	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
4470 	    VDEV_LABEL_SPARE)) != 0) {
4471 		return (error);
4472 	}
4473 
4474 	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
4475 	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
4476 	    VDEV_LABEL_L2CACHE));
4477 }
4478 
4479 static void
4480 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
4481     const char *config)
4482 {
4483 	int i;
4484 
4485 	if (sav->sav_config != NULL) {
4486 		nvlist_t **olddevs;
4487 		uint_t oldndevs;
4488 		nvlist_t **newdevs;
4489 
4490 		/*
4491 		 * Generate a new dev list by concatenating with the
4492 		 * current dev list.
4493 		 */
4494 		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
4495 		    &olddevs, &oldndevs) == 0);
4496 
4497 		newdevs = kmem_alloc(sizeof (void *) *
4498 		    (ndevs + oldndevs), KM_SLEEP);
4499 		for (i = 0; i < oldndevs; i++)
4500 			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
4501 			    KM_SLEEP) == 0);
4502 		for (i = 0; i < ndevs; i++)
4503 			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
4504 			    KM_SLEEP) == 0);
4505 
4506 		VERIFY(nvlist_remove(sav->sav_config, config,
4507 		    DATA_TYPE_NVLIST_ARRAY) == 0);
4508 
4509 		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
4510 		    config, newdevs, ndevs + oldndevs) == 0);
4511 		for (i = 0; i < oldndevs + ndevs; i++)
4512 			nvlist_free(newdevs[i]);
4513 		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
4514 	} else {
4515 		/*
4516 		 * Generate a new dev list.
4517 		 */
4518 		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
4519 		    KM_SLEEP) == 0);
4520 		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
4521 		    devs, ndevs) == 0);
4522 	}
4523 }
4524 
4525 /*
4526  * Stop and drop level 2 ARC devices
4527  */
4528 void
4529 spa_l2cache_drop(spa_t *spa)
4530 {
4531 	vdev_t *vd;
4532 	int i;
4533 	spa_aux_vdev_t *sav = &spa->spa_l2cache;
4534 
4535 	for (i = 0; i < sav->sav_count; i++) {
4536 		uint64_t pool;
4537 
4538 		vd = sav->sav_vdevs[i];
4539 		ASSERT(vd != NULL);
4540 
4541 		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
4542 		    pool != 0ULL && l2arc_vdev_present(vd))
4543 			l2arc_remove_vdev(vd);
4544 	}
4545 }
4546 
4547 /*
4548  * Pool Creation
4549  */
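/*
 * The nvroot argument describes the new pool's vdev tree in the same nvlist
 * form used elsewhere in this file (see spa_generate_rootconf() below for an
 * example of building one): a root vdev with ZPOOL_CONFIG_TYPE of
 * VDEV_TYPE_ROOT and a ZPOOL_CONFIG_CHILDREN array holding the top-level
 * vdevs. In practice the nvlist is assembled in userland and handed down
 * through the pool-creation ioctl.
 */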
4550 int
4551 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
4552     nvlist_t *zplprops)
4553 {
4554 	spa_t *spa;
4555 	char *altroot = NULL;
4556 	vdev_t *rvd;
4557 	dsl_pool_t *dp;
4558 	dmu_tx_t *tx;
4559 	int error = 0;
4560 	uint64_t txg = TXG_INITIAL;
4561 	nvlist_t **spares, **l2cache;
4562 	uint_t nspares, nl2cache;
4563 	uint64_t version, obj;
4564 	boolean_t has_features;
4565 
4566 	/*
4567 	 * If this pool already exists, return failure.
4568 	 */
4569 	mutex_enter(&spa_namespace_lock);
4570 	if (spa_lookup(pool) != NULL) {
4571 		mutex_exit(&spa_namespace_lock);
4572 		return (SET_ERROR(EEXIST));
4573 	}
4574 
4575 	/*
4576 	 * Allocate a new spa_t structure.
4577 	 */
4578 	(void) nvlist_lookup_string(props,
4579 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
4580 	spa = spa_add(pool, NULL, altroot);
4581 	spa_activate(spa, spa_mode_global);
4582 
4583 	if (props && (error = spa_prop_validate(spa, props))) {
4584 		spa_deactivate(spa);
4585 		spa_remove(spa);
4586 		mutex_exit(&spa_namespace_lock);
4587 		return (error);
4588 	}
4589 
4590 	has_features = B_FALSE;
4591 	for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
4592 	    elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
4593 		if (zpool_prop_feature(nvpair_name(elem)))
4594 			has_features = B_TRUE;
4595 	}
4596 
4597 	if (has_features || nvlist_lookup_uint64(props,
4598 	    zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
4599 		version = SPA_VERSION;
4600 	}
4601 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
4602 
4603 	spa->spa_first_txg = txg;
4604 	spa->spa_uberblock.ub_txg = txg - 1;
4605 	spa->spa_uberblock.ub_version = version;
4606 	spa->spa_ubsync = spa->spa_uberblock;
4607 	spa->spa_load_state = SPA_LOAD_CREATE;
4608 	spa->spa_removing_phys.sr_state = DSS_NONE;
4609 	spa->spa_removing_phys.sr_removing_vdev = -1;
4610 	spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
4611 
4612 	/*
4613 	 * Create "The Godfather" zio to hold all async IOs
4614 	 */
4615 	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
4616 	    KM_SLEEP);
4617 	for (int i = 0; i < max_ncpus; i++) {
4618 		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
4619 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
4620 		    ZIO_FLAG_GODFATHER);
4621 	}
4622 
4623 	/*
4624 	 * Create the root vdev.
4625 	 */
4626 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4627 
4628 	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
4629 
4630 	ASSERT(error != 0 || rvd != NULL);
4631 	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
4632 
4633 	if (error == 0 && !zfs_allocatable_devs(nvroot))
4634 		error = SET_ERROR(EINVAL);
4635 
4636 	if (error == 0 &&
4637 	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
4638 	    (error = spa_validate_aux(spa, nvroot, txg,
4639 	    VDEV_ALLOC_ADD)) == 0) {
4640 		for (int c = 0; c < rvd->vdev_children; c++) {
4641 			vdev_metaslab_set_size(rvd->vdev_child[c]);
4642 			vdev_expand(rvd->vdev_child[c], txg);
4643 		}
4644 	}
4645 
4646 	spa_config_exit(spa, SCL_ALL, FTAG);
4647 
4648 	if (error != 0) {
4649 		spa_unload(spa);
4650 		spa_deactivate(spa);
4651 		spa_remove(spa);
4652 		mutex_exit(&spa_namespace_lock);
4653 		return (error);
4654 	}
4655 
4656 	/*
4657 	 * Get the list of spares, if specified.
4658 	 */
4659 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
4660 	    &spares, &nspares) == 0) {
4661 		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
4662 		    KM_SLEEP) == 0);
4663 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
4664 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4665 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4666 		spa_load_spares(spa);
4667 		spa_config_exit(spa, SCL_ALL, FTAG);
4668 		spa->spa_spares.sav_sync = B_TRUE;
4669 	}
4670 
4671 	/*
4672 	 * Get the list of level 2 cache devices, if specified.
4673 	 */
4674 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
4675 	    &l2cache, &nl2cache) == 0) {
4676 		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
4677 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
4678 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
4679 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4680 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4681 		spa_load_l2cache(spa);
4682 		spa_config_exit(spa, SCL_ALL, FTAG);
4683 		spa->spa_l2cache.sav_sync = B_TRUE;
4684 	}
4685 
4686 	spa->spa_is_initializing = B_TRUE;
4687 	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
4688 	spa->spa_meta_objset = dp->dp_meta_objset;
4689 	spa->spa_is_initializing = B_FALSE;
4690 
4691 	/*
4692 	 * Create DDTs (dedup tables).
4693 	 */
4694 	ddt_create(spa);
4695 
4696 	spa_update_dspace(spa);
4697 
4698 	tx = dmu_tx_create_assigned(dp, txg);
4699 
4700 	/*
4701 	 * Create the pool config object.
4702 	 */
4703 	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
4704 	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
4705 	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
4706 
4707 	if (zap_add(spa->spa_meta_objset,
4708 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
4709 	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
4710 		cmn_err(CE_PANIC, "failed to add pool config");
4711 	}
4712 
4713 	if (spa_version(spa) >= SPA_VERSION_FEATURES)
4714 		spa_feature_create_zap_objects(spa, tx);
4715 
4716 	if (zap_add(spa->spa_meta_objset,
4717 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
4718 	    sizeof (uint64_t), 1, &version, tx) != 0) {
4719 		cmn_err(CE_PANIC, "failed to add pool version");
4720 	}
4721 
4722 	/* Newly created pools with the right version are always deflated. */
4723 	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
4724 		spa->spa_deflate = TRUE;
4725 		if (zap_add(spa->spa_meta_objset,
4726 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
4727 		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
4728 			cmn_err(CE_PANIC, "failed to add deflate");
4729 		}
4730 	}
4731 
4732 	/*
4733 	 * Create the deferred-free bpobj.  Turn off compression
4734 	 * because sync-to-convergence takes longer if the blocksize
4735 	 * keeps changing.
4736 	 */
4737 	obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
4738 	dmu_object_set_compress(spa->spa_meta_objset, obj,
4739 	    ZIO_COMPRESS_OFF, tx);
4740 	if (zap_add(spa->spa_meta_objset,
4741 	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
4742 	    sizeof (uint64_t), 1, &obj, tx) != 0) {
4743 		cmn_err(CE_PANIC, "failed to add bpobj");
4744 	}
4745 	VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
4746 	    spa->spa_meta_objset, obj));
4747 
4748 	/*
4749 	 * Create the pool's history object.
4750 	 */
4751 	if (version >= SPA_VERSION_ZPOOL_HISTORY)
4752 		spa_history_create_obj(spa, tx);
4753 
4754 	/*
4755 	 * Generate some random noise for salted checksums to operate on.
4756 	 */
4757 	(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
4758 	    sizeof (spa->spa_cksum_salt.zcs_bytes));
4759 
4760 	/*
4761 	 * Set pool properties.
4762 	 */
4763 	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
4764 	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
4765 	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
4766 	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
4767 
4768 	if (props != NULL) {
4769 		spa_configfile_set(spa, props, B_FALSE);
4770 		spa_sync_props(props, tx);
4771 	}
4772 
4773 	dmu_tx_commit(tx);
4774 
4775 	spa->spa_sync_on = B_TRUE;
4776 	txg_sync_start(spa->spa_dsl_pool);
4777 
4778 	/*
4779 	 * We explicitly wait for the first transaction to complete so that our
4780 	 * bean counters are appropriately updated.
4781 	 */
4782 	txg_wait_synced(spa->spa_dsl_pool, txg);
4783 
4784 	spa_spawn_aux_threads(spa);
4785 
4786 	spa_write_cachefile(spa, B_FALSE, B_TRUE);
4787 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
4788 
4789 	spa_history_log_version(spa, "create");
4790 
4791 	/*
4792 	 * Don't count references from objsets that are already closed
4793 	 * and are making their way through the eviction process.
4794 	 */
4795 	spa_evicting_os_wait(spa);
4796 	spa->spa_minref = refcount_count(&spa->spa_refcount);
4797 	spa->spa_load_state = SPA_LOAD_NONE;
4798 
4799 	mutex_exit(&spa_namespace_lock);
4800 
4801 	return (0);
4802 }
4803 
4804 #ifdef _KERNEL
4805 /*
4806  * Get the root pool information from the root disk, then import the root pool
4807  * during system boot.
4808  */
4809 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
4810 
4811 static nvlist_t *
4812 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
4813 {
4814 	nvlist_t *config;
4815 	nvlist_t *nvtop, *nvroot;
4816 	uint64_t pgid;
4817 
4818 	if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
4819 		return (NULL);
4820 
4821 	/*
4822 	 * Add this top-level vdev to the child array.
4823 	 */
4824 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4825 	    &nvtop) == 0);
4826 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
4827 	    &pgid) == 0);
4828 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
4829 
4830 	/*
4831 	 * Put this pool's top-level vdevs into a root vdev.
4832 	 */
4833 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4834 	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
4835 	    VDEV_TYPE_ROOT) == 0);
4836 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
4837 	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
4838 	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4839 	    &nvtop, 1) == 0);
4840 
4841 	/*
4842 	 * Replace the existing vdev_tree with the new root vdev in
4843 	 * this pool's configuration (remove the old, add the new).
4844 	 */
4845 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
4846 	nvlist_free(nvroot);
4847 	return (config);
4848 }
4849 
4850 /*
4851  * Walk the vdev tree and see if we can find a device with "better"
4852  * configuration. A configuration is "better" if the label on that
4853  * device has a more recent txg.
4854  */
4855 static void
4856 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
4857 {
4858 	for (int c = 0; c < vd->vdev_children; c++)
4859 		spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
4860 
4861 	if (vd->vdev_ops->vdev_op_leaf) {
4862 		nvlist_t *label;
4863 		uint64_t label_txg;
4864 
4865 		if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
4866 		    &label) != 0)
4867 			return;
4868 
4869 		VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
4870 		    &label_txg) == 0);
4871 
4872 		/*
4873 		 * Do we have a better boot device?
4874 		 */
4875 		if (label_txg > *txg) {
4876 			*txg = label_txg;
4877 			*avd = vd;
4878 		}
4879 		nvlist_free(label);
4880 	}
4881 }
4882 
4883 /*
4884  * Import a root pool.
4885  *
4886  * For x86, devpath_list will consist of the devid and/or physpath name of
4887  * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
4888  * The GRUB "findroot" command will return the vdev we should boot.
4889  *
4890  * For SPARC, devpath_list consists of the physpath name of the booting device,
4891  * whether the root pool is a single-device pool or a mirrored pool.
4892  * e.g.
4893  *	"/pci@1f,0/ide@d/disk@0,0:a"
4894  */
4895 int
4896 spa_import_rootpool(char *devpath, char *devid)
4897 {
4898 	spa_t *spa;
4899 	vdev_t *rvd, *bvd, *avd = NULL;
4900 	nvlist_t *config, *nvtop;
4901 	uint64_t guid, txg;
4902 	char *pname;
4903 	int error;
4904 
4905 	/*
4906 	 * Read the label from the boot device and generate a configuration.
4907 	 */
4908 	config = spa_generate_rootconf(devpath, devid, &guid);
4909 #if defined(_OBP) && defined(_KERNEL)
4910 	if (config == NULL) {
4911 		if (strstr(devpath, "/iscsi/ssd") != NULL) {
4912 			/* iscsi boot */
4913 			get_iscsi_bootpath_phy(devpath);
4914 			config = spa_generate_rootconf(devpath, devid, &guid);
4915 		}
4916 	}
4917 #endif
4918 	if (config == NULL) {
4919 		cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
4920 		    devpath);
4921 		return (SET_ERROR(EIO));
4922 	}
4923 
4924 	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
4925 	    &pname) == 0);
4926 	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
4927 
4928 	mutex_enter(&spa_namespace_lock);
4929 	if ((spa = spa_lookup(pname)) != NULL) {
4930 		/*
4931 		 * Remove the existing root pool from the namespace so that we
4932 		 * can replace it with the correct config we just read in.
4933 		 */
4934 		spa_remove(spa);
4935 	}
4936 
4937 	spa = spa_add(pname, config, NULL);
4938 	spa->spa_is_root = B_TRUE;
4939 	spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
4940 	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
4941 	    &spa->spa_ubsync.ub_version) != 0)
4942 		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
4943 
4944 	/*
4945 	 * Build up a vdev tree based on the boot device's label config.
4946 	 */
4947 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4948 	    &nvtop) == 0);
4949 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4950 	error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
4951 	    VDEV_ALLOC_ROOTPOOL);
4952 	spa_config_exit(spa, SCL_ALL, FTAG);
4953 	if (error) {
4954 		mutex_exit(&spa_namespace_lock);
4955 		nvlist_free(config);
4956 		cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
4957 		    pname);
4958 		return (error);
4959 	}
4960 
4961 	/*
4962 	 * Get the boot vdev.
4963 	 */
4964 	if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
4965 		cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
4966 		    (u_longlong_t)guid);
4967 		error = SET_ERROR(ENOENT);
4968 		goto out;
4969 	}
4970 
4971 	/*
4972 	 * Determine if there is a better boot device.
4973 	 */
4974 	avd = bvd;
4975 	spa_alt_rootvdev(rvd, &avd, &txg);
4976 	if (avd != bvd) {
4977 		cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
4978 		    "try booting from '%s'", avd->vdev_path);
4979 		error = SET_ERROR(EINVAL);
4980 		goto out;
4981 	}
4982 
4983 	/*
4984 	 * If the boot device is part of a spare vdev then ensure that
4985 	 * we're booting off the active spare.
4986 	 */
4987 	if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
4988 	    !bvd->vdev_isspare) {
4989 		cmn_err(CE_NOTE, "The boot device is currently spared. Please "
4990 		    "try booting from '%s'",
4991 		    bvd->vdev_parent->
4992 		    vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
4993 		error = SET_ERROR(EINVAL);
4994 		goto out;
4995 	}
4996 
4997 	error = 0;
4998 out:
4999 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5000 	vdev_free(rvd);
5001 	spa_config_exit(spa, SCL_ALL, FTAG);
5002 	mutex_exit(&spa_namespace_lock);
5003 
5004 	nvlist_free(config);
5005 	return (error);
5006 }
5007 
5008 #endif
5009 
5010 /*
5011  * Import a non-root pool into the system.
5012  */
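/*
 * The 'flags' argument carries ZFS_IMPORT_* modifiers. Two that matter in
 * this file: ZFS_IMPORT_VERBATIM inserts the supplied config into the
 * namespace without loading the pool (see below), and ZFS_IMPORT_CHECKPOINT
 * makes the load rewind to the pool checkpoint (see
 * spa_ld_checkpoint_rewind()).
 */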
5013 int
5014 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
5015 {
5016 	spa_t *spa;
5017 	char *altroot = NULL;
5018 	spa_load_state_t state = SPA_LOAD_IMPORT;
5019 	zpool_rewind_policy_t policy;
5020 	uint64_t mode = spa_mode_global;
5021 	uint64_t readonly = B_FALSE;
5022 	int error;
5023 	nvlist_t *nvroot;
5024 	nvlist_t **spares, **l2cache;
5025 	uint_t nspares, nl2cache;
5026 
5027 	/*
5028 	 * If a pool with this name exists, return failure.
5029 	 */
5030 	mutex_enter(&spa_namespace_lock);
5031 	if (spa_lookup(pool) != NULL) {
5032 		mutex_exit(&spa_namespace_lock);
5033 		return (SET_ERROR(EEXIST));
5034 	}
5035 
5036 	/*
5037 	 * Create and initialize the spa structure.
5038 	 */
5039 	(void) nvlist_lookup_string(props,
5040 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5041 	(void) nvlist_lookup_uint64(props,
5042 	    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
5043 	if (readonly)
5044 		mode = FREAD;
5045 	spa = spa_add(pool, config, altroot);
5046 	spa->spa_import_flags = flags;
5047 
5048 	/*
5049 	 * Verbatim import - Take a pool and insert it into the namespace
5050 	 * as if it had been loaded at boot.
5051 	 */
5052 	if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
5053 		if (props != NULL)
5054 			spa_configfile_set(spa, props, B_FALSE);
5055 
5056 		spa_write_cachefile(spa, B_FALSE, B_TRUE);
5057 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
5058 		zfs_dbgmsg("spa_import: verbatim import of %s", pool);
5059 		mutex_exit(&spa_namespace_lock);
5060 		return (0);
5061 	}
5062 
5063 	spa_activate(spa, mode);
5064 
5065 	/*
5066 	 * Don't start async tasks until we know everything is healthy.
5067 	 */
5068 	spa_async_suspend(spa);
5069 
5070 	zpool_get_rewind_policy(config, &policy);
5071 	if (policy.zrp_request & ZPOOL_DO_REWIND)
5072 		state = SPA_LOAD_RECOVER;
5073 
5074 	spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
5075 
5076 	if (state != SPA_LOAD_RECOVER) {
5077 		spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
5078 		zfs_dbgmsg("spa_import: importing %s", pool);
5079 	} else {
5080 		zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
5081 		    "(RECOVERY MODE)", pool, (longlong_t)policy.zrp_txg);
5082 	}
5083 	error = spa_load_best(spa, state, policy.zrp_txg, policy.zrp_request);
5084 
5085 	/*
5086 	 * Propagate anything learned while loading the pool and pass it
5087 	 * back to the caller (i.e. rewind info, missing devices, etc.).
5088 	 */
5089 	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
5090 	    spa->spa_load_info) == 0);
5091 
5092 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5093 	/*
5094 	 * Toss any existing sparelist, as it doesn't have any validity
5095 	 * anymore, and conflicts with spa_has_spare().
5096 	 */
5097 	if (spa->spa_spares.sav_config) {
5098 		nvlist_free(spa->spa_spares.sav_config);
5099 		spa->spa_spares.sav_config = NULL;
5100 		spa_load_spares(spa);
5101 	}
5102 	if (spa->spa_l2cache.sav_config) {
5103 		nvlist_free(spa->spa_l2cache.sav_config);
5104 		spa->spa_l2cache.sav_config = NULL;
5105 		spa_load_l2cache(spa);
5106 	}
5107 
5108 	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5109 	    &nvroot) == 0);
5110 	if (error == 0)
5111 		error = spa_validate_aux(spa, nvroot, -1ULL,
5112 		    VDEV_ALLOC_SPARE);
5113 	if (error == 0)
5114 		error = spa_validate_aux(spa, nvroot, -1ULL,
5115 		    VDEV_ALLOC_L2CACHE);
5116 	spa_config_exit(spa, SCL_ALL, FTAG);
5117 
5118 	if (props != NULL)
5119 		spa_configfile_set(spa, props, B_FALSE);
5120 
5121 	if (error != 0 || (props && spa_writeable(spa) &&
5122 	    (error = spa_prop_set(spa, props)))) {
5123 		spa_unload(spa);
5124 		spa_deactivate(spa);
5125 		spa_remove(spa);
5126 		mutex_exit(&spa_namespace_lock);
5127 		return (error);
5128 	}
5129 
5130 	spa_async_resume(spa);
5131 
5132 	/*
5133 	 * Override any spares and level 2 cache devices as specified by
5134 	 * the user, as these may have correct device names/devids, etc.
5135 	 */
5136 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
5137 	    &spares, &nspares) == 0) {
5138 		if (spa->spa_spares.sav_config)
5139 			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
5140 			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
5141 		else
5142 			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
5143 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
5144 		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
5145 		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
5146 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5147 		spa_load_spares(spa);
5148 		spa_config_exit(spa, SCL_ALL, FTAG);
5149 		spa->spa_spares.sav_sync = B_TRUE;
5150 	}
5151 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
5152 	    &l2cache, &nl2cache) == 0) {
5153 		if (spa->spa_l2cache.sav_config)
5154 			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
5155 			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
5156 		else
5157 			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
5158 			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
5159 		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
5160 		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
5161 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5162 		spa_load_l2cache(spa);
5163 		spa_config_exit(spa, SCL_ALL, FTAG);
5164 		spa->spa_l2cache.sav_sync = B_TRUE;
5165 	}
5166 
5167 	/*
5168 	 * Check for any removed devices.
5169 	 */
5170 	if (spa->spa_autoreplace) {
5171 		spa_aux_check_removed(&spa->spa_spares);
5172 		spa_aux_check_removed(&spa->spa_l2cache);
5173 	}
5174 
5175 	if (spa_writeable(spa)) {
5176 		/*
5177 		 * Update the config cache to include the newly-imported pool.
5178 		 */
5179 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
5180 	}
5181 
5182 	/*
5183 	 * It's possible that the pool was expanded while it was exported.
5184 	 * We kick off an async task to handle this for us.
5185 	 */
5186 	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
5187 
5188 	spa_history_log_version(spa, "import");
5189 
5190 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
5191 
5192 	mutex_exit(&spa_namespace_lock);
5193 
5194 	return (0);
5195 }
5196 
5197 nvlist_t *
5198 spa_tryimport(nvlist_t *tryconfig)
5199 {
5200 	nvlist_t *config = NULL;
5201 	char *poolname, *cachefile;
5202 	spa_t *spa;
5203 	uint64_t state;
5204 	int error;
5205 	zpool_rewind_policy_t policy;
5206 
5207 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
5208 		return (NULL);
5209 
5210 	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
5211 		return (NULL);
5212 
5213 	/*
5214 	 * Create and initialize the spa structure.
5215 	 */
5216 	mutex_enter(&spa_namespace_lock);
5217 	spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
5218 	spa_activate(spa, FREAD);
5219 
5220 	/*
5221 	 * Rewind pool if a max txg was provided. Note that even though we
5222 	 * retrieve the complete rewind policy, only the rewind txg is relevant
5223 	 * for tryimport.
5224 	 */
5225 	zpool_get_rewind_policy(spa->spa_config, &policy);
5226 	if (policy.zrp_txg != UINT64_MAX) {
5227 		spa->spa_load_max_txg = policy.zrp_txg;
5228 		spa->spa_extreme_rewind = B_TRUE;
5229 		zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
5230 		    poolname, (longlong_t)policy.zrp_txg);
5231 	} else {
5232 		zfs_dbgmsg("spa_tryimport: importing %s", poolname);
5233 	}
5234 
5235 	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
5236 	    == 0) {
5237 		zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
5238 		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
5239 	} else {
5240 		spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
5241 	}
5242 
5243 	error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
5244 
5245 	/*
5246 	 * If 'tryconfig' was at least parsable, return the current config.
5247 	 */
5248 	if (spa->spa_root_vdev != NULL) {
5249 		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
5250 		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
5251 		    poolname) == 0);
5252 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
5253 		    state) == 0);
5254 		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
5255 		    spa->spa_uberblock.ub_timestamp) == 0);
5256 		VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
5257 		    spa->spa_load_info) == 0);
5258 
5259 		/*
5260 		 * If the bootfs property exists on this pool then we
5261 		 * copy it out so that external consumers can tell which
5262 		 * pools are bootable.
5263 		 */
5264 		if ((!error || error == EEXIST) && spa->spa_bootfs) {
5265 			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5266 
5267 			/*
5268 			 * We have to play games with the name since the
5269 			 * pool was opened as TRYIMPORT_NAME.
5270 			 */
5271 			if (dsl_dsobj_to_dsname(spa_name(spa),
5272 			    spa->spa_bootfs, tmpname) == 0) {
5273 				char *cp;
5274 				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5275 
5276 				cp = strchr(tmpname, '/');
5277 				if (cp == NULL) {
5278 					(void) strlcpy(dsname, tmpname,
5279 					    MAXPATHLEN);
5280 				} else {
5281 					(void) snprintf(dsname, MAXPATHLEN,
5282 					    "%s/%s", poolname, ++cp);
5283 				}
5284 				VERIFY(nvlist_add_string(config,
5285 				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
5286 				kmem_free(dsname, MAXPATHLEN);
5287 			}
5288 			kmem_free(tmpname, MAXPATHLEN);
5289 		}
5290 
5291 		/*
5292 		 * Add the list of hot spares and level 2 cache devices.
5293 		 */
5294 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
5295 		spa_add_spares(spa, config);
5296 		spa_add_l2cache(spa, config);
5297 		spa_config_exit(spa, SCL_CONFIG, FTAG);
5298 	}
5299 
5300 	spa_unload(spa);
5301 	spa_deactivate(spa);
5302 	spa_remove(spa);
5303 	mutex_exit(&spa_namespace_lock);
5304 
5305 	return (config);
5306 }
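
/*
 * For example, a hypothetical caller probing a config assembled in
 * userland might use spa_tryimport() roughly as follows ('tryconfig'
 * being whatever nvlist the caller built; error handling omitted):
 *
 *	nvlist_t *probed = spa_tryimport(tryconfig);
 *	if (probed != NULL) {
 *		... inspect ZPOOL_CONFIG_POOL_NAME, ZPOOL_CONFIG_LOAD_INFO ...
 *		nvlist_free(probed);
 *	}
 */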
5307 
5308 /*
5309  * Pool export/destroy
5310  *
5311  * The act of destroying or exporting a pool is very simple: we make sure
5312  * there is no more pending I/O and that all references to the pool are gone.
5313  * Then we update the pool state and sync all the labels to disk, removing
5314  * the configuration from the cache afterwards.  If the 'hardforce' flag is
5315  * set, we don't sync the labels or remove the configuration from the cache.
5316  */
5317 static int
5318 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
5319     boolean_t force, boolean_t hardforce)
5320 {
5321 	spa_t *spa;
5322 
5323 	if (oldconfig)
5324 		*oldconfig = NULL;
5325 
5326 	if (!(spa_mode_global & FWRITE))
5327 		return (SET_ERROR(EROFS));
5328 
5329 	mutex_enter(&spa_namespace_lock);
5330 	if ((spa = spa_lookup(pool)) == NULL) {
5331 		mutex_exit(&spa_namespace_lock);
5332 		return (SET_ERROR(ENOENT));
5333 	}
5334 
5335 	/*
5336 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
5337 	 * reacquire the namespace lock, and see if we can export.
5338 	 */
5339 	spa_open_ref(spa, FTAG);
5340 	mutex_exit(&spa_namespace_lock);
5341 	spa_async_suspend(spa);
5342 	mutex_enter(&spa_namespace_lock);
5343 	spa_close(spa, FTAG);
5344 
5345 	/*
5346 	 * The pool will be in core if it's openable,
5347 	 * in which case we can modify its state.
5348 	 */
5349 	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
5350 		/*
5351 		 * Objsets may be open only because they're dirty, so we
5352 		 * have to force the pool to sync before checking spa_refcnt.
5353 		 */
5354 		txg_wait_synced(spa->spa_dsl_pool, 0);
5355 		spa_evicting_os_wait(spa);
5356 
5357 		/*
5358 		 * A pool cannot be exported or destroyed if there are active
5359 		 * references.  If we are resetting a pool, allow references by
5360 		 * fault injection handlers.
5361 		 */
5362 		if (!spa_refcount_zero(spa) ||
5363 		    (spa->spa_inject_ref != 0 &&
5364 		    new_state != POOL_STATE_UNINITIALIZED)) {
5365 			spa_async_resume(spa);
5366 			mutex_exit(&spa_namespace_lock);
5367 			return (SET_ERROR(EBUSY));
5368 		}
5369 
5370 		/*
5371 		 * A pool cannot be exported if it has an active shared spare.
5372 		 * This is to prevent other pools from stealing the active
5373 		 * spare from an exported pool.  At the user's explicit
5374 		 * request, such a pool can still be forcibly exported.
5375 		 */
5376 		if (!force && new_state == POOL_STATE_EXPORTED &&
5377 		    spa_has_active_shared_spare(spa)) {
5378 			spa_async_resume(spa);
5379 			mutex_exit(&spa_namespace_lock);
5380 			return (SET_ERROR(EXDEV));
5381 		}
5382 
5383 		/*
5384 		 * We want this to be reflected on every label,
5385 		 * so mark them all dirty.  spa_unload() will do the
5386 		 * final sync that pushes these changes out.
5387 		 */
5388 		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
5389 			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5390 			spa->spa_state = new_state;
5391 			spa->spa_final_txg = spa_last_synced_txg(spa) +
5392 			    TXG_DEFER_SIZE + 1;
5393 			vdev_config_dirty(spa->spa_root_vdev);
5394 			spa_config_exit(spa, SCL_ALL, FTAG);
5395 		}
5396 	}
5397 
5398 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
5399 
5400 	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
5401 		spa_unload(spa);
5402 		spa_deactivate(spa);
5403 	}
5404 
5405 	if (oldconfig && spa->spa_config)
5406 		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
5407 
5408 	if (new_state != POOL_STATE_UNINITIALIZED) {
5409 		if (!hardforce)
5410 			spa_write_cachefile(spa, B_TRUE, B_TRUE);
5411 		spa_remove(spa);
5412 	}
5413 	mutex_exit(&spa_namespace_lock);
5414 
5415 	return (0);
5416 }
5417 
5418 /*
5419  * Destroy a storage pool.
5420  */
5421 int
5422 spa_destroy(char *pool)
5423 {
5424 	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
5425 	    B_FALSE, B_FALSE));
5426 }
5427 
5428 /*
5429  * Export a storage pool.
5430  */
5431 int
5432 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
5433     boolean_t hardforce)
5434 {
5435 	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
5436 	    force, hardforce));
5437 }
5438 
5439 /*
5440  * Similar to spa_export(), this unloads the spa_t without actually removing it
5441  * from the namespace in any way.
5442  */
5443 int
5444 spa_reset(char *pool)
5445 {
5446 	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
5447 	    B_FALSE, B_FALSE));
5448 }
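
/*
 * For example, a hypothetical caller tearing a pool down for good would use
 * spa_destroy(), while one moving the pool to another system would use
 * spa_export() and may keep the returned config ("tank" is a placeholder
 * pool name):
 *
 *	nvlist_t *oldconfig = NULL;
 *	int err = spa_export("tank", &oldconfig, B_FALSE, B_FALSE);
 *	if (err == 0 && oldconfig != NULL)
 *		nvlist_free(oldconfig);
 */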
5449 
5450 /*
5451  * ==========================================================================
5452  * Device manipulation
5453  * ==========================================================================
5454  */
5455 
5456 /*
5457  * Add a device to a storage pool.
5458  */
5459 int
5460 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
5461 {
5462 	uint64_t txg, id;
5463 	int error;
5464 	vdev_t *rvd = spa->spa_root_vdev;
5465 	vdev_t *vd, *tvd;
5466 	nvlist_t **spares, **l2cache;
5467 	uint_t nspares, nl2cache;
5468 
5469 	ASSERT(spa_writeable(spa));
5470 
5471 	txg = spa_vdev_enter(spa);
5472 
5473 	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
5474 	    VDEV_ALLOC_ADD)) != 0)
5475 		return (spa_vdev_exit(spa, NULL, txg, error));
5476 
5477 	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
5478 
5479 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
5480 	    &nspares) != 0)
5481 		nspares = 0;
5482 
5483 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
5484 	    &nl2cache) != 0)
5485 		nl2cache = 0;
5486 
5487 	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
5488 		return (spa_vdev_exit(spa, vd, txg, EINVAL));
5489 
5490 	if (vd->vdev_children != 0 &&
5491 	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
5492 		return (spa_vdev_exit(spa, vd, txg, error));
5493 
5494 	/*
5495 	 * We must validate the spares and l2cache devices after checking the
5496 	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
5497 	 */
5498 	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
5499 		return (spa_vdev_exit(spa, vd, txg, error));
5500 
5501 	/*
5502 	 * If we are in the middle of a device removal, we can only add
5503 	 * devices that match the existing devices in the pool.
5504 	 * If we are in the middle of a removal, or have some indirect
5505 	 * vdevs, we cannot add raidz toplevels.
5506 	 */
5507 	if (spa->spa_vdev_removal != NULL ||
5508 	    spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
5509 		for (int c = 0; c < vd->vdev_children; c++) {
5510 			tvd = vd->vdev_child[c];
5511 			if (spa->spa_vdev_removal != NULL &&
5512 			    tvd->vdev_ashift !=
5513 			    spa->spa_vdev_removal->svr_vdev->vdev_ashift) {
5514 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
5515 			}
5516 			/* Fail if top level vdev is raidz */
5517 			if (tvd->vdev_ops == &vdev_raidz_ops) {
5518 				return (spa_vdev_exit(spa, vd, txg, EINVAL));
5519 			}
5520 			/*
5521 			 * Need the top level mirror to be
5522 			 * a mirror of leaf vdevs only
5523 			 */
5524 			if (tvd->vdev_ops == &vdev_mirror_ops) {
5525 				for (uint64_t cid = 0;
5526 				    cid < tvd->vdev_children; cid++) {
5527 					vdev_t *cvd = tvd->vdev_child[cid];
5528 					if (!cvd->vdev_ops->vdev_op_leaf) {
5529 						return (spa_vdev_exit(spa, vd,
5530 						    txg, EINVAL));
5531 					}
5532 				}
5533 			}
5534 		}
5535 	}
5536 
5537 	for (int c = 0; c < vd->vdev_children; c++) {
5538 
5539 		/*
5540 		 * Set the vdev id to the first hole, if one exists.
5541 		 */
5542 		for (id = 0; id < rvd->vdev_children; id++) {
5543 			if (rvd->vdev_child[id]->vdev_ishole) {
5544 				vdev_free(rvd->vdev_child[id]);
5545 				break;
5546 			}
5547 		}
5548 		tvd = vd->vdev_child[c];
5549 		vdev_remove_child(vd, tvd);
5550 		tvd->vdev_id = id;
5551 		vdev_add_child(rvd, tvd);
5552 		vdev_config_dirty(tvd);
5553 	}
5554 
5555 	if (nspares != 0) {
5556 		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
5557 		    ZPOOL_CONFIG_SPARES);
5558 		spa_load_spares(spa);
5559 		spa->spa_spares.sav_sync = B_TRUE;
5560 	}
5561 
5562 	if (nl2cache != 0) {
5563 		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
5564 		    ZPOOL_CONFIG_L2CACHE);
5565 		spa_load_l2cache(spa);
5566 		spa->spa_l2cache.sav_sync = B_TRUE;
5567 	}
5568 
5569 	/*
5570 	 * We have to be careful when adding new vdevs to an existing pool.
5571 	 * If other threads start allocating from these vdevs before we
5572 	 * sync the config cache, and we lose power, then upon reboot we may
5573 	 * fail to open the pool because there are DVAs that the config cache
5574 	 * can't translate.  Therefore, we first add the vdevs without
5575 	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
5576 	 * and then let spa_config_update() initialize the new metaslabs.
5577 	 *
5578 	 * spa_load() checks for added-but-not-initialized vdevs, so that
5579 	 * if we lose power at any point in this sequence, the remaining
5580 	 * steps will be completed the next time we load the pool.
5581 	 */
5582 	(void) spa_vdev_exit(spa, vd, txg, 0);
5583 
5584 	mutex_enter(&spa_namespace_lock);
5585 	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
5586 	spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
5587 	mutex_exit(&spa_namespace_lock);
5588 
5589 	return (0);
5590 }
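
/*
 * Worked example for the vdev-id assignment above, assuming a hypothetical
 * root vdev whose children are [disk0, hole, disk2] (the hole left behind
 * by an earlier removal): the first new top-level vdev frees the hole and
 * takes over id 1; a second new top-level vdev finds no hole and is
 * appended at id 3.
 */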
5591 
5592 /*
5593  * Attach a device to a mirror.  The arguments are the guid of any device
5594  * in the mirror, and the nvroot for the new device.  If the guid specifies
5595  * a device that is not mirrored, we automatically insert the mirror vdev.
5596  *
5597  * If 'replacing' is specified, the new device is intended to replace the
5598  * existing device; in this case the two devices are made into their own
5599  * mirror using the 'replacing' vdev, which is functionally identical to
5600  * the mirror vdev (it actually reuses all the same ops) but has a few
5601  * extra rules: you can't attach to it after it's been created, and upon
5602  * completion of resilvering, the first disk (the one being replaced)
5603  * is automatically detached.
5604  */
5605 int
5606 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
5607 {
5608 	uint64_t txg, dtl_max_txg;
5609 	vdev_t *rvd = spa->spa_root_vdev;
5610 	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
5611 	vdev_ops_t *pvops;
5612 	char *oldvdpath, *newvdpath;
5613 	int newvd_isspare;
5614 	int error;
5615 
5616 	ASSERT(spa_writeable(spa));
5617 
5618 	txg = spa_vdev_enter(spa);
5619 
5620 	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
5621 
5622 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5623 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
5624 		error = (spa_has_checkpoint(spa)) ?
5625 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
5626 		return (spa_vdev_exit(spa, NULL, txg, error));
5627 	}
5628 
5629 	if (spa->spa_vdev_removal != NULL ||
5630 	    spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
5631 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5632 	}
5633 
5634 	if (oldvd == NULL)
5635 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
5636 
5637 	if (!oldvd->vdev_ops->vdev_op_leaf)
5638 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5639 
5640 	pvd = oldvd->vdev_parent;
5641 
5642 	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
5643 	    VDEV_ALLOC_ATTACH)) != 0)
5644 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
5645 
5646 	if (newrootvd->vdev_children != 1)
5647 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
5648 
5649 	newvd = newrootvd->vdev_child[0];
5650 
5651 	if (!newvd->vdev_ops->vdev_op_leaf)
5652 		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
5653 
5654 	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
5655 		return (spa_vdev_exit(spa, newrootvd, txg, error));
5656 
5657 	/*
5658 	 * Spares can't replace logs
5659 	 */
5660 	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
5661 		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
5662 
5663 	if (!replacing) {
5664 		/*
5665 		 * For attach, the only allowable parent is a mirror or the root
5666 		 * vdev.
5667 		 */
5668 		if (pvd->vdev_ops != &vdev_mirror_ops &&
5669 		    pvd->vdev_ops != &vdev_root_ops)
5670 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
5671 
5672 		pvops = &vdev_mirror_ops;
5673 	} else {
5674 		/*
5675 		 * Active hot spares can only be replaced by inactive hot
5676 		 * spares.
5677 		 */
5678 		if (pvd->vdev_ops == &vdev_spare_ops &&
5679 		    oldvd->vdev_isspare &&
5680 		    !spa_has_spare(spa, newvd->vdev_guid))
5681 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
5682 
5683 		/*
5684 		 * If the source is a hot spare, and the parent isn't already a
5685 		 * spare, then we want to create a new hot spare.  Otherwise, we
5686 		 * want to create a replacing vdev.  The user is not allowed to
5687 		 * attach to a spared vdev child unless the 'isspare' state is
5688 		 * the same (spare replaces spare, non-spare replaces
5689 		 * non-spare).
5690 		 */
5691 		if (pvd->vdev_ops == &vdev_replacing_ops &&
5692 		    spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
5693 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
5694 		} else if (pvd->vdev_ops == &vdev_spare_ops &&
5695 		    newvd->vdev_isspare != oldvd->vdev_isspare) {
5696 			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
5697 		}
5698 
5699 		if (newvd->vdev_isspare)
5700 			pvops = &vdev_spare_ops;
5701 		else
5702 			pvops = &vdev_replacing_ops;
5703 	}
5704 
5705 	/*
5706 	 * Make sure the new device is big enough.
5707 	 */
5708 	if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
5709 		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
5710 
5711 	/*
5712 	 * The new device cannot have a higher alignment requirement
5713 	 * than the top-level vdev.
5714 	 */
5715 	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
5716 		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
5717 
5718 	/*
5719 	 * If this is an in-place replacement, update oldvd's path and devid
5720 	 * to make it distinguishable from newvd, and unopenable from now on.
5721 	 */
5722 	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
5723 		spa_strfree(oldvd->vdev_path);
5724 		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
5725 		    KM_SLEEP);
5726 		(void) sprintf(oldvd->vdev_path, "%s/%s",
5727 		    newvd->vdev_path, "old");
5728 		if (oldvd->vdev_devid != NULL) {
5729 			spa_strfree(oldvd->vdev_devid);
5730 			oldvd->vdev_devid = NULL;
5731 		}
5732 	}
5733 
5734 	/* mark the device being resilvered */
5735 	newvd->vdev_resilver_txg = txg;
5736 
5737 	/*
5738 	 * If the parent is not a mirror, or if we're replacing, insert the new
5739 	 * mirror/replacing/spare vdev above oldvd.
5740 	 */
5741 	if (pvd->vdev_ops != pvops)
5742 		pvd = vdev_add_parent(oldvd, pvops);
5743 
5744 	ASSERT(pvd->vdev_top->vdev_parent == rvd);
5745 	ASSERT(pvd->vdev_ops == pvops);
5746 	ASSERT(oldvd->vdev_parent == pvd);
5747 
5748 	/*
5749 	 * Extract the new device from its root and add it to pvd.
5750 	 */
5751 	vdev_remove_child(newrootvd, newvd);
5752 	newvd->vdev_id = pvd->vdev_children;
5753 	newvd->vdev_crtxg = oldvd->vdev_crtxg;
5754 	vdev_add_child(pvd, newvd);
5755 
5756 	tvd = newvd->vdev_top;
5757 	ASSERT(pvd->vdev_top == tvd);
5758 	ASSERT(tvd->vdev_parent == rvd);
5759 
5760 	vdev_config_dirty(tvd);
5761 
5762 	/*
5763 	 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
5764 	 * for any dmu_sync-ed blocks.  It will propagate upward when
5765 	 * spa_vdev_exit() calls vdev_dtl_reassess().
5766 	 */
5767 	dtl_max_txg = txg + TXG_CONCURRENT_STATES;
5768 
5769 	vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
5770 	    dtl_max_txg - TXG_INITIAL);
5771 
5772 	if (newvd->vdev_isspare) {
5773 		spa_spare_activate(newvd);
5774 		spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
5775 	}
5776 
5777 	oldvdpath = spa_strdup(oldvd->vdev_path);
5778 	newvdpath = spa_strdup(newvd->vdev_path);
5779 	newvd_isspare = newvd->vdev_isspare;
5780 
5781 	/*
5782 	 * Mark newvd's DTL dirty in this txg.
5783 	 */
5784 	vdev_dirty(tvd, VDD_DTL, newvd, txg);
5785 
5786 	/*
5787 	 * Schedule the resilver to restart in the future. We do this to
5788 	 * ensure that dmu_sync-ed blocks have been stitched into the
5789 	 * respective datasets.
5790 	 */
5791 	dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
5792 
5793 	if (spa->spa_bootfs)
5794 		spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
5795 
5796 	spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
5797 
5798 	/*
5799 	 * Commit the config
5800 	 */
5801 	(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
5802 
5803 	spa_history_log_internal(spa, "vdev attach", NULL,
5804 	    "%s vdev=%s %s vdev=%s",
5805 	    replacing && newvd_isspare ? "spare in" :
5806 	    replacing ? "replace" : "attach", newvdpath,
5807 	    replacing ? "for" : "to", oldvdpath);
5808 
5809 	spa_strfree(oldvdpath);
5810 	spa_strfree(newvdpath);
5811 
5812 	return (0);
5813 }
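
/*
 * For example, attaching a new disk N to a plain top-level disk D turns the
 * tree from root->D into root->mirror(D, N).  With 'replacing' set, the
 * result is root->replacing(D, N) instead, and D is detached automatically
 * once N has finished resilvering.
 */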
5814 
5815 /*
5816  * Detach a device from a mirror or replacing vdev.
5817  *
5818  * If 'replace_done' is specified, only detach if the parent
5819  * is a replacing or spare vdev.
5820  */
5821 int
5822 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
5823 {
5824 	uint64_t txg;
5825 	int error;
5826 	vdev_t *rvd = spa->spa_root_vdev;
5827 	vdev_t *vd, *pvd, *cvd, *tvd;
5828 	boolean_t unspare = B_FALSE;
5829 	uint64_t unspare_guid = 0;
5830 	char *vdpath;
5831 
5832 	ASSERT(spa_writeable(spa));
5833 
5834 	txg = spa_vdev_enter(spa);
5835 
5836 	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
5837 
5838 	/*
5839 	 * Besides being called directly from userland through the
5840 	 * ioctl interface, spa_vdev_detach() can potentially be called
5841 	 * at the end of spa_vdev_resilver_done().
5842 	 *
5843 	 * In the regular case, when we have a checkpoint this shouldn't
5844 	 * happen, as we never empty the DTLs of a vdev during the scrub
5845 	 * [see comment in dsl_scan_done()].  Thus spa_vdev_resilver_done()
5846 	 * should never get here when we have a checkpoint.
5847 	 *
5848 	 * That said, even if we checkpoint the pool exactly as
5849 	 * spa_vdev_resilver_done() calls this function, everything
5850 	 * should be fine, as the resilver will return right away.
5851 	 */
5852 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
5853 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
5854 		error = (spa_has_checkpoint(spa)) ?
5855 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
5856 		return (spa_vdev_exit(spa, NULL, txg, error));
5857 	}
5858 
5859 	if (vd == NULL)
5860 		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
5861 
5862 	if (!vd->vdev_ops->vdev_op_leaf)
5863 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5864 
5865 	pvd = vd->vdev_parent;
5866 
5867 	/*
5868 	 * If the parent/child relationship is not as expected, don't do it.
5869 	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
5870 	 * vdev that's replacing B with C.  The user's intent in replacing
5871 	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
5872 	 * the replace by detaching C, the expected behavior is to end up
5873 	 * M(A,B).  But suppose that right after deciding to detach C,
5874 	 * the replacement of B completes.  We would have M(A,C), and then
5875 	 * ask to detach C, which would leave us with just A -- not what
5876 	 * the user wanted.  To prevent this, we make sure that the
5877 	 * parent/child relationship hasn't changed -- in this example,
5878 	 * that C's parent is still the replacing vdev R.
5879 	 */
5880 	if (pvd->vdev_guid != pguid && pguid != 0)
5881 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5882 
5883 	/*
5884 	 * Only 'replacing' or 'spare' vdevs can be replaced.
5885 	 */
5886 	if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
5887 	    pvd->vdev_ops != &vdev_spare_ops)
5888 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5889 
5890 	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
5891 	    spa_version(spa) >= SPA_VERSION_SPARES);
5892 
5893 	/*
5894 	 * Only mirror, replacing, and spare vdevs support detach.
5895 	 */
5896 	if (pvd->vdev_ops != &vdev_replacing_ops &&
5897 	    pvd->vdev_ops != &vdev_mirror_ops &&
5898 	    pvd->vdev_ops != &vdev_spare_ops)
5899 		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
5900 
5901 	/*
5902 	 * If this device has the only valid copy of some data,
5903 	 * we cannot safely detach it.
5904 	 */
5905 	if (vdev_dtl_required(vd))
5906 		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
5907 
5908 	ASSERT(pvd->vdev_children >= 2);
5909 
5910 	/*
5911 	 * If we are detaching the second disk from a replacing vdev, then
5912 	 * check to see if we changed the original vdev's path to have "/old"
5913 	 * at the end in spa_vdev_attach().  If so, undo that change now.
5914 	 */
5915 	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
5916 	    vd->vdev_path != NULL) {
5917 		size_t len = strlen(vd->vdev_path);
5918 
5919 		for (int c = 0; c < pvd->vdev_children; c++) {
5920 			cvd = pvd->vdev_child[c];
5921 
5922 			if (cvd == vd || cvd->vdev_path == NULL)
5923 				continue;
5924 
5925 			if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
5926 			    strcmp(cvd->vdev_path + len, "/old") == 0) {
5927 				spa_strfree(cvd->vdev_path);
5928 				cvd->vdev_path = spa_strdup(vd->vdev_path);
5929 				break;
5930 			}
5931 		}
5932 	}
5933 
5934 	/*
5935 	 * If we are detaching the original disk from a spare, then it implies
5936 	 * that the spare should become a real disk, and be removed from the
5937 	 * active spare list for the pool.
5938 	 */
5939 	if (pvd->vdev_ops == &vdev_spare_ops &&
5940 	    vd->vdev_id == 0 &&
5941 	    pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
5942 		unspare = B_TRUE;
5943 
5944 	/*
5945 	 * Erase the disk labels so the disk can be used for other things.
5946 	 * This must be done after all other error cases are handled,
5947 	 * but before we disembowel vd (so we can still do I/O to it).
5948 	 * But if we can't do it, don't treat the error as fatal --
5949 	 * it may be that the unwritability of the disk is the reason
5950 	 * it's being detached!
5951 	 */
5952 	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
5953 
5954 	/*
5955 	 * Remove vd from its parent and compact the parent's children.
5956 	 */
5957 	vdev_remove_child(pvd, vd);
5958 	vdev_compact_children(pvd);
5959 
5960 	/*
5961 	 * Remember one of the remaining children so we can get tvd below.
5962 	 */
5963 	cvd = pvd->vdev_child[pvd->vdev_children - 1];
5964 
5965 	/*
5966 	 * If we need to remove the remaining child from the list of hot spares,
5967 	 * do it now, marking the vdev as no longer a spare in the process.
5968 	 * We must do this before vdev_remove_parent(), because that can
5969 	 * change the GUID if it creates a new toplevel GUID.  For a similar
5970 	 * reason, we must remove the spare now, in the same txg as the detach;
5971 	 * otherwise someone could attach a new sibling, change the GUID, and
5972 	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
5973 	 */
5974 	if (unspare) {
5975 		ASSERT(cvd->vdev_isspare);
5976 		spa_spare_remove(cvd);
5977 		unspare_guid = cvd->vdev_guid;
5978 		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
5979 		cvd->vdev_unspare = B_TRUE;
5980 	}
5981 
5982 	/*
5983 	 * If the parent mirror/replacing vdev only has one child,
5984 	 * the parent is no longer needed.  Remove it from the tree.
5985 	 */
5986 	if (pvd->vdev_children == 1) {
5987 		if (pvd->vdev_ops == &vdev_spare_ops)
5988 			cvd->vdev_unspare = B_FALSE;
5989 		vdev_remove_parent(cvd);
5990 	}
5991 
5992 
5993 	/*
5994 	 * We don't set tvd until now because the parent we just removed
5995 	 * may have been the previous top-level vdev.
5996 	 */
5997 	tvd = cvd->vdev_top;
5998 	ASSERT(tvd->vdev_parent == rvd);
5999 
6000 	/*
6001 	 * Reevaluate the parent vdev state.
6002 	 */
6003 	vdev_propagate_state(cvd);
6004 
6005 	/*
6006 	 * If the 'autoexpand' property is set on the pool then automatically
6007 	 * try to expand the size of the pool. For example if the device we
6008 	 * just detached was smaller than the others, it may be possible to
6009 	 * add metaslabs (i.e. grow the pool). We need to reopen the vdev
6010 	 * first so that we can obtain the updated sizes of the leaf vdevs.
6011 	 */
6012 	if (spa->spa_autoexpand) {
6013 		vdev_reopen(tvd);
6014 		vdev_expand(tvd, txg);
6015 	}
6016 
6017 	vdev_config_dirty(tvd);
6018 
6019 	/*
6020 	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
6021 	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
6022 	 * But first make sure we're not on any *other* txg's DTL list, to
6023 	 * prevent vd from being accessed after it's freed.
6024 	 */
6025 	vdpath = spa_strdup(vd->vdev_path);
6026 	for (int t = 0; t < TXG_SIZE; t++)
6027 		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
6028 	vd->vdev_detached = B_TRUE;
6029 	vdev_dirty(tvd, VDD_DTL, vd, txg);
6030 
6031 	spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
6032 
6033 	/* hang on to the spa before we release the lock */
6034 	spa_open_ref(spa, FTAG);
6035 
6036 	error = spa_vdev_exit(spa, vd, txg, 0);
6037 
6038 	spa_history_log_internal(spa, "detach", NULL,
6039 	    "vdev=%s", vdpath);
6040 	spa_strfree(vdpath);
6041 
6042 	/*
6043 	 * If this was the removal of the original device in a hot spare vdev,
6044 	 * then we want to go through and remove the device from the hot spare
6045 	 * list of every other pool.
6046 	 */
6047 	if (unspare) {
6048 		spa_t *altspa = NULL;
6049 
6050 		mutex_enter(&spa_namespace_lock);
6051 		while ((altspa = spa_next(altspa)) != NULL) {
6052 			if (altspa->spa_state != POOL_STATE_ACTIVE ||
6053 			    altspa == spa)
6054 				continue;
6055 
6056 			spa_open_ref(altspa, FTAG);
6057 			mutex_exit(&spa_namespace_lock);
6058 			(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
6059 			mutex_enter(&spa_namespace_lock);
6060 			spa_close(altspa, FTAG);
6061 		}
6062 		mutex_exit(&spa_namespace_lock);
6063 
6064 		/* search the rest of the vdevs for spares to remove */
6065 		spa_vdev_resilver_done(spa);
6066 	}
6067 
6068 	/* all done with the spa; OK to release */
6069 	mutex_enter(&spa_namespace_lock);
6070 	spa_close(spa, FTAG);
6071 	mutex_exit(&spa_namespace_lock);
6072 
6073 	return (error);
6074 }
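
/*
 * For example, a hypothetical caller canceling an in-progress replacement
 * would detach the new disk by guid and pass the replacing vdev's guid as
 * 'pguid', so the detach fails with EBUSY if the topology changed
 * underneath it ('newvd_guid' and 'replacing_guid' are placeholders):
 *
 *	error = spa_vdev_detach(spa, newvd_guid, replacing_guid, B_FALSE);
 */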
6075 
6076 /*
6077  * Split a set of devices from their mirrors, and create a new pool from them.
6078  */
6079 int
6080 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
6081     nvlist_t *props, boolean_t exp)
6082 {
6083 	int error = 0;
6084 	uint64_t txg, *glist;
6085 	spa_t *newspa;
6086 	uint_t c, children, lastlog;
6087 	nvlist_t **child, *nvl, *tmp;
6088 	dmu_tx_t *tx;
6089 	char *altroot = NULL;
6090 	vdev_t *rvd, **vml = NULL;			/* vdev modify list */
6091 	boolean_t activate_slog;
6092 
6093 	ASSERT(spa_writeable(spa));
6094 
6095 	txg = spa_vdev_enter(spa);
6096 
6097 	ASSERT(MUTEX_HELD(&spa_namespace_lock));
6098 	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
6099 		error = (spa_has_checkpoint(spa)) ?
6100 		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
6101 		return (spa_vdev_exit(spa, NULL, txg, error));
6102 	}
6103 
6104 	/* clear the log and flush everything up to now */
6105 	activate_slog = spa_passivate_log(spa);
6106 	(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
6107 	error = spa_reset_logs(spa);
6108 	txg = spa_vdev_config_enter(spa);
6109 
6110 	if (activate_slog)
6111 		spa_activate_log(spa);
6112 
6113 	if (error != 0)
6114 		return (spa_vdev_exit(spa, NULL, txg, error));
6115 
6116 	/* check new spa name before going any further */
6117 	if (spa_lookup(newname) != NULL)
6118 		return (spa_vdev_exit(spa, NULL, txg, EEXIST));
6119 
6120 	/*
6121 	 * scan through all the children to ensure they're all mirrors
6122 	 */
6123 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
6124 	    nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
6125 	    &children) != 0)
6126 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6127 
6128 	/* first, check to ensure we've got the right child count */
6129 	rvd = spa->spa_root_vdev;
6130 	lastlog = 0;
6131 	for (c = 0; c < rvd->vdev_children; c++) {
6132 		vdev_t *vd = rvd->vdev_child[c];
6133 
6134 		/* don't count the holes & logs as children */
6135 		if (vd->vdev_islog || !vdev_is_concrete(vd)) {
6136 			if (lastlog == 0)
6137 				lastlog = c;
6138 			continue;
6139 		}
6140 
6141 		lastlog = 0;
6142 	}
6143 	if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
6144 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6145 
6146 	/* next, ensure no spare or cache devices are part of the split */
6147 	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
6148 	    nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
6149 		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
6150 
6151 	vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
6152 	glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
6153 
6154 	/* then, loop over each vdev and validate it */
6155 	for (c = 0; c < children; c++) {
6156 		uint64_t is_hole = 0;
6157 
6158 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
6159 		    &is_hole);
6160 
6161 		if (is_hole != 0) {
6162 			if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
6163 			    spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
6164 				continue;
6165 			} else {
6166 				error = SET_ERROR(EINVAL);
6167 				break;
6168 			}
6169 		}
6170 
6171 		/* which disk is going to be split? */
6172 		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
6173 		    &glist[c]) != 0) {
6174 			error = SET_ERROR(EINVAL);
6175 			break;
6176 		}
6177 
6178 		/* look it up in the spa */
6179 		vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
6180 		if (vml[c] == NULL) {
6181 			error = SET_ERROR(ENODEV);
6182 			break;
6183 		}
6184 
6185 		/* make sure there's nothing stopping the split */
6186 		if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
6187 		    vml[c]->vdev_islog ||
6188 		    !vdev_is_concrete(vml[c]) ||
6189 		    vml[c]->vdev_isspare ||
6190 		    vml[c]->vdev_isl2cache ||
6191 		    !vdev_writeable(vml[c]) ||
6192 		    vml[c]->vdev_children != 0 ||
6193 		    vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
6194 		    c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
6195 			error = SET_ERROR(EINVAL);
6196 			break;
6197 		}
6198 
6199 		if (vdev_dtl_required(vml[c])) {
6200 			error = SET_ERROR(EBUSY);
6201 			break;
6202 		}
6203 
6204 		/* we need certain info from the top level */
6205 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
6206 		    vml[c]->vdev_top->vdev_ms_array) == 0);
6207 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
6208 		    vml[c]->vdev_top->vdev_ms_shift) == 0);
6209 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
6210 		    vml[c]->vdev_top->vdev_asize) == 0);
6211 		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
6212 		    vml[c]->vdev_top->vdev_ashift) == 0);
6213 
6214 		/* transfer per-vdev ZAPs */
6215 		ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
6216 		VERIFY0(nvlist_add_uint64(child[c],
6217 		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
6218 
6219 		ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
6220 		VERIFY0(nvlist_add_uint64(child[c],
6221 		    ZPOOL_CONFIG_VDEV_TOP_ZAP,
6222 		    vml[c]->vdev_parent->vdev_top_zap));
6223 	}
6224 
6225 	if (error != 0) {
6226 		kmem_free(vml, children * sizeof (vdev_t *));
6227 		kmem_free(glist, children * sizeof (uint64_t));
6228 		return (spa_vdev_exit(spa, NULL, txg, error));
6229 	}
6230 
6231 	/* stop writers from using the disks */
6232 	for (c = 0; c < children; c++) {
6233 		if (vml[c] != NULL)
6234 			vml[c]->vdev_offline = B_TRUE;
6235 	}
6236 	vdev_reopen(spa->spa_root_vdev);
6237 
6238 	/*
6239 	 * Temporarily record the splitting vdevs in the spa config.  This
6240 	 * will disappear once the config is regenerated.
6241 	 */
6242 	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6243 	VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
6244 	    glist, children) == 0);
6245 	kmem_free(glist, children * sizeof (uint64_t));
6246 
6247 	mutex_enter(&spa->spa_props_lock);
6248 	VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
6249 	    nvl) == 0);
6250 	mutex_exit(&spa->spa_props_lock);
6251 	spa->spa_config_splitting = nvl;
6252 	vdev_config_dirty(spa->spa_root_vdev);
6253 
6254 	/* configure and create the new pool */
6255 	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
6256 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
6257 	    exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
6258 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
6259 	    spa_version(spa)) == 0);
6260 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
6261 	    spa->spa_config_txg) == 0);
6262 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
6263 	    spa_generate_guid(NULL)) == 0);
6264 	VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
6265 	(void) nvlist_lookup_string(props,
6266 	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
6267 
6268 	/* add the new pool to the namespace */
6269 	newspa = spa_add(newname, config, altroot);
6270 	newspa->spa_avz_action = AVZ_ACTION_REBUILD;
6271 	newspa->spa_config_txg = spa->spa_config_txg;
6272 	spa_set_log_state(newspa, SPA_LOG_CLEAR);
6273 
6274 	/* release the spa config lock, retaining the namespace lock */
6275 	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
6276 
6277 	if (zio_injection_enabled)
6278 		zio_handle_panic_injection(spa, FTAG, 1);
6279 
6280 	spa_activate(newspa, spa_mode_global);
6281 	spa_async_suspend(newspa);
6282 
6283 	newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
6284 
6285 	/* create the new pool from the disks of the original pool */
6286 	error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
6287 	if (error)
6288 		goto out;
6289 
6290 	/* if that worked, generate a real config for the new pool */
6291 	if (newspa->spa_root_vdev != NULL) {
6292 		VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
6293 		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
6294 		VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
6295 		    ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
6296 		spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
6297 		    B_TRUE));
6298 	}
6299 
6300 	/* set the props */
6301 	if (props != NULL) {
6302 		spa_configfile_set(newspa, props, B_FALSE);
6303 		error = spa_prop_set(newspa, props);
6304 		if (error)
6305 			goto out;
6306 	}
6307 
6308 	/* flush everything */
6309 	txg = spa_vdev_config_enter(newspa);
6310 	vdev_config_dirty(newspa->spa_root_vdev);
6311 	(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
6312 
6313 	if (zio_injection_enabled)
6314 		zio_handle_panic_injection(spa, FTAG, 2);
6315 
6316 	spa_async_resume(newspa);
6317 
6318 	/* finally, update the original pool's config */
6319 	txg = spa_vdev_config_enter(spa);
6320 	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
6321 	error = dmu_tx_assign(tx, TXG_WAIT);
6322 	if (error != 0)
6323 		dmu_tx_abort(tx);
6324 	for (c = 0; c < children; c++) {
6325 		if (vml[c] != NULL) {
6326 			vdev_split(vml[c]);
6327 			if (error == 0)
6328 				spa_history_log_internal(spa, "detach", tx,
6329 				    "vdev=%s", vml[c]->vdev_path);
6330 
6331 			vdev_free(vml[c]);
6332 		}
6333 	}
6334 	spa->spa_avz_action = AVZ_ACTION_REBUILD;
6335 	vdev_config_dirty(spa->spa_root_vdev);
6336 	spa->spa_config_splitting = NULL;
6337 	nvlist_free(nvl);
6338 	if (error == 0)
6339 		dmu_tx_commit(tx);
6340 	(void) spa_vdev_exit(spa, NULL, txg, 0);
6341 
6342 	if (zio_injection_enabled)
6343 		zio_handle_panic_injection(spa, FTAG, 3);
6344 
6345 	/* split is complete; log a history record */
6346 	spa_history_log_internal(newspa, "split", NULL,
6347 	    "from pool %s", spa_name(spa));
6348 
6349 	kmem_free(vml, children * sizeof (vdev_t *));
6350 
6351 	/* if we're not going to mount the filesystems in userland, export */
6352 	if (exp)
6353 		error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
6354 		    B_FALSE, B_FALSE);
6355 
6356 	return (error);
6357 
6358 out:
6359 	spa_unload(newspa);
6360 	spa_deactivate(newspa);
6361 	spa_remove(newspa);
6362 
6363 	txg = spa_vdev_config_enter(spa);
6364 
6365 	/* re-online all offlined disks */
6366 	for (c = 0; c < children; c++) {
6367 		if (vml[c] != NULL)
6368 			vml[c]->vdev_offline = B_FALSE;
6369 	}
6370 	vdev_reopen(spa->spa_root_vdev);
6371 
6372 	nvlist_free(spa->spa_config_splitting);
6373 	spa->spa_config_splitting = NULL;
6374 	(void) spa_vdev_exit(spa, NULL, txg, error);
6375 
6376 	kmem_free(vml, children * sizeof (vdev_t *));
6377 	return (error);
6378 }
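
/*
 * For example, splitting a pool built from two top-level 2-way mirrors
 * requires a 'config' whose ZPOOL_CONFIG_VDEV_TREE lists exactly one
 * healthy leaf from each mirror, in top-level order.  Those two leaves are
 * split off and become the single-disk top-level vdevs of the new pool
 * 'newname'; with 'exp' set, the new pool is immediately exported.
 */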
6379 
6380 /*
6381  * Find any device that's done replacing, or a vdev marked 'unspare' that's
6382  * currently spared, so we can detach it.
6383  */
6384 static vdev_t *
6385 spa_vdev_resilver_done_hunt(vdev_t *vd)
6386 {
6387 	vdev_t *newvd, *oldvd;
6388 
6389 	for (int c = 0; c < vd->vdev_children; c++) {
6390 		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
6391 		if (oldvd != NULL)
6392 			return (oldvd);
6393 	}
6394 
6395 	/*
6396 	 * Check for a completed replacement.  We always consider the first
6397 	 * vdev in the list to be the oldest vdev, and the last one to be
6398 	 * the newest (see spa_vdev_attach() for how that works).  In
6399 	 * the case where the newest vdev is faulted, we will not automatically
6400 	 * remove it after a resilver completes.  This is OK as it will require
6401 	 * user intervention to determine which disk the admin wishes to keep.
6402 	 */
6403 	if (vd->vdev_ops == &vdev_replacing_ops) {
6404 		ASSERT(vd->vdev_children > 1);
6405 
6406 		newvd = vd->vdev_child[vd->vdev_children - 1];
6407 		oldvd = vd->vdev_child[0];
6408 
6409 		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
6410 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
6411 		    !vdev_dtl_required(oldvd))
6412 			return (oldvd);
6413 	}
6414 
6415 	/*
6416 	 * Check for a completed resilver with the 'unspare' flag set.
6417 	 */
6418 	if (vd->vdev_ops == &vdev_spare_ops) {
6419 		vdev_t *first = vd->vdev_child[0];
6420 		vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
6421 
6422 		if (last->vdev_unspare) {
6423 			oldvd = first;
6424 			newvd = last;
6425 		} else if (first->vdev_unspare) {
6426 			oldvd = last;
6427 			newvd = first;
6428 		} else {
6429 			oldvd = NULL;
6430 		}
6431 
6432 		if (oldvd != NULL &&
6433 		    vdev_dtl_empty(newvd, DTL_MISSING) &&
6434 		    vdev_dtl_empty(newvd, DTL_OUTAGE) &&
6435 		    !vdev_dtl_required(oldvd))
6436 			return (oldvd);
6437 
6438 		/*
6439 		 * If there is more than one spare attached to a disk,
6440 		 * and those spares are not required, then we want to
6441 		 * attempt to free them up now so that they can be used
6442 		 * by other pools.  Once we're back down to a single
6443 		 * disk+spare, we stop removing them.
6444 		 */
6445 		if (vd->vdev_children > 2) {
6446 			newvd = vd->vdev_child[1];
6447 
6448 			if (newvd->vdev_isspare && last->vdev_isspare &&
6449 			    vdev_dtl_empty(last, DTL_MISSING) &&
6450 			    vdev_dtl_empty(last, DTL_OUTAGE) &&
6451 			    !vdev_dtl_required(newvd))
6452 				return (newvd);
6453 		}
6454 	}
6455 
6456 	return (NULL);
6457 }
6458 
6459 static void
6460 spa_vdev_resilver_done(spa_t *spa)
6461 {
6462 	vdev_t *vd, *pvd, *ppvd;
6463 	uint64_t guid, sguid, pguid, ppguid;
6464 
6465 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6466 
6467 	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
6468 		pvd = vd->vdev_parent;
6469 		ppvd = pvd->vdev_parent;
6470 		guid = vd->vdev_guid;
6471 		pguid = pvd->vdev_guid;
6472 		ppguid = ppvd->vdev_guid;
6473 		sguid = 0;
6474 		/*
6475 		 * If we have just finished replacing a hot spared device, then
6476 		 * we need to detach the parent's first child (the original hot
6477 		 * spare) as well.
6478 		 */
6479 		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
6480 		    ppvd->vdev_children == 2) {
6481 			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
6482 			sguid = ppvd->vdev_child[1]->vdev_guid;
6483 		}
6484 		ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
6485 
6486 		spa_config_exit(spa, SCL_ALL, FTAG);
6487 		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
6488 			return;
6489 		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
6490 			return;
6491 		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
6492 	}
6493 
6494 	spa_config_exit(spa, SCL_ALL, FTAG);
6495 }
6496 
6497 /*
6498  * Update the stored path or FRU for this vdev.
6499  */
6500 int
6501 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
6502     boolean_t ispath)
6503 {
6504 	vdev_t *vd;
6505 	boolean_t sync = B_FALSE;
6506 
6507 	ASSERT(spa_writeable(spa));
6508 
6509 	spa_vdev_state_enter(spa, SCL_ALL);
6510 
6511 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
6512 		return (spa_vdev_state_exit(spa, NULL, ENOENT));
6513 
6514 	if (!vd->vdev_ops->vdev_op_leaf)
6515 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
6516 
6517 	if (ispath) {
6518 		if (strcmp(value, vd->vdev_path) != 0) {
6519 			spa_strfree(vd->vdev_path);
6520 			vd->vdev_path = spa_strdup(value);
6521 			sync = B_TRUE;
6522 		}
6523 	} else {
6524 		if (vd->vdev_fru == NULL) {
6525 			vd->vdev_fru = spa_strdup(value);
6526 			sync = B_TRUE;
6527 		} else if (strcmp(value, vd->vdev_fru) != 0) {
6528 			spa_strfree(vd->vdev_fru);
6529 			vd->vdev_fru = spa_strdup(value);
6530 			sync = B_TRUE;
6531 		}
6532 	}
6533 
6534 	return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
6535 }
6536 
6537 int
6538 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
6539 {
6540 	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
6541 }
6542 
6543 int
6544 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
6545 {
6546 	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
6547 }
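
/*
 * For example, a hypothetical caller updating a leaf's stored path after a
 * controller renumbering might do ('leaf_guid' and the path are
 * placeholders):
 *
 *	error = spa_vdev_setpath(spa, leaf_guid, "/dev/dsk/c2t0d0s0");
 *
 * The change is synced out only if the new value actually differs from the
 * stored one.
 */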
6548 
6549 /*
6550  * ==========================================================================
6551  * SPA Scanning
6552  * ==========================================================================
6553  */
6554 int
6555 spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
6556 {
6557 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6558 
6559 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
6560 		return (SET_ERROR(EBUSY));
6561 
6562 	return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
6563 }
6564 
6565 int
6566 spa_scan_stop(spa_t *spa)
6567 {
6568 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6569 	if (dsl_scan_resilvering(spa->spa_dsl_pool))
6570 		return (SET_ERROR(EBUSY));
6571 	return (dsl_scan_cancel(spa->spa_dsl_pool));
6572 }
6573 
6574 int
6575 spa_scan(spa_t *spa, pool_scan_func_t func)
6576 {
6577 	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
6578 
6579 	if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
6580 		return (SET_ERROR(ENOTSUP));
6581 
6582 	/*
6583 	 * If a resilver was requested, but there is no DTL on a
6584 	 * writeable leaf device, we have nothing to do.
6585 	 */
6586 	if (func == POOL_SCAN_RESILVER &&
6587 	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
6588 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
6589 		return (0);
6590 	}
6591 
6592 	return (dsl_scan(spa->spa_dsl_pool, func));
6593 }
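
/*
 * For example, a scrub request from the ioctl path boils down to roughly:
 *
 *	error = spa_scan(spa, POOL_SCAN_SCRUB);
 *
 * while stopping a scrub in progress maps to spa_scan_stop(spa), and
 * pausing or resuming one goes through spa_scrub_pause_resume().
 */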
6594 
6595 /*
6596  * ==========================================================================
6597  * SPA async task processing
6598  * ==========================================================================
6599  */
6600 
6601 static void
6602 spa_async_remove(spa_t *spa, vdev_t *vd)
6603 {
6604 	if (vd->vdev_remove_wanted) {
6605 		vd->vdev_remove_wanted = B_FALSE;
6606 		vd->vdev_delayed_close = B_FALSE;
6607 		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
6608 
6609 		/*
6610 		 * We want to clear the stats, but we don't want to do a full
6611 		 * vdev_clear() as that will cause us to throw away
6612 		 * degraded/faulted state as well as attempt to reopen the
6613 		 * device, all of which is a waste.
6614 		 */
6615 		vd->vdev_stat.vs_read_errors = 0;
6616 		vd->vdev_stat.vs_write_errors = 0;
6617 		vd->vdev_stat.vs_checksum_errors = 0;
6618 
6619 		vdev_state_dirty(vd->vdev_top);
6620 	}
6621 
6622 	for (int c = 0; c < vd->vdev_children; c++)
6623 		spa_async_remove(spa, vd->vdev_child[c]);
6624 }
6625 
6626 static void
6627 spa_async_probe(spa_t *spa, vdev_t *vd)
6628 {
6629 	if (vd->vdev_probe_wanted) {
6630 		vd->vdev_probe_wanted = B_FALSE;
6631 		vdev_reopen(vd);	/* vdev_open() does the actual probe */
6632 	}
6633 
6634 	for (int c = 0; c < vd->vdev_children; c++)
6635 		spa_async_probe(spa, vd->vdev_child[c]);
6636 }
6637 
6638 static void
6639 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
6640 {
6641 	sysevent_id_t eid;
6642 	nvlist_t *attr;
6643 	char *physpath;
6644 
6645 	if (!spa->spa_autoexpand)
6646 		return;
6647 
6648 	for (int c = 0; c < vd->vdev_children; c++) {
6649 		vdev_t *cvd = vd->vdev_child[c];
6650 		spa_async_autoexpand(spa, cvd);
6651 	}
6652 
6653 	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
6654 		return;
6655 
6656 	physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6657 	(void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
6658 
6659 	VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6660 	VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
6661 
6662 	(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
6663 	    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
6664 
6665 	nvlist_free(attr);
6666 	kmem_free(physpath, MAXPATHLEN);
6667 }
6668 
6669 static void
6670 spa_async_thread(void *arg)
6671 {
6672 	spa_t *spa = (spa_t *)arg;
6673 	int tasks;
6674 
6675 	ASSERT(spa->spa_sync_on);
6676 
6677 	mutex_enter(&spa->spa_async_lock);
6678 	tasks = spa->spa_async_tasks;
6679 	spa->spa_async_tasks = 0;
6680 	mutex_exit(&spa->spa_async_lock);
6681 
6682 	/*
6683 	 * See if the config needs to be updated.
6684 	 */
6685 	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
6686 		uint64_t old_space, new_space;
6687 
6688 		mutex_enter(&spa_namespace_lock);
6689 		old_space = metaslab_class_get_space(spa_normal_class(spa));
6690 		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
6691 		new_space = metaslab_class_get_space(spa_normal_class(spa));
6692 		mutex_exit(&spa_namespace_lock);
6693 
6694 		/*
6695 		 * If the pool grew as a result of the config update,
6696 		 * then log an internal history event.
6697 		 */
6698 		if (new_space != old_space) {
6699 			spa_history_log_internal(spa, "vdev online", NULL,
6700 			    "pool '%s' size: %llu(+%llu)",
6701 			    spa_name(spa), new_space, new_space - old_space);
6702 		}
6703 	}
6704 
6705 	/*
6706 	 * See if any devices need to be marked REMOVED.
6707 	 */
6708 	if (tasks & SPA_ASYNC_REMOVE) {
6709 		spa_vdev_state_enter(spa, SCL_NONE);
6710 		spa_async_remove(spa, spa->spa_root_vdev);
6711 		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
6712 			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
6713 		for (int i = 0; i < spa->spa_spares.sav_count; i++)
6714 			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
6715 		(void) spa_vdev_state_exit(spa, NULL, 0);
6716 	}
6717 
6718 	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
6719 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6720 		spa_async_autoexpand(spa, spa->spa_root_vdev);
6721 		spa_config_exit(spa, SCL_CONFIG, FTAG);
6722 	}
6723 
6724 	/*
6725 	 * See if any devices need to be probed.
6726 	 */
6727 	if (tasks & SPA_ASYNC_PROBE) {
6728 		spa_vdev_state_enter(spa, SCL_NONE);
6729 		spa_async_probe(spa, spa->spa_root_vdev);
6730 		(void) spa_vdev_state_exit(spa, NULL, 0);
6731 	}
6732 
6733 	/*
6734 	 * If any devices are done replacing, detach them.
6735 	 */
6736 	if (tasks & SPA_ASYNC_RESILVER_DONE)
6737 		spa_vdev_resilver_done(spa);
6738 
6739 	/*
6740 	 * Kick off a resilver.
6741 	 */
6742 	if (tasks & SPA_ASYNC_RESILVER)
6743 		dsl_resilver_restart(spa->spa_dsl_pool, 0);
6744 
6745 	/*
6746 	 * Let the world know that we're done.
6747 	 */
6748 	mutex_enter(&spa->spa_async_lock);
6749 	spa->spa_async_thread = NULL;
6750 	cv_broadcast(&spa->spa_async_cv);
6751 	mutex_exit(&spa->spa_async_lock);
6752 	thread_exit();
6753 }
6754 
6755 void
6756 spa_async_suspend(spa_t *spa)
6757 {
6758 	mutex_enter(&spa->spa_async_lock);
6759 	spa->spa_async_suspended++;
6760 	while (spa->spa_async_thread != NULL)
6761 		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
6762 	mutex_exit(&spa->spa_async_lock);
6763 
6764 	spa_vdev_remove_suspend(spa);
6765 
6766 	zthr_t *condense_thread = spa->spa_condense_zthr;
6767 	if (condense_thread != NULL && zthr_isrunning(condense_thread))
6768 		VERIFY0(zthr_cancel(condense_thread));
6769 
6770 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
6771 	if (discard_thread != NULL && zthr_isrunning(discard_thread))
6772 		VERIFY0(zthr_cancel(discard_thread));
6773 }
6774 
6775 void
6776 spa_async_resume(spa_t *spa)
6777 {
6778 	mutex_enter(&spa->spa_async_lock);
6779 	ASSERT(spa->spa_async_suspended != 0);
6780 	spa->spa_async_suspended--;
6781 	mutex_exit(&spa->spa_async_lock);
6782 	spa_restart_removal(spa);
6783 
6784 	zthr_t *condense_thread = spa->spa_condense_zthr;
6785 	if (condense_thread != NULL && !zthr_isrunning(condense_thread))
6786 		zthr_resume(condense_thread);
6787 
6788 	zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
6789 	if (discard_thread != NULL && !zthr_isrunning(discard_thread))
6790 		zthr_resume(discard_thread);
6791 }
6792 
6793 static boolean_t
6794 spa_async_tasks_pending(spa_t *spa)
6795 {
6796 	uint_t non_config_tasks;
6797 	uint_t config_task;
6798 	boolean_t config_task_suspended;
6799 
6800 	non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
6801 	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
6802 	if (spa->spa_ccw_fail_time == 0) {
6803 		config_task_suspended = B_FALSE;
6804 	} else {
6805 		config_task_suspended =
6806 		    (gethrtime() - spa->spa_ccw_fail_time) <
6807 		    (zfs_ccw_retry_interval * NANOSEC);
6808 	}
6809 
6810 	return (non_config_tasks || (config_task && !config_task_suspended));
6811 }
6812 
6813 static void
6814 spa_async_dispatch(spa_t *spa)
6815 {
6816 	mutex_enter(&spa->spa_async_lock);
6817 	if (spa_async_tasks_pending(spa) &&
6818 	    !spa->spa_async_suspended &&
6819 	    spa->spa_async_thread == NULL &&
6820 	    rootdir != NULL)
6821 		spa->spa_async_thread = thread_create(NULL, 0,
6822 		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
6823 	mutex_exit(&spa->spa_async_lock);
6824 }
6825 
6826 void
6827 spa_async_request(spa_t *spa, int task)
6828 {
6829 	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
6830 	mutex_enter(&spa->spa_async_lock);
6831 	spa->spa_async_tasks |= task;
6832 	mutex_exit(&spa->spa_async_lock);
6833 }
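
/*
 * For example, when the I/O path notices that a leaf device has gone away
 * it sets vdev_remove_wanted and then requests:
 *
 *	spa_async_request(spa, SPA_ASYNC_REMOVE);
 *
 * The work itself happens later in spa_async_thread(), once
 * spa_async_dispatch() runs (e.g. at the end of spa_sync()).
 */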
6834 
6835 /*
6836  * ==========================================================================
6837  * SPA syncing routines
6838  * ==========================================================================
6839  */
6840 
6841 static int
6842 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6843 {
6844 	bpobj_t *bpo = arg;
6845 	bpobj_enqueue(bpo, bp, tx);
6846 	return (0);
6847 }
6848 
6849 static int
6850 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
6851 {
6852 	zio_t *zio = arg;
6853 
6854 	zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
6855 	    zio->io_flags));
6856 	return (0);
6857 }
6858 
6859 /*
6860  * Note: this simple function is not inlined to make it easier to dtrace the
6861  * amount of time spent syncing frees.
6862  */
6863 static void
6864 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
6865 {
6866 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
6867 	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
6868 	VERIFY(zio_wait(zio) == 0);
6869 }
6870 
6871 /*
6872  * Note: this simple function is not inlined to make it easier to dtrace the
6873  * amount of time spent syncing deferred frees.
6874  */
6875 static void
6876 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
6877 {
6878 	zio_t *zio = zio_root(spa, NULL, NULL, 0);
6879 	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
6880 	    spa_free_sync_cb, zio, tx), ==, 0);
6881 	VERIFY0(zio_wait(zio));
6882 }
6883 
6884 
6885 static void
6886 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
6887 {
6888 	char *packed = NULL;
6889 	size_t bufsize;
6890 	size_t nvsize = 0;
6891 	dmu_buf_t *db;
6892 
6893 	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
6894 
6895 	/*
6896 	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
6897 	 * information.  This avoids the dmu_buf_will_dirty() path and
6898 	 * saves us a pre-read to get data we don't actually care about.
6899 	 */
6900 	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
6901 	packed = kmem_alloc(bufsize, KM_SLEEP);
6902 
6903 	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
6904 	    KM_SLEEP) == 0);
6905 	bzero(packed + nvsize, bufsize - nvsize);
6906 
6907 	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
6908 
6909 	kmem_free(packed, bufsize);
6910 
6911 	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
6912 	dmu_buf_will_dirty(db, tx);
6913 	*(uint64_t *)db->db_data = nvsize;
6914 	dmu_buf_rele(db, FTAG);
6915 }
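
/*
 * For example, a packed nvlist slightly larger than one block is padded
 * with zeros out to two full SPA_CONFIG_BLOCKSIZE blocks before the
 * dmu_write() above; the true packed size is recorded in the object's
 * bonus buffer so that readers know how much of the last block is real
 * data.
 */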
6916 
6917 static void
6918 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
6919     const char *config, const char *entry)
6920 {
6921 	nvlist_t *nvroot;
6922 	nvlist_t **list;
6923 	int i;
6924 
6925 	if (!sav->sav_sync)
6926 		return;
6927 
6928 	/*
6929 	 * Update the MOS nvlist describing the list of available devices.
6930 	 * spa_validate_aux() will have already made sure this nvlist is
6931 	 * valid and the vdevs are labeled appropriately.
6932 	 */
6933 	if (sav->sav_object == 0) {
6934 		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
6935 		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
6936 		    sizeof (uint64_t), tx);
6937 		VERIFY(zap_update(spa->spa_meta_objset,
6938 		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
6939 		    &sav->sav_object, tx) == 0);
6940 	}
6941 
6942 	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
6943 	if (sav->sav_count == 0) {
6944 		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
6945 	} else {
6946 		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
6947 		for (i = 0; i < sav->sav_count; i++)
6948 			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
6949 			    B_FALSE, VDEV_CONFIG_L2CACHE);
6950 		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
6951 		    sav->sav_count) == 0);
6952 		for (i = 0; i < sav->sav_count; i++)
6953 			nvlist_free(list[i]);
6954 		kmem_free(list, sav->sav_count * sizeof (void *));
6955 	}
6956 
6957 	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
6958 	nvlist_free(nvroot);
6959 
6960 	sav->sav_sync = B_FALSE;
6961 }
6962 
6963 /*
6964  * Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
6965  * The all-vdev ZAP must be empty.
6966  */
6967 static void
6968 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
6969 {
6970 	spa_t *spa = vd->vdev_spa;
6971 	if (vd->vdev_top_zap != 0) {
6972 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6973 		    vd->vdev_top_zap, tx));
6974 	}
6975 	if (vd->vdev_leaf_zap != 0) {
6976 		VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
6977 		    vd->vdev_leaf_zap, tx));
6978 	}
6979 	for (uint64_t i = 0; i < vd->vdev_children; i++) {
6980 		spa_avz_build(vd->vdev_child[i], avz, tx);
6981 	}
6982 }
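
/*
 * Each AVZ entry's key and first integer value are the object number of one
 * per-vdev ZAP, which is why the rebuild path in spa_sync_config_object()
 * below reads entries back via za_first_integer and probes membership with
 * zap_lookup_int().
 */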
6983 
6984 static void
6985 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
6986 {
6987 	nvlist_t *config;
6988 
6989 	/*
6990 	 * If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
6991 	 * its config may not be dirty but we still need to build per-vdev ZAPs.
6992 	 * Similarly, if the pool is being assembled (e.g. after a split), we
6993 	 * need to rebuild the AVZ although the config may not be dirty.
6994 	 */
6995 	if (list_is_empty(&spa->spa_config_dirty_list) &&
6996 	    spa->spa_avz_action == AVZ_ACTION_NONE)
6997 		return;
6998 
6999 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
7000 
7001 	ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
7002 	    spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
7003 	    spa->spa_all_vdev_zaps != 0);
7004 
7005 	if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
7006 		/* Make and build the new AVZ */
7007 		uint64_t new_avz = zap_create(spa->spa_meta_objset,
7008 		    DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
7009 		spa_avz_build(spa->spa_root_vdev, new_avz, tx);
7010 
7011 		/* Diff old AVZ with new one */
7012 		zap_cursor_t zc;
7013 		zap_attribute_t za;
7014 
7015 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
7016 		    spa->spa_all_vdev_zaps);
7017 		    zap_cursor_retrieve(&zc, &za) == 0;
7018 		    zap_cursor_advance(&zc)) {
7019 			uint64_t vdzap = za.za_first_integer;
7020 			if (zap_lookup_int(spa->spa_meta_objset, new_avz,
7021 			    vdzap) == ENOENT) {
7022 				/*
7023 				 * ZAP is listed in old AVZ but not in new one;
7024 				 * destroy it
7025 				 */
7026 				VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
7027 				    tx));
7028 			}
7029 		}
7030 
7031 		zap_cursor_fini(&zc);
7032 
7033 		/* Destroy the old AVZ */
7034 		VERIFY0(zap_destroy(spa->spa_meta_objset,
7035 		    spa->spa_all_vdev_zaps, tx));
7036 
7037 		/* Replace the old AVZ in the dir obj with the new one */
7038 		VERIFY0(zap_update(spa->spa_meta_objset,
7039 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
7040 		    sizeof (new_avz), 1, &new_avz, tx));
7041 
7042 		spa->spa_all_vdev_zaps = new_avz;
7043 	} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
7044 		zap_cursor_t zc;
7045 		zap_attribute_t za;
7046 
7047 		/* Walk through the AVZ and destroy all listed ZAPs */
7048 		for (zap_cursor_init(&zc, spa->spa_meta_objset,
7049 		    spa->spa_all_vdev_zaps);
7050 		    zap_cursor_retrieve(&zc, &za) == 0;
7051 		    zap_cursor_advance(&zc)) {
7052 			uint64_t zap = za.za_first_integer;
7053 			VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
7054 		}
7055 
7056 		zap_cursor_fini(&zc);
7057 
7058 		/* Destroy and unlink the AVZ itself */
7059 		VERIFY0(zap_destroy(spa->spa_meta_objset,
7060 		    spa->spa_all_vdev_zaps, tx));
7061 		VERIFY0(zap_remove(spa->spa_meta_objset,
7062 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
7063 		spa->spa_all_vdev_zaps = 0;
7064 	}
7065 
7066 	if (spa->spa_all_vdev_zaps == 0) {
7067 		spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
7068 		    DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
7069 		    DMU_POOL_VDEV_ZAP_MAP, tx);
7070 	}
7071 	spa->spa_avz_action = AVZ_ACTION_NONE;
7072 
7073 	/* Create ZAPs for vdevs that don't have them. */
7074 	vdev_construct_zaps(spa->spa_root_vdev, tx);
7075 
7076 	config = spa_config_generate(spa, spa->spa_root_vdev,
7077 	    dmu_tx_get_txg(tx), B_FALSE);
7078 
7079 	/*
7080 	 * If we're upgrading the spa version then make sure that
7081 	 * the config object gets updated with the correct version.
7082 	 */
7083 	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
7084 		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
7085 		    spa->spa_uberblock.ub_version);
7086 
7087 	spa_config_exit(spa, SCL_STATE, FTAG);
7088 
7089 	nvlist_free(spa->spa_config_syncing);
7090 	spa->spa_config_syncing = config;
7091 
7092 	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
7093 }
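
/*
 * The config generated here is parked in spa_config_syncing; it is published
 * to the config cache via spa_config_set() later in spa_sync(), only after
 * vdev_config_sync() has successfully committed this txg's labels and
 * uberblock.
 */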
7094 
7095 static void
7096 spa_sync_version(void *arg, dmu_tx_t *tx)
7097 {
7098 	uint64_t *versionp = arg;
7099 	uint64_t version = *versionp;
7100 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
7101 
7102 	/*
7103 	 * Setting the version is special cased when first creating the pool.
7104 	 */
7105 	ASSERT(tx->tx_txg != TXG_INITIAL);
7106 
7107 	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
7108 	ASSERT(version >= spa_version(spa));
7109 
7110 	spa->spa_uberblock.ub_version = version;
7111 	vdev_config_dirty(spa->spa_root_vdev);
7112 	spa_history_log_internal(spa, "set", tx, "version=%lld", version);
7113 }
7114 
7115 /*
7116  * Set zpool properties.
7117  */
7118 static void
7119 spa_sync_props(void *arg, dmu_tx_t *tx)
7120 {
7121 	nvlist_t *nvp = arg;
7122 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
7123 	objset_t *mos = spa->spa_meta_objset;
7124 	nvpair_t *elem = NULL;
7125 
7126 	mutex_enter(&spa->spa_props_lock);
7127 
7128 	while ((elem = nvlist_next_nvpair(nvp, elem))) {
7129 		uint64_t intval;
7130 		char *strval, *fname;
7131 		zpool_prop_t prop;
7132 		const char *propname;
7133 		zprop_type_t proptype;
7134 		spa_feature_t fid;
7135 
7136 		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
7137 		case ZPOOL_PROP_INVAL:
7138 			/*
7139 			 * We checked this earlier in spa_prop_validate().
7140 			 */
7141 			ASSERT(zpool_prop_feature(nvpair_name(elem)));
7142 
7143 			fname = strchr(nvpair_name(elem), '@') + 1;
7144 			VERIFY0(zfeature_lookup_name(fname, &fid));
7145 
7146 			spa_feature_enable(spa, fid, tx);
7147 			spa_history_log_internal(spa, "set", tx,
7148 			    "%s=enabled", nvpair_name(elem));
7149 			break;
7150 
7151 		case ZPOOL_PROP_VERSION:
7152 			intval = fnvpair_value_uint64(elem);
7153 			/*
7154 			 * The version is synced separately before other
7155 			 * properties and should be correct by now.
7156 			 */
7157 			ASSERT3U(spa_version(spa), >=, intval);
7158 			break;
7159 
7160 		case ZPOOL_PROP_ALTROOT:
7161 			/*
7162 			 * 'altroot' is a non-persistent property. It should
7163 			 * have been set temporarily at creation or import time.
7164 			 */
7165 			ASSERT(spa->spa_root != NULL);
7166 			break;
7167 
7168 		case ZPOOL_PROP_READONLY:
7169 		case ZPOOL_PROP_CACHEFILE:
7170 			/*
7171 			 * 'readonly' and 'cachefile' are also non-persistent
7172 			 * properties.
7173 			 */
7174 			break;
7175 		case ZPOOL_PROP_COMMENT:
7176 			strval = fnvpair_value_string(elem);
7177 			if (spa->spa_comment != NULL)
7178 				spa_strfree(spa->spa_comment);
7179 			spa->spa_comment = spa_strdup(strval);
7180 			/*
7181 			 * We need to dirty the configuration on all the vdevs
7182 			 * so that their labels get updated.  It's unnecessary
7183 			 * to do this for pool creation since the vdev's
7184 			 * configuration has already been dirtied.
7185 			 */
7186 			if (tx->tx_txg != TXG_INITIAL)
7187 				vdev_config_dirty(spa->spa_root_vdev);
7188 			spa_history_log_internal(spa, "set", tx,
7189 			    "%s=%s", nvpair_name(elem), strval);
7190 			break;
7191 		default:
7192 			/*
7193 			 * Set pool property values in the poolprops mos object.
7194 			 */
7195 			if (spa->spa_pool_props_object == 0) {
7196 				spa->spa_pool_props_object =
7197 				    zap_create_link(mos, DMU_OT_POOL_PROPS,
7198 				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
7199 				    tx);
7200 			}
7201 
7202 			/* normalize the property name */
7203 			propname = zpool_prop_to_name(prop);
7204 			proptype = zpool_prop_get_type(prop);
7205 
7206 			if (nvpair_type(elem) == DATA_TYPE_STRING) {
7207 				ASSERT(proptype == PROP_TYPE_STRING);
7208 				strval = fnvpair_value_string(elem);
7209 				VERIFY0(zap_update(mos,
7210 				    spa->spa_pool_props_object, propname,
7211 				    1, strlen(strval) + 1, strval, tx));
7212 				spa_history_log_internal(spa, "set", tx,
7213 				    "%s=%s", nvpair_name(elem), strval);
7214 			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
7215 				intval = fnvpair_value_uint64(elem);
7216 
7217 				if (proptype == PROP_TYPE_INDEX) {
7218 					const char *unused;
7219 					VERIFY0(zpool_prop_index_to_string(
7220 					    prop, intval, &unused));
7221 				}
7222 				VERIFY0(zap_update(mos,
7223 				    spa->spa_pool_props_object, propname,
7224 				    8, 1, &intval, tx));
7225 				spa_history_log_internal(spa, "set", tx,
7226 				    "%s=%lld", nvpair_name(elem), intval);
7227 			} else {
7228 				ASSERT(0); /* not allowed */
7229 			}
7230 
7231 			switch (prop) {
7232 			case ZPOOL_PROP_DELEGATION:
7233 				spa->spa_delegation = intval;
7234 				break;
7235 			case ZPOOL_PROP_BOOTFS:
7236 				spa->spa_bootfs = intval;
7237 				break;
7238 			case ZPOOL_PROP_FAILUREMODE:
7239 				spa->spa_failmode = intval;
7240 				break;
7241 			case ZPOOL_PROP_AUTOEXPAND:
7242 				spa->spa_autoexpand = intval;
7243 				if (tx->tx_txg != TXG_INITIAL)
7244 					spa_async_request(spa,
7245 					    SPA_ASYNC_AUTOEXPAND);
7246 				break;
7247 			case ZPOOL_PROP_DEDUPDITTO:
7248 				spa->spa_dedup_ditto = intval;
7249 				break;
7250 			default:
7251 				break;
7252 			}
7253 		}
7254 
7255 	}
7256 
7257 	mutex_exit(&spa->spa_props_lock);
7258 }
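
/*
 * Worked example (a sketch of the calls above, not additional code): syncing
 * the nvpair produced by "zpool set autoexpand=on <pool>" takes the default
 * case, roughly:
 *
 *	intval = 1;				(index value for "on")
 *	zap_update(mos, spa->spa_pool_props_object, "autoexpand",
 *	    8, 1, &intval, tx);
 *
 * after which spa->spa_autoexpand is updated and, outside of pool creation,
 * a SPA_ASYNC_AUTOEXPAND request is queued.
 */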
7259 
7260 /*
7261  * Perform one-time upgrade on-disk changes.  spa_version() does not
7262  * reflect the new version this txg, so there must be no changes this
7263  * txg to anything that the upgrade code depends on after it executes.
7264  * Therefore this must be called after dsl_pool_sync() does the sync
7265  * tasks.
7266  */
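/*
 * Each check below compares spa_ubsync (the last uberblock actually written)
 * with spa_uberblock (the one being written this txg), so every upgrade step
 * runs exactly once: in the txg whose sync first crosses that version.
 */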
7267 static void
7268 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
7269 {
7270 	dsl_pool_t *dp = spa->spa_dsl_pool;
7271 
7272 	ASSERT(spa->spa_sync_pass == 1);
7273 
7274 	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
7275 
7276 	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
7277 	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
7278 		dsl_pool_create_origin(dp, tx);
7279 
7280 		/* Keeping the origin open increases spa_minref */
7281 		spa->spa_minref += 3;
7282 	}
7283 
7284 	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
7285 	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
7286 		dsl_pool_upgrade_clones(dp, tx);
7287 	}
7288 
7289 	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
7290 	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
7291 		dsl_pool_upgrade_dir_clones(dp, tx);
7292 
7293 		/* Keeping the freedir open increases spa_minref */
7294 		spa->spa_minref += 3;
7295 	}
7296 
7297 	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
7298 	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
7299 		spa_feature_create_zap_objects(spa, tx);
7300 	}
7301 
7302 	/*
7303 	 * The LZ4_COMPRESS feature's behavior was changed to
7304 	 * activate_on_enable when the ability to use lz4 compression for
7305 	 * metadata was added.  Old pools that have this feature enabled must
7306 	 * be upgraded to have it active.
7307 	 */
7308 	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
7309 		boolean_t lz4_en = spa_feature_is_enabled(spa,
7310 		    SPA_FEATURE_LZ4_COMPRESS);
7311 		boolean_t lz4_ac = spa_feature_is_active(spa,
7312 		    SPA_FEATURE_LZ4_COMPRESS);
7313 
7314 		if (lz4_en && !lz4_ac)
7315 			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
7316 	}
7317 
7318 	/*
7319 	 * If we haven't written the salt, do so now.  Note that the
7320 	 * feature may not be activated yet, but that's fine since
7321 	 * the presence of this ZAP entry is backwards compatible.
7322 	 */
7323 	if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
7324 	    DMU_POOL_CHECKSUM_SALT) == ENOENT) {
7325 		VERIFY0(zap_add(spa->spa_meta_objset,
7326 		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
7327 		    sizeof (spa->spa_cksum_salt.zcs_bytes),
7328 		    spa->spa_cksum_salt.zcs_bytes, tx));
7329 	}
7330 
7331 	rrw_exit(&dp->dp_config_rwlock, FTAG);
7332 }
7333 
7334 static void
7335 vdev_indirect_state_sync_verify(vdev_t *vd)
7336 {
7337 	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
7338 	vdev_indirect_births_t *vib = vd->vdev_indirect_births;
7339 
7340 	if (vd->vdev_ops == &vdev_indirect_ops) {
7341 		ASSERT(vim != NULL);
7342 		ASSERT(vib != NULL);
7343 	}
7344 
7345 	if (vdev_obsolete_sm_object(vd) != 0) {
7346 		ASSERT(vd->vdev_obsolete_sm != NULL);
7347 		ASSERT(vd->vdev_removing ||
7348 		    vd->vdev_ops == &vdev_indirect_ops);
7349 		ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
7350 		ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
7351 
7352 		ASSERT3U(vdev_obsolete_sm_object(vd), ==,
7353 		    space_map_object(vd->vdev_obsolete_sm));
7354 		ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
7355 		    space_map_allocated(vd->vdev_obsolete_sm));
7356 	}
7357 	ASSERT(vd->vdev_obsolete_segments != NULL);
7358 
7359 	/*
7360 	 * Since frees / remaps to an indirect vdev can only
7361 	 * happen in syncing context, the obsolete segments
7362 	 * tree must be empty when we start syncing.
7363 	 */
7364 	ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
7365 }
7366 
7367 /*
7368  * Sync the specified transaction group.  New blocks may be dirtied as
7369  * part of the process, so we iterate until it converges.
7370  */
7371 void
7372 spa_sync(spa_t *spa, uint64_t txg)
7373 {
7374 	dsl_pool_t *dp = spa->spa_dsl_pool;
7375 	objset_t *mos = spa->spa_meta_objset;
7376 	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
7377 	vdev_t *rvd = spa->spa_root_vdev;
7378 	vdev_t *vd;
7379 	dmu_tx_t *tx;
7380 	int error;
7381 	uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
7382 	    zfs_vdev_queue_depth_pct / 100;
7383 
7384 	VERIFY(spa_writeable(spa));
7385 
7386 	/*
7387 	 * Wait for i/os issued in open context that need to complete
7388 	 * before this txg syncs.
7389 	 */
7390 	VERIFY0(zio_wait(spa->spa_txg_zio[txg & TXG_MASK]));
7391 	spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 0);
7392 
7393 	/*
7394 	 * Lock out configuration changes.
7395 	 */
7396 	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
7397 
7398 	spa->spa_syncing_txg = txg;
7399 	spa->spa_sync_pass = 0;
7400 
7401 	mutex_enter(&spa->spa_alloc_lock);
7402 	VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
7403 	mutex_exit(&spa->spa_alloc_lock);
7404 
7405 	/*
7406 	 * If there are any pending vdev state changes, convert them
7407 	 * into config changes that go out with this transaction group.
7408 	 */
7409 	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
7410 	while (list_head(&spa->spa_state_dirty_list) != NULL) {
7411 		/*
7412 		 * We need the write lock here because, for aux vdevs,
7413 		 * calling vdev_config_dirty() modifies sav_config.
7414 		 * This is ugly and will become unnecessary when we
7415 		 * eliminate the aux vdev wart by integrating all vdevs
7416 		 * into the root vdev tree.
7417 		 */
7418 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7419 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
7420 		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
7421 			vdev_state_clean(vd);
7422 			vdev_config_dirty(vd);
7423 		}
7424 		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
7425 		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
7426 	}
7427 	spa_config_exit(spa, SCL_STATE, FTAG);
7428 
7429 	tx = dmu_tx_create_assigned(dp, txg);
7430 
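	/*
	 * Arm the sync deadman: if this sync has not completed within
	 * spa_deadman_synctime, the cyclic fires the pool's deadman handler
	 * to flag hung I/O.  It is disarmed (reprogrammed to CY_INFINITY)
	 * once the uberblock for this txg has been written below.
	 */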
7431 	spa->spa_sync_starttime = gethrtime();
7432 	VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
7433 	    spa->spa_sync_starttime + spa->spa_deadman_synctime));
7434 
7435 	/*
7436 	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
7437 	 * set spa_deflate if we have no raid-z vdevs.
7438 	 */
7439 	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
7440 	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
7441 		int i;
7442 
7443 		for (i = 0; i < rvd->vdev_children; i++) {
7444 			vd = rvd->vdev_child[i];
7445 			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
7446 				break;
7447 		}
7448 		if (i == rvd->vdev_children) {
7449 			spa->spa_deflate = TRUE;
7450 			VERIFY(0 == zap_add(spa->spa_meta_objset,
7451 			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
7452 			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
7453 		}
7454 	}
7455 
7456 	/*
7457 	 * Set the top-level vdev's max queue depth. Evaluate each
7458 	 * top-level's async write queue depth in case it changed.
7459 	 * The max queue depth will not change in the middle of syncing
7460 	 * out this txg.
7461 	 */
7462 	uint64_t queue_depth_total = 0;
7463 	for (int c = 0; c < rvd->vdev_children; c++) {
7464 		vdev_t *tvd = rvd->vdev_child[c];
7465 		metaslab_group_t *mg = tvd->vdev_mg;
7466 
7467 		if (mg == NULL || mg->mg_class != spa_normal_class(spa) ||
7468 		    !metaslab_group_initialized(mg))
7469 			continue;
7470 
7471 		/*
7472 		 * It is safe to do a lock-free check here because only async
7473 		 * allocations look at mg_max_alloc_queue_depth, and async
7474 		 * allocations all happen from spa_sync().
7475 		 */
7476 		ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
7477 		mg->mg_max_alloc_queue_depth = max_queue_depth;
7478 		queue_depth_total += mg->mg_max_alloc_queue_depth;
7479 	}
7480 	metaslab_class_t *mc = spa_normal_class(spa);
7481 	ASSERT0(refcount_count(&mc->mc_alloc_slots));
7482 	mc->mc_alloc_max_slots = queue_depth_total;
7483 	mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
7484 
7485 	ASSERT3U(mc->mc_alloc_max_slots, <=,
7486 	    max_queue_depth * rvd->vdev_children);
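
	/*
	 * For example, with the stock tunables of 10 for
	 * zfs_vdev_async_write_max_active and 1000 for
	 * zfs_vdev_queue_depth_pct, max_queue_depth works out to 100, so a
	 * pool with four eligible top-level vdevs allows up to 400 queued
	 * allocations before the allocation throttle kicks in.
	 */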
7487 
7488 	for (int c = 0; c < rvd->vdev_children; c++) {
7489 		vdev_t *vd = rvd->vdev_child[c];
7490 		vdev_indirect_state_sync_verify(vd);
7491 
7492 		if (vdev_indirect_should_condense(vd)) {
7493 			spa_condense_indirect_start_sync(vd, tx);
7494 			break;
7495 		}
7496 	}
7497 
7498 	/*
7499 	 * Iterate to convergence.
7500 	 */
7501 	do {
7502 		int pass = ++spa->spa_sync_pass;
7503 
7504 		spa_sync_config_object(spa, tx);
7505 		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
7506 		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
7507 		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
7508 		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
7509 		spa_errlog_sync(spa, txg);
7510 		dsl_pool_sync(dp, txg);
7511 
7512 		if (pass < zfs_sync_pass_deferred_free) {
7513 			spa_sync_frees(spa, free_bpl, tx);
7514 		} else {
7515 			/*
7516 			 * We cannot defer frees in pass 1, because
7517 			 * we sync the deferred frees later in pass 1.
7518 			 */
7519 			ASSERT3U(pass, >, 1);
7520 			bplist_iterate(free_bpl, bpobj_enqueue_cb,
7521 			    &spa->spa_deferred_bpobj, tx);
7522 		}
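
		/*
		 * With the default zfs_sync_pass_deferred_free of 2, only
		 * pass 1 issues frees directly; later passes enqueue them on
		 * spa_deferred_bpobj, to be freed by a future txg's call to
		 * spa_sync_deferred_frees() in its pass 1.
		 */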
7523 
7524 		ddt_sync(spa, txg);
7525 		dsl_scan_sync(dp, tx);
7526 
7527 		if (spa->spa_vdev_removal != NULL)
7528 			svr_sync(spa, tx);
7529 
7530 		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
7531 		    != NULL)
7532 			vdev_sync(vd, txg);
7533 
7534 		if (pass == 1) {
7535 			spa_sync_upgrades(spa, tx);
7536 			ASSERT3U(txg, >=,
7537 			    spa->spa_uberblock.ub_rootbp.blk_birth);
7538 			/*
7539 			 * Note: We need to check if the MOS is dirty
7540 			 * because we could have marked the MOS dirty
7541 			 * without updating the uberblock (e.g. if we
7542 			 * have sync tasks but no dirty user data).  We
7543 			 * need to check the uberblock's rootbp because
7544 			 * it is updated if we have synced out dirty
7545 			 * data (though in this case the MOS will most
7546 			 * likely also be dirty due to second order
7547 			 * effects, we don't want to rely on that here).
7548 			 */
7549 			if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
7550 			    !dmu_objset_is_dirty(mos, txg)) {
7551 				/*
7552 				 * Nothing changed on the first pass,
7553 				 * therefore this TXG is a no-op.  Avoid
7554 				 * syncing deferred frees, so that we
7555 				 * can keep this TXG as a no-op.
7556 				 */
7557 				ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
7558 				    txg));
7559 				ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7560 				ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
7561 				ASSERT(txg_list_empty(&dp->dp_early_sync_tasks,
7562 				    txg));
7563 				break;
7564 			}
7565 			spa_sync_deferred_frees(spa, tx);
7566 		}
7567 
7568 	} while (dmu_objset_is_dirty(mos, txg));
7569 
7570 	if (!list_is_empty(&spa->spa_config_dirty_list)) {
7571 		/*
7572 		 * Make sure that the number of ZAPs for all the vdevs matches
7573 		 * the number of ZAPs in the per-vdev ZAP list. This only gets
7574 		 * called if the config is dirty; otherwise there may be
7575 		 * outstanding AVZ operations that weren't completed in
7576 		 * spa_sync_config_object.
7577 		 */
7578 		uint64_t all_vdev_zap_entry_count;
7579 		ASSERT0(zap_count(spa->spa_meta_objset,
7580 		    spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
7581 		ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
7582 		    all_vdev_zap_entry_count);
7583 	}
7584 
7585 	if (spa->spa_vdev_removal != NULL) {
7586 		ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
7587 	}
7588 
7589 	/*
7590 	 * Rewrite the vdev configuration (which includes the uberblock)
7591 	 * to commit the transaction group.
7592 	 *
7593 	 * If there are no dirty vdevs, we sync the uberblock to a few
7594 	 * random top-level vdevs that are known to be visible in the
7595 	 * config cache (see spa_vdev_add() for a complete description).
7596 	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
7597 	 */
7598 	for (;;) {
7599 		/*
7600 		 * We hold SCL_STATE to prevent vdev open/close/etc.
7601 		 * while we're attempting to write the vdev labels.
7602 		 */
7603 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
7604 
7605 		if (list_is_empty(&spa->spa_config_dirty_list)) {
7606 			vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
7607 			int svdcount = 0;
7608 			int children = rvd->vdev_children;
7609 			int c0 = spa_get_random(children);
7610 
7611 			for (int c = 0; c < children; c++) {
7612 				vd = rvd->vdev_child[(c0 + c) % children];
7613 
7614 				/* Stop when revisiting the first vdev */
7615 				if (c > 0 && svd[0] == vd)
7616 					break;
7617 
7618 				if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
7619 				    !vdev_is_concrete(vd))
7620 					continue;
7621 
7622 				svd[svdcount++] = vd;
7623 				if (svdcount == SPA_SYNC_MIN_VDEVS)
7624 					break;
7625 			}
7626 			error = vdev_config_sync(svd, svdcount, txg);
7627 		} else {
7628 			error = vdev_config_sync(rvd->vdev_child,
7629 			    rvd->vdev_children, txg);
7630 		}
7631 
7632 		if (error == 0)
7633 			spa->spa_last_synced_guid = rvd->vdev_guid;
7634 
7635 		spa_config_exit(spa, SCL_STATE, FTAG);
7636 
7637 		if (error == 0)
7638 			break;
7639 		zio_suspend(spa, NULL);
7640 		zio_resume_wait(spa);
7641 	}
7642 	dmu_tx_commit(tx);
7643 
7644 	VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
7645 
7646 	/*
7647 	 * Clear the dirty config list.
7648 	 */
7649 	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
7650 		vdev_config_clean(vd);
7651 
7652 	/*
7653 	 * Now that the new config has synced transactionally,
7654 	 * let it become visible to the config cache.
7655 	 */
7656 	if (spa->spa_config_syncing != NULL) {
7657 		spa_config_set(spa, spa->spa_config_syncing);
7658 		spa->spa_config_txg = txg;
7659 		spa->spa_config_syncing = NULL;
7660 	}
7661 
7662 	dsl_pool_sync_done(dp, txg);
7663 
7664 	mutex_enter(&spa->spa_alloc_lock);
7665 	VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
7666 	mutex_exit(&spa->spa_alloc_lock);
7667 
7668 	/*
7669 	 * Update usable space statistics.
7670 	 */
7671 	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
	    != NULL)
7672 		vdev_sync_done(vd, txg);
7673 
7674 	spa_update_dspace(spa);
7675 
7676 	/*
7677 	 * It had better be the case that we didn't dirty anything
7678 	 * since vdev_config_sync().
7679 	 */
7680 	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
7681 	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
7682 	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
7683 
7684 	while (zfs_pause_spa_sync)
7685 		delay(1);
7686 
7687 	spa->spa_sync_pass = 0;
7688 
7689 	/*
7690 	 * Update the last synced uberblock here. We want to do this at
7691 	 * the end of spa_sync() so that consumers of spa_last_synced_txg()
7692 	 * will be guaranteed that all the processing associated with
7693 	 * that txg has been completed.
7694 	 */
7695 	spa->spa_ubsync = spa->spa_uberblock;
7696 	spa_config_exit(spa, SCL_CONFIG, FTAG);
7697 
7698 	spa_handle_ignored_writes(spa);
7699 
7700 	/*
7701 	 * If any async tasks have been requested, kick them off.
7702 	 */
7703 	spa_async_dispatch(spa);
7704 }
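
/*
 * spa_sync() runs in syncing context, driven once per txg by the pool's sync
 * thread; other threads request and wait for it with txg_wait_synced(), as
 * spa_sync_allpools() below illustrates.
 */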
7705 
7706 /*
7707  * Sync all pools.  We don't want to hold the namespace lock across these
7708  * operations, so we take a reference on the spa_t and drop the lock during the
7709  * sync.
7710  */
7711 void
7712 spa_sync_allpools(void)
7713 {
7714 	spa_t *spa = NULL;
7715 	mutex_enter(&spa_namespace_lock);
7716 	while ((spa = spa_next(spa)) != NULL) {
7717 		if (spa_state(spa) != POOL_STATE_ACTIVE ||
7718 		    !spa_writeable(spa) || spa_suspended(spa))
7719 			continue;
7720 		spa_open_ref(spa, FTAG);
7721 		mutex_exit(&spa_namespace_lock);
7722 		txg_wait_synced(spa_get_dsl(spa), 0);
7723 		mutex_enter(&spa_namespace_lock);
7724 		spa_close(spa, FTAG);
7725 	}
7726 	mutex_exit(&spa_namespace_lock);
7727 }
7728 
7729 /*
7730  * ==========================================================================
7731  * Miscellaneous routines
7732  * ==========================================================================
7733  */
7734 
7735 /*
7736  * Remove all pools in the system.
7737  */
7738 void
7739 spa_evict_all(void)
7740 {
7741 	spa_t *spa;
7742 
7743 	/*
7744 	 * Remove all cached state.  All pools should be closed now,
7745 	 * so every spa in the AVL tree should be unreferenced.
7746 	 */
7747 	mutex_enter(&spa_namespace_lock);
7748 	while ((spa = spa_next(NULL)) != NULL) {
7749 		/*
7750 		 * Stop async tasks.  The async thread may need to detach
7751 		 * a device that's been replaced, which requires grabbing
7752 		 * spa_namespace_lock, so we must drop it here.
7753 		 */
7754 		spa_open_ref(spa, FTAG);
7755 		mutex_exit(&spa_namespace_lock);
7756 		spa_async_suspend(spa);
7757 		mutex_enter(&spa_namespace_lock);
7758 		spa_close(spa, FTAG);
7759 
7760 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
7761 			spa_unload(spa);
7762 			spa_deactivate(spa);
7763 		}
7764 		spa_remove(spa);
7765 	}
7766 	mutex_exit(&spa_namespace_lock);
7767 }
7768 
7769 vdev_t *
7770 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
7771 {
7772 	vdev_t *vd;
7773 	int i;
7774 
7775 	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
7776 		return (vd);
7777 
7778 	if (aux) {
7779 		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
7780 			vd = spa->spa_l2cache.sav_vdevs[i];
7781 			if (vd->vdev_guid == guid)
7782 				return (vd);
7783 		}
7784 
7785 		for (i = 0; i < spa->spa_spares.sav_count; i++) {
7786 			vd = spa->spa_spares.sav_vdevs[i];
7787 			if (vd->vdev_guid == guid)
7788 				return (vd);
7789 		}
7790 	}
7791 
7792 	return (NULL);
7793 }
7794 
7795 void
7796 spa_upgrade(spa_t *spa, uint64_t version)
7797 {
7798 	ASSERT(spa_writeable(spa));
7799 
7800 	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
7801 
7802 	/*
7803 	 * This should only be called for a non-faulted pool, and since a
7804 	 * future version would result in an unopenable pool, this shouldn't be
7805 	 * possible.
7806 	 */
7807 	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
7808 	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
7809 
7810 	spa->spa_uberblock.ub_version = version;
7811 	vdev_config_dirty(spa->spa_root_vdev);
7812 
7813 	spa_config_exit(spa, SCL_ALL, FTAG);
7814 
7815 	txg_wait_synced(spa_get_dsl(spa), 0);
7816 }
7817 
7818 boolean_t
7819 spa_has_spare(spa_t *spa, uint64_t guid)
7820 {
7821 	int i;
7822 	uint64_t spareguid;
7823 	spa_aux_vdev_t *sav = &spa->spa_spares;
7824 
7825 	for (i = 0; i < sav->sav_count; i++)
7826 		if (sav->sav_vdevs[i]->vdev_guid == guid)
7827 			return (B_TRUE);
7828 
7829 	for (i = 0; i < sav->sav_npending; i++) {
7830 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
7831 		    &spareguid) == 0 && spareguid == guid)
7832 			return (B_TRUE);
7833 	}
7834 
7835 	return (B_FALSE);
7836 }
7837 
7838 /*
7839  * Check if a pool has an active shared spare device.
7840  * Note: an active spare is referenced twice, as a spare and as a replacement.
7841  */
7842 static boolean_t
7843 spa_has_active_shared_spare(spa_t *spa)
7844 {
7845 	int i, refcnt;
7846 	uint64_t pool;
7847 	spa_aux_vdev_t *sav = &spa->spa_spares;
7848 
7849 	for (i = 0; i < sav->sav_count; i++) {
7850 		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
7851 		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
7852 		    refcnt > 2)
7853 			return (B_TRUE);
7854 	}
7855 
7856 	return (B_FALSE);
7857 }
7858 
7859 sysevent_t *
7860 spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
7861 {
7862 	sysevent_t		*ev = NULL;
7863 #ifdef _KERNEL
7864 	sysevent_attr_list_t	*attr = NULL;
7865 	sysevent_value_t	value;
7866 
7867 	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
7868 	    SE_SLEEP);
7869 	ASSERT(ev != NULL);
7870 
7871 	value.value_type = SE_DATA_TYPE_STRING;
7872 	value.value.sv_string = spa_name(spa);
7873 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
7874 		goto done;
7875 
7876 	value.value_type = SE_DATA_TYPE_UINT64;
7877 	value.value.sv_uint64 = spa_guid(spa);
7878 	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
7879 		goto done;
7880 
7881 	if (vd) {
7882 		value.value_type = SE_DATA_TYPE_UINT64;
7883 		value.value.sv_uint64 = vd->vdev_guid;
7884 		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
7885 		    SE_SLEEP) != 0)
7886 			goto done;
7887 
7888 		if (vd->vdev_path) {
7889 			value.value_type = SE_DATA_TYPE_STRING;
7890 			value.value.sv_string = vd->vdev_path;
7891 			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
7892 			    &value, SE_SLEEP) != 0)
7893 				goto done;
7894 		}
7895 	}
7896 
7897 	if (hist_nvl != NULL) {
7898 		fnvlist_merge((nvlist_t *)attr, hist_nvl);
7899 	}
7900 
7901 	if (sysevent_attach_attributes(ev, attr) != 0)
7902 		goto done;
7903 	attr = NULL;
7904 
7905 done:
7906 	if (attr)
7907 		sysevent_free_attr(attr);
7908 
7909 #endif
7910 	return (ev);
7911 }
7912 
7913 void
7914 spa_event_post(sysevent_t *ev)
7915 {
7916 #ifdef _KERNEL
7917 	sysevent_id_t		eid;
7918 
7919 	(void) log_sysevent(ev, SE_SLEEP, &eid);
7920 	sysevent_free(ev);
7921 #endif
7922 }
7923 
7924 void
7925 spa_event_discard(sysevent_t *ev)
7926 {
7927 #ifdef _KERNEL
7928 	sysevent_free(ev);
7929 #endif
7930 }
7931 
7932 /*
7933  * Post a sysevent corresponding to the given event.  The 'name' must be one of
7934  * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
7935  * filled in from the spa and (optionally) the vdev and history nvl.  This
7936  * doesn't do anything in the userland libzpool, as we don't want consumers to
7937  * misinterpret ztest or zdb as real changes.
7938  */
7939 void
7940 spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
7941 {
7942 	spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
7943 }
7944