/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2019 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2017 Datto Inc.
 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/abd.h>

#ifdef	_KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)
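
/*
 * For example, ZTI_P(12, 8) expands to { ZTI_MODE_FIXED, 12, 8 }: eight
 * discrete taskqs with twelve threads each, one of which is chosen at
 * random at dispatch time (see spa_taskq_dispatch_ent() below).  ZTI_ONE
 * is shorthand for ZTI_P(1, 1), a single taskq with a single thread.
 */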

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
	{ ZTI_N(4),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* TRIM */
};
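
/*
 * Reading the READ row as an example: issue work runs on a single taskq
 * of eight threads, interrupt (done) work is spread across eight taskqs
 * of twelve threads each, and both _high slots are ZTI_NULL, so no
 * high-priority taskqs are created for reads.
 */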

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
id_t		zio_taskq_psrset_bind = PS_NONE;
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

/*
 * Report any spa_load_verify errors found, but do not fail spa_load.
 * This is used by zdb to analyze non-idle pools.
 */
boolean_t	spa_load_verify_dryrun = B_FALSE;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * For debugging purposes: print out vdev tree during pool import.
 */
boolean_t	spa_load_print_vdev_tree = B_FALSE;

/*
 * A non-zero value for zfs_max_missing_tvds means that we allow importing
 * pools with missing top-level vdevs. This is strictly intended for advanced
 * pool recovery cases since missing data is almost inevitable. Pools with
 * missing devices can only be imported read-only for safety reasons, and their
 * fail-mode will be automatically set to "continue".
 *
 * With 1 missing vdev we should be able to import the pool and mount all
 * datasets. User data that was not modified after the missing device has been
 * added should be recoverable. This means that snapshots created prior to the
 * addition of that device should be completely intact.
 *
 * With 2 missing vdevs, some datasets may fail to mount since there are
 * dataset statistics that are stored as regular metadata. Some data might be
 * recoverable if those vdevs were added recently.
 *
 * With 3 or more missing vdevs, the pool is severely damaged and MOS entries
 * may be missing entirely. Chances of data recovery are very low. Note that
 * there are also risks of performing an inadvertent rewind as we might be
 * missing all the vdevs with the latest uberblocks.
 */
uint64_t	zfs_max_missing_tvds = 0;

/*
 * The parameters below are similar to zfs_max_missing_tvds but are only
 * intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or out-dated.
 *
 * We are more tolerant for pools opened from a cachefile since we could have
 * an out-dated cachefile where a device removal was not registered.
 * We could have set the limit arbitrarily high but in the case where devices
 * are really missing we would want to return the proper error codes; we chose
 * SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
 * and we get a chance to retrieve the trusted config.
 */
uint64_t	zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
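
/*
 * With SPA_DVAS_PER_BP == 3, the limit above works out to 2: even with
 * two top-level vdevs missing, at least one DVA of each triply-stored
 * MOS block should still be readable.
 */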

/*
 * In the case where config was assembled by scanning device paths (/dev/dsk
 * by default) we are less tolerant since all the existing devices should have
 * been detected and we want spa_load to return the right error codes.
 */
uint64_t	zfs_max_missing_tvds_scan = 0;

/*
 * Interval in seconds at which to poll spare vdevs for health.
 * Setting this to zero disables spare polling.
 * Set to three hours by default.
 */
uint_t		spa_spare_poll_interval_seconds = 60 * 60 * 3;

/*
 * Debugging aid that pauses spa_sync() towards the end.
 */
boolean_t	zfs_pause_spa_sync = B_FALSE;

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size, alloc, cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;
	metaslab_class_t *mc = spa_normal_class(spa);

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(mc);
		alloc += metaslab_class_get_alloc(spa_special_class(spa));
		alloc += metaslab_class_get_alloc(spa_dedup_class(spa));

		size = metaslab_class_get_space(mc);
		size += metaslab_class_get_space(spa_special_class(spa));
		size += metaslab_class_get_space(spa_dedup_class(spa));

		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
		    spa->spa_checkpoint_info.sci_dspace, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
		    metaslab_class_fragmentation(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
		    metaslab_class_expandable_space(mc), src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools created before this version, freedir will
		 * be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
			    src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
		    SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
	}

	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MAX_SIZE, ZPROP_SRC_NONE);
	} else {
		spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
		    DNODE_MIN_SIZE, ZPROP_SRC_NONE);
	}

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more props to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds);
				if (err != 0) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPOOL_PROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
		case ZPOOL_PROP_AUTOTRIM:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_MULTIHOST:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);

			if (!error && !spa_get_hostid())
				error = SET_ERROR(ENOTSUP);

			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t propval;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				error = dmu_objset_hold(strval, FTAG, &os);
				if (error != 0)
					break;

				/*
				 * Must be ZPL, and its property settings
				 * must be supported.
				 */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &propval)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(propval)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check.  For this kernel check, we merely
				 * check ASCII apart from DEL.  Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}
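
/*
 * Illustrative call sequence (a sketch only; the real caller is the
 * zfs ioctl path, and the property chosen here is arbitrary):
 *
 *	nvlist_t *props;
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_AUTOEXPAND), 1) == 0);
 *	error = spa_prop_set(spa, props);
 *	nvlist_free(props);
 */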

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		int error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
		return (SET_ERROR(error));
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
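
/*
 * spa_change_guid() is the backend of "zpool reguid".  Because the
 * check and sync functions above run as a single synctask, the GUID
 * change is atomic with respect to spa_sync().
 */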

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
	const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
	int ret;

	ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	return (TREE_ISIGN(ret));
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly lower priority
			 * than the other taskqs.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri--;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}
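
/*
 * For example, the WRITE/ISSUE entry (ZTI_BATCH) yields a single taskq
 * named "write_issue", sized via TASKQ_THREADS_CPU_PCT to
 * zio_taskq_batch_pct (75% of the CPUs in the pset by default) and, on
 * the non-sysdc path, run at slightly reduced priority per the comment
 * above.
 */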

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}
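
/*
 * Illustrative use (a sketch modeled on the zio pipeline, not a
 * definitive caller): queueing a read's done-stage work on one of the
 * read interrupt taskqs might look like
 *
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_READ, ZIO_TASKQ_INTR,
 *	    (task_func_t *)zio_execute, zio, 0, &zio->io_tqent);
 */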

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0)  {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	for (size_t i = 0; i < TXG_SIZE; i++) {
		spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
	    offsetof(objset_t, os_evicting_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list, spa,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));

	spa_keystore_init(&spa->spa_keystore);

	/*
	 * The taskq to upgrade datasets in this pool. Currently used by
	 * feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
	 */
	spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus,
	    minclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	spa_evicting_os_wait(spa);

	if (spa->spa_upgrade_taskq) {
		taskq_destroy(spa->spa_upgrade_taskq);
		spa->spa_upgrade_taskq = NULL;
	}

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_evicting_os_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	for (size_t i = 0; i < TXG_SIZE; i++) {
		ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
		VERIFY0(zio_wait(spa->spa_txg_zio[i]));
		spa->spa_txg_zio[i] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	metaslab_class_destroy(spa->spa_special_class);
	spa->spa_special_class = NULL;

	metaslab_class_destroy(spa->spa_dedup_class);
	spa->spa_dedup_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);
	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa_keystore_fini(&spa->spa_keystore);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

static boolean_t
spa_should_flush_logs_on_unload(spa_t *spa)
{
	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
		return (B_FALSE);

	if (!spa_writeable(spa))
		return (B_FALSE);

	if (!spa->spa_sync_on)
		return (B_FALSE);

	if (spa_state(spa) != POOL_STATE_EXPORTED)
		return (B_FALSE);

	if (zfs_keep_log_spacemaps_at_export)
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * Opens a transaction that will set the flag that will instruct
 * spa_sync to attempt to flush all the metaslabs for that txg.
 */
static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);

	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
	spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);

	dmu_tx_commit(tx);
	txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
}

static void
spa_unload_log_sm_metadata(spa_t *spa)
{
	void *cookie = NULL;
	spa_log_sm_t *sls;

	while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
	    &cookie)) != NULL) {
		VERIFY0(sls->sls_mscount);
		kmem_free(sls, sizeof (spa_log_sm_t));
	}

	for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
	    e != NULL; e = list_head(&spa->spa_log_summary)) {
		VERIFY0(e->lse_mscount);
		list_remove(&spa->spa_log_summary, e);
		kmem_free(e, sizeof (log_summary_entry_t));
	}

	spa->spa_unflushed_stats.sus_nblocks = 0;
	spa->spa_unflushed_stats.sus_memused = 0;
	spa->spa_unflushed_stats.sus_blocklimit = 0;
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);

	spa_import_progress_remove(spa);
	spa_load_note(spa, "UNLOADING");

	/*
	 * If the log space map feature is enabled and the pool is getting
	 * exported (but not destroyed), we want to spend some time flushing
	 * as many metaslabs as we can in an attempt to destroy log space
	 * maps and save import time.
	 */
	if (spa_should_flush_logs_on_unload(spa))
		spa_unload_log_sm_flush_all(spa);

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	if (spa->spa_root_vdev) {
		vdev_t *root_vdev = spa->spa_root_vdev;
		vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE);
		vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
		vdev_autotrim_stop_all(spa);
	}

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * This ensures that there is no async metaslab prefetching
	 * while we attempt to unload the spa.
	 */
	if (spa->spa_root_vdev != NULL) {
		for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
			vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
			if (vc->vdev_mg != NULL)
				taskq_wait(vc->vdev_mg->mg_taskq);
		}
	}

	if (spa->spa_mmp.mmp_thread)
		mmp_thread_stop(spa);

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		for (int i = 0; i < max_ncpus; i++)
			(void) zio_wait(spa->spa_async_zio_root[i]);
		kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
		spa->spa_async_zio_root = NULL;
	}

	if (spa->spa_vdev_removal != NULL) {
		spa_vdev_removal_destroy(spa->spa_vdev_removal);
		spa->spa_vdev_removal = NULL;
	}

	if (spa->spa_condense_zthr != NULL) {
		zthr_destroy(spa->spa_condense_zthr);
		spa->spa_condense_zthr = NULL;
	}

	if (spa->spa_checkpoint_discard_zthr != NULL) {
		zthr_destroy(spa->spa_checkpoint_discard_zthr);
		spa->spa_checkpoint_discard_zthr = NULL;
	}

	spa_condense_fini(spa);

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);
	spa_unload_log_sm_metadata(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (int i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa->spa_indirect_vdevs_loaded = B_FALSE;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, spa);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

#ifndef _KERNEL
	/*
	 * zdb opens both the current state of the pool and the
	 * checkpointed state (if present), with a different spa_t.
	 *
	 * As spare vdevs are shared among open pools, we skip loading
	 * them when we load the checkpointed state of the pool.
	 */
	if (!spa_writeable(spa))
		return;
#endif

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
1667			if (!vdev_is_dead(tvd))
1668				spa_spare_activate(tvd);
1669		}
1670
1671		vd->vdev_top = vd;
1672		vd->vdev_aux = &spa->spa_spares;
1673
1674		if (vdev_open(vd) != 0)
1675			continue;
1676
1677		if (vdev_validate_aux(vd) == 0)
1678			spa_spare_add(vd);
1679	}
1680
1681	/*
1682	 * Recompute the stashed list of spares, with status information
1683	 * this time.
1684	 */
1685	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
1686	    DATA_TYPE_NVLIST_ARRAY) == 0);
1687
1688	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
1689	    KM_SLEEP);
1690	for (i = 0; i < spa->spa_spares.sav_count; i++)
1691		spares[i] = vdev_config_generate(spa,
1692		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
1693	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1694	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
1695	for (i = 0; i < spa->spa_spares.sav_count; i++)
1696		nvlist_free(spares[i]);
1697	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
1698}
1699
1700/*
1701 * Load (or re-load) the current list of vdevs describing the active l2cache for
1702 * this pool.  When this is called, we have some form of basic information in
1703 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
1704 * then re-generate a more complete list including status information.
1705 * Devices which are already active have their details maintained, and are
1706 * not re-opened.
1707 */
1708void
1709spa_load_l2cache(spa_t *spa)
1710{
1711	nvlist_t **l2cache;
1712	uint_t nl2cache;
1713	int i, j, oldnvdevs;
1714	uint64_t guid;
1715	vdev_t *vd, **oldvdevs, **newvdevs;
1716	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1717
1718#ifndef _KERNEL
1719	/*
1720	 * zdb opens both the current state of the pool and the
1721	 * checkpointed state (if present), with a different spa_t.
1722	 *
1723	 * As L2 caches are part of the ARC which is shared among open
1724	 * pools, we skip loading them when we load the checkpointed
1725	 * state of the pool.
1726	 */
1727	if (!spa_writeable(spa))
1728		return;
1729#endif
1730
1731	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1732
1733	if (sav->sav_config != NULL) {
1734		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
1735		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1736		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
1737	} else {
1738		nl2cache = 0;
1739		newvdevs = NULL;
1740	}
1741
1742	oldvdevs = sav->sav_vdevs;
1743	oldnvdevs = sav->sav_count;
1744	sav->sav_vdevs = NULL;
1745	sav->sav_count = 0;
1746
1747	/*
1748	 * Process new nvlist of vdevs.
1749	 */
1750	for (i = 0; i < nl2cache; i++) {
1751		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
1752		    &guid) == 0);
1753
1754		newvdevs[i] = NULL;
1755		for (j = 0; j < oldnvdevs; j++) {
1756			vd = oldvdevs[j];
1757			if (vd != NULL && guid == vd->vdev_guid) {
1758				/*
1759				 * Retain previous vdev for add/remove ops.
1760				 */
1761				newvdevs[i] = vd;
1762				oldvdevs[j] = NULL;
1763				break;
1764			}
1765		}
1766
1767		if (newvdevs[i] == NULL) {
1768			/*
1769			 * Create new vdev
1770			 */
1771			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
1772			    VDEV_ALLOC_L2CACHE) == 0);
1773			ASSERT(vd != NULL);
1774			newvdevs[i] = vd;
1775
1776			/*
1777			 * Commit this vdev as an l2cache device,
1778			 * even if it fails to open.
1779			 */
1780			spa_l2cache_add(vd);
1781
1782			vd->vdev_top = vd;
1783			vd->vdev_aux = sav;
1784
1785			spa_l2cache_activate(vd);
1786
1787			if (vdev_open(vd) != 0)
1788				continue;
1789
1790			(void) vdev_validate_aux(vd);
1791
1792			if (!vdev_is_dead(vd))
1793				l2arc_add_vdev(spa, vd);
1794		}
1795	}
1796
1797	/*
1798	 * Purge vdevs that were dropped
1799	 */
1800	for (i = 0; i < oldnvdevs; i++) {
1801		uint64_t pool;
1802
1803		vd = oldvdevs[i];
1804		if (vd != NULL) {
1805			ASSERT(vd->vdev_isl2cache);
1806
1807			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1808			    pool != 0ULL && l2arc_vdev_present(vd))
1809				l2arc_remove_vdev(vd);
1810			vdev_clear_stats(vd);
1811			vdev_free(vd);
1812		}
1813	}
1814
1815	if (oldvdevs)
1816		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1817
1818	if (sav->sav_config == NULL)
1819		goto out;
1820
1821	sav->sav_vdevs = newvdevs;
1822	sav->sav_count = (int)nl2cache;
1823
1824	/*
1825	 * Recompute the stashed list of l2cache devices, with status
1826	 * information this time.
1827	 */
1828	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1829	    DATA_TYPE_NVLIST_ARRAY) == 0);
1830
1831	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1832	for (i = 0; i < sav->sav_count; i++)
1833		l2cache[i] = vdev_config_generate(spa,
1834		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
1835	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1836	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1837out:
1838	for (i = 0; i < sav->sav_count; i++)
1839		nvlist_free(l2cache[i]);
1840	if (sav->sav_count)
1841		kmem_free(l2cache, sav->sav_count * sizeof (void *));
1842}
1843
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
	if (error != 0)
		return (error);

	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}

/*
 * Concrete top-level vdevs that are not missing and are not logs. At every
 * spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
 */
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t tvds = 0;

	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *vd = rvd->vdev_child[i];
		if (vd->vdev_islog)
			continue;
		if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
			tvds++;
	}

	return (tvds);
}

/*
 * Recursively check whether any vdev in the given tree could not be opened,
 * in which case we post a sysevent to notify the autoreplace code that the
 * device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	for (uint64_t c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    vdev_is_concrete(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
	}
}

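/*
 * Check the vdev tree for log devices that failed to open.  On a normal
 * import this builds diagnostic information about the missing devices and
 * fails the load; with ZFS_IMPORT_MISSING_LOG the ZIL is dropped instead.
 */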
static int
spa_check_for_missing_logs(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing log devices.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;
		uint64_t idx = 0;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
		    KM_SLEEP);
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];

			/*
			 * We consider a device as missing only if it failed
			 * to open (i.e. offline or faulted is not considered
			 * as missing).
			 */
			if (tvd->vdev_islog &&
			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
				child[idx++] = vdev_config_generate(spa, tvd,
				    B_FALSE, VDEV_CONFIG_MISSING);
			}
		}

		if (idx > 0) {
			fnvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx);
			fnvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv);

			for (uint64_t i = 0; i < idx; i++)
				nvlist_free(child[i]);
		}
		nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (nvlist_t **));

		if (idx > 0) {
			spa_load_failed(spa, "some log devices are missing");
			vdev_dbgmsg_print_tree(rvd, 2);
			return (SET_ERROR(ENXIO));
		}
	} else {
		for (uint64_t c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];

			if (tvd->vdev_islog &&
			    tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
				spa_load_note(spa, "some log devices are "
				    "missing, ZIL is dropped.");
				vdev_dbgmsg_print_tree(rvd, 2);
				break;
			}
		}
	}

	return (0);
}

/*
 * Check for missing log devices
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
	boolean_t rv = B_FALSE;
	dsl_pool_t *dp = spa_get_dsl(spa);

	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
		if (rv)
			spa_set_log_state(spa, SPA_LOG_MISSING);
		break;
	}
	return (rv);
}

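/*
 * Passivate the metaslab groups of all top-level log vdevs so that no
 * further allocations are made from the slogs.  Returns B_TRUE if any
 * slog was found.
 */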
static boolean_t
spa_passivate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))
		return (B_FALSE);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;
		}
	}

	return (slog_found);
}

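/*
 * Reactivate the metaslab groups of all top-level log vdevs, undoing
 * spa_passivate_log().
 */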
static void
spa_activate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
	}
}

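/*
 * Ask each dataset in the pool to reset its ZIL, then sync the current
 * txg so that the "stubby" blocks can be removed by zil_sync().
 */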
int
spa_reset_logs(spa_t *spa)
{
	int error;

	error = dmu_objset_find(spa_name(spa), zil_reset,
	    NULL, DS_FIND_CHILDREN);
	if (error == 0) {
		/*
		 * We successfully offlined the log device, sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
	}
	return (error);
}

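/*
 * Run the removal check over every auxiliary (spare or l2cache) vdev.
 */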
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
	for (int i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);
}

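/*
 * Called as claim zios complete during pool load; tracks the highest block
 * birth txg seen in spa_claim_max_txg.
 */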
void
spa_claim_notify(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	if (zio->io_error)
		return;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
}

typedef struct spa_load_error {
	uint64_t	sle_meta_count;
	uint64_t	sle_data_count;
} spa_load_error_t;

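/*
 * Completion callback for the reads issued by spa_load_verify_cb().  Any
 * error is counted as a metadata or a data error, and an in-flight i/o
 * slot is released for the throttle in spa_load_verify_cb().
 */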
static void
spa_load_verify_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;
	spa_t *spa = zio->io_spa;

	abd_free(zio->io_abd);
	if (error) {
		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_inc_64(&sle->sle_meta_count);
		else
			atomic_inc_64(&sle->sle_data_count);
	}

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_load_verify_ios--;
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

/*
 * Maximum number of concurrent scrub i/os to issue while verifying
 * a pool during import.
 */
int spa_load_verify_maxinflight = 10000;
boolean_t spa_load_verify_metadata = B_TRUE;
boolean_t spa_load_verify_data = B_TRUE;

/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);
	/*
	 * Note: normally this routine will not be called if
	 * spa_load_verify_metadata is not set.  However, it may be useful
	 * to manually clear the flag after the traversal has begun, in
	 * which case this check stops further verification i/os.
	 */
	if (!spa_load_verify_metadata)
		return (0);
	if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
		return (0);

	zio_t *rio = arg;
	size_t size = BP_GET_PSIZE(bp);

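	/*
	 * Throttle: wait until the number of in-flight verification reads
	 * drops below spa_load_verify_maxinflight.
	 */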
	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_load_verify_ios >= spa_load_verify_maxinflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	spa->spa_load_verify_ios++;
	mutex_exit(&spa->spa_scrub_lock);

	zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
	return (0);
}

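/*
 * dmu_objset_find_dp() callback used during import to verify that no
 * dataset name in the pool exceeds ZFS_MAX_DATASET_NAME_LEN.
 */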
/*ARGSUSED*/
int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	return (0);
}

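/*
 * Verify the pool contents by issuing reads for metadata (and optionally
 * data) blocks, counting any errors found, and decide based on the pool's
 * load policy whether this load attempt (or rewind) is acceptable.
 */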
static int
spa_load_verify(spa_t *spa)
{
	zio_t *rio;
	spa_load_error_t sle = { 0 };
	zpool_load_policy_t policy;
	boolean_t verify_ok = B_FALSE;
	int error = 0;

	zpool_get_load_policy(spa->spa_config, &policy);

	if (policy.zlp_rewind & ZPOOL_NEVER_REWIND)
		return (0);

	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	error = dmu_objset_find_dp(spa->spa_dsl_pool,
	    spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
	    DS_FIND_CHILDREN);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
	if (error != 0)
		return (error);

	rio = zio_root(spa, NULL, &sle,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	if (spa_load_verify_metadata) {
		if (spa->spa_extreme_rewind) {
			spa_load_note(spa, "performing a complete scan of the "
			    "pool since extreme rewind is on. This may take "
			    "a very long time.\n  (spa_load_verify_data=%u, "
			    "spa_load_verify_metadata=%u)",
			    spa_load_verify_data, spa_load_verify_metadata);
		}
		error = traverse_pool(spa, spa->spa_verify_min_txg,
		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
		    TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
	}

	(void) zio_wait(rio);

	spa->spa_load_meta_errors = sle.sle_meta_count;
	spa->spa_load_data_errors = sle.sle_data_count;

	if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
		spa_load_note(spa, "spa_load_verify found %llu metadata errors "
		    "and %llu data errors", (u_longlong_t)sle.sle_meta_count,
		    (u_longlong_t)sle.sle_data_count);
	}

	if (spa_load_verify_dryrun ||
	    (!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
	    sle.sle_data_count <= policy.zlp_maxdata)) {
		int64_t loss = 0;

		verify_ok = B_TRUE;
		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;

		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
		VERIFY(nvlist_add_int64(spa->spa_load_info,
		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
	} else {
		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
	}

	if (spa_load_verify_dryrun)
		return (0);

	if (error) {
		if (error != ENXIO && error != EIO)
			error = SET_ERROR(EIO);
		return (error);
	}

	return (verify_ok ? 0 : EIO);
}

/*
 * Find a value in the pool props object.
 */
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}

/*
 * Find a value in the pool directory object.
 */
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
	int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    name, sizeof (uint64_t), 1, val);

	if (error != 0 && (error != ENOENT || log_enoent)) {
		spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
		    "[error=%d]", name, error);
	}

	return (error);
}

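/*
 * Set the given vdev's state to CANT_OPEN with the specified aux reason
 * and return the supplied error.
 */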
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
	return (SET_ERROR(err));
}

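/*
 * Start the auxiliary threads a writeable pool needs: the indirect-vdev
 * condensing thread and the checkpoint-discard zthr.
 */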
static void
spa_spawn_aux_threads(spa_t *spa)
{
	ASSERT(spa_writeable(spa));

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_start_indirect_condensing_thread(spa);

	ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
	spa->spa_checkpoint_discard_zthr =
	    zthr_create(spa_checkpoint_discard_thread_check,
	    spa_checkpoint_discard_thread, spa);
}

/*
 * Fix up config after a partly-completed split.  This is done with the
 * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
 * pool have that entry in their config, but only the splitting one contains
 * a list of all the guids of the vdevs that are being split off.
 *
 * This function determines what to do with that list: either rejoin
 * all the disks to the pool, or complete the splitting process.  To attempt
 * the rejoin, each disk that is offlined is marked online again, and
 * we do a reopen() call.  If the vdev label for every disk that was
 * marked online indicates it was successfully split off
 * (VDEV_AUX_SPLIT_POOL), then we call vdev_split() on each disk, and
 * complete the split.
 *
 * Otherwise we leave the config alone, with all the vdevs in place in
 * the original pool.
 */
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
	uint_t extracted;
	uint64_t *glist;
	uint_t i, gcount;
	nvlist_t *nvl;
	vdev_t **vd;
	boolean_t attempt_reopen;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
		return;

	/* check that the config is complete */
	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
	    &glist, &gcount) != 0)
		return;

	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);

	/* attempt to online all the vdevs & validate */
	attempt_reopen = B_TRUE;
	for (i = 0; i < gcount; i++) {
		if (glist[i] == 0)	/* vdev is hole */
			continue;

		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
		if (vd[i] == NULL) {
			/*
			 * Don't bother attempting to reopen the disks;
			 * just do the split.
			 */
			attempt_reopen = B_FALSE;
		} else {
			/* attempt to re-online it */
			vd[i]->vdev_offline = B_FALSE;
		}
	}

	if (attempt_reopen) {
		vdev_reopen(spa->spa_root_vdev);

		/* check each device to see what state it's in */
		for (extracted = 0, i = 0; i < gcount; i++) {
			if (vd[i] != NULL &&
			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
				break;
			++extracted;
		}
	}

	/*
	 * If every disk has been moved to the new pool, or if we never
	 * even attempted to look at them, then we split them off for
	 * good.
	 */
	if (!attempt_reopen || gcount == extracted) {
		for (i = 0; i < gcount; i++)
			if (vd[i] != NULL)
				vdev_split(vd[i]);
		vdev_reopen(spa->spa_root_vdev);
	}

	kmem_free(vd, gcount * sizeof (vdev_t *));
}

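/*
 * Wrapper around spa_load_impl() that tracks the load state and posts an
 * fm ereport if the load fails.
 */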
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
	char *ereport = FM_EREPORT_ZFS_POOL;
	int error;

	spa->spa_load_state = state;
	(void) spa_import_progress_set_state(spa, spa_load_state(spa));

	gethrestime(&spa->spa_loaded_ts);
	error = spa_load_impl(spa, type, &ereport);

	/*
	 * Don't count references from objsets that are already closed
	 * and are making their way through the eviction process.
	 */
	spa_evicting_os_wait(spa);
	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
	if (error) {
		if (error != EEXIST) {
			spa->spa_loaded_ts.tv_sec = 0;
			spa->spa_loaded_ts.tv_nsec = 0;
		}
		if (error != EBADF) {
			zfs_ereport_post(ereport, spa, NULL, NULL, NULL, 0, 0);
		}
	}
	spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
	spa->spa_ena = 0;

	(void) spa_import_progress_set_state(spa, spa_load_state(spa));

	return (error);
}

/*
 * Count the number of per-vdev ZAPs associated with all of the vdevs in the
 * vdev tree rooted in the given vd, and ensure that each ZAP is present in the
 * spa's per-vdev ZAP list.
 */
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t total = 0;
	if (vd->vdev_top_zap != 0) {
		total++;
		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps, vd->vdev_top_zap));
	}
	if (vd->vdev_leaf_zap != 0) {
		total++;
		ASSERT0(zap_lookup_int(spa->spa_meta_objset,
		    spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		total += vdev_count_verify_zaps(vd->vdev_child[i]);
	}

	return (total);
}

/*
 * Determine whether the activity check is required.
 */
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
    nvlist_t *config)
{
	uint64_t state = 0;
	uint64_t hostid = 0;
	uint64_t tryconfig_txg = 0;
	uint64_t tryconfig_timestamp = 0;
	uint16_t tryconfig_mmp_seq = 0;
	nvlist_t *nvinfo;

	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
		(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
		    &tryconfig_txg);
		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
		    &tryconfig_timestamp);
		(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
		    &tryconfig_mmp_seq);
	}

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);

	/*
	 * Disable the MMP activity check when explicitly requested.  This
	 * is used by zdb, which is intended to be run on potentially active
	 * pools.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
		return (B_FALSE);

	/*
	 * Skip the activity check when the MMP feature is disabled.
	 */
	if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
		return (B_FALSE);

	/*
	 * If the tryconfig_ values are nonzero, they are the results of an
	 * earlier tryimport.  If they all match the uberblock we just found,
	 * then the pool has not changed and we return false so we do not test
	 * a second time.
	 */
	if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
	    tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
	    tryconfig_mmp_seq && tryconfig_mmp_seq ==
	    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
		return (B_FALSE);

	/*
	 * Allow the activity check to be skipped when importing the pool
	 * on the same host which last imported it.  Since the hostid from
	 * the configuration may be stale, use the one read from the label.
	 */
	if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
		hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);

	if (hostid == spa_get_hostid())
		return (B_FALSE);

	/*
	 * Skip the activity test when the pool was cleanly exported.
	 */
	if (state != POOL_STATE_ACTIVE)
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * Determine the number of nanoseconds the activity check must watch for
 * changes on-disk.
 */
static uint64_t
spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
{
	uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
	uint64_t multihost_interval = MSEC2NSEC(
	    MMP_INTERVAL_OK(zfs_multihost_interval));
	uint64_t import_delay = MAX(NANOSEC, import_intervals *
	    multihost_interval);

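	/*
	 * For example (illustrative values only): with a multihost write
	 * interval of 1000ms and 10 import intervals, the base delay is
	 * MAX(1s, 10 * 1s) = 10 seconds.
	 */
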
	/*
	 * Local tunables determine a minimum duration, except for the case
	 * where we know when the remote host will suspend the pool if MMP
	 * writes do not land.
	 *
	 * See Big Theory comment at the top of mmp.c for the reasoning behind
	 * these cases and times.
	 */

	ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);

	if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
	    MMP_FAIL_INT(ub) > 0) {

		/* MMP on remote host will suspend pool after failed writes */
		import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
		    MMP_IMPORT_SAFETY_FACTOR / 100;

		zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
		    "mmp_fails=%llu ub_mmp mmp_interval=%llu "
		    "import_intervals=%u", import_delay, MMP_FAIL_INT(ub),
		    MMP_INTERVAL(ub), import_intervals);

	} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
	    MMP_FAIL_INT(ub) == 0) {

		/* MMP on remote host will never suspend pool */
		import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
		    ub->ub_mmp_delay) * import_intervals);

		zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
		    "mmp_interval=%llu ub_mmp_delay=%llu "
		    "import_intervals=%u", import_delay, MMP_INTERVAL(ub),
		    ub->ub_mmp_delay, import_intervals);

	} else if (MMP_VALID(ub)) {
		/*
		 * zfs-0.7 compatibility case
		 */

		import_delay = MAX(import_delay, (multihost_interval +
		    ub->ub_mmp_delay) * import_intervals);

		zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
		    "import_intervals=%u leaves=%u", import_delay,
		    ub->ub_mmp_delay, import_intervals,
		    vdev_count_leaves(spa));
	} else {
		/* Using local tunings is the only reasonable option */
		zfs_dbgmsg("pool last imported on non-MMP aware "
		    "host using import_delay=%llu multihost_interval=%llu "
		    "import_intervals=%u", import_delay, multihost_interval,
		    import_intervals);
	}

	return (import_delay);
}

/*
 * Perform the import activity check.  If the user canceled the import or
 * we detected activity then fail.
 */
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
{
	uint64_t txg = ub->ub_txg;
	uint64_t timestamp = ub->ub_timestamp;
	uint64_t mmp_config = ub->ub_mmp_config;
	uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
	uint64_t import_delay;
	hrtime_t import_expire;
	nvlist_t *mmp_label = NULL;
	vdev_t *rvd = spa->spa_root_vdev;
	kcondvar_t cv;
	kmutex_t mtx;
	int error = 0;

	cv_init(&cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_enter(&mtx);

	/*
	 * If ZPOOL_CONFIG_MMP_TXG is present, an activity check was performed
	 * during the earlier tryimport.  If the txg recorded there is 0, then
	 * the pool is known to be active on another host.
	 *
	 * Otherwise, the pool might be in use on another host.  Check for
	 * changes in the uberblocks on disk if necessary.
	 */
	if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
		nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_LOAD_INFO);

		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
		    fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
			vdev_uberblock_load(rvd, ub, &mmp_label);
			error = SET_ERROR(EREMOTEIO);
			goto out;
		}
	}

	import_delay = spa_activity_check_duration(spa, ub);

	/* Add a small random factor in case of simultaneous imports (0-25%) */
	import_delay += import_delay * spa_get_random(250) / 1000;

	import_expire = gethrtime() + import_delay;

	while (gethrtime() < import_expire) {
		(void) spa_import_progress_set_mmp_check(spa,
		    NSEC2SEC(import_expire - gethrtime()));

		vdev_uberblock_load(rvd, ub, &mmp_label);

		if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
		    mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
			zfs_dbgmsg("multihost activity detected "
			    "txg %llu ub_txg  %llu "
			    "timestamp %llu ub_timestamp  %llu "
			    "mmp_config %#llx ub_mmp_config %#llx",
			    txg, ub->ub_txg, timestamp, ub->ub_timestamp,
			    mmp_config, ub->ub_mmp_config);

			error = SET_ERROR(EREMOTEIO);
			break;
		}

		if (mmp_label) {
			nvlist_free(mmp_label);
			mmp_label = NULL;
		}

		error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
		if (error != -1) {
			error = SET_ERROR(EINTR);
			break;
		}
		error = 0;
	}

out:
	mutex_exit(&mtx);
	mutex_destroy(&mtx);
	cv_destroy(&cv);

	/*
	 * If the pool is determined to be active, store the status in the
	 * spa->spa_load_info nvlist.  If the remote hostname or hostid are
	 * available from the configuration read from disk, store them as
	 * well.  This allows 'zpool import' to generate a more useful message.
	 *
	 * ZPOOL_CONFIG_MMP_STATE    - observed pool status (mandatory)
	 * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
	 * ZPOOL_CONFIG_MMP_HOSTID   - hostid from the active pool
	 */
	if (error == EREMOTEIO) {
		char *hostname = "<unknown>";
		uint64_t hostid = 0;

		if (mmp_label) {
			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
				hostname = fnvlist_lookup_string(mmp_label,
				    ZPOOL_CONFIG_HOSTNAME);
				fnvlist_add_string(spa->spa_load_info,
				    ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
			}

			if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
				hostid = fnvlist_lookup_uint64(mmp_label,
				    ZPOOL_CONFIG_HOSTID);
				fnvlist_add_uint64(spa->spa_load_info,
				    ZPOOL_CONFIG_MMP_HOSTID, hostid);
			}
		}

		fnvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
		fnvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_MMP_TXG, 0);

		error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
	}

	if (mmp_label)
		nvlist_free(mmp_label);

	return (error);
}

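/*
 * Verify that this host (by hostid) is the one recorded as the last to
 * access the pool; if another system's hostid is recorded in the MOS
 * config, refuse to load the pool (see ZFS-8000-EY).
 */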
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
	uint64_t hostid;
	char *hostname;
	uint64_t myhostid = 0;

	if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
	    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
		hostname = fnvlist_lookup_string(mos_config,
		    ZPOOL_CONFIG_HOSTNAME);

		myhostid = zone_get_hostid(NULL);

		if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
			cmn_err(CE_WARN, "pool '%s' could not be "
			    "loaded as it was last accessed by "
			    "another system (host: %s hostid: 0x%llx). "
			    "See: http://illumos.org/msg/ZFS-8000-EY",
			    spa_name(spa), hostname, (u_longlong_t)hostid);
			spa_load_failed(spa, "hostid verification failed: pool "
			    "last accessed by host: %s (hostid: 0x%llx)",
			    hostname, (u_longlong_t)hostid);
			return (SET_ERROR(EBADF));
		}
	}

	return (0);
}

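/*
 * Parse the config provided to spa_load() into an in-core vdev tree,
 * after some basic sanity checks (the pool guid must be present and the
 * pool must not already be imported).
 */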
static int
spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
{
	int error = 0;
	nvlist_t *nvtree, *nvl, *config = spa->spa_config;
	int parse;
	vdev_t *rvd;
	uint64_t pool_guid;
	char *comment;

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &spa->spa_ubsync.ub_version) != 0)
		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;

	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		spa_load_failed(spa, "invalid config provided: '%s' missing",
		    ZPOOL_CONFIG_POOL_GUID);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * If we are doing an import, ensure that the pool is not already
	 * imported by checking if its pool guid already exists in the
	 * spa namespace.
	 *
	 * The only case in which we allow an already imported pool to be
	 * imported again is when the pool is checkpointed and we want to
	 * look at its checkpointed state from userland tools like zdb.
	 */
#ifdef _KERNEL
	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
#else
	if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
	    spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0) &&
	    !spa_importing_readonly_checkpoint(spa)) {
#endif
		spa_load_failed(spa, "a pool with guid %llu is already open",
		    (u_longlong_t)pool_guid);
		return (SET_ERROR(EEXIST));
	}

	spa->spa_config_guid = pool_guid;

	nvlist_free(spa->spa_load_info);
	spa->spa_load_info = fnvlist_alloc();

	ASSERT(spa->spa_comment == NULL);
	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
		spa->spa_comment = spa_strdup(comment);

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
		spa->spa_config_splitting = fnvlist_dup(nvl);

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
		spa_load_failed(spa, "invalid config provided: '%s' missing",
		    ZPOOL_CONFIG_VDEV_TREE);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Create "The Godfather" zio to hold all async IOs
	 */
	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
	    KM_SLEEP);
	for (int i = 0; i < max_ncpus; i++) {
		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);
	}

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	parse = (type == SPA_IMPORT_EXISTING ?
	    VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
	error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_load_failed(spa, "unable to parse config [error=%d]",
		    error);
		return (error);
	}

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
	ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);

	if (type != SPA_IMPORT_ASSEMBLE) {
		ASSERT(spa_guid(spa) == pool_guid);
	}

	return (0);
}

/*
 * Recursively open all vdevs in the vdev tree. This function is called twice:
 * first with the untrusted config, then with the trusted config.
 */
static int
spa_ld_open_vdevs(spa_t *spa)
{
	int error = 0;

	/*
	 * spa_missing_tvds_allowed defines how many top-level vdevs can be
	 * missing/unopenable for the root vdev to still be considered openable.
	 */
	if (spa->spa_trust_config) {
		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
	} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
	} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
		spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
	} else {
		spa->spa_missing_tvds_allowed = 0;
	}

	spa->spa_missing_tvds_allowed =
	    MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(spa->spa_root_vdev);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (spa->spa_missing_tvds != 0) {
		spa_load_note(spa, "vdev tree has %lld missing top-level "
		    "vdevs.", (u_longlong_t)spa->spa_missing_tvds);
		if (spa->spa_trust_config && (spa->spa_mode & FWRITE)) {
			/*
			 * Although theoretically we could allow users to open
			 * incomplete pools in RW mode, we'd need to add a lot
			 * of extra logic (e.g. adjust pool space to account
			 * for missing vdevs).
			 * This limitation also prevents users from accidentally
			 * opening the pool in RW mode during data recovery and
			 * damaging it further.
			 */
			spa_load_note(spa, "pools with missing top-level "
			    "vdevs can only be opened in read-only mode.");
			error = SET_ERROR(ENXIO);
		} else {
			spa_load_note(spa, "current settings allow for maximum "
			    "%lld missing top-level vdevs at this stage.",
			    (u_longlong_t)spa->spa_missing_tvds_allowed);
		}
	}
	if (error != 0) {
		spa_load_failed(spa, "unable to open vdev tree [error=%d]",
		    error);
	}
	if (spa->spa_missing_tvds != 0 || error != 0)
		vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);

	return (error);
}

/*
 * We need to validate the vdev labels against the configuration that
 * we have in hand. This function is called twice: first with an untrusted
 * config, then with a trusted config. The validation is more strict when the
 * config is trusted.
 */
static int
spa_ld_validate_vdevs(spa_t *spa)
{
	int error = 0;
	vdev_t *rvd = spa->spa_root_vdev;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_validate(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
		return (error);
	}

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		spa_load_failed(spa, "cannot open vdev tree after invalidating "
		    "some vdevs");
		vdev_dbgmsg_print_tree(rvd, 2);
		return (SET_ERROR(ENXIO));
	}

	return (0);
}

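/*
 * Record the selected uberblock in the in-core spa state and derive the
 * txg boundaries used for verification and claiming.
 */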
static void
spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
{
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
	    TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
	spa->spa_first_txg = spa->spa_last_ubsync_txg ?
	    spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
	spa->spa_claim_max_txg = spa->spa_first_txg;
	spa->spa_prev_software_version = ub->ub_software_version;
}

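/*
 * Select the best uberblock from the vdev labels, performing the multihost
 * activity check if it is required, and verify that the features needed to
 * read the MOS are supported.
 */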
static int
spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
{
	vdev_t *rvd = spa->spa_root_vdev;
	nvlist_t *label;
	uberblock_t *ub = &spa->spa_uberblock;
	boolean_t activity_check = B_FALSE;

	/*
	 * If we are opening the checkpointed state of the pool by
	 * rewinding to it, at this point we will have written the
	 * checkpointed uberblock to the vdev labels, so searching
	 * the labels will find the right uberblock.  However, if
	 * we are opening the checkpointed state read-only, we have
	 * not modified the labels. Therefore, we must ignore the
	 * labels and continue using the spa_uberblock that was set
	 * by spa_ld_checkpoint_rewind.
	 *
	 * Note that it would be fine to ignore the labels when
	 * rewinding (opening writeable) as well. However, if we
	 * crash just after writing the labels, we will end up
	 * searching the labels. Doing so in the common case means
	 * that this code path gets exercised normally, rather than
	 * just in the edge case.
	 */
	if (ub->ub_checkpoint_txg != 0 &&
	    spa_importing_readonly_checkpoint(spa)) {
		spa_ld_select_uberblock_done(spa, ub);
		return (0);
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(rvd, ub, &label);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		nvlist_free(label);
		spa_load_failed(spa, "no valid uberblock found");
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
	}

	if (spa->spa_load_max_txg != UINT64_MAX) {
		(void) spa_import_progress_set_max_txg(spa,
		    (u_longlong_t)spa->spa_load_max_txg);
	}
	spa_load_note(spa, "using uberblock with txg=%llu",
	    (u_longlong_t)ub->ub_txg);

	/*
	 * For pools which have the multihost property on, determine if the
	 * pool is truly inactive and can be safely imported.  Prevent
	 * hosts which don't have a hostid set from importing the pool.
	 */
	activity_check = spa_activity_check_required(spa, ub, label,
	    spa->spa_config);
	if (activity_check) {
		if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
		    spa_get_hostid() == 0) {
			nvlist_free(label);
			fnvlist_add_uint64(spa->spa_load_info,
			    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
			return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
		}

		int error = spa_activity_check(spa, ub, spa->spa_config);
		if (error) {
			nvlist_free(label);
			return (error);
		}

		fnvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
		fnvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
		fnvlist_add_uint16(spa->spa_load_info,
		    ZPOOL_CONFIG_MMP_SEQ,
		    (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
	}

	/*
	 * If the pool has an unsupported version we can't open it.
	 */
	if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
		nvlist_free(label);
		spa_load_failed(spa, "version %llu is not supported",
		    (u_longlong_t)ub->ub_version);
		return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
	}

	if (ub->ub_version >= SPA_VERSION_FEATURES) {
		nvlist_t *features;

		/*
		 * If we weren't able to find what's necessary for reading the
		 * MOS in the label, return failure.
		 */
		if (label == NULL) {
			spa_load_failed(spa, "label config unavailable");
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
			    ENXIO));
		}

		if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) != 0) {
			nvlist_free(label);
			spa_load_failed(spa, "invalid label: '%s' missing",
			    ZPOOL_CONFIG_FEATURES_FOR_READ);
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
			    ENXIO));
		}

		/*
		 * Update our in-core representation with the definitive values
		 * from the label.
		 */
		nvlist_free(spa->spa_label_features);
		VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
	}

	nvlist_free(label);

	/*
	 * Look through entries in the label nvlist's features_for_read. If
	 * there is a feature listed there which we don't understand then we
	 * cannot open a pool.
	 */
	if (ub->ub_version >= SPA_VERSION_FEATURES) {
		nvlist_t *unsup_feat;

		VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
		    0);

		for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
		    NULL); nvp != NULL;
		    nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
			if (!zfeature_is_supported(nvpair_name(nvp))) {
				VERIFY(nvlist_add_string(unsup_feat,
				    nvpair_name(nvp), "") == 0);
			}
		}

		if (!nvlist_empty(unsup_feat)) {
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
			nvlist_free(unsup_feat);
			spa_load_failed(spa, "some features are unsupported");
			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
			    ENOTSUP));
		}

		nvlist_free(unsup_feat);
	}

	if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_try_repair(spa, spa->spa_config);
		spa_config_exit(spa, SCL_ALL, FTAG);
		nvlist_free(spa->spa_config_splitting);
		spa->spa_config_splitting = NULL;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa_ld_select_uberblock_done(spa, ub);

	return (0);
}

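/*
 * Initialize the DSL pool from the root block pointer in the selected
 * uberblock, giving us access to the MOS.
 */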
static int
spa_ld_open_rootbp(spa_t *spa)
{
	int error = 0;
	vdev_t *rvd = spa->spa_root_vdev;

	error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error != 0) {
		spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
		    "[error=%d]", error);
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	return (0);
}

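/*
 * Replace the possibly untrusted config that was used to open the pool
 * with the trusted copy stored in the MOS: rebuild the vdev tree from it,
 * then re-open and re-validate the vdevs.
 */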
static int
spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
    boolean_t reloading)
{
	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
	nvlist_t *nv, *mos_config, *policy;
	int error = 0, copy_error;
	uint64_t healthy_tvds, healthy_tvds_mos;
	uint64_t mos_config_txg;

	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
	    != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * If we're assembling a pool from a split, the config provided is
	 * already trusted so there is nothing to do.
	 */
	if (type == SPA_IMPORT_ASSEMBLE)
		return (0);

	healthy_tvds = spa_healthy_core_tvds(spa);

	if (load_nvlist(spa, spa->spa_config_object, &mos_config)
	    != 0) {
		spa_load_failed(spa, "unable to retrieve MOS config");
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}

	/*
	 * If we are doing an open, the pool owner wasn't verified yet, so do
	 * the verification here.
	 */
	if (spa->spa_load_state == SPA_LOAD_OPEN) {
		error = spa_verify_host(spa, mos_config);
		if (error != 0) {
			nvlist_free(mos_config);
			return (error);
		}
	}

	nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Build a new vdev tree from the trusted config
	 */
	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

	/*
	 * Vdev paths in the MOS may be obsolete. If the untrusted config was
	 * obtained by scanning /dev/dsk, then it will have the right vdev
	 * paths. We update the trusted MOS config with this information.
	 * We first try to copy the paths with vdev_copy_path_strict, which
	 * succeeds only when both configs have exactly the same vdev tree.
	 * If that fails, we fall back to a more flexible method that has a
	 * best-effort policy.
	 */
	copy_error = vdev_copy_path_strict(rvd, mrvd);
	if (copy_error != 0 || spa_load_print_vdev_tree) {
		spa_load_note(spa, "provided vdev tree:");
		vdev_dbgmsg_print_tree(rvd, 2);
		spa_load_note(spa, "MOS vdev tree:");
		vdev_dbgmsg_print_tree(mrvd, 2);
	}
	if (copy_error != 0) {
		spa_load_note(spa, "vdev_copy_path_strict failed, falling "
		    "back to vdev_copy_path_relaxed");
		vdev_copy_path_relaxed(rvd, mrvd);
	}

	vdev_close(rvd);
	vdev_free(rvd);
	spa->spa_root_vdev = mrvd;
	rvd = mrvd;
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * We will use spa_config if we decide to reload the spa or if spa_load
	 * fails and we rewind. We must thus regenerate the config using the
	 * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
	 * pass settings on how to load the pool and is not stored in the MOS.
	 * We copy it over to our new, trusted config.
	 */
	mos_config_txg = fnvlist_lookup_uint64(mos_config,
	    ZPOOL_CONFIG_POOL_TXG);
	nvlist_free(mos_config);
	mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
	if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
	    &policy) == 0)
		fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
	spa_config_set(spa, mos_config);
	spa->spa_config_source = SPA_CONFIG_SRC_MOS;

	/*
	 * Now that we got the config from the MOS, we should be more strict
	 * in checking blkptrs and can make assumptions about the consistency
	 * of the vdev tree. spa_trust_config must be set to true before opening
	 * vdevs in order for them to be writeable.
	 */
	spa->spa_trust_config = B_TRUE;

	/*
	 * Open and validate the new vdev tree
	 */
	error = spa_ld_open_vdevs(spa);
	if (error != 0)
		return (error);

	error = spa_ld_validate_vdevs(spa);
	if (error != 0)
		return (error);

	if (copy_error != 0 || spa_load_print_vdev_tree) {
		spa_load_note(spa, "final vdev tree:");
		vdev_dbgmsg_print_tree(rvd, 2);
	}

	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
	    !spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
		/*
		 * Sanity check to make sure that we are indeed loading the
		 * latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
		 * in the config provided and they happened to be the only ones
		 * to have the latest uberblock, we could involuntarily perform
		 * an extreme rewind.
		 */
		healthy_tvds_mos = spa_healthy_core_tvds(spa);
		if (healthy_tvds_mos - healthy_tvds >=
		    SPA_SYNC_MIN_VDEVS) {
			spa_load_note(spa, "config provided misses too many "
			    "top-level vdevs compared to MOS (%lld vs %lld). ",
			    (u_longlong_t)healthy_tvds,
			    (u_longlong_t)healthy_tvds_mos);
			spa_load_note(spa, "vdev tree:");
			vdev_dbgmsg_print_tree(rvd, 2);
			if (reloading) {
				spa_load_failed(spa, "config was already "
				    "provided from MOS. Aborting.");
				return (spa_vdev_err(rvd,
				    VDEV_AUX_CORRUPT_DATA, EIO));
			}
			spa_load_note(spa, "spa must be reloaded using MOS "
			    "config");
			return (SET_ERROR(EAGAIN));
		}
	}

	error = spa_check_for_missing_logs(spa);
	if (error != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));

	if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
		spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
		    "guid sum (%llu != %llu)",
		    (u_longlong_t)spa->spa_uberblock.ub_guid_sum,
		    (u_longlong_t)rvd->vdev_guid_sum);
		return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
		    ENXIO));
	}

	return (0);
}

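/*
 * Load the metadata needed to handle previously removed (indirect) vdevs.
 */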
static int
spa_ld_open_indirect_vdev_metadata(spa_t *spa)
{
	int error = 0;
	vdev_t *rvd = spa->spa_root_vdev;

	/*
	 * Everything that we read before spa_remove_init() must be stored
	 * on concrete vdevs.  Therefore we do this as early as possible.
	 */
	error = spa_remove_init(spa);
	if (error != 0) {
		spa_load_failed(spa, "spa_remove_init failed [error=%d]",
		    error);
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}

	/*
	 * Retrieve information needed to condense indirect vdev mappings.
	 */
	error = spa_condense_init(spa);
	if (error != 0) {
		spa_load_failed(spa, "spa_condense_init failed [error=%d]",
		    error);
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
	}

	return (0);
}

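/*
 * Verify that every feature this pool uses for read (and, if it is
 * writeable, for write) is supported by this build, and load the feature
 * refcounts into the in-core cache.
 */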
static int
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
	int error = 0;
	vdev_t *rvd = spa->spa_root_vdev;

	if (spa_version(spa) >= SPA_VERSION_FEATURES) {
		boolean_t missing_feat_read = B_FALSE;
		nvlist_t *unsup_feat, *enabled_feat;

		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
		    &spa->spa_feat_for_read_obj, B_TRUE) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
		    &spa->spa_feat_for_write_obj, B_TRUE) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
		    &spa->spa_feat_desc_obj, B_TRUE) != 0) {
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
		}

		enabled_feat = fnvlist_alloc();
		unsup_feat = fnvlist_alloc();

		if (!spa_features_check(spa, B_FALSE,
		    unsup_feat, enabled_feat))
			missing_feat_read = B_TRUE;

		if (spa_writeable(spa) ||
		    spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
			if (!spa_features_check(spa, B_TRUE,
			    unsup_feat, enabled_feat)) {
				*missing_feat_writep = B_TRUE;
			}
		}

		fnvlist_add_nvlist(spa->spa_load_info,
		    ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);

		if (!nvlist_empty(unsup_feat)) {
			fnvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
		}

		fnvlist_free(enabled_feat);
		fnvlist_free(unsup_feat);

		if (!missing_feat_read) {
			fnvlist_add_boolean(spa->spa_load_info,
			    ZPOOL_CONFIG_CAN_RDONLY);
		}

		/*
		 * If the state is SPA_LOAD_TRYIMPORT, our objective is
		 * twofold: to determine whether the pool is available for
		 * import in read-write mode and (if it is not) whether the
		 * pool is available for import in read-only mode. If the pool
		 * is available for import in read-write mode, it is displayed
		 * as available in userland; if it is not available for import
		 * in read-only mode, it is displayed as unavailable in
		 * userland. If the pool is available for import in read-only
		 * mode but not read-write mode, it is displayed as unavailable
		 * in userland with a special note that the pool is actually
		 * available for open in read-only mode.
		 *
		 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
		 * missing a feature for write, we must first determine whether
		 * the pool can be opened read-only before returning to
		 * userland in order to know whether to display the
		 * abovementioned note.
		 */
		if (missing_feat_read || (*missing_feat_writep &&
		    spa_writeable(spa))) {
			spa_load_failed(spa, "pool uses unsupported features");
			return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
			    ENOTSUP));
		}

		/*
		 * Load refcounts for ZFS features from disk into an in-memory
		 * cache during SPA initialization.
		 */
		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
			uint64_t refcount;

			error = feature_get_refcount_from_disk(spa,
			    &spa_feature_table[i], &refcount);
			if (error == 0) {
				spa->spa_feat_refcount_cache[i] = refcount;
			} else if (error == ENOTSUP) {
				spa->spa_feat_refcount_cache[i] =
				    SPA_FEATURE_DISABLED;
			} else {
				spa_load_failed(spa, "error getting refcount "
				    "for feature %s [error=%d]",
				    spa_feature_table[i].fi_guid, error);
				return (spa_vdev_err(rvd,
				    VDEV_AUX_CORRUPT_DATA, EIO));
			}
		}
	}

	if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
		if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
		    &spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}

	/*
	 * Encryption was added before bookmark_v2, even though bookmark_v2
	 * is now a dependency. If this pool has encryption enabled without
	 * bookmark_v2, trigger an errata message.
	 */
	if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
		spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
	}

	return (0);
}

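/*
 * Open the DSL pool directory structure; a failure here is treated as
 * MOS corruption.
 */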
static int
spa_ld_load_special_directories(spa_t *spa)
{
	int error = 0;
	vdev_t *rvd = spa->spa_root_vdev;

	spa->spa_is_initializing = B_TRUE;
	error = dsl_pool_open(spa->spa_dsl_pool);
	spa->spa_is_initializing = B_FALSE;
	if (error != 0) {
		spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}

	return (0);
}

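/*
 * Load pool-wide state from the MOS: the checksum salt, the deferred-frees
 * bpobj, the error logs, the history object, the per-vdev ZAP map and the
 * pool properties.
 */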
static int
spa_ld_get_props(spa_t *spa)
{
	int error = 0;
	uint64_t obj;
	vdev_t *rvd = spa->spa_root_vdev;

	/* Grab the secret checksum salt from the MOS. */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CHECKSUM_SALT, 1,
	    sizeof (spa->spa_cksum_salt.zcs_bytes),
	    spa->spa_cksum_salt.zcs_bytes);
	if (error == ENOENT) {
		/* Generate a new salt for subsequent use */
		(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
		    sizeof (spa->spa_cksum_salt.zcs_bytes));
	} else if (error != 0) {
		spa_load_failed(spa, "unable to retrieve checksum salt from "
		    "MOS [error=%d]", error);
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}

	if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
	if (error != 0) {
		spa_load_failed(spa, "error opening deferred-frees bpobj "
		    "[error=%d]", error);
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
	    &spa->spa_creation_version, B_FALSE);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
	    B_FALSE);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
	    &spa->spa_errlog_scrub, B_FALSE);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
	if (error != 0 && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	/*
	 * Load the per-vdev ZAP map. If we have an older pool, this will not
	 * be present; in this case, defer its creation to a later time to
	 * avoid dirtying the MOS this early (i.e., out of sync context). See
	 * spa_sync_config_object.
	 */

	/* The sentinel is only available in the MOS config. */
	nvlist_t *mos_config;
	if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
		spa_load_failed(spa, "unable to retrieve MOS config");
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	}

	error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
	    &spa->spa_all_vdev_zaps, B_FALSE);

	if (error == ENOENT) {
		VERIFY(!nvlist_exists(mos_config,
		    ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
		spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
	} else if (error != 0) {
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
	} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
		/*
		 * An older version of ZFS overwrote the sentinel value, so
		 * we have orphaned per-vdev ZAPs in the MOS. Defer their
		 * destruction to later; see spa_sync_config_object.
		 */
		spa->spa_avz_action = AVZ_ACTION_DESTROY;
		/*
		 * We're assuming that no vdevs have had their ZAPs created
		 * before this. Better be sure of it.
		 */
		ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
	}
	nvlist_free(mos_config);

	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
	    B_FALSE);
	if (error && error != ENOENT)
		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));

	if (error == 0) {
		uint64_t autoreplace;

		spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
		spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
		spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
		spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
		spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
		spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
		spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
		    &spa->spa_dedup_ditto);
		spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
		spa->spa_autoreplace = (autoreplace != 0);
	}

	/*
	 * If we are importing a pool with missing top-level vdevs,
	 * we enforce that the pool doesn't panic or get suspended on
	 * error since the likelihood of missing data is extremely high.
	 */
	if (spa->spa_missing_tvds > 0 &&
	    spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
	    spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
		spa_load_note(spa, "forcing failmode to 'continue' "
		    "as some top level vdevs are missing");
		spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
	}

	return (0);
}

3619static int
3620spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
3621{
3622	int error = 0;
3623	vdev_t *rvd = spa->spa_root_vdev;
3624
3625	/*
3626	 * If we're assembling the pool from the split-off vdevs of
3627	 * an existing pool, we don't want to attach the spares & cache
3628	 * devices.
3629	 */
3630
3631	/*
3632	 * Load any hot spares for this pool.
3633	 */
3634	error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
3635	    B_FALSE);
3636	if (error != 0 && error != ENOENT)
3637		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3638	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
3639		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
3640		if (load_nvlist(spa, spa->spa_spares.sav_object,
3641		    &spa->spa_spares.sav_config) != 0) {
3642			spa_load_failed(spa, "error loading spares nvlist");
3643			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3644		}
3645
3646		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3647		spa_load_spares(spa);
3648		spa_config_exit(spa, SCL_ALL, FTAG);
3649	} else if (error == 0) {
3650		spa->spa_spares.sav_sync = B_TRUE;
3651	}
3652
3653	/*
3654	 * Load any level 2 ARC devices for this pool.
3655	 */
3656	error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
3657	    &spa->spa_l2cache.sav_object, B_FALSE);
3658	if (error != 0 && error != ENOENT)
3659		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3660	if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
3661		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
3662		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
3663		    &spa->spa_l2cache.sav_config) != 0) {
3664			spa_load_failed(spa, "error loading l2cache nvlist");
3665			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3666		}
3667
3668		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3669		spa_load_l2cache(spa);
3670		spa_config_exit(spa, SCL_ALL, FTAG);
3671	} else if (error == 0) {
3672		spa->spa_l2cache.sav_sync = B_TRUE;
3673	}
3674
3675	return (0);
3676}
3677
3678static int
3679spa_ld_load_vdev_metadata(spa_t *spa)
3680{
3681	int error = 0;
3682	vdev_t *rvd = spa->spa_root_vdev;
3683
3684	/*
3685	 * If the 'multihost' property is set, then never allow a pool to
3686	 * be imported when the system hostid is zero.  The exception to
3687	 * this rule is zdb which is always allowed to access pools.
3688	 */
3689	if (spa_multihost(spa) && spa_get_hostid() == 0 &&
3690	    (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
3691		fnvlist_add_uint64(spa->spa_load_info,
3692		    ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
3693		return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
3694	}
3695
3696	/*
3697	 * If the 'autoreplace' property is set, then post a resource notifying
3698	 * the ZFS DE that it should not issue any faults for unopenable
3699	 * devices.  We also iterate over the vdevs, and post a sysevent for any
3700	 * unopenable vdevs so that the normal autoreplace handler can take
3701	 * over.
3702	 */
3703	if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
3704		spa_check_removed(spa->spa_root_vdev);
3705		/*
3706		 * For the import case, this is done in spa_import(), because
3707		 * at this point we're using the spare definitions from
3708		 * the MOS config, not necessarily from the userland config.
3709		 */
3710		if (spa->spa_load_state != SPA_LOAD_IMPORT) {
3711			spa_aux_check_removed(&spa->spa_spares);
3712			spa_aux_check_removed(&spa->spa_l2cache);
3713		}
3714	}
3715
3716	/*
3717	 * Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
3718	 */
3719	error = vdev_load(rvd);
3720	if (error != 0) {
3721		spa_load_failed(spa, "vdev_load failed [error=%d]", error);
3722		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
3723	}
3724
3725	error = spa_ld_log_spacemaps(spa);
3726	if (error != 0) {
		spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]",
		    error);
3729		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
3730	}
3731
3732	/*
3733	 * Propagate the leaf DTLs we just loaded all the way up the vdev tree.
3734	 */
3735	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3736	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
3737	spa_config_exit(spa, SCL_ALL, FTAG);
3738
3739	return (0);
3740}
3741
3742static int
3743spa_ld_load_dedup_tables(spa_t *spa)
3744{
3745	int error = 0;
3746	vdev_t *rvd = spa->spa_root_vdev;
3747
3748	error = ddt_load(spa);
3749	if (error != 0) {
3750		spa_load_failed(spa, "ddt_load failed [error=%d]", error);
3751		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
3752	}
3753
3754	return (0);
3755}
3756
3757static int
3758spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
3759{
3760	vdev_t *rvd = spa->spa_root_vdev;
3761
3762	if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
3763		boolean_t missing = spa_check_logs(spa);
3764		if (missing) {
3765			if (spa->spa_missing_tvds != 0) {
3766				spa_load_note(spa, "spa_check_logs failed "
3767				    "so dropping the logs");
3768			} else {
3769				*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
3770				spa_load_failed(spa, "spa_check_logs failed");
3771				return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
3772				    ENXIO));
3773			}
3774		}
3775	}
3776
3777	return (0);
3778}
3779
3780static int
3781spa_ld_verify_pool_data(spa_t *spa)
3782{
3783	int error = 0;
3784	vdev_t *rvd = spa->spa_root_vdev;
3785
3786	/*
3787	 * We've successfully opened the pool, verify that we're ready
3788	 * to start pushing transactions.
3789	 */
3790	if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
3791		error = spa_load_verify(spa);
3792		if (error != 0) {
3793			spa_load_failed(spa, "spa_load_verify failed "
3794			    "[error=%d]", error);
3795			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
3796			    error));
3797		}
3798	}
3799
3800	return (0);
3801}
3802
3803static void
3804spa_ld_claim_log_blocks(spa_t *spa)
3805{
3806	dmu_tx_t *tx;
3807	dsl_pool_t *dp = spa_get_dsl(spa);
3808
3809	/*
3810	 * Claim log blocks that haven't been committed yet.
3811	 * This must all happen in a single txg.
3812	 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
3813	 * invoked from zil_claim_log_block()'s i/o done callback.
3814	 * Price of rollback is that we abandon the log.
3815	 */
3816	spa->spa_claiming = B_TRUE;
3817
3818	tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
3819	(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
3820	    zil_claim, tx, DS_FIND_CHILDREN);
3821	dmu_tx_commit(tx);
3822
3823	spa->spa_claiming = B_FALSE;
3824
3825	spa_set_log_state(spa, SPA_LOG_GOOD);
3826}
3827
3828static void
3829spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
3830    boolean_t update_config_cache)
3831{
3832	vdev_t *rvd = spa->spa_root_vdev;
3833	int need_update = B_FALSE;
3834
3835	/*
3836	 * If the config cache is stale, or we have uninitialized
3837	 * metaslabs (see spa_vdev_add()), then update the config.
3838	 *
3839	 * If this is a verbatim import, trust the current
3840	 * in-core spa_config and update the disk labels.
3841	 */
3842	if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
3843	    spa->spa_load_state == SPA_LOAD_IMPORT ||
3844	    spa->spa_load_state == SPA_LOAD_RECOVER ||
3845	    (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
3846		need_update = B_TRUE;
3847
3848	for (int c = 0; c < rvd->vdev_children; c++)
3849		if (rvd->vdev_child[c]->vdev_ms_array == 0)
3850			need_update = B_TRUE;
3851
3852	/*
	 * Update the config cache asynchronously in case we're the
3854	 * root pool, in which case the config cache isn't writable yet.
3855	 */
3856	if (need_update)
3857		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3858}
3859
3860static void
3861spa_ld_prepare_for_reload(spa_t *spa)
3862{
3863	int mode = spa->spa_mode;
3864	int async_suspended = spa->spa_async_suspended;
3865
3866	spa_unload(spa);
3867	spa_deactivate(spa);
3868	spa_activate(spa, mode);
3869
3870	/*
3871	 * We save the value of spa_async_suspended as it gets reset to 0 by
	 * spa_unload(). We want to restore it to the original value before
3873	 * returning as we might be calling spa_async_resume() later.
3874	 */
3875	spa->spa_async_suspended = async_suspended;
3876}
3877
3878static int
3879spa_ld_read_checkpoint_txg(spa_t *spa)
3880{
3881	uberblock_t checkpoint;
3882	int error = 0;
3883
3884	ASSERT0(spa->spa_checkpoint_txg);
3885	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3886
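	/*
	 * The checkpointed uberblock lives in the MOS directory ZAP as an
	 * array of uint64_t words, hence the word-sized integer length and
	 * the word count spanning the whole uberblock_t below.
	 */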
3887	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3888	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
3889	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
3890
3891	if (error == ENOENT)
3892		return (0);
3893
3894	if (error != 0)
3895		return (error);
3896
3897	ASSERT3U(checkpoint.ub_txg, !=, 0);
3898	ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
3899	ASSERT3U(checkpoint.ub_timestamp, !=, 0);
3900	spa->spa_checkpoint_txg = checkpoint.ub_txg;
3901	spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
3902
3903	return (0);
3904}
3905
3906static int
3907spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
3908{
3909	int error = 0;
3910
3911	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3912	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
3913
3914	/*
3915	 * Never trust the config that is provided unless we are assembling
3916	 * a pool following a split.
3917	 * This means don't trust blkptrs and the vdev tree in general. This
3918	 * also effectively puts the spa in read-only mode since
3919	 * spa_writeable() checks for spa_trust_config to be true.
3920	 * We will later load a trusted config from the MOS.
3921	 */
3922	if (type != SPA_IMPORT_ASSEMBLE)
3923		spa->spa_trust_config = B_FALSE;
3924
3925	/*
3926	 * Parse the config provided to create a vdev tree.
3927	 */
3928	error = spa_ld_parse_config(spa, type);
3929	if (error != 0)
3930		return (error);
3931
3932	spa_import_progress_add(spa);
3933
3934	/*
3935	 * Now that we have the vdev tree, try to open each vdev. This involves
3936	 * opening the underlying physical device, retrieving its geometry and
3937	 * probing the vdev with a dummy I/O. The state of each vdev will be set
3938	 * based on the success of those operations. After this we'll be ready
3939	 * to read from the vdevs.
3940	 */
3941	error = spa_ld_open_vdevs(spa);
3942	if (error != 0)
3943		return (error);
3944
3945	/*
3946	 * Read the label of each vdev and make sure that the GUIDs stored
3947	 * there match the GUIDs in the config provided.
3948	 * If we're assembling a new pool that's been split off from an
3949	 * existing pool, the labels haven't yet been updated so we skip
3950	 * validation for now.
3951	 */
3952	if (type != SPA_IMPORT_ASSEMBLE) {
3953		error = spa_ld_validate_vdevs(spa);
3954		if (error != 0)
3955			return (error);
3956	}
3957
3958	/*
3959	 * Read all vdev labels to find the best uberblock (i.e. latest,
3960	 * unless spa_load_max_txg is set) and store it in spa_uberblock. We
3961	 * get the list of features required to read blkptrs in the MOS from
3962	 * the vdev label with the best uberblock and verify that our version
3963	 * of zfs supports them all.
3964	 */
3965	error = spa_ld_select_uberblock(spa, type);
3966	if (error != 0)
3967		return (error);
3968
3969	/*
3970	 * Pass that uberblock to the dsl_pool layer which will open the root
3971	 * blkptr. This blkptr points to the latest version of the MOS and will
3972	 * allow us to read its contents.
3973	 */
3974	error = spa_ld_open_rootbp(spa);
3975	if (error != 0)
3976		return (error);
3977
3978	return (0);
3979}
3980
3981static int
3982spa_ld_checkpoint_rewind(spa_t *spa)
3983{
3984	uberblock_t checkpoint;
3985	int error = 0;
3986
3987	ASSERT(MUTEX_HELD(&spa_namespace_lock));
3988	ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
3989
3990	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3991	    DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
3992	    sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
3993
3994	if (error != 0) {
3995		spa_load_failed(spa, "unable to retrieve checkpointed "
3996		    "uberblock from the MOS config [error=%d]", error);
3997
3998		if (error == ENOENT)
3999			error = ZFS_ERR_NO_CHECKPOINT;
4000
4001		return (error);
4002	}
4003
4004	ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
4005	ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
4006
4007	/*
4008	 * We need to update the txg and timestamp of the checkpointed
4009	 * uberblock to be higher than the latest one. This ensures that
4010	 * the checkpointed uberblock is selected if we were to close and
4011	 * reopen the pool right after we've written it in the vdev labels.
4012	 * (also see block comment in vdev_uberblock_compare)
4013	 */
4014	checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
4015	checkpoint.ub_timestamp = gethrestime_sec();
4016
4017	/*
4018	 * Set current uberblock to be the checkpointed uberblock.
4019	 */
4020	spa->spa_uberblock = checkpoint;
4021
4022	/*
4023	 * If we are doing a normal rewind, then the pool is open for
4024	 * writing and we sync the "updated" checkpointed uberblock to
4025	 * disk. Once this is done, we've basically rewound the whole
4026	 * pool and there is no way back.
4027	 *
	 * There are cases when we don't want to attempt to sync the
4029	 * checkpointed uberblock to disk because we are opening a
4030	 * pool as read-only. Specifically, verifying the checkpointed
4031	 * state with zdb, and importing the checkpointed state to get
4032	 * a "preview" of its content.
4033	 */
4034	if (spa_writeable(spa)) {
4035		vdev_t *rvd = spa->spa_root_vdev;
4036
4037		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4038		vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
4039		int svdcount = 0;
4040		int children = rvd->vdev_children;
4041		int c0 = spa_get_random(children);
4042
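		/*
		 * Walk the children in round-robin order from a random
		 * starting index (e.g. with 5 children and c0 == 3 the
		 * visit order is 3, 4, 0, 1, 2), collecting up to
		 * SPA_SYNC_MIN_VDEVS concrete, non-log vdevs with
		 * allocated metaslab arrays to receive the label write.
		 */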
4043		for (int c = 0; c < children; c++) {
4044			vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
4045
4046			/* Stop when revisiting the first vdev */
4047			if (c > 0 && svd[0] == vd)
4048				break;
4049
4050			if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
4051			    !vdev_is_concrete(vd))
4052				continue;
4053
4054			svd[svdcount++] = vd;
4055			if (svdcount == SPA_SYNC_MIN_VDEVS)
4056				break;
4057		}
4058		error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
4059		if (error == 0)
4060			spa->spa_last_synced_guid = rvd->vdev_guid;
4061		spa_config_exit(spa, SCL_ALL, FTAG);
4062
4063		if (error != 0) {
4064			spa_load_failed(spa, "failed to write checkpointed "
4065			    "uberblock to the vdev labels [error=%d]", error);
4066			return (error);
4067		}
4068	}
4069
4070	return (0);
4071}
4072
4073static int
4074spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
4075    boolean_t *update_config_cache)
4076{
4077	int error;
4078
4079	/*
4080	 * Parse the config for pool, open and validate vdevs,
4081	 * select an uberblock, and use that uberblock to open
4082	 * the MOS.
4083	 */
4084	error = spa_ld_mos_init(spa, type);
4085	if (error != 0)
4086		return (error);
4087
4088	/*
4089	 * Retrieve the trusted config stored in the MOS and use it to create
4090	 * a new, exact version of the vdev tree, then reopen all vdevs.
4091	 */
4092	error = spa_ld_trusted_config(spa, type, B_FALSE);
4093	if (error == EAGAIN) {
4094		if (update_config_cache != NULL)
4095			*update_config_cache = B_TRUE;
4096
4097		/*
4098		 * Redo the loading process with the trusted config if it is
4099		 * too different from the untrusted config.
4100		 */
4101		spa_ld_prepare_for_reload(spa);
4102		spa_load_note(spa, "RELOADING");
4103		error = spa_ld_mos_init(spa, type);
4104		if (error != 0)
4105			return (error);
4106
4107		error = spa_ld_trusted_config(spa, type, B_TRUE);
4108		if (error != 0)
4109			return (error);
4110
4111	} else if (error != 0) {
4112		return (error);
4113	}
4114
4115	return (0);
4116}
4117
4118/*
4119 * Load an existing storage pool, using the config provided. This config
4120 * describes which vdevs are part of the pool and is later validated against
4121 * partial configs present in each vdev's label and an entire copy of the
4122 * config stored in the MOS.
4123 */
4124static int
4125spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
4126{
4127	int error = 0;
4128	boolean_t missing_feat_write = B_FALSE;
4129	boolean_t checkpoint_rewind =
4130	    (spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4131	boolean_t update_config_cache = B_FALSE;
4132
4133	ASSERT(MUTEX_HELD(&spa_namespace_lock));
4134	ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
4135
4136	spa_load_note(spa, "LOADING");
4137
4138	error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
4139	if (error != 0)
4140		return (error);
4141
4142	/*
4143	 * If we are rewinding to the checkpoint then we need to repeat
4144	 * everything we've done so far in this function but this time
4145	 * selecting the checkpointed uberblock and using that to open
4146	 * the MOS.
4147	 */
4148	if (checkpoint_rewind) {
4149		/*
		 * If we are rewinding to the checkpoint, update the config
		 * cache anyway.
4152		 */
4153		update_config_cache = B_TRUE;
4154
4155		/*
4156		 * Extract the checkpointed uberblock from the current MOS
4157		 * and use this as the pool's uberblock from now on. If the
4158		 * pool is imported as writeable we also write the checkpoint
4159		 * uberblock to the labels, making the rewind permanent.
4160		 */
4161		error = spa_ld_checkpoint_rewind(spa);
4162		if (error != 0)
4163			return (error);
4164
4165		/*
		 * Redo the loading process again with the
4167		 * checkpointed uberblock.
4168		 */
4169		spa_ld_prepare_for_reload(spa);
4170		spa_load_note(spa, "LOADING checkpointed uberblock");
4171		error = spa_ld_mos_with_trusted_config(spa, type, NULL);
4172		if (error != 0)
4173			return (error);
4174	}
4175
4176	/*
4177	 * Retrieve the checkpoint txg if the pool has a checkpoint.
4178	 */
4179	error = spa_ld_read_checkpoint_txg(spa);
4180	if (error != 0)
4181		return (error);
4182
4183	/*
4184	 * Retrieve the mapping of indirect vdevs. Those vdevs were removed
4185	 * from the pool and their contents were re-mapped to other vdevs. Note
4186	 * that everything that we read before this step must have been
4187	 * rewritten on concrete vdevs after the last device removal was
4188	 * initiated. Otherwise we could be reading from indirect vdevs before
4189	 * we have loaded their mappings.
4190	 */
4191	error = spa_ld_open_indirect_vdev_metadata(spa);
4192	if (error != 0)
4193		return (error);
4194
4195	/*
4196	 * Retrieve the full list of active features from the MOS and check if
4197	 * they are all supported.
4198	 */
4199	error = spa_ld_check_features(spa, &missing_feat_write);
4200	if (error != 0)
4201		return (error);
4202
4203	/*
4204	 * Load several special directories from the MOS needed by the dsl_pool
4205	 * layer.
4206	 */
4207	error = spa_ld_load_special_directories(spa);
4208	if (error != 0)
4209		return (error);
4210
4211	/*
4212	 * Retrieve pool properties from the MOS.
4213	 */
4214	error = spa_ld_get_props(spa);
4215	if (error != 0)
4216		return (error);
4217
4218	/*
4219	 * Retrieve the list of auxiliary devices - cache devices and spares -
4220	 * and open them.
4221	 */
4222	error = spa_ld_open_aux_vdevs(spa, type);
4223	if (error != 0)
4224		return (error);
4225
4226	/*
4227	 * Load the metadata for all vdevs. Also check if unopenable devices
4228	 * should be autoreplaced.
4229	 */
4230	error = spa_ld_load_vdev_metadata(spa);
4231	if (error != 0)
4232		return (error);
4233
4234	error = spa_ld_load_dedup_tables(spa);
4235	if (error != 0)
4236		return (error);
4237
4238	/*
4239	 * Verify the logs now to make sure we don't have any unexpected errors
4240	 * when we claim log blocks later.
4241	 */
4242	error = spa_ld_verify_logs(spa, type, ereport);
4243	if (error != 0)
4244		return (error);
4245
4246	if (missing_feat_write) {
4247		ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
4248
4249		/*
4250		 * At this point, we know that we can open the pool in
4251		 * read-only mode but not read-write mode. We now have enough
4252		 * information and can return to userland.
4253		 */
4254		return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
4255		    ENOTSUP));
4256	}
4257
4258	/*
4259	 * Traverse the last txgs to make sure the pool was left off in a safe
4260	 * state. When performing an extreme rewind, we verify the whole pool,
4261	 * which can take a very long time.
4262	 */
4263	error = spa_ld_verify_pool_data(spa);
4264	if (error != 0)
4265		return (error);
4266
4267	/*
4268	 * Calculate the deflated space for the pool. This must be done before
4269	 * we write anything to the pool because we'd need to update the space
4270	 * accounting using the deflated sizes.
4271	 */
4272	spa_update_dspace(spa);
4273
4274	/*
4275	 * We have now retrieved all the information we needed to open the
4276	 * pool. If we are importing the pool in read-write mode, a few
4277	 * additional steps must be performed to finish the import.
4278	 */
4279	if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
4280	    spa->spa_load_max_txg == UINT64_MAX)) {
4281		uint64_t config_cache_txg = spa->spa_config_txg;
4282
4283		ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
4284
4285		/*
4286		 * In case of a checkpoint rewind, log the original txg
4287		 * of the checkpointed uberblock.
4288		 */
4289		if (checkpoint_rewind) {
4290			spa_history_log_internal(spa, "checkpoint rewind",
4291			    NULL, "rewound state to txg=%llu",
4292			    (u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
4293		}
4294
4295		/*
4296		 * Traverse the ZIL and claim all blocks.
4297		 */
4298		spa_ld_claim_log_blocks(spa);
4299
4300		/*
4301		 * Kick-off the syncing thread.
4302		 */
4303		spa->spa_sync_on = B_TRUE;
4304		txg_sync_start(spa->spa_dsl_pool);
4305		mmp_thread_start(spa);
4306
4307		/*
4308		 * Wait for all claims to sync.  We sync up to the highest
4309		 * claimed log block birth time so that claimed log blocks
4310		 * don't appear to be from the future.  spa_claim_max_txg
4311		 * will have been set for us by ZIL traversal operations
4312		 * performed above.
4313		 */
4314		txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
4315
4316		/*
4317		 * Check if we need to request an update of the config. On the
4318		 * next sync, we would update the config stored in vdev labels
4319		 * and the cachefile (by default /etc/zfs/zpool.cache).
4320		 */
4321		spa_ld_check_for_config_update(spa, config_cache_txg,
4322		    update_config_cache);
4323
4324		/*
4325		 * Check all DTLs to see if anything needs resilvering.
4326		 */
4327		if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
4328		    vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
4329			spa_async_request(spa, SPA_ASYNC_RESILVER);
4330
4331		/*
4332		 * Log the fact that we booted up (so that we can detect if
4333		 * we rebooted in the middle of an operation).
4334		 */
4335		spa_history_log_version(spa, "open");
4336
4337		spa_restart_removal(spa);
4338		spa_spawn_aux_threads(spa);
4339
4340		/*
4341		 * Delete any inconsistent datasets.
4342		 *
4343		 * Note:
4344		 * Since we may be issuing deletes for clones here,
4345		 * we make sure to do so after we've spawned all the
		 * auxiliary threads above (of which the livelist
		 * deletion zthr is a part).
4348		 */
4349		(void) dmu_objset_find(spa_name(spa),
4350		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
4351
4352		/*
4353		 * Clean up any stale temporary dataset userrefs.
4354		 */
4355		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
4356
4357		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4358		vdev_initialize_restart(spa->spa_root_vdev);
4359		vdev_trim_restart(spa->spa_root_vdev);
4360		vdev_autotrim_restart(spa);
4361		spa_config_exit(spa, SCL_CONFIG, FTAG);
4362	}
4363
4364	spa_import_progress_remove(spa);
4365	spa_load_note(spa, "LOADED");
4366
4367	return (0);
4368}
4369
4370static int
4371spa_load_retry(spa_t *spa, spa_load_state_t state)
4372{
4373	int mode = spa->spa_mode;
4374
4375	spa_unload(spa);
4376	spa_deactivate(spa);
4377
4378	spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
4379
4380	spa_activate(spa, mode);
4381	spa_async_suspend(spa);
4382
4383	spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
4384	    (u_longlong_t)spa->spa_load_max_txg);
4385
4386	return (spa_load(spa, state, SPA_IMPORT_EXISTING));
4387}
4388
4389/*
 * If spa_load() fails this function will try loading prior txgs. If
4391 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
4392 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
4393 * function will not rewind the pool and will return the same error as
4394 * spa_load().
4395 */
4396static int
4397spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
4398    int rewind_flags)
4399{
4400	nvlist_t *loadinfo = NULL;
4401	nvlist_t *config = NULL;
4402	int load_error, rewind_error;
4403	uint64_t safe_rewind_txg;
4404	uint64_t min_txg;
4405
4406	if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
4407		spa->spa_load_max_txg = spa->spa_load_txg;
4408		spa_set_log_state(spa, SPA_LOG_CLEAR);
4409	} else {
4410		spa->spa_load_max_txg = max_request;
4411		if (max_request != UINT64_MAX)
4412			spa->spa_extreme_rewind = B_TRUE;
4413	}
4414
4415	load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
4416	if (load_error == 0)
4417		return (0);
4418	if (load_error == ZFS_ERR_NO_CHECKPOINT) {
4419		/*
4420		 * When attempting checkpoint-rewind on a pool with no
4421		 * checkpoint, we should not attempt to load uberblocks
4422		 * from previous txgs when spa_load fails.
4423		 */
4424		ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
4425		spa_import_progress_remove(spa);
4426		return (load_error);
4427	}
4428
4429	if (spa->spa_root_vdev != NULL)
4430		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4431
4432	spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
4433	spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
4434
4435	if (rewind_flags & ZPOOL_NEVER_REWIND) {
4436		nvlist_free(config);
4437		spa_import_progress_remove(spa);
4438		return (load_error);
4439	}
4440
4441	if (state == SPA_LOAD_RECOVER) {
4442		/* Price of rolling back is discarding txgs, including log */
4443		spa_set_log_state(spa, SPA_LOG_CLEAR);
4444	} else {
4445		/*
4446		 * If we aren't rolling back save the load info from our first
4447		 * import attempt so that we can restore it after attempting
4448		 * to rewind.
4449		 */
4450		loadinfo = spa->spa_load_info;
4451		spa->spa_load_info = fnvlist_alloc();
4452	}
4453
4454	spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
4455	safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
4456	min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
4457	    TXG_INITIAL : safe_rewind_txg;
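	/*
	 * As a worked example: with spa_last_ubsync_txg == 100 and the
	 * usual TXG_DEFER_SIZE of 2, the safe rewind window covers txgs
	 * 98 through 100, while an extreme rewind widens the search all
	 * the way back to TXG_INITIAL.
	 */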
4458
4459	/*
4460	 * Continue as long as we're finding errors, we're still within
4461	 * the acceptable rewind range, and we're still finding uberblocks
	 * the acceptable rewind range, and we're still finding uberblocks.
4463	while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
4464	    spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
4465		if (spa->spa_load_max_txg < safe_rewind_txg)
4466			spa->spa_extreme_rewind = B_TRUE;
4467		rewind_error = spa_load_retry(spa, state);
4468	}
4469
4470	spa->spa_extreme_rewind = B_FALSE;
4471	spa->spa_load_max_txg = UINT64_MAX;
4472
4473	if (config && (rewind_error || state != SPA_LOAD_RECOVER))
4474		spa_config_set(spa, config);
4475	else
4476		nvlist_free(config);
4477
4478	if (state == SPA_LOAD_RECOVER) {
4479		ASSERT3P(loadinfo, ==, NULL);
4480		spa_import_progress_remove(spa);
4481		return (rewind_error);
4482	} else {
4483		/* Store the rewind info as part of the initial load info */
4484		fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
4485		    spa->spa_load_info);
4486
4487		/* Restore the initial load info */
4488		fnvlist_free(spa->spa_load_info);
4489		spa->spa_load_info = loadinfo;
4490
4491		spa_import_progress_remove(spa);
4492		return (load_error);
4493	}
4494}
4495
4496/*
4497 * Pool Open/Import
4498 *
4499 * The import case is identical to an open except that the configuration is sent
4500 * down from userland, instead of grabbed from the configuration cache.  For the
4501 * case of an open, the pool configuration will exist in the
4502 * POOL_STATE_UNINITIALIZED state.
4503 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
4507 */
4508static int
4509spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
4510    nvlist_t **config)
4511{
4512	spa_t *spa;
4513	spa_load_state_t state = SPA_LOAD_OPEN;
4514	int error;
4515	int locked = B_FALSE;
4516
4517	*spapp = NULL;
4518
4519	/*
4520	 * As disgusting as this is, we need to support recursive calls to this
4521	 * function because dsl_dir_open() is called during spa_load(), and ends
4522	 * up calling spa_open() again.  The real fix is to figure out how to
4523	 * avoid dsl_dir_open() calling this in the first place.
4524	 */
4525	if (mutex_owner(&spa_namespace_lock) != curthread) {
4526		mutex_enter(&spa_namespace_lock);
4527		locked = B_TRUE;
4528	}
4529
4530	if ((spa = spa_lookup(pool)) == NULL) {
4531		if (locked)
4532			mutex_exit(&spa_namespace_lock);
4533		return (SET_ERROR(ENOENT));
4534	}
4535
4536	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
4537		zpool_load_policy_t policy;
4538
4539		zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
4540		    &policy);
4541		if (policy.zlp_rewind & ZPOOL_DO_REWIND)
4542			state = SPA_LOAD_RECOVER;
4543
4544		spa_activate(spa, spa_mode_global);
4545
4546		if (state != SPA_LOAD_RECOVER)
4547			spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
4548		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
4549
4550		zfs_dbgmsg("spa_open_common: opening %s", pool);
4551		error = spa_load_best(spa, state, policy.zlp_txg,
4552		    policy.zlp_rewind);
4553
4554		if (error == EBADF) {
4555			/*
4556			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it means that one of the vdevs indicates
4558			 * that the pool has been exported or destroyed.  If
4559			 * this is the case, the config cache is out of sync and
4560			 * we should remove the pool from the namespace.
4561			 */
4562			spa_unload(spa);
4563			spa_deactivate(spa);
4564			spa_write_cachefile(spa, B_TRUE, B_TRUE);
4565			spa_remove(spa);
4566			if (locked)
4567				mutex_exit(&spa_namespace_lock);
4568			return (SET_ERROR(ENOENT));
4569		}
4570
4571		if (error) {
4572			/*
4573			 * We can't open the pool, but we still have useful
4574			 * information: the state of each vdev after the
4575			 * attempted vdev_open().  Return this to the user.
4576			 */
4577			if (config != NULL && spa->spa_config) {
4578				VERIFY(nvlist_dup(spa->spa_config, config,
4579				    KM_SLEEP) == 0);
4580				VERIFY(nvlist_add_nvlist(*config,
4581				    ZPOOL_CONFIG_LOAD_INFO,
4582				    spa->spa_load_info) == 0);
4583			}
4584			spa_unload(spa);
4585			spa_deactivate(spa);
4586			spa->spa_last_open_failed = error;
4587			if (locked)
4588				mutex_exit(&spa_namespace_lock);
4589			*spapp = NULL;
4590			return (error);
4591		}
4592	}
4593
4594	spa_open_ref(spa, tag);
4595
4596	if (config != NULL)
4597		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
4598
4599	/*
4600	 * If we've recovered the pool, pass back any information we
4601	 * gathered while doing the load.
4602	 */
4603	if (state == SPA_LOAD_RECOVER) {
4604		VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
4605		    spa->spa_load_info) == 0);
4606	}
4607
4608	if (locked) {
4609		spa->spa_last_open_failed = 0;
4610		spa->spa_last_ubsync_txg = 0;
4611		spa->spa_load_txg = 0;
4612		mutex_exit(&spa_namespace_lock);
4613	}
4614
4615	*spapp = spa;
4616
4617	return (0);
4618}
4619
4620int
4621spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
4622    nvlist_t **config)
4623{
4624	return (spa_open_common(name, spapp, tag, policy, config));
4625}
4626
4627int
4628spa_open(const char *name, spa_t **spapp, void *tag)
4629{
4630	return (spa_open_common(name, spapp, tag, NULL, NULL));
4631}
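
/*
 * Example usage, as a minimal sketch ("tank" is a hypothetical pool name
 * and error handling around the pool operations is elided):
 *
 *	spa_t *spa;
 *
 *	if (spa_open("tank", &spa, FTAG) == 0) {
 *		... operate on the opened pool ...
 *		spa_close(spa, FTAG);
 *	}
 */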
4632
4633/*
 * Look up the given spa_t, incrementing the inject count in the process,
4635 * preventing it from being exported or destroyed.
4636 */
4637spa_t *
4638spa_inject_addref(char *name)
4639{
4640	spa_t *spa;
4641
4642	mutex_enter(&spa_namespace_lock);
4643	if ((spa = spa_lookup(name)) == NULL) {
4644		mutex_exit(&spa_namespace_lock);
4645		return (NULL);
4646	}
4647	spa->spa_inject_ref++;
4648	mutex_exit(&spa_namespace_lock);
4649
4650	return (spa);
4651}
4652
4653void
4654spa_inject_delref(spa_t *spa)
4655{
4656	mutex_enter(&spa_namespace_lock);
4657	spa->spa_inject_ref--;
4658	mutex_exit(&spa_namespace_lock);
4659}
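
/*
 * A minimal sketch of the expected pairing:
 *
 *	spa_t *spa = spa_inject_addref(name);
 *	if (spa != NULL) {
 *		... perform fault injection ...
 *		spa_inject_delref(spa);
 *	}
 */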
4660
4661/*
4662 * Add spares device information to the nvlist.
4663 */
4664static void
4665spa_add_spares(spa_t *spa, nvlist_t *config)
4666{
4667	nvlist_t **spares;
4668	uint_t i, nspares;
4669	nvlist_t *nvroot;
4670	uint64_t guid;
4671	vdev_stat_t *vs;
4672	uint_t vsc;
4673	uint64_t pool;
4674
4675	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4676
4677	if (spa->spa_spares.sav_count == 0)
4678		return;
4679
4680	VERIFY(nvlist_lookup_nvlist(config,
4681	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4682	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
4683	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
4684	if (nspares != 0) {
4685		VERIFY(nvlist_add_nvlist_array(nvroot,
4686		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
4687		VERIFY(nvlist_lookup_nvlist_array(nvroot,
4688		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
4689
4690		/*
4691		 * Go through and find any spares which have since been
		 * repurposed as active spares.  If this is the case, update
4693		 * their status appropriately.
4694		 */
4695		for (i = 0; i < nspares; i++) {
4696			VERIFY(nvlist_lookup_uint64(spares[i],
4697			    ZPOOL_CONFIG_GUID, &guid) == 0);
4698			if (spa_spare_exists(guid, &pool, NULL) &&
4699			    pool != 0ULL) {
4700				VERIFY(nvlist_lookup_uint64_array(
4701				    spares[i], ZPOOL_CONFIG_VDEV_STATS,
4702				    (uint64_t **)&vs, &vsc) == 0);
4703				vs->vs_state = VDEV_STATE_CANT_OPEN;
4704				vs->vs_aux = VDEV_AUX_SPARED;
4705			}
4706		}
4707	}
4708}
4709
4710/*
4711 * Add l2cache device information to the nvlist, including vdev stats.
4712 */
4713static void
4714spa_add_l2cache(spa_t *spa, nvlist_t *config)
4715{
4716	nvlist_t **l2cache;
4717	uint_t i, j, nl2cache;
4718	nvlist_t *nvroot;
4719	uint64_t guid;
4720	vdev_t *vd;
4721	vdev_stat_t *vs;
4722	uint_t vsc;
4723
4724	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4725
4726	if (spa->spa_l2cache.sav_count == 0)
4727		return;
4728
4729	VERIFY(nvlist_lookup_nvlist(config,
4730	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4731	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
4732	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
4733	if (nl2cache != 0) {
4734		VERIFY(nvlist_add_nvlist_array(nvroot,
4735		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
4736		VERIFY(nvlist_lookup_nvlist_array(nvroot,
4737		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
4738
4739		/*
4740		 * Update level 2 cache device stats.
4741		 */
4742
4743		for (i = 0; i < nl2cache; i++) {
4744			VERIFY(nvlist_lookup_uint64(l2cache[i],
4745			    ZPOOL_CONFIG_GUID, &guid) == 0);
4746
4747			vd = NULL;
4748			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
4749				if (guid ==
4750				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
4751					vd = spa->spa_l2cache.sav_vdevs[j];
4752					break;
4753				}
4754			}
4755			ASSERT(vd != NULL);
4756
4757			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
4758			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
4759			    == 0);
4760			vdev_get_stats(vd, vs);
4761			vdev_config_generate_stats(vd, l2cache[i]);
4763		}
4764	}
4765}
4766
4767static void
4768spa_add_feature_stats(spa_t *spa, nvlist_t *config)
4769{
4770	nvlist_t *features;
4771	zap_cursor_t zc;
4772	zap_attribute_t za;
4773
4774	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
4775	VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4776
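	/*
	 * Walk each feature ZAP with the usual zap cursor pattern (init,
	 * retrieve until exhaustion, advance, fini).  Each attribute maps
	 * a feature guid name to a single uint64_t reference count.
	 */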
4777	if (spa->spa_feat_for_read_obj != 0) {
4778		for (zap_cursor_init(&zc, spa->spa_meta_objset,
4779		    spa->spa_feat_for_read_obj);
4780		    zap_cursor_retrieve(&zc, &za) == 0;
4781		    zap_cursor_advance(&zc)) {
4782			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
4783			    za.za_num_integers == 1);
4784			VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
4785			    za.za_first_integer));
4786		}
4787		zap_cursor_fini(&zc);
4788	}
4789
4790	if (spa->spa_feat_for_write_obj != 0) {
4791		for (zap_cursor_init(&zc, spa->spa_meta_objset,
4792		    spa->spa_feat_for_write_obj);
4793		    zap_cursor_retrieve(&zc, &za) == 0;
4794		    zap_cursor_advance(&zc)) {
4795			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
4796			    za.za_num_integers == 1);
4797			VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
4798			    za.za_first_integer));
4799		}
4800		zap_cursor_fini(&zc);
4801	}
4802
4803	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
4804	    features) == 0);
4805	nvlist_free(features);
4806}
4807
4808int
4809spa_get_stats(const char *name, nvlist_t **config,
4810    char *altroot, size_t buflen)
4811{
4812	int error;
4813	spa_t *spa;
4814
4815	*config = NULL;
4816	error = spa_open_common(name, &spa, FTAG, NULL, config);
4817
4818	if (spa != NULL) {
4819		/*
4820		 * This still leaves a window of inconsistency where the spares
4821		 * or l2cache devices could change and the config would be
4822		 * self-inconsistent.
4823		 */
4824		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4825
4826		if (*config != NULL) {
4827			uint64_t loadtimes[2];
4828
4829			loadtimes[0] = spa->spa_loaded_ts.tv_sec;
4830			loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
4831			VERIFY(nvlist_add_uint64_array(*config,
4832			    ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
4833
4834			VERIFY(nvlist_add_uint64(*config,
4835			    ZPOOL_CONFIG_ERRCOUNT,
4836			    spa_get_errlog_size(spa)) == 0);
4837
4838			if (spa_suspended(spa)) {
4839				VERIFY(nvlist_add_uint64(*config,
4840				    ZPOOL_CONFIG_SUSPENDED,
4841				    spa->spa_failmode) == 0);
4842				VERIFY(nvlist_add_uint64(*config,
4843				    ZPOOL_CONFIG_SUSPENDED_REASON,
4844				    spa->spa_suspended) == 0);
4845			}
4846
4847			spa_add_spares(spa, *config);
4848			spa_add_l2cache(spa, *config);
4849			spa_add_feature_stats(spa, *config);
4850		}
4851	}
4852
4853	/*
4854	 * We want to get the alternate root even for faulted pools, so we cheat
4855	 * and call spa_lookup() directly.
4856	 */
4857	if (altroot) {
4858		if (spa == NULL) {
4859			mutex_enter(&spa_namespace_lock);
4860			spa = spa_lookup(name);
4861			if (spa)
4862				spa_altroot(spa, altroot, buflen);
4863			else
4864				altroot[0] = '\0';
4865			spa = NULL;
4866			mutex_exit(&spa_namespace_lock);
4867		} else {
4868			spa_altroot(spa, altroot, buflen);
4869		}
4870	}
4871
4872	if (spa != NULL) {
4873		spa_config_exit(spa, SCL_CONFIG, FTAG);
4874		spa_close(spa, FTAG);
4875	}
4876
4877	return (error);
4878}
4879
4880/*
4881 * Validate that the auxiliary device array is well formed.  We must have an
 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
4883 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
4884 * specified, as long as they are well-formed.
4885 */
4886static int
4887spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
4888    spa_aux_vdev_t *sav, const char *config, uint64_t version,
4889    vdev_labeltype_t label)
4890{
4891	nvlist_t **dev;
4892	uint_t i, ndev;
4893	vdev_t *vd;
4894	int error;
4895
4896	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4897
4898	/*
4899	 * It's acceptable to have no devs specified.
4900	 */
4901	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
4902		return (0);
4903
4904	if (ndev == 0)
4905		return (SET_ERROR(EINVAL));
4906
4907	/*
4908	 * Make sure the pool is formatted with a version that supports this
4909	 * device type.
4910	 */
4911	if (spa_version(spa) < version)
4912		return (SET_ERROR(ENOTSUP));
4913
4914	/*
4915	 * Set the pending device list so we correctly handle device in-use
4916	 * checking.
4917	 */
4918	sav->sav_pending = dev;
4919	sav->sav_npending = ndev;
4920
4921	for (i = 0; i < ndev; i++) {
4922		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
4923		    mode)) != 0)
4924			goto out;
4925
4926		if (!vd->vdev_ops->vdev_op_leaf) {
4927			vdev_free(vd);
4928			error = SET_ERROR(EINVAL);
4929			goto out;
4930		}
4931
4932		vd->vdev_top = vd;
4933
4934		if ((error = vdev_open(vd)) == 0 &&
4935		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
4936			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
4937			    vd->vdev_guid) == 0);
4938		}
4939
4940		vdev_free(vd);
4941
4942		if (error &&
4943		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
4944			goto out;
4945		else
4946			error = 0;
4947	}
4948
4949out:
4950	sav->sav_pending = NULL;
4951	sav->sav_npending = 0;
4952	return (error);
4953}
4954
4955static int
4956spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
4957{
4958	int error;
4959
4960	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
4961
4962	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
4963	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
4964	    VDEV_LABEL_SPARE)) != 0) {
4965		return (error);
4966	}
4967
4968	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
4969	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
4970	    VDEV_LABEL_L2CACHE));
4971}
4972
4973static void
4974spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
4975    const char *config)
4976{
4977	int i;
4978
4979	if (sav->sav_config != NULL) {
4980		nvlist_t **olddevs;
4981		uint_t oldndevs;
4982		nvlist_t **newdevs;
4983
4984		/*
		 * Generate a new dev list by concatenating with the
4986		 * current dev list.
4987		 */
4988		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
4989		    &olddevs, &oldndevs) == 0);
4990
4991		newdevs = kmem_alloc(sizeof (void *) *
4992		    (ndevs + oldndevs), KM_SLEEP);
4993		for (i = 0; i < oldndevs; i++)
4994			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
4995			    KM_SLEEP) == 0);
4996		for (i = 0; i < ndevs; i++)
4997			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
4998			    KM_SLEEP) == 0);
4999
5000		VERIFY(nvlist_remove(sav->sav_config, config,
5001		    DATA_TYPE_NVLIST_ARRAY) == 0);
5002
5003		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
5004		    config, newdevs, ndevs + oldndevs) == 0);
5005		for (i = 0; i < oldndevs + ndevs; i++)
5006			nvlist_free(newdevs[i]);
5007		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
5008	} else {
5009		/*
5010		 * Generate a new dev list.
5011		 */
5012		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
5013		    KM_SLEEP) == 0);
5014		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
5015		    devs, ndevs) == 0);
5016	}
5017}
5018
5019/*
5020 * Stop and drop level 2 ARC devices
5021 */
5022void
5023spa_l2cache_drop(spa_t *spa)
5024{
5025	vdev_t *vd;
5026	int i;
5027	spa_aux_vdev_t *sav = &spa->spa_l2cache;
5028
5029	for (i = 0; i < sav->sav_count; i++) {
5030		uint64_t pool;
5031
5032		vd = sav->sav_vdevs[i];
5033		ASSERT(vd != NULL);
5034
5035		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
5036		    pool != 0ULL && l2arc_vdev_present(vd))
5037			l2arc_remove_vdev(vd);
5038	}
5039}
5040
5041/*
5042 * Verify encryption parameters for spa creation. If we are encrypting, we must
5043 * have the encryption feature flag enabled.
5044 */
5045static int
5046spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
5047    boolean_t has_encryption)
5048{
5049	if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
5050	    dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
5051	    !has_encryption)
5052		return (SET_ERROR(ENOTSUP));
5053
5054	return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
5055}
5056
5057/*
5058 * Pool Creation
5059 */
5060int
5061spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
5062    nvlist_t *zplprops, dsl_crypto_params_t *dcp)
5063{
5064	spa_t *spa;
5065	char *altroot = NULL;
5066	vdev_t *rvd;
5067	dsl_pool_t *dp;
5068	dmu_tx_t *tx;
5069	int error = 0;
5070	uint64_t txg = TXG_INITIAL;
5071	nvlist_t **spares, **l2cache;
5072	uint_t nspares, nl2cache;
5073	uint64_t version, obj;
5074	boolean_t has_features;
5075	char *poolname;
5076	nvlist_t *nvl;
5077	boolean_t has_encryption;
5078	spa_feature_t feat;
5079	char *feat_name;
5080
5081	if (props == NULL ||
5082	    nvlist_lookup_string(props,
5083	    zpool_prop_to_name(ZPOOL_PROP_TNAME), &poolname) != 0)
5084		poolname = (char *)pool;
5085
5086	/*
5087	 * If this pool already exists, return failure.
5088	 */
5089	mutex_enter(&spa_namespace_lock);
5090	if (spa_lookup(poolname) != NULL) {
5091		mutex_exit(&spa_namespace_lock);
5092		return (SET_ERROR(EEXIST));
5093	}
5094
5095	/*
5096	 * Allocate a new spa_t structure.
5097	 */
5098	nvl = fnvlist_alloc();
5099	fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
5100	(void) nvlist_lookup_string(props,
5101	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
5102	spa = spa_add(poolname, nvl, altroot);
5103	fnvlist_free(nvl);
5104	spa_activate(spa, spa_mode_global);
5105
5106	if (props && (error = spa_prop_validate(spa, props))) {
5107		spa_deactivate(spa);
5108		spa_remove(spa);
5109		mutex_exit(&spa_namespace_lock);
5110		return (error);
5111	}
5112
5113	/*
5114	 * Temporary pool names should never be written to disk.
5115	 */
5116	if (poolname != pool)
5117		spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
5118
5119	has_features = B_FALSE;
5120	has_encryption = B_FALSE;
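	/*
	 * Feature properties are named "feature@<name>"; strip everything
	 * up to and including the '@' to look up the feature, noting
	 * whether encryption is among those requested.
	 */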
5121	for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
5122	    elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
5123		if (zpool_prop_feature(nvpair_name(elem))) {
5124			has_features = B_TRUE;
5125			feat_name = strchr(nvpair_name(elem), '@') + 1;
5126			VERIFY0(zfeature_lookup_name(feat_name, &feat));
5127			if (feat == SPA_FEATURE_ENCRYPTION)
5128				has_encryption = B_TRUE;
5129		}
5130	}
5131
5132	/* verify encryption params, if they were provided */
5133	if (dcp != NULL) {
5134		error = spa_create_check_encryption_params(dcp, has_encryption);
5135		if (error != 0) {
5136			spa_deactivate(spa);
5137			spa_remove(spa);
5138			mutex_exit(&spa_namespace_lock);
5139			return (error);
5140		}
5141	}
5142
5143	if (has_features || nvlist_lookup_uint64(props,
5144	    zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
5145		version = SPA_VERSION;
5146	}
5147	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
5148
5149	spa->spa_first_txg = txg;
5150	spa->spa_uberblock.ub_txg = txg - 1;
5151	spa->spa_uberblock.ub_version = version;
5152	spa->spa_ubsync = spa->spa_uberblock;
5153	spa->spa_load_state = SPA_LOAD_CREATE;
5154	spa->spa_removing_phys.sr_state = DSS_NONE;
5155	spa->spa_removing_phys.sr_removing_vdev = -1;
5156	spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
5157	spa->spa_indirect_vdevs_loaded = B_TRUE;
5158
5159	/*
5160	 * Create "The Godfather" zio to hold all async IOs
5161	 */
5162	spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
5163	    KM_SLEEP);
5164	for (int i = 0; i < max_ncpus; i++) {
5165		spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
5166		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
5167		    ZIO_FLAG_GODFATHER);
5168	}
5169
5170	/*
5171	 * Create the root vdev.
5172	 */
5173	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5174
5175	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
5176
5177	ASSERT(error != 0 || rvd != NULL);
5178	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
5179
5180	if (error == 0 && !zfs_allocatable_devs(nvroot))
5181		error = SET_ERROR(EINVAL);
5182
5183	if (error == 0 &&
5184	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
5185	    (error = spa_validate_aux(spa, nvroot, txg,
5186	    VDEV_ALLOC_ADD)) == 0) {
5187		/*
		 * Instantiate the metaslab groups (this will dirty the vdevs);
		 * we can no longer error exit past this point.
5190		 */
5191		for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
5192			vdev_t *vd = rvd->vdev_child[c];
5193
5194			vdev_metaslab_set_size(vd);
5195			vdev_expand(vd, txg);
5196		}
5197	}
5198
5199	spa_config_exit(spa, SCL_ALL, FTAG);
5200
5201	if (error != 0) {
5202		spa_unload(spa);
5203		spa_deactivate(spa);
5204		spa_remove(spa);
5205		mutex_exit(&spa_namespace_lock);
5206		return (error);
5207	}
5208
5209	/*
5210	 * Get the list of spares, if specified.
5211	 */
5212	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
5213	    &spares, &nspares) == 0) {
5214		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
5215		    KM_SLEEP) == 0);
5216		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
5217		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
5218		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5219		spa_load_spares(spa);
5220		spa_config_exit(spa, SCL_ALL, FTAG);
5221		spa->spa_spares.sav_sync = B_TRUE;
5222	}
5223
5224	/*
5225	 * Get the list of level 2 cache devices, if specified.
5226	 */
5227	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
5228	    &l2cache, &nl2cache) == 0) {
5229		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
5230		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
5231		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
5232		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
5233		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
5234		spa_load_l2cache(spa);
5235		spa_config_exit(spa, SCL_ALL, FTAG);
5236		spa->spa_l2cache.sav_sync = B_TRUE;
5237	}
5238
5239	spa->spa_is_initializing = B_TRUE;
5240	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
5241	spa->spa_is_initializing = B_FALSE;
5242
5243	/*
5244	 * Create DDTs (dedup tables).
5245	 */
5246	ddt_create(spa);
5247
5248	spa_update_dspace(spa);
5249
5250	tx = dmu_tx_create_assigned(dp, txg);
5251
5252	/*
5253	 * Create the pool config object.
5254	 */
5255	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
5256	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
5257	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
5258
5259	if (zap_add(spa->spa_meta_objset,
5260	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
5261	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
5262		cmn_err(CE_PANIC, "failed to add pool config");
5263	}
5264
5265	if (zap_add(spa->spa_meta_objset,
5266	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
5267	    sizeof (uint64_t), 1, &version, tx) != 0) {
5268		cmn_err(CE_PANIC, "failed to add pool version");
5269	}
5270
5271	/* Newly created pools with the right version are always deflated. */
5272	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
5273		spa->spa_deflate = TRUE;
5274		if (zap_add(spa->spa_meta_objset,
5275		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
5276		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
5277			cmn_err(CE_PANIC, "failed to add deflate");
5278		}
5279	}
5280
5281	/*
5282	 * Create the deferred-free bpobj.  Turn off compression
5283	 * because sync-to-convergence takes longer if the blocksize
5284	 * keeps changing.
5285	 */
5286	obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
5287	dmu_object_set_compress(spa->spa_meta_objset, obj,
5288	    ZIO_COMPRESS_OFF, tx);
5289	if (zap_add(spa->spa_meta_objset,
5290	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
5291	    sizeof (uint64_t), 1, &obj, tx) != 0) {
5292		cmn_err(CE_PANIC, "failed to add bpobj");
5293	}
5294	VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
5295	    spa->spa_meta_objset, obj));
5296
5297	/*
5298	 * Create the pool's history object.
5299	 */
5300	if (version >= SPA_VERSION_ZPOOL_HISTORY)
5301		spa_history_create_obj(spa, tx);
5302
5303	/*
5304	 * Generate some random noise for salted checksums to operate on.
5305	 */
5306	(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
5307	    sizeof (spa->spa_cksum_salt.zcs_bytes));
5308
5309	/*
5310	 * Set pool properties.
5311	 */
5312	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
5313	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
5314	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
5315	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
5316	spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
5317	spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
5318
5319	if (props != NULL) {
5320		spa_configfile_set(spa, props, B_FALSE);
5321		spa_sync_props(props, tx);
5322	}
5323
5324	dmu_tx_commit(tx);
5325
5326	spa->spa_sync_on = B_TRUE;
5327	txg_sync_start(spa->spa_dsl_pool);
5328	mmp_thread_start(spa);
5329
5330	/*
5331	 * We explicitly wait for the first transaction to complete so that our
5332	 * bean counters are appropriately updated.
5333	 */
5334	txg_wait_synced(spa->spa_dsl_pool, txg);
5335
5336	spa_spawn_aux_threads(spa);
5337
5338	spa_write_cachefile(spa, B_FALSE, B_TRUE);
5339	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
5340
5341	spa_history_log_version(spa, "create");
5342
5343	/*
5344	 * Don't count references from objsets that are already closed
5345	 * and are making their way through the eviction process.
5346	 */
5347	spa_evicting_os_wait(spa);
5348	spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
5349	spa->spa_load_state = SPA_LOAD_NONE;
5350
5351	mutex_exit(&spa_namespace_lock);
5352
5353	return (0);
5354}
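
/*
 * Example usage, as a minimal sketch ("tank" and nvroot are hypothetical;
 * passing NULL props, zplprops, and crypto params accepts the defaults):
 *
 *	error = spa_create("tank", nvroot, NULL, NULL, NULL);
 */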
5355
5356#ifdef _KERNEL
5357/*
5358 * Get the root pool information from the root disk, then import the root pool
 * at system boot time.
5360 */
5361extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
5362
5363static nvlist_t *
5364spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
5365{
5366	nvlist_t *config;
5367	nvlist_t *nvtop, *nvroot;
5368	uint64_t pgid;
5369
5370	if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
5371		return (NULL);
5372
5373	/*
5374	 * Add this top-level vdev to the child array.
5375	 */
	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvtop) == 0);
	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pgid) == 0);
	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);

	/*
	 * Put this pool's top-level vdevs into a root vdev.
	 */
	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &nvtop, 1) == 0);

	/*
	 * Replace the existing vdev_tree with the new root vdev in
	 * this pool's configuration (remove the old, add the new).
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);
	return (config);
}

/*
 * Walk the vdev tree and see if we can find a device with "better"
 * configuration. A configuration is "better" if the label on that
 * device has a more recent txg.
 */
static void
spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
{
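	/*
	 * Depth-first walk: visit all children first, then examine this
	 * vdev's own label if it is a leaf.
	 */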
	for (int c = 0; c < vd->vdev_children; c++)
		spa_alt_rootvdev(vd->vdev_child[c], avd, txg);

	if (vd->vdev_ops->vdev_op_leaf) {
		nvlist_t *label;
		uint64_t label_txg;

		if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
		    &label) != 0)
			return;

		VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
		    &label_txg) == 0);

		/*
		 * Do we have a better boot device?
		 */
		if (label_txg > *txg) {
			*txg = label_txg;
			*avd = vd;
		}
		nvlist_free(label);
	}
}

/*
 * Import a root pool.
 *
 * For x86, devpath_list will consist of the devid and/or physpath name of
 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
 * The GRUB "findroot" command will return the vdev we should boot from.
 *
 * For SPARC, devpath_list consists of the physpath name of the booting
 * device, regardless of whether the root pool is a single-device or a
 * mirrored pool, e.g.:
 *	"/pci@1f,0/ide@d/disk@0,0:a"
 */
int
spa_import_rootpool(char *devpath, char *devid)
{
	spa_t *spa;
	vdev_t *rvd, *bvd, *avd = NULL;
	nvlist_t *config, *nvtop;
	uint64_t guid, txg;
	char *pname;
	int error;

	/*
	 * Read the label from the boot device and generate a configuration.
	 */
	config = spa_generate_rootconf(devpath, devid, &guid);
#if defined(_OBP) && defined(_KERNEL)
	if (config == NULL) {
		if (strstr(devpath, "/iscsi/ssd") != NULL) {
			/* iscsi boot */
			get_iscsi_bootpath_phy(devpath);
			config = spa_generate_rootconf(devpath, devid, &guid);
		}
	}
#endif
	if (config == NULL) {
		cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
		    devpath);
		return (SET_ERROR(EIO));
	}

	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &pname) == 0);
	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pname)) != NULL) {
		/*
		 * Remove the existing root pool from the namespace so that we
		 * can replace it with the correct config we just read in.
		 */
		spa_remove(spa);
	}

	spa = spa_add(pname, config, NULL);
	spa->spa_is_root = B_TRUE;
	spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &spa->spa_ubsync.ub_version) != 0)
		spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;

	/*
	 * Build up a vdev tree based on the boot device's label config.
	 */
	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvtop) == 0);
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
	    VDEV_ALLOC_ROOTPOOL);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error) {
		mutex_exit(&spa_namespace_lock);
		nvlist_free(config);
		cmn_err(CE_NOTE, "Cannot parse the config for pool '%s'",
		    pname);
		return (error);
	}

	/*
	 * Get the boot vdev.
	 */
	if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
		cmn_err(CE_NOTE, "Cannot find the boot vdev for guid %llu",
		    (u_longlong_t)guid);
		error = SET_ERROR(ENOENT);
		goto out;
	}

	/*
	 * Determine if there is a better boot device.
	 */
	avd = bvd;
	spa_alt_rootvdev(rvd, &avd, &txg);
	if (avd != bvd) {
		cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
		    "try booting from '%s'", avd->vdev_path);
		error = SET_ERROR(EINVAL);
		goto out;
	}

	/*
	 * If the boot device is part of a spare vdev then ensure that
	 * we're booting off the active spare.
	 */
	if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
	    !bvd->vdev_isspare) {
		cmn_err(CE_NOTE, "The boot device is currently spared. Please "
		    "try booting from '%s'",
		    bvd->vdev_parent->
		    vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
		error = SET_ERROR(EINVAL);
		goto out;
	}

	error = 0;
out:
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_free(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	mutex_exit(&spa_namespace_lock);

	nvlist_free(config);
	return (error);
}

#endif

/*
 * Import a non-root pool into the system.
 */
int
spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
	spa_t *spa;
	char *altroot = NULL;
	spa_load_state_t state = SPA_LOAD_IMPORT;
	zpool_load_policy_t policy;
	uint64_t mode = spa_mode_global;
	uint64_t readonly = B_FALSE;
	int error;
	nvlist_t *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(EEXIST));
	}

	/*
	 * Create and initialize the spa structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	(void) nvlist_lookup_uint64(props,
	    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
	if (readonly)
		mode = FREAD;
	spa = spa_add(pool, config, altroot);
	spa->spa_import_flags = flags;

	/*
	 * Verbatim import - Take a pool and insert it into the namespace
	 * as if it had been loaded at boot.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
		if (props != NULL)
			spa_configfile_set(spa, props, B_FALSE);

		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
		zfs_dbgmsg("spa_import: verbatim import of %s", pool);
		mutex_exit(&spa_namespace_lock);
		return (0);
	}

	spa_activate(spa, mode);

	/*
	 * Don't start async tasks until we know everything is healthy.
	 */
	spa_async_suspend(spa);

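	/*
	 * Honor the caller's load policy: if a rewind was requested, load
	 * the pool in recovery mode so that an earlier txg may be used.
	 */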
	zpool_get_load_policy(config, &policy);
	if (policy.zlp_rewind & ZPOOL_DO_REWIND)
		state = SPA_LOAD_RECOVER;

	spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;

	if (state != SPA_LOAD_RECOVER) {
		spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
		zfs_dbgmsg("spa_import: importing %s", pool);
	} else {
		zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
		    "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
	}
	error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);

	/*
	 * Propagate anything learned while loading the pool and pass it
	 * back to caller (i.e. rewind info, missing devices, etc).
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
	    spa->spa_load_info) == 0);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	/*
	 * Toss any existing sparelist, as it is no longer valid and
	 * conflicts with spa_has_spare().
	 */
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
		spa_load_spares(spa);
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
		spa_load_l2cache(spa);
	}

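	/*
	 * Validate any hot spares and level 2 cache devices named in the
	 * supplied config before we try to use them.
	 */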
	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_SPARE);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_L2CACHE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (props != NULL)
		spa_configfile_set(spa, props, B_FALSE);

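	/*
	 * If the load failed, or if applying the pool properties to a
	 * writable pool failed, back out completely: unload, deactivate,
	 * and remove the pool from the namespace.
	 */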
	if (error != 0 || (props && spa_writeable(spa) &&
	    (error = spa_prop_set(spa, props)))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	spa_async_resume(spa);

	/*
	 * Override any spares and level 2 cache devices as specified by
	 * the user, as these may have correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_spares.sav_config)
			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		if (spa->spa_l2cache.sav_config)
			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	/*
	 * Check for any removed devices.
	 */
	if (spa->spa_autoreplace) {
		spa_aux_check_removed(&spa->spa_spares);
		spa_aux_check_removed(&spa->spa_l2cache);
	}

	if (spa_writeable(spa)) {
		/*
		 * Update the config cache to include the newly-imported pool.
		 */
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
	}

	/*
	 * It's possible that the pool was expanded while it was exported.
	 * We kick off an async task to handle this for us.
	 */
	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);

	spa_history_log_version(spa, "import");

	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = NULL;
	char *poolname, *cachefile;
	spa_t *spa;
	uint64_t state;
	int error;
	zpool_load_policy_t policy;

	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
		return (NULL);

	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
		return (NULL);

	/*
	 * Create and initialize the spa structure.
	 */
	mutex_enter(&spa_namespace_lock);
	spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
	spa_activate(spa, FREAD);

	/*
	 * Rewind pool if a max txg was provided.
	 */
	zpool_get_load_policy(spa->spa_config, &policy);
	if (policy.zlp_txg != UINT64_MAX) {
		spa->spa_load_max_txg = policy.zlp_txg;
		spa->spa_extreme_rewind = B_TRUE;
		zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
		    poolname, (longlong_t)policy.zlp_txg);
	} else {
		zfs_dbgmsg("spa_tryimport: importing %s", poolname);
	}

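	/*
	 * Prefer the cachefile as the config source when the tryconfig
	 * records one; otherwise this config must have come from a
	 * device scan.
	 */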
	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
	    == 0) {
		zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
		spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
	} else {
		spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
	}

	error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);

	/*
	 * If 'tryconfig' was at least parsable, return the current config.
	 */
	if (spa->spa_root_vdev != NULL) {
		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
		    poolname) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    state) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
		    spa->spa_uberblock.ub_timestamp) == 0);
		VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    spa->spa_load_info) == 0);

		/*
		 * If the bootfs property exists on this pool then we
		 * copy it out so that external consumers can tell which
		 * pools are bootable.
		 */
		if ((!error || error == EEXIST) && spa->spa_bootfs) {
			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

			/*
			 * We have to play games with the name since the
			 * pool was opened as TRYIMPORT_NAME.
			 */
			if (dsl_dsobj_to_dsname(spa_name(spa),
			    spa->spa_bootfs, tmpname) == 0) {
				char *cp;
				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

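				/*
				 * If the bootfs name contains a '/',
				 * replace everything up to it (the
				 * TRYIMPORT_NAME placeholder) with the
				 * real pool name; otherwise copy the
				 * name through unchanged.
				 */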
				cp = strchr(tmpname, '/');
				if (cp == NULL) {
					(void) strlcpy(dsname, tmpname,
					    MAXPATHLEN);
				} else {
					(void) snprintf(dsname, MAXPATHLEN,
					    "%s/%s", poolname, ++cp);
				}
				VERIFY(nvlist_add_string(config,
				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
				kmem_free(dsname, MAXPATHLEN);
			}
			kmem_free(tmpname, MAXPATHLEN);
		}

		/*
		 * Add the list of hot spares and level 2 cache devices.
		 */
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		spa_add_spares(spa, config);
		spa_add_l2cache(spa, config);
		spa_config_exit(spa, SCL_CONFIG, FTAG);
	}

	spa_unload(spa);
	spa_deactivate(spa);
	spa_remove(spa);
	mutex_exit(&spa_namespace_lock);

	return (config);
}

/*
 * Pool export/destroy
 *
 * The act of destroying or exporting a pool is very simple.  We make sure there
 * is no more pending I/O and any references to the pool are gone.  Then, we
 * update the pool state and sync all the labels to disk, removing the
 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
 * we don't sync the labels or remove the configuration cache.
 */
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
    boolean_t force, boolean_t hardforce)
{
	spa_t *spa;

	if (oldconfig)
		*oldconfig = NULL;

	if (!(spa_mode_global & FWRITE))
		return (SET_ERROR(EROFS));

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (SET_ERROR(ENOENT));
	}

	/*
	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
	 * reacquire the namespace lock, and see if we can export.
	 */
	spa_open_ref(spa, FTAG);
	mutex_exit(&spa_namespace_lock);
	spa_async_suspend(spa);
	mutex_enter(&spa_namespace_lock);
	spa_close(spa, FTAG);

	/*
	 * The pool will be in core if it's openable,
	 * in which case we can modify its state.
	 */
	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {

		/*
		 * Objsets may be open only because they're dirty, so we
		 * have to force the pool to sync before checking
		 * spa_refcount.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
		spa_evicting_os_wait(spa);

		/*
		 * A pool cannot be exported or destroyed if there are active
		 * references.  If we are resetting a pool, allow references by
		 * fault injection handlers.
		 */
		if (!spa_refcount_zero(spa) ||
		    (spa->spa_inject_ref != 0 &&
		    new_state != POOL_STATE_UNINITIALIZED)) {
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (SET_ERROR(EBUSY));
		}

		/*
		 * A pool cannot be exported if it has an active shared spare.
		 * This is to prevent other pools from stealing the active
		 * spare from an exported pool. The user can override this
		 * by forcing the export.
		 */
		if (!force && new_state == POOL_STATE_EXPORTED &&
		    spa_has_active_shared_spare(spa)) {
			spa_async_resume(spa);
			mutex_exit(&spa_namespace_lock);
			return (SET_ERROR(EXDEV));
		}

		/*
		 * We're about to export or destroy this pool. Make sure
		 * we stop all initialization and trim activity here before
		 * we set the spa_final_txg. This will ensure that all
		 * dirty data resulting from the initialization and trim is
		 * committed to disk before we unload the pool.
		 */
		if (spa->spa_root_vdev != NULL) {
			vdev_t *rvd = spa->spa_root_vdev;
			vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
			vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
			vdev_autotrim_stop_all(spa);
		}

		/*
		 * We want this to be reflected on every label,
		 * so mark them all dirty.  spa_unload() will do the
		 * final sync that pushes these changes out.
		 */
		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
			spa->spa_state = new_state;
			spa->spa_final_txg = spa_last_synced_txg(spa) +
			    TXG_DEFER_SIZE + 1;
			vdev_config_dirty(spa->spa_root_vdev);
			spa_config_exit(spa, SCL_ALL, FTAG);
		}
	}

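	/*
	 * Notify interested consumers that the pool is going away. Note
	 * that the same event class is posted whether the pool is being
	 * exported, destroyed, or reset.
	 */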
	spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);

	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
		spa_unload(spa);
		spa_deactivate(spa);
	}

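	/*
	 * Hand the last known config back to the caller and, unless this
	 * is a hard-forced export, remove the pool from the cachefile
	 * before dropping it from the namespace.
	 */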
	if (oldconfig && spa->spa_config)
		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);

	if (new_state != POOL_STATE_UNINITIALIZED) {
		if (!hardforce)
			spa_write_cachefile(spa, B_TRUE, B_TRUE);
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Destroy a storage pool.
 */
int
spa_destroy(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
	    B_FALSE, B_FALSE));
}

/*
 * Export a storage pool.
 */
int
spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
    boolean_t hardforce)
{
	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
	    force, hardforce));
}

/*
 * Similar to spa_export(), this unloads the spa_t without actually removing it
 * from the namespace in any way.
 */
int
spa_reset(char *pool)
{
	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
	    B_FALSE, B_FALSE));
}

/*
 * ==========================================================================
 * Device manipulation
 * ==========================================================================
 */

/*
 * Add a device to a storage pool.
 */
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
	uint64_t txg;
	int error;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd, *tvd;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	ASSERT(spa_writeable(spa));

	txg = spa_vdev_enter(spa);

	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
	    VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, NULL, txg, error));

	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */

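	/*
	 * Extract any hot spare and level 2 cache lists from the nvroot;
	 * absence of either list simply means none are being added.
	 */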
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
	    &nspares) != 0)
		nspares = 0;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
	    &nl2cache) != 0)
		nl2cache = 0;

	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
		return (spa_vdev_exit(spa, vd, txg, EINVAL));

	if (vd->vdev_children != 0 &&
	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
		return (spa_vdev_exit(spa, vd, txg, error));

	/*
	 * We must validate the spares and l2cache devices after checking the
	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
	 */
	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
		return (spa_vdev_exit(spa, vd, txg, error));

	/*
	 * If we are in the middle of a device removal, we can only add
	 * devices which match the existing devices in the pool.
	 * If we are in the middle of a removal, or have some indirect
	 * vdevs, we cannot add raidz toplevels.
	 */
	if (spa->spa_vdev_removal != NULL ||
	    spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
		for (int c = 0; c < vd->vdev_children; c++) {
			tvd = vd->vdev_child[c];
			if (spa->spa_vdev_removal != NULL &&
			    tvd->vdev_ashift != spa->spa_max_ashift) {
				return (spa_vdev_exit(spa, vd, txg, EINVAL));
			}
			/* Fail if top level vdev is raidz */
			if (tvd->vdev_ops == &vdev_raidz_ops) {
				return (spa_vdev_exit(spa, vd, txg, EINVAL));
			}
			/*
			 * Need the top level mirror to be
			 * a mirror of leaf vdevs only
			 */
			if (tvd->vdev_ops == &vdev_mirror_ops) {
				for (uint64_t cid = 0;
				    cid < tvd->vdev_children; cid++) {
					vdev_t *cvd = tvd->vdev_child[cid];
					if (!cvd->vdev_ops->vdev_op_leaf) {
						return (spa_vdev_exit(spa, vd,
						    txg, EINVAL));
					}
				}
			}
		}
	}

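	/*
	 * Graft each new top-level vdev from the temporary tree onto the
	 * pool's root vdev and mark it dirty so the change reaches the
	 * on-disk config.
	 */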
	for (int c = 0; c < vd->vdev_children; c++) {
		tvd = vd->vdev_child[c];
		vdev_remove_child(vd, tvd);
		tvd->vdev_id = rvd->vdev_children;
		vdev_add_child(rvd, tvd);
		vdev_config_dirty(tvd);
	}

	if (nspares != 0) {
		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
		    ZPOOL_CONFIG_SPARES);
		spa_load_spares(spa);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	if (nl2cache != 0) {
		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
		    ZPOOL_CONFIG_L2CACHE);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	/*
	 * We have to be careful when adding new vdevs to an existing pool.
	 * If other threads start allocating from these vdevs before we
	 * sync the config cache, and we lose power, then upon reboot we may
	 * fail to open the pool because there are DVAs that the config cache
	 * can't translate.  Therefore, we first add the vdevs without
	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
	 * and then let spa_config_update() initialize the new metaslabs.
	 *
	 * spa_load() checks for added-but-not-initialized vdevs, so that
	 * if we lose power at any point in this sequence, the remaining
	 * steps will be completed the next time we load the pool.
	 */
	(void) spa_vdev_exit(spa, vd, txg, 0);

	mutex_enter(&spa_namespace_lock);
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
	spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Attach a device to a mirror.  The arguments are the path to any device
 * in the mirror, and the nvroot for the new device.  If the path specifies
 * a device that is not mirrored, we automatically insert the mirror vdev.
 *
 * If 'replacing' is specified, the new device is intended to replace the
 * existing device; in this case the two devices are made into their own
 * mirror using the 'replacing' vdev, which is functionally identical to
 * the mirror vdev (it actually reuses all the same ops) but has a few
 * extra rules: you can't attach to it after it's been created, and upon
 * completion of resilvering, the first disk (the one being replaced)
 * is automatically detached.
 */
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
	uint64_t txg, dtl_max_txg;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
	vdev_ops_t *pvops;
	char *oldvdpath, *newvdpath;
	int newvd_isspare;
	int error;

	ASSERT(spa_writeable(spa));

	txg = spa_vdev_enter(spa);
