spa.c revision 8956713aded83a741173fcd4f9ef1c83521fbea9
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 * This file contains all the routines used when modifying on-disk SPA state.
29 * This includes opening, importing, destroying, exporting a pool, and syncing a
30 * pool.
31 */
32
33#include <sys/zfs_context.h>
34#include <sys/fm/fs/zfs.h>
35#include <sys/spa_impl.h>
36#include <sys/zio.h>
37#include <sys/zio_checksum.h>
38#include <sys/zio_compress.h>
39#include <sys/dmu.h>
40#include <sys/dmu_tx.h>
41#include <sys/zap.h>
42#include <sys/zil.h>
43#include <sys/vdev_impl.h>
44#include <sys/metaslab.h>
45#include <sys/uberblock_impl.h>
46#include <sys/txg.h>
47#include <sys/avl.h>
48#include <sys/dmu_traverse.h>
49#include <sys/dmu_objset.h>
50#include <sys/unique.h>
51#include <sys/dsl_pool.h>
52#include <sys/dsl_dataset.h>
53#include <sys/dsl_dir.h>
54#include <sys/dsl_prop.h>
55#include <sys/dsl_synctask.h>
56#include <sys/fs/zfs.h>
57#include <sys/arc.h>
58#include <sys/callb.h>
59#include <sys/systeminfo.h>
60#include <sys/sunddi.h>
61#include <sys/spa_boot.h>
62
63#ifdef	_KERNEL
64#include <sys/zone.h>
65#endif	/* _KERNEL */
66
67#include "zfs_prop.h"
68#include "zfs_comutil.h"
69
70enum zti_modes {
71	zti_mode_fixed,			/* value is # of threads (min 1) */
72	zti_mode_online_percent,	/* value is % of online CPUs */
73	zti_mode_tune,			/* fill from zio_taskq_tune_* */
74	zti_nmodes
75};
76
77#define	ZTI_THREAD_FIX(n)	{ zti_mode_fixed, (n) }
78#define	ZTI_THREAD_PCT(n)	{ zti_mode_online_percent, (n) }
79#define	ZTI_THREAD_TUNE		{ zti_mode_tune, 0 }
80
81#define	ZTI_THREAD_ONE		ZTI_THREAD_FIX(1)
82
83typedef struct zio_taskq_info {
84	const char *zti_name;
85	struct {
86		enum zti_modes zti_mode;
87		uint_t zti_value;
88	} zti_nthreads[ZIO_TASKQ_TYPES];
89} zio_taskq_info_t;
90
91static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
92				"issue",		"intr"
93};
94
95const zio_taskq_info_t zio_taskqs[ZIO_TYPES] = {
96	/*			ISSUE			INTR		*/
97	{ "spa_zio_null",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
98	{ "spa_zio_read",	{ ZTI_THREAD_FIX(8),	ZTI_THREAD_TUNE } },
99	{ "spa_zio_write",	{ ZTI_THREAD_TUNE,	ZTI_THREAD_FIX(8) } },
100	{ "spa_zio_free",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
101	{ "spa_zio_claim",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
102	{ "spa_zio_ioctl",	{ ZTI_THREAD_ONE,	ZTI_THREAD_ONE } },
103};
104
105enum zti_modes zio_taskq_tune_mode = zti_mode_online_percent;
106uint_t zio_taskq_tune_value = 80;	/* #threads = 80% of # online CPUs */
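
/*
 * A sketch of how the tuning above takes effect: with these defaults, a
 * ZTI_THREAD_TUNE table entry is resolved in spa_activate() into a
 * CPU-percentage taskq, roughly equivalent to
 *
 *	spa->spa_zio_taskq[t][q] = taskq_create(name, 80, maxclsyspri,
 *	    50, INT_MAX, TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
 *
 * so the taskq sizes itself to 80% of the online CPUs, whereas a
 * ZTI_THREAD_FIX(n) entry creates exactly n threads.
 */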
107
108static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
109static boolean_t spa_has_active_shared_spare(spa_t *spa);
110
111/*
112 * ==========================================================================
113 * SPA properties routines
114 * ==========================================================================
115 */
116
117/*
118 * Add a (source=src, propname=propval) list to an nvlist.
119 */
120static void
121spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
122    uint64_t intval, zprop_source_t src)
123{
124	const char *propname = zpool_prop_to_name(prop);
125	nvlist_t *propval;
126
127	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
128	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
129
130	if (strval != NULL)
131		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
132	else
133		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
134
135	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
136	nvlist_free(propval);
137}
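
/*
 * The resulting layout (a sketch of the nvlist built above): each call
 * adds one nested nvlist of the form
 *
 *	<propname> -> { ZPROP_SOURCE = src, ZPROP_VALUE = strval | intval }
 *
 * which is the shape the zpool property code expects when unpacking
 * property values.
 */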
138
139/*
140 * Get property values from the spa configuration.
141 */
142static void
143spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
144{
145	uint64_t size;
146	uint64_t used;
147	uint64_t cap, version;
148	zprop_source_t src = ZPROP_SRC_NONE;
149	spa_config_dirent_t *dp;
150
151	ASSERT(MUTEX_HELD(&spa->spa_props_lock));
152
153	if (spa->spa_root_vdev != NULL) {
154		size = spa_get_space(spa);
155		used = spa_get_alloc(spa);
156		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
157		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
158		spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
159		spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
160		    size - used, src);
161
162		cap = (size == 0) ? 0 : (used * 100 / size);
163		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
164
165		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
166		    spa->spa_root_vdev->vdev_state, src);
167
168		version = spa_version(spa);
169		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
170			src = ZPROP_SRC_DEFAULT;
171		else
172			src = ZPROP_SRC_LOCAL;
173		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
174	}
175
176	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
177
178	if (spa->spa_root != NULL)
179		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
180		    0, ZPROP_SRC_LOCAL);
181
182	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
183		if (dp->scd_path == NULL) {
184			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
185			    "none", 0, ZPROP_SRC_LOCAL);
186		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
187			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
188			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
189		}
190	}
191}
192
193/*
194 * Get zpool property values.
195 */
196int
197spa_prop_get(spa_t *spa, nvlist_t **nvp)
198{
199	zap_cursor_t zc;
200	zap_attribute_t za;
201	objset_t *mos = spa->spa_meta_objset;
202	int err;
203
204	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
205
206	mutex_enter(&spa->spa_props_lock);
207
208	/*
209	 * Get properties from the spa config.
210	 */
211	spa_prop_get_config(spa, nvp);
212
213	/* If no pool property object, no more props to get. */
214	if (spa->spa_pool_props_object == 0) {
215		mutex_exit(&spa->spa_props_lock);
216		return (0);
217	}
218
219	/*
220	 * Get properties from the MOS pool property object.
221	 */
222	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
223	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
224	    zap_cursor_advance(&zc)) {
225		uint64_t intval = 0;
226		char *strval = NULL;
227		zprop_source_t src = ZPROP_SRC_DEFAULT;
228		zpool_prop_t prop;
229
230		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
231			continue;
232
233		switch (za.za_integer_length) {
234		case 8:
235			/* integer property */
236			if (za.za_first_integer !=
237			    zpool_prop_default_numeric(prop))
238				src = ZPROP_SRC_LOCAL;
239
240			if (prop == ZPOOL_PROP_BOOTFS) {
241				dsl_pool_t *dp;
242				dsl_dataset_t *ds = NULL;
243
244				dp = spa_get_dsl(spa);
245				rw_enter(&dp->dp_config_rwlock, RW_READER);
246				if (err = dsl_dataset_hold_obj(dp,
247				    za.za_first_integer, FTAG, &ds)) {
248					rw_exit(&dp->dp_config_rwlock);
249					break;
250				}
251
252				strval = kmem_alloc(
253				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
254				    KM_SLEEP);
255				dsl_dataset_name(ds, strval);
256				dsl_dataset_rele(ds, FTAG);
257				rw_exit(&dp->dp_config_rwlock);
258			} else {
259				strval = NULL;
260				intval = za.za_first_integer;
261			}
262
263			spa_prop_add_list(*nvp, prop, strval, intval, src);
264
265			if (strval != NULL)
266				kmem_free(strval,
267				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
268
269			break;
270
271		case 1:
272			/* string property */
273			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
274			err = zap_lookup(mos, spa->spa_pool_props_object,
275			    za.za_name, 1, za.za_num_integers, strval);
276			if (err) {
277				kmem_free(strval, za.za_num_integers);
278				break;
279			}
280			spa_prop_add_list(*nvp, prop, strval, 0, src);
281			kmem_free(strval, za.za_num_integers);
282			break;
283
284		default:
285			break;
286		}
287	}
288	zap_cursor_fini(&zc);
289	mutex_exit(&spa->spa_props_lock);
290out:
291	if (err && err != ENOENT) {
292		nvlist_free(*nvp);
293		*nvp = NULL;
294		return (err);
295	}
296
297	return (0);
298}
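
/*
 * Note: on success *nvp refers to an allocated nvlist; the caller is
 * expected to free it with nvlist_free() when done.
 */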
299
300/*
301 * Validate the given pool properties nvlist and modify the list
302 * for the property values to be set.
303 */
304static int
305spa_prop_validate(spa_t *spa, nvlist_t *props)
306{
307	nvpair_t *elem;
308	int error = 0, reset_bootfs = 0;
309	uint64_t objnum;
310
311	elem = NULL;
312	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
313		zpool_prop_t prop;
314		char *propname, *strval;
315		uint64_t intval;
316		objset_t *os;
317		char *slash;
318
319		propname = nvpair_name(elem);
320
321		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
322			return (EINVAL);
323
324		switch (prop) {
325		case ZPOOL_PROP_VERSION:
326			error = nvpair_value_uint64(elem, &intval);
327			if (!error &&
328			    (intval < spa_version(spa) || intval > SPA_VERSION))
329				error = EINVAL;
330			break;
331
332		case ZPOOL_PROP_DELEGATION:
333		case ZPOOL_PROP_AUTOREPLACE:
334		case ZPOOL_PROP_LISTSNAPS:
335			error = nvpair_value_uint64(elem, &intval);
336			if (!error && intval > 1)
337				error = EINVAL;
338			break;
339
340		case ZPOOL_PROP_BOOTFS:
341			/*
342			 * If the pool version is less than SPA_VERSION_BOOTFS,
343			 * or the pool is still being created (version == 0),
344			 * the bootfs property cannot be set.
345			 */
346			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
347				error = ENOTSUP;
348				break;
349			}
350
351			/*
352			 * Make sure the vdev config is bootable
353			 */
354			if (!vdev_is_bootable(spa->spa_root_vdev)) {
355				error = ENOTSUP;
356				break;
357			}
358
359			reset_bootfs = 1;
360
361			error = nvpair_value_string(elem, &strval);
362
363			if (!error) {
364				uint64_t compress;
365
366				if (strval == NULL || strval[0] == '\0') {
367					objnum = zpool_prop_default_numeric(
368					    ZPOOL_PROP_BOOTFS);
369					break;
370				}
371
372				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
373				    DS_MODE_USER | DS_MODE_READONLY, &os))
374					break;
375
376				/* We don't support gzip bootable datasets */
377				if ((error = dsl_prop_get_integer(strval,
378				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
379				    &compress, NULL)) == 0 &&
380				    !BOOTFS_COMPRESS_VALID(compress)) {
381					error = ENOTSUP;
382				} else {
383					objnum = dmu_objset_id(os);
384				}
385				dmu_objset_close(os);
386			}
387			break;
388
389		case ZPOOL_PROP_FAILUREMODE:
390			error = nvpair_value_uint64(elem, &intval);
391			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
392			    intval > ZIO_FAILURE_MODE_PANIC))
393				error = EINVAL;
394
395			/*
396			 * This is a special case which only occurs when
397			 * the pool has completely failed. This allows
398			 * the user to change the in-core failmode property
399			 * without syncing it out to disk (I/Os might
400			 * currently be blocked). We do this by returning
401			 * EIO to the caller (spa_prop_set) to trick it
402			 * into thinking we encountered a property validation
403			 * error.
404			 */
405			if (!error && spa_suspended(spa)) {
406				spa->spa_failmode = intval;
407				error = EIO;
408			}
409			break;
410
411		case ZPOOL_PROP_CACHEFILE:
412			if ((error = nvpair_value_string(elem, &strval)) != 0)
413				break;
414
415			if (strval[0] == '\0')
416				break;
417
418			if (strcmp(strval, "none") == 0)
419				break;
420
421			if (strval[0] != '/') {
422				error = EINVAL;
423				break;
424			}
425
426			slash = strrchr(strval, '/');
427			ASSERT(slash != NULL);
428
429			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
430			    strcmp(slash, "/..") == 0)
431				error = EINVAL;
432			break;
433		}
434
435		if (error)
436			break;
437	}
438
439	if (!error && reset_bootfs) {
440		error = nvlist_remove(props,
441		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
442
443		if (!error) {
444			error = nvlist_add_uint64(props,
445			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
446		}
447	}
448
449	return (error);
450}
451
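/*
 * Record the cachefile setting for this pool: the empty string selects
 * the default spa_config_path, "none" disables the cache file, and any
 * other value is taken as an absolute pathname (validated earlier by
 * spa_prop_validate()).
 */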
452void
453spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
454{
455	char *cachefile;
456	spa_config_dirent_t *dp;
457
458	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
459	    &cachefile) != 0)
460		return;
461
462	dp = kmem_alloc(sizeof (spa_config_dirent_t),
463	    KM_SLEEP);
464
465	if (cachefile[0] == '\0')
466		dp->scd_path = spa_strdup(spa_config_path);
467	else if (strcmp(cachefile, "none") == 0)
468		dp->scd_path = NULL;
469	else
470		dp->scd_path = spa_strdup(cachefile);
471
472	list_insert_head(&spa->spa_config_list, dp);
473	if (need_sync)
474		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
475}
476
477int
478spa_prop_set(spa_t *spa, nvlist_t *nvp)
479{
480	int error;
481	nvpair_t *elem;
482	boolean_t need_sync = B_FALSE;
483	zpool_prop_t prop;
484
485	if ((error = spa_prop_validate(spa, nvp)) != 0)
486		return (error);
487
488	elem = NULL;
489	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
490		if ((prop = zpool_name_to_prop(
491		    nvpair_name(elem))) == ZPROP_INVAL)
492			return (EINVAL);
493
494		if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
495			continue;
496
497		need_sync = B_TRUE;
498		break;
499	}
500
501	if (need_sync)
502		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
503		    spa, nvp, 3));
504	else
505		return (0);
506}
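
/*
 * A minimal usage sketch (hypothetical caller): to enable delegation on
 * a pool, build an nvlist and hand it to spa_prop_set():
 *
 *	nvlist_t *props;
 *
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_DELEGATION), 1) == 0);
 *	error = spa_prop_set(spa, props);
 *	nvlist_free(props);
 *
 * Note that cachefile and altroot by themselves never set need_sync, so
 * they do not force the sync task.
 */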
507
508/*
509 * If the bootfs property value is dsobj, clear it.
510 */
511void
512spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
513{
514	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
515		VERIFY(zap_remove(spa->spa_meta_objset,
516		    spa->spa_pool_props_object,
517		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
518		spa->spa_bootfs = 0;
519	}
520}
521
522/*
523 * ==========================================================================
524 * SPA state manipulation (open/create/destroy/import/export)
525 * ==========================================================================
526 */
527
528static int
529spa_error_entry_compare(const void *a, const void *b)
530{
531	spa_error_entry_t *sa = (spa_error_entry_t *)a;
532	spa_error_entry_t *sb = (spa_error_entry_t *)b;
533	int ret;
534
535	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
536	    sizeof (zbookmark_t));
537
538	if (ret < 0)
539		return (-1);
540	else if (ret > 0)
541		return (1);
542	else
543		return (0);
544}
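
/*
 * Note that the comparison above relies on bcmp() behaving like memcmp()
 * and returning a signed difference; any consistent total order over the
 * raw zbookmark bytes is sufficient for the error-list AVL trees.
 */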
545
546/*
547 * Utility function which retrieves copies of the current error logs and
548 * re-initializes them in the process.
549 */
550void
551spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
552{
553	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
554
555	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
556	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
557
558	avl_create(&spa->spa_errlist_scrub,
559	    spa_error_entry_compare, sizeof (spa_error_entry_t),
560	    offsetof(spa_error_entry_t, se_avl));
561	avl_create(&spa->spa_errlist_last,
562	    spa_error_entry_compare, sizeof (spa_error_entry_t),
563	    offsetof(spa_error_entry_t, se_avl));
564}
565
566/*
567 * Activate an uninitialized pool.
568 */
569static void
570spa_activate(spa_t *spa, int mode)
571{
572	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
573
574	spa->spa_state = POOL_STATE_ACTIVE;
575	spa->spa_mode = mode;
576
577	spa->spa_normal_class = metaslab_class_create(zfs_metaslab_ops);
578	spa->spa_log_class = metaslab_class_create(zfs_metaslab_ops);
579
580	for (int t = 0; t < ZIO_TYPES; t++) {
581		const zio_taskq_info_t *ztip = &zio_taskqs[t];
582		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
583			enum zti_modes mode = ztip->zti_nthreads[q].zti_mode;
584			uint_t value = ztip->zti_nthreads[q].zti_value;
585			char name[32];
586
587			(void) snprintf(name, sizeof (name),
588			    "%s_%s", ztip->zti_name, zio_taskq_types[q]);
589
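			/*
			 * Resolve the tunable mode.  If the tunable itself
			 * has been set to zti_mode_tune, fall back to a
			 * percentage of online CPUs rather than recursing.
			 */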
590			if (mode == zti_mode_tune) {
591				mode = zio_taskq_tune_mode;
592				value = zio_taskq_tune_value;
593				if (mode == zti_mode_tune)
594					mode = zti_mode_online_percent;
595			}
596
597			switch (mode) {
598			case zti_mode_fixed:
599				ASSERT3U(value, >=, 1);
600				value = MAX(value, 1);
601
602				spa->spa_zio_taskq[t][q] = taskq_create(name,
603				    value, maxclsyspri, 50, INT_MAX,
604				    TASKQ_PREPOPULATE);
605				break;
606
607			case zti_mode_online_percent:
608				spa->spa_zio_taskq[t][q] = taskq_create(name,
609				    value, maxclsyspri, 50, INT_MAX,
610				    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
611				break;
612
613			case zti_mode_tune:
614			default:
615				panic("unrecognized mode for "
616				    "zio_taskqs[%u]->zti_nthreads[%u] (%u:%u) "
617				    "in spa_activate()",
618				    t, q, mode, value);
619				break;
620			}
621		}
622	}
623
624	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
625	    offsetof(vdev_t, vdev_config_dirty_node));
626	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
627	    offsetof(vdev_t, vdev_state_dirty_node));
628
629	txg_list_create(&spa->spa_vdev_txg_list,
630	    offsetof(struct vdev, vdev_txg_node));
631
632	avl_create(&spa->spa_errlist_scrub,
633	    spa_error_entry_compare, sizeof (spa_error_entry_t),
634	    offsetof(spa_error_entry_t, se_avl));
635	avl_create(&spa->spa_errlist_last,
636	    spa_error_entry_compare, sizeof (spa_error_entry_t),
637	    offsetof(spa_error_entry_t, se_avl));
638}
639
640/*
641 * Opposite of spa_activate().
642 */
643static void
644spa_deactivate(spa_t *spa)
645{
646	ASSERT(spa->spa_sync_on == B_FALSE);
647	ASSERT(spa->spa_dsl_pool == NULL);
648	ASSERT(spa->spa_root_vdev == NULL);
649	ASSERT(spa->spa_async_zio_root == NULL);
650	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
651
652	txg_list_destroy(&spa->spa_vdev_txg_list);
653
654	list_destroy(&spa->spa_config_dirty_list);
655	list_destroy(&spa->spa_state_dirty_list);
656
657	for (int t = 0; t < ZIO_TYPES; t++) {
658		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
659			taskq_destroy(spa->spa_zio_taskq[t][q]);
660			spa->spa_zio_taskq[t][q] = NULL;
661		}
662	}
663
664	metaslab_class_destroy(spa->spa_normal_class);
665	spa->spa_normal_class = NULL;
666
667	metaslab_class_destroy(spa->spa_log_class);
668	spa->spa_log_class = NULL;
669
670	/*
671	 * If this was part of an import or the open otherwise failed, we may
672	 * still have errors left in the queues.  Empty them just in case.
673	 */
674	spa_errlog_drain(spa);
675
676	avl_destroy(&spa->spa_errlist_scrub);
677	avl_destroy(&spa->spa_errlist_last);
678
679	spa->spa_state = POOL_STATE_UNINITIALIZED;
680}
681
682/*
683 * Verify a pool configuration, and construct the vdev tree appropriately.  This
684 * will create all the necessary vdevs in the requested layout, with each vdev
685 * in the CLOSED state.  This will prep the pool before open/creation/import.
686 * All vdev validation is done by the vdev_alloc() routine.
687 */
688static int
689spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
690    uint_t id, int atype)
691{
692	nvlist_t **child;
693	uint_t c, children;
694	int error;
695
696	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
697		return (error);
698
699	if ((*vdp)->vdev_ops->vdev_op_leaf)
700		return (0);
701
702	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
703	    &child, &children);
704
705	if (error == ENOENT)
706		return (0);
707
708	if (error) {
709		vdev_free(*vdp);
710		*vdp = NULL;
711		return (EINVAL);
712	}
713
714	for (c = 0; c < children; c++) {
715		vdev_t *vd;
716		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
717		    atype)) != 0) {
718			vdev_free(*vdp);
719			*vdp = NULL;
720			return (error);
721		}
722	}
723
724	ASSERT(*vdp != NULL);
725
726	return (0);
727}
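
/*
 * For example (a sketch of the usual config layout): a two-way mirror
 * arrives as a root nvlist whose ZPOOL_CONFIG_CHILDREN array holds one
 * "mirror" nvlist, which in turn holds two leaf ("disk") nvlists in its
 * own ZPOOL_CONFIG_CHILDREN array.  spa_config_parse() walks that tree
 * recursively, allocating a vdev_t per nvlist; only leaf vdevs terminate
 * the recursion.
 */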
728
729/*
730 * Opposite of spa_load().
731 */
732static void
733spa_unload(spa_t *spa)
734{
735	int i;
736
737	ASSERT(MUTEX_HELD(&spa_namespace_lock));
738
739	/*
740	 * Stop async tasks.
741	 */
742	spa_async_suspend(spa);
743
744	/*
745	 * Stop syncing.
746	 */
747	if (spa->spa_sync_on) {
748		txg_sync_stop(spa->spa_dsl_pool);
749		spa->spa_sync_on = B_FALSE;
750	}
751
752	/*
753	 * Wait for any outstanding async I/O to complete.
754	 */
755	if (spa->spa_async_zio_root != NULL) {
756		(void) zio_wait(spa->spa_async_zio_root);
757		spa->spa_async_zio_root = NULL;
758	}
759
760	/*
761	 * Close the dsl pool.
762	 */
763	if (spa->spa_dsl_pool) {
764		dsl_pool_close(spa->spa_dsl_pool);
765		spa->spa_dsl_pool = NULL;
766	}
767
768	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
769
770	/*
771	 * Drop and purge level 2 cache
772	 */
773	spa_l2cache_drop(spa);
774
775	/*
776	 * Close all vdevs.
777	 */
778	if (spa->spa_root_vdev)
779		vdev_free(spa->spa_root_vdev);
780	ASSERT(spa->spa_root_vdev == NULL);
781
782	for (i = 0; i < spa->spa_spares.sav_count; i++)
783		vdev_free(spa->spa_spares.sav_vdevs[i]);
784	if (spa->spa_spares.sav_vdevs) {
785		kmem_free(spa->spa_spares.sav_vdevs,
786		    spa->spa_spares.sav_count * sizeof (void *));
787		spa->spa_spares.sav_vdevs = NULL;
788	}
789	if (spa->spa_spares.sav_config) {
790		nvlist_free(spa->spa_spares.sav_config);
791		spa->spa_spares.sav_config = NULL;
792	}
793	spa->spa_spares.sav_count = 0;
794
795	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
796		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
797	if (spa->spa_l2cache.sav_vdevs) {
798		kmem_free(spa->spa_l2cache.sav_vdevs,
799		    spa->spa_l2cache.sav_count * sizeof (void *));
800		spa->spa_l2cache.sav_vdevs = NULL;
801	}
802	if (spa->spa_l2cache.sav_config) {
803		nvlist_free(spa->spa_l2cache.sav_config);
804		spa->spa_l2cache.sav_config = NULL;
805	}
806	spa->spa_l2cache.sav_count = 0;
807
808	spa->spa_async_suspended = 0;
809
810	spa_config_exit(spa, SCL_ALL, FTAG);
811}
812
813/*
814 * Load (or re-load) the current list of vdevs describing the active spares for
815 * this pool.  When this is called, we have some form of basic information in
816 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
817 * then re-generate a more complete list including status information.
818 */
819static void
820spa_load_spares(spa_t *spa)
821{
822	nvlist_t **spares;
823	uint_t nspares;
824	int i;
825	vdev_t *vd, *tvd;
826
827	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
828
829	/*
830	 * First, close and free any existing spare vdevs.
831	 */
832	for (i = 0; i < spa->spa_spares.sav_count; i++) {
833		vd = spa->spa_spares.sav_vdevs[i];
834
835		/* Undo the call to spa_spare_add() below */
836		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
837		    B_FALSE)) != NULL && tvd->vdev_isspare)
838			spa_spare_remove(tvd);
839		vdev_close(vd);
840		vdev_free(vd);
841	}
842
843	if (spa->spa_spares.sav_vdevs)
844		kmem_free(spa->spa_spares.sav_vdevs,
845		    spa->spa_spares.sav_count * sizeof (void *));
846
847	if (spa->spa_spares.sav_config == NULL)
848		nspares = 0;
849	else
850		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
851		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
852
853	spa->spa_spares.sav_count = (int)nspares;
854	spa->spa_spares.sav_vdevs = NULL;
855
856	if (nspares == 0)
857		return;
858
859	/*
860	 * Construct the array of vdevs, opening them to get status in the
861	 * process.  For each spare, there are potentially two different vdev_t
862	 * structures associated with it: one in the list of spares (used only
863	 * for basic validation purposes) and one in the active vdev
864	 * configuration (if it's spared in).  During this phase we open and
865	 * validate each vdev on the spare list.  If the vdev also exists in the
866	 * active configuration, then we also mark this vdev as an active spare.
867	 */
868	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
869	    KM_SLEEP);
870	for (i = 0; i < spa->spa_spares.sav_count; i++) {
871		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
872		    VDEV_ALLOC_SPARE) == 0);
873		ASSERT(vd != NULL);
874
875		spa->spa_spares.sav_vdevs[i] = vd;
876
877		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
878		    B_FALSE)) != NULL) {
879			if (!tvd->vdev_isspare)
880				spa_spare_add(tvd);
881
882			/*
883			 * We only mark the spare active if we were successfully
884			 * able to load the vdev.  Otherwise, importing a pool
885			 * with a bad active spare would result in strange
886			 * behavior, because multiple pools would think the spare
887			 * is actively in use.
888			 *
889			 * There is a vulnerability here to an equally bizarre
890			 * circumstance, where a dead active spare is later
891			 * brought back to life (onlined or otherwise).  Given
892			 * the rarity of this scenario, and the extra complexity
893			 * it adds, we ignore the possibility.
894			 */
895			if (!vdev_is_dead(tvd))
896				spa_spare_activate(tvd);
897		}
898
899		vd->vdev_top = vd;
900		vd->vdev_aux = &spa->spa_spares;
901
902		if (vdev_open(vd) != 0)
903			continue;
904
905		if (vdev_validate_aux(vd) == 0)
906			spa_spare_add(vd);
907	}
908
909	/*
910	 * Recompute the stashed list of spares, with status information
911	 * this time.
912	 */
913	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
914	    DATA_TYPE_NVLIST_ARRAY) == 0);
915
916	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
917	    KM_SLEEP);
918	for (i = 0; i < spa->spa_spares.sav_count; i++)
919		spares[i] = vdev_config_generate(spa,
920		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
921	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
922	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
923	for (i = 0; i < spa->spa_spares.sav_count; i++)
924		nvlist_free(spares[i]);
925	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
926}
927
928/*
929 * Load (or re-load) the current list of vdevs describing the active l2cache for
930 * this pool.  When this is called, we have some form of basic information in
931 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
932 * then re-generate a more complete list including status information.
933 * Devices which are already active have their details maintained, and are
934 * not re-opened.
935 */
936static void
937spa_load_l2cache(spa_t *spa)
938{
939	nvlist_t **l2cache;
940	uint_t nl2cache;
941	int i, j, oldnvdevs;
942	uint64_t guid, size;
943	vdev_t *vd, **oldvdevs, **newvdevs;
944	spa_aux_vdev_t *sav = &spa->spa_l2cache;
945
946	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
947
948	if (sav->sav_config != NULL) {
949		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
950		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
951		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
952	} else {
953		nl2cache = 0;
954	}
955
956	oldvdevs = sav->sav_vdevs;
957	oldnvdevs = sav->sav_count;
958	sav->sav_vdevs = NULL;
959	sav->sav_count = 0;
960
961	/*
962	 * Process new nvlist of vdevs.
963	 */
964	for (i = 0; i < nl2cache; i++) {
965		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
966		    &guid) == 0);
967
968		newvdevs[i] = NULL;
969		for (j = 0; j < oldnvdevs; j++) {
970			vd = oldvdevs[j];
971			if (vd != NULL && guid == vd->vdev_guid) {
972				/*
973				 * Retain previous vdev for add/remove ops.
974				 */
975				newvdevs[i] = vd;
976				oldvdevs[j] = NULL;
977				break;
978			}
979		}
980
981		if (newvdevs[i] == NULL) {
982			/*
983			 * Create new vdev
984			 */
985			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
986			    VDEV_ALLOC_L2CACHE) == 0);
987			ASSERT(vd != NULL);
988			newvdevs[i] = vd;
989
990			/*
991			 * Commit this vdev as an l2cache device,
992			 * even if it fails to open.
993			 */
994			spa_l2cache_add(vd);
995
996			vd->vdev_top = vd;
997			vd->vdev_aux = sav;
998
999			spa_l2cache_activate(vd);
1000
1001			if (vdev_open(vd) != 0)
1002				continue;
1003
1004			(void) vdev_validate_aux(vd);
1005
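			/*
			 * Hand the usable region of the device to the
			 * L2ARC, skipping the leading label area so cached
			 * data never overlaps the vdev labels.
			 */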
1006			if (!vdev_is_dead(vd)) {
1007				size = vdev_get_rsize(vd);
1008				l2arc_add_vdev(spa, vd,
1009				    VDEV_LABEL_START_SIZE,
1010				    size - VDEV_LABEL_START_SIZE);
1011			}
1012		}
1013	}
1014
1015	/*
1016	 * Purge vdevs that were dropped
1017	 */
1018	for (i = 0; i < oldnvdevs; i++) {
1019		uint64_t pool;
1020
1021		vd = oldvdevs[i];
1022		if (vd != NULL) {
1023			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
1024			    pool != 0ULL && l2arc_vdev_present(vd))
1025				l2arc_remove_vdev(vd);
1026			(void) vdev_close(vd);
1027			spa_l2cache_remove(vd);
1028		}
1029	}
1030
1031	if (oldvdevs)
1032		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
1033
1034	if (sav->sav_config == NULL)
1035		goto out;
1036
1037	sav->sav_vdevs = newvdevs;
1038	sav->sav_count = (int)nl2cache;
1039
1040	/*
1041	 * Recompute the stashed list of l2cache devices, with status
1042	 * information this time.
1043	 */
1044	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
1045	    DATA_TYPE_NVLIST_ARRAY) == 0);
1046
1047	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
1048	for (i = 0; i < sav->sav_count; i++)
1049		l2cache[i] = vdev_config_generate(spa,
1050		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
1051	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1052	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
1053out:
1054	for (i = 0; i < sav->sav_count; i++)
1055		nvlist_free(l2cache[i]);
1056	if (sav->sav_count)
1057		kmem_free(l2cache, sav->sav_count * sizeof (void *));
1058}
1059
1060static int
1061load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
1062{
1063	dmu_buf_t *db;
1064	char *packed = NULL;
1065	size_t nvsize = 0;
1066	int error;
1067	*value = NULL;
1068
1069	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
1070	nvsize = *(uint64_t *)db->db_data;
1071	dmu_buf_rele(db, FTAG);
1072
1073	packed = kmem_alloc(nvsize, KM_SLEEP);
1074	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
1075	    DMU_READ_PREFETCH);
1076	if (error == 0)
1077		error = nvlist_unpack(packed, nvsize, value, 0);
1078	kmem_free(packed, nvsize);
1079
1080	return (error);
1081}
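
/*
 * The on-disk layout assumed above: the object's bonus buffer holds a
 * single uint64_t giving the size of the packed nvlist, and the object
 * data holds the packed bytes themselves, so loading is a bonus read for
 * the size followed by a dmu_read() and an nvlist_unpack().
 */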
1082
1083/*
1084 * Check whether the given vdev could not be opened, and if so post a
1085 * sysevent to notify the autoreplace code that the device has been removed.
1086 */
1087static void
1088spa_check_removed(vdev_t *vd)
1089{
1090	int c;
1091
1092	for (c = 0; c < vd->vdev_children; c++)
1093		spa_check_removed(vd->vdev_child[c]);
1094
1095	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
1096		zfs_post_autoreplace(vd->vdev_spa, vd);
1097		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
1098	}
1099}
1100
1101/*
1102 * Load the slog device state from the config object since it's possible
1103 * that the label does not contain the most up-to-date information.
1104 */
1105void
1106spa_load_log_state(spa_t *spa)
1107{
1108	nvlist_t *nv, *nvroot, **child;
1109	uint64_t is_log;
1110	uint_t children, c;
1111	vdev_t *rvd = spa->spa_root_vdev;
1112
1113	VERIFY(load_nvlist(spa, spa->spa_config_object, &nv) == 0);
1114	VERIFY(nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1115	VERIFY(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1116	    &child, &children) == 0);
1117
1118	for (c = 0; c < children; c++) {
1119		vdev_t *tvd = rvd->vdev_child[c];
1120
1121		if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
1122		    &is_log) == 0 && is_log)
1123			vdev_load_log_state(tvd, child[c]);
1124	}
1125	nvlist_free(nv);
1126}
1127
1128/*
1129 * Check for missing log devices; returns nonzero if any are missing.
1130 */
1131int
1132spa_check_logs(spa_t *spa)
1133{
1134	switch (spa->spa_log_state) {
1135	case SPA_LOG_MISSING:
1136		/* need to recheck in case slog has been restored */
1137	case SPA_LOG_UNKNOWN:
1138		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
1139		    DS_FIND_CHILDREN)) {
1140			spa->spa_log_state = SPA_LOG_MISSING;
1141			return (1);
1142		}
1143		break;
1144	}
1145	return (0);
1146}
1147
1148/*
1149 * Load an existing storage pool, using the pool's builtin spa_config as a
1150 * source of configuration information.
1151 */
1152static int
1153spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
1154{
1155	int error = 0;
1156	nvlist_t *nvroot = NULL;
1157	vdev_t *rvd;
1158	uberblock_t *ub = &spa->spa_uberblock;
1159	uint64_t config_cache_txg = spa->spa_config_txg;
1160	uint64_t pool_guid;
1161	uint64_t version;
1162	uint64_t autoreplace = 0;
1163	int orig_mode = spa->spa_mode;
1164	char *ereport = FM_EREPORT_ZFS_POOL;
1165
1166	/*
1167	 * If this is an untrusted config, access the pool in read-only mode.
1168	 * This prevents things like resilvering recently removed devices.
1169	 */
1170	if (!mosconfig)
1171		spa->spa_mode = FREAD;
1172
1173	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1174
1175	spa->spa_load_state = state;
1176
1177	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
1178	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
1179		error = EINVAL;
1180		goto out;
1181	}
1182
1183	/*
1184	 * Versioning wasn't explicitly added to the label until later, so if
1185	 * it's not present treat it as the initial version.
1186	 */
1187	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
1188		version = SPA_VERSION_INITIAL;
1189
1190	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
1191	    &spa->spa_config_txg);
1192
1193	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
1194	    spa_guid_exists(pool_guid, 0)) {
1195		error = EEXIST;
1196		goto out;
1197	}
1198
1199	spa->spa_load_guid = pool_guid;
1200
1201	/*
1202	 * Create "The Godfather" zio to hold all async IOs; spa_unload() waits on it
1203	 */
1204	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
1205	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
1206
1207	/*
1208	 * Parse the configuration into a vdev tree.  We explicitly set the
1209	 * value that will be returned by spa_version() since parsing the
1210	 * configuration requires knowing the version number.
1211	 */
1212	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1213	spa->spa_ubsync.ub_version = version;
1214	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
1215	spa_config_exit(spa, SCL_ALL, FTAG);
1216
1217	if (error != 0)
1218		goto out;
1219
1220	ASSERT(spa->spa_root_vdev == rvd);
1221	ASSERT(spa_guid(spa) == pool_guid);
1222
1223	/*
1224	 * Try to open all vdevs, loading each label in the process.
1225	 */
1226	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1227	error = vdev_open(rvd);
1228	spa_config_exit(spa, SCL_ALL, FTAG);
1229	if (error != 0)
1230		goto out;
1231
1232	/*
1233	 * We need to validate the vdev labels against the configuration that
1234	 * we have in hand, which is dependent on the setting of mosconfig. If
1235	 * mosconfig is true then we're validating the vdev labels based on
1236	 * that config. Otherwise, we're validating against the cached config
1237	 * (zpool.cache) that was read when we loaded the zfs module, and then
1238	 * later we will recursively call spa_load() and validate against
1239	 * the vdev config.
1240	 */
1241	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1242	error = vdev_validate(rvd);
1243	spa_config_exit(spa, SCL_ALL, FTAG);
1244	if (error != 0)
1245		goto out;
1246
1247	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1248		error = ENXIO;
1249		goto out;
1250	}
1251
1252	/*
1253	 * Find the best uberblock.
1254	 */
1255	vdev_uberblock_load(NULL, rvd, ub);
1256
1257	/*
1258	 * If we weren't able to find a single valid uberblock, return failure.
1259	 */
1260	if (ub->ub_txg == 0) {
1261		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1262		    VDEV_AUX_CORRUPT_DATA);
1263		error = ENXIO;
1264		goto out;
1265	}
1266
1267	/*
1268	 * If the pool is newer than the code, we can't open it.
1269	 */
1270	if (ub->ub_version > SPA_VERSION) {
1271		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1272		    VDEV_AUX_VERSION_NEWER);
1273		error = ENOTSUP;
1274		goto out;
1275	}
1276
1277	/*
1278	 * If the vdev guid sum doesn't match the uberblock, we have an
1279	 * incomplete configuration.
1280	 */
1281	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
1282		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1283		    VDEV_AUX_BAD_GUID_SUM);
1284		error = ENXIO;
1285		goto out;
1286	}
1287
1288	/*
1289	 * Initialize internal SPA structures.
1290	 */
1291	spa->spa_state = POOL_STATE_ACTIVE;
1292	spa->spa_ubsync = spa->spa_uberblock;
1293	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
1294	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
1295	if (error) {
1296		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1297		    VDEV_AUX_CORRUPT_DATA);
1298		goto out;
1299	}
1300	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
1301
1302	if (zap_lookup(spa->spa_meta_objset,
1303	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
1304	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
1305		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1306		    VDEV_AUX_CORRUPT_DATA);
1307		error = EIO;
1308		goto out;
1309	}
1310
1311	if (!mosconfig) {
1312		nvlist_t *newconfig;
1313		uint64_t hostid;
1314
1315		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
1316			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1317			    VDEV_AUX_CORRUPT_DATA);
1318			error = EIO;
1319			goto out;
1320		}
1321
1322		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
1323		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
1324			char *hostname;
1325			unsigned long myhostid = 0;
1326
1327			VERIFY(nvlist_lookup_string(newconfig,
1328			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
1329
1330#ifdef	_KERNEL
1331			myhostid = zone_get_hostid(NULL);
1332#else	/* _KERNEL */
1333			/*
1334			 * We're emulating the system's hostid in userland, so
1335			 * we can't use zone_get_hostid().
1336			 */
1337			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
1338#endif	/* _KERNEL */
1339			if (hostid != 0 && myhostid != 0 &&
1340			    hostid != myhostid) {
1341				cmn_err(CE_WARN, "pool '%s' could not be "
1342				    "loaded as it was last accessed by "
1343				    "another system (host: %s hostid: 0x%lx). "
1344				    "See: http://www.sun.com/msg/ZFS-8000-EY",
1345				    spa_name(spa), hostname,
1346				    (unsigned long)hostid);
1347				error = EBADF;
1348				goto out;
1349			}
1350		}
1351
1352		spa_config_set(spa, newconfig);
1353		spa_unload(spa);
1354		spa_deactivate(spa);
1355		spa_activate(spa, orig_mode);
1356
1357		return (spa_load(spa, newconfig, state, B_TRUE));
1358	}
1359
1360	if (zap_lookup(spa->spa_meta_objset,
1361	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
1362	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
1363		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1364		    VDEV_AUX_CORRUPT_DATA);
1365		error = EIO;
1366		goto out;
1367	}
1368
1369	/*
1370	 * Load the bit that tells us to use the new accounting function
1371	 * (raid-z deflation).  If we have an older pool, this will not
1372	 * be present.
1373	 */
1374	error = zap_lookup(spa->spa_meta_objset,
1375	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
1376	    sizeof (uint64_t), 1, &spa->spa_deflate);
1377	if (error != 0 && error != ENOENT) {
1378		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1379		    VDEV_AUX_CORRUPT_DATA);
1380		error = EIO;
1381		goto out;
1382	}
1383
1384	/*
1385	 * Load the persistent error log.  If we have an older pool, this will
1386	 * not be present.
1387	 */
1388	error = zap_lookup(spa->spa_meta_objset,
1389	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
1390	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
1391	if (error != 0 && error != ENOENT) {
1392		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1393		    VDEV_AUX_CORRUPT_DATA);
1394		error = EIO;
1395		goto out;
1396	}
1397
1398	error = zap_lookup(spa->spa_meta_objset,
1399	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
1400	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
1401	if (error != 0 && error != ENOENT) {
1402		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1403		    VDEV_AUX_CORRUPT_DATA);
1404		error = EIO;
1405		goto out;
1406	}
1407
1408	/*
1409	 * Load the history object.  If we have an older pool, this
1410	 * will not be present.
1411	 */
1412	error = zap_lookup(spa->spa_meta_objset,
1413	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
1414	    sizeof (uint64_t), 1, &spa->spa_history);
1415	if (error != 0 && error != ENOENT) {
1416		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1417		    VDEV_AUX_CORRUPT_DATA);
1418		error = EIO;
1419		goto out;
1420	}
1421
1422	/*
1423	 * Load any hot spares for this pool.
1424	 */
1425	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1426	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
1427	if (error != 0 && error != ENOENT) {
1428		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1429		    VDEV_AUX_CORRUPT_DATA);
1430		error = EIO;
1431		goto out;
1432	}
1433	if (error == 0) {
1434		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
1435		if (load_nvlist(spa, spa->spa_spares.sav_object,
1436		    &spa->spa_spares.sav_config) != 0) {
1437			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1438			    VDEV_AUX_CORRUPT_DATA);
1439			error = EIO;
1440			goto out;
1441		}
1442
1443		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1444		spa_load_spares(spa);
1445		spa_config_exit(spa, SCL_ALL, FTAG);
1446	}
1447
1448	/*
1449	 * Load any level 2 ARC devices for this pool.
1450	 */
1451	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1452	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
1453	    &spa->spa_l2cache.sav_object);
1454	if (error != 0 && error != ENOENT) {
1455		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1456		    VDEV_AUX_CORRUPT_DATA);
1457		error = EIO;
1458		goto out;
1459	}
1460	if (error == 0) {
1461		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
1462		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
1463		    &spa->spa_l2cache.sav_config) != 0) {
1464			vdev_set_state(rvd, B_TRUE,
1465			    VDEV_STATE_CANT_OPEN,
1466			    VDEV_AUX_CORRUPT_DATA);
1467			error = EIO;
1468			goto out;
1469		}
1470
1471		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1472		spa_load_l2cache(spa);
1473		spa_config_exit(spa, SCL_ALL, FTAG);
1474	}
1475
1476	spa_load_log_state(spa);
1477
1478	if (spa_check_logs(spa)) {
1479		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1480		    VDEV_AUX_BAD_LOG);
1481		error = ENXIO;
1482		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
1483		goto out;
1484	}
1485
1486
1487	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
1488
1489	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1490	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);
1491
1492	if (error && error != ENOENT) {
1493		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1494		    VDEV_AUX_CORRUPT_DATA);
1495		error = EIO;
1496		goto out;
1497	}
1498
1499	if (error == 0) {
1500		(void) zap_lookup(spa->spa_meta_objset,
1501		    spa->spa_pool_props_object,
1502		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
1503		    sizeof (uint64_t), 1, &spa->spa_bootfs);
1504		(void) zap_lookup(spa->spa_meta_objset,
1505		    spa->spa_pool_props_object,
1506		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
1507		    sizeof (uint64_t), 1, &autoreplace);
1508		(void) zap_lookup(spa->spa_meta_objset,
1509		    spa->spa_pool_props_object,
1510		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
1511		    sizeof (uint64_t), 1, &spa->spa_delegation);
1512		(void) zap_lookup(spa->spa_meta_objset,
1513		    spa->spa_pool_props_object,
1514		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
1515		    sizeof (uint64_t), 1, &spa->spa_failmode);
1516	}
1517
1518	/*
1519	 * If the 'autoreplace' property is set, then post a resource notifying
1520	 * the ZFS DE that it should not issue any faults for unopenable
1521	 * devices.  We also iterate over the vdevs, and post a sysevent for any
1522	 * unopenable vdevs so that the normal autoreplace handler can take
1523	 * over.
1524	 */
1525	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
1526		spa_check_removed(spa->spa_root_vdev);
1527
1528	/*
1529	 * Load the vdev state for all toplevel vdevs.
1530	 */
1531	vdev_load(rvd);
1532
1533	/*
1534	 * Propagate the leaf DTLs we just loaded all the way up the tree.
1535	 */
1536	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1537	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
1538	spa_config_exit(spa, SCL_ALL, FTAG);
1539
1540	/*
1541	 * Check the state of the root vdev.  If it can't be opened, it
1542	 * indicates one or more toplevel vdevs are faulted.
1543	 */
1544	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1545		error = ENXIO;
1546		goto out;
1547	}
1548
1549	if (spa_writeable(spa)) {
1550		dmu_tx_t *tx;
1551		int need_update = B_FALSE;
1552
1553		ASSERT(state != SPA_LOAD_TRYIMPORT);
1554
1555		/*
1556		 * Claim log blocks that haven't been committed yet.
1557		 * This must all happen in a single txg.
1558		 */
1559		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
1560		    spa_first_txg(spa));
1561		(void) dmu_objset_find(spa_name(spa),
1562		    zil_claim, tx, DS_FIND_CHILDREN);
1563		dmu_tx_commit(tx);
1564
1565		spa->spa_log_state = SPA_LOG_GOOD;
1566		spa->spa_sync_on = B_TRUE;
1567		txg_sync_start(spa->spa_dsl_pool);
1568
1569		/*
1570		 * Wait for all claims to sync.
1571		 */
1572		txg_wait_synced(spa->spa_dsl_pool, 0);
1573
1574		/*
1575		 * If the config cache is stale, or we have uninitialized
1576		 * metaslabs (see spa_vdev_add()), then update the config.
1577		 */
1578		if (config_cache_txg != spa->spa_config_txg ||
1579		    state == SPA_LOAD_IMPORT)
1580			need_update = B_TRUE;
1581
1582		for (int c = 0; c < rvd->vdev_children; c++)
1583			if (rvd->vdev_child[c]->vdev_ms_array == 0)
1584				need_update = B_TRUE;
1585
1586		/*
1587		 * Update the config cache asynchronously in case we're the
1588		 * root pool, in which case the config cache isn't writable yet.
1589		 */
1590		if (need_update)
1591			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
1592
1593		/*
1594		 * Check all DTLs to see if anything needs resilvering.
1595		 */
1596		if (vdev_resilver_needed(rvd, NULL, NULL))
1597			spa_async_request(spa, SPA_ASYNC_RESILVER);
1598	}
1599
1600	error = 0;
1601out:
1602	spa->spa_minref = refcount_count(&spa->spa_refcount);
1603	if (error && error != EBADF)
1604		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
1605	spa->spa_load_state = SPA_LOAD_NONE;
1606	spa->spa_ena = 0;
1607
1608	return (error);
1609}
1610
1611/*
1612 * Pool Open/Import
1613 *
1614 * The import case is identical to an open except that the configuration is sent
1615 * down from userland, instead of grabbed from the configuration cache.  For the
1616 * case of an open, the pool configuration will exist in the
1617 * POOL_STATE_UNINITIALIZED state.
1618 *
1619 * The stats information (gen/count/ustats) is used to gather vdev statistics at
1620 * the same time the pool is opened, without having to keep the spa_t around in
1621 * some ambiguous state.
1622 */
1623static int
1624spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
1625{
1626	spa_t *spa;
1627	int error;
1628	int locked = B_FALSE;
1629
1630	*spapp = NULL;
1631
1632	/*
1633	 * As disgusting as this is, we need to support recursive calls to this
1634	 * function because dsl_dir_open() is called during spa_load(), and ends
1635	 * up calling spa_open() again.  The real fix is to figure out how to
1636	 * avoid dsl_dir_open() calling this in the first place.
1637	 */
1638	if (mutex_owner(&spa_namespace_lock) != curthread) {
1639		mutex_enter(&spa_namespace_lock);
1640		locked = B_TRUE;
1641	}
1642
1643	if ((spa = spa_lookup(pool)) == NULL) {
1644		if (locked)
1645			mutex_exit(&spa_namespace_lock);
1646		return (ENOENT);
1647	}
1648	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
1649
1650		spa_activate(spa, spa_mode_global);
1651
1652		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);
1653
1654		if (error == EBADF) {
1655			/*
1656			 * If vdev_validate() returns failure (indicated by
1657			 * EBADF), it means that one of the vdevs reports that
1658			 * the pool has been exported or destroyed.  If
1659			 * this is the case, the config cache is out of sync and
1660			 * we should remove the pool from the namespace.
1661			 */
1662			spa_unload(spa);
1663			spa_deactivate(spa);
1664			spa_config_sync(spa, B_TRUE, B_TRUE);
1665			spa_remove(spa);
1666			if (locked)
1667				mutex_exit(&spa_namespace_lock);
1668			return (ENOENT);
1669		}
1670
1671		if (error) {
1672			/*
1673			 * We can't open the pool, but we still have useful
1674			 * information: the state of each vdev after the
1675			 * attempted vdev_open().  Return this to the user.
1676			 */
1677			if (config != NULL && spa->spa_root_vdev != NULL)
1678				*config = spa_config_generate(spa, NULL, -1ULL,
1679				    B_TRUE);
1680			spa_unload(spa);
1681			spa_deactivate(spa);
1682			spa->spa_last_open_failed = B_TRUE;
1683			if (locked)
1684				mutex_exit(&spa_namespace_lock);
1685			*spapp = NULL;
1686			return (error);
1687		} else {
1688			spa->spa_last_open_failed = B_FALSE;
1689		}
1690	}
1691
1692	spa_open_ref(spa, tag);
1693
1694	if (locked)
1695		mutex_exit(&spa_namespace_lock);
1696
1697	*spapp = spa;
1698
1699	if (config != NULL)
1700		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
1701
1702	return (0);
1703}
1704
1705int
1706spa_open(const char *name, spa_t **spapp, void *tag)
1707{
1708	return (spa_open_common(name, spapp, tag, NULL));
1709}
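
/*
 * A minimal usage sketch (hypothetical caller):
 *
 *	spa_t *spa;
 *	int error;
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) != 0)
 *		return (error);
 *	... operate on the open pool ...
 *	spa_close(spa, FTAG);
 *
 * spa_close() drops the reference taken via spa_open_ref() above.
 */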
1710
1711/*
1712 * Lookup the given spa_t, incrementing the inject count in the process,
1713 * preventing it from being exported or destroyed.
1714 */
1715spa_t *
1716spa_inject_addref(char *name)
1717{
1718	spa_t *spa;
1719
1720	mutex_enter(&spa_namespace_lock);
1721	if ((spa = spa_lookup(name)) == NULL) {
1722		mutex_exit(&spa_namespace_lock);
1723		return (NULL);
1724	}
1725	spa->spa_inject_ref++;
1726	mutex_exit(&spa_namespace_lock);
1727
1728	return (spa);
1729}
1730
1731void
1732spa_inject_delref(spa_t *spa)
1733{
1734	mutex_enter(&spa_namespace_lock);
1735	spa->spa_inject_ref--;
1736	mutex_exit(&spa_namespace_lock);
1737}
1738
1739/*
1740 * Add spare device information to the nvlist.
1741 */
1742static void
1743spa_add_spares(spa_t *spa, nvlist_t *config)
1744{
1745	nvlist_t **spares;
1746	uint_t i, nspares;
1747	nvlist_t *nvroot;
1748	uint64_t guid;
1749	vdev_stat_t *vs;
1750	uint_t vsc;
1751	uint64_t pool;
1752
1753	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
1754
1755	if (spa->spa_spares.sav_count == 0)
1756		return;
1757
1758	VERIFY(nvlist_lookup_nvlist(config,
1759	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1760	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1761	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1762	if (nspares != 0) {
1763		VERIFY(nvlist_add_nvlist_array(nvroot,
1764		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
1765		VERIFY(nvlist_lookup_nvlist_array(nvroot,
1766		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1767
1768		/*
1769		 * Go through and find any spares which have since been
1770		 * repurposed as active spares.  If this is the case, update
1771		 * their status appropriately.
1772		 */
1773		for (i = 0; i < nspares; i++) {
1774			VERIFY(nvlist_lookup_uint64(spares[i],
1775			    ZPOOL_CONFIG_GUID, &guid) == 0);
1776			if (spa_spare_exists(guid, &pool, NULL) &&
1777			    pool != 0ULL) {
1778				VERIFY(nvlist_lookup_uint64_array(
1779				    spares[i], ZPOOL_CONFIG_STATS,
1780				    (uint64_t **)&vs, &vsc) == 0);
1781				vs->vs_state = VDEV_STATE_CANT_OPEN;
1782				vs->vs_aux = VDEV_AUX_SPARED;
1783			}
1784		}
1785	}
1786}
1787
1788/*
1789 * Add l2cache device information to the nvlist, including vdev stats.
1790 */
1791static void
1792spa_add_l2cache(spa_t *spa, nvlist_t *config)
1793{
1794	nvlist_t **l2cache;
1795	uint_t i, j, nl2cache;
1796	nvlist_t *nvroot;
1797	uint64_t guid;
1798	vdev_t *vd;
1799	vdev_stat_t *vs;
1800	uint_t vsc;
1801
1802	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
1803
1804	if (spa->spa_l2cache.sav_count == 0)
1805		return;
1806
1807	VERIFY(nvlist_lookup_nvlist(config,
1808	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1809	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
1810	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1811	if (nl2cache != 0) {
1812		VERIFY(nvlist_add_nvlist_array(nvroot,
1813		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1814		VERIFY(nvlist_lookup_nvlist_array(nvroot,
1815		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1816
1817		/*
1818		 * Update level 2 cache device stats.
1819		 */
1820
1821		for (i = 0; i < nl2cache; i++) {
1822			VERIFY(nvlist_lookup_uint64(l2cache[i],
1823			    ZPOOL_CONFIG_GUID, &guid) == 0);
1824
1825			vd = NULL;
1826			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
1827				if (guid ==
1828				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
1829					vd = spa->spa_l2cache.sav_vdevs[j];
1830					break;
1831				}
1832			}
1833			ASSERT(vd != NULL);
1834
1835			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
1836			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
1837			vdev_get_stats(vd, vs);
1838		}
1839	}
1840}
1841
1842int
1843spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
1844{
1845	int error;
1846	spa_t *spa;
1847
1848	*config = NULL;
1849	error = spa_open_common(name, &spa, FTAG, config);
1850
1851	if (spa != NULL) {
1852		/*
1853		 * This still leaves a window of inconsistency where the spares
1854		 * or l2cache devices could change and the config would be
1855		 * self-inconsistent.
1856		 */
1857		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1858
1859		if (*config != NULL) {
1860			VERIFY(nvlist_add_uint64(*config,
1861			    ZPOOL_CONFIG_ERRCOUNT,
1862			    spa_get_errlog_size(spa)) == 0);
1863
1864			if (spa_suspended(spa))
1865				VERIFY(nvlist_add_uint64(*config,
1866				    ZPOOL_CONFIG_SUSPENDED,
1867				    spa->spa_failmode) == 0);
1868
1869			spa_add_spares(spa, *config);
1870			spa_add_l2cache(spa, *config);
1871		}
1872	}
1873
1874	/*
1875	 * We want to get the alternate root even for faulted pools, so we cheat
1876	 * and call spa_lookup() directly.
1877	 */
1878	if (altroot) {
1879		if (spa == NULL) {
1880			mutex_enter(&spa_namespace_lock);
1881			spa = spa_lookup(name);
1882			if (spa)
1883				spa_altroot(spa, altroot, buflen);
1884			else
1885				altroot[0] = '\0';
1886			spa = NULL;
1887			mutex_exit(&spa_namespace_lock);
1888		} else {
1889			spa_altroot(spa, altroot, buflen);
1890		}
1891	}
1892
1893	if (spa != NULL) {
1894		spa_config_exit(spa, SCL_CONFIG, FTAG);
1895		spa_close(spa, FTAG);
1896	}
1897
1898	return (error);
1899}
1900
1901/*
1902 * Validate that the auxiliary device array is well formed.  We must have an
1903 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
1904 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
1905 * specified, as long as they are well-formed.
1906 */
1907static int
1908spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
1909    spa_aux_vdev_t *sav, const char *config, uint64_t version,
1910    vdev_labeltype_t label)
1911{
1912	nvlist_t **dev;
1913	uint_t i, ndev;
1914	vdev_t *vd;
1915	int error;
1916
1917	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1918
1919	/*
1920	 * It's acceptable to have no devs specified.
1921	 */
1922	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
1923		return (0);
1924
1925	if (ndev == 0)
1926		return (EINVAL);
1927
1928	/*
1929	 * Make sure the pool is formatted with a version that supports this
1930	 * device type.
1931	 */
1932	if (spa_version(spa) < version)
1933		return (ENOTSUP);
1934
1935	/*
1936	 * Set the pending device list so we correctly handle device in-use
1937	 * checking.
1938	 */
1939	sav->sav_pending = dev;
1940	sav->sav_npending = ndev;
1941
1942	for (i = 0; i < ndev; i++) {
1943		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
1944		    mode)) != 0)
1945			goto out;
1946
1947		if (!vd->vdev_ops->vdev_op_leaf) {
1948			vdev_free(vd);
1949			error = EINVAL;
1950			goto out;
1951		}
1952
1953		/*
1954		 * The L2ARC currently only supports disk devices in
1955		 * kernel context.  For user-level testing, we allow it.
1956		 */
1957#ifdef _KERNEL
1958		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
1959		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
1960			error = ENOTBLK;
1961			goto out;
1962		}
1963#endif
1964		vd->vdev_top = vd;
1965
1966		if ((error = vdev_open(vd)) == 0 &&
1967		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
1968			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
1969			    vd->vdev_guid) == 0);
1970		}
1971
1972		vdev_free(vd);
1973
1974		if (error &&
1975		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
1976			goto out;
1977		else
1978			error = 0;
1979	}
1980
1981out:
1982	sav->sav_pending = NULL;
1983	sav->sav_npending = 0;
1984	return (error);
1985}
1986
1987static int
1988spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1989{
1990	int error;
1991
1992	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1993
1994	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1995	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
1996	    VDEV_LABEL_SPARE)) != 0) {
1997		return (error);
1998	}
1999
2000	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
2001	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
2002	    VDEV_LABEL_L2CACHE));
2003}
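
/*
 * Illustrative sketch of the nvroot shape consumed above (the key names
 * are the real ZPOOL_CONFIG_* strings; the device paths are hypothetical,
 * not taken from this file):
 *
 *	nvroot: { type: "root",
 *	    children: [ { type: "disk", path: "/dev/dsk/c1t0d0s0" }, ... ],
 *	    spares:   [ { type: "disk", path: "/dev/dsk/c2t0d0s0" }, ... ],
 *	    l2cache:  [ { type: "disk", path: "/dev/dsk/c2t1d0s0" }, ... ] }
 *
 * Spares are validated first, then l2cache devices; the first failure is
 * returned to the caller.
 */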
2004
2005static void
2006spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
2007    const char *config)
2008{
2009	int i;
2010
2011	if (sav->sav_config != NULL) {
2012		nvlist_t **olddevs;
2013		uint_t oldndevs;
2014		nvlist_t **newdevs;
2015
2016		/*
2017		 * Generate a new dev list by concatenating it with the
2018		 * current dev list.
2019		 */
2020		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
2021		    &olddevs, &oldndevs) == 0);
2022
2023		newdevs = kmem_alloc(sizeof (void *) *
2024		    (ndevs + oldndevs), KM_SLEEP);
2025		for (i = 0; i < oldndevs; i++)
2026			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
2027			    KM_SLEEP) == 0);
2028		for (i = 0; i < ndevs; i++)
2029			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
2030			    KM_SLEEP) == 0);
2031
2032		VERIFY(nvlist_remove(sav->sav_config, config,
2033		    DATA_TYPE_NVLIST_ARRAY) == 0);
2034
2035		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
2036		    config, newdevs, ndevs + oldndevs) == 0);
2037		for (i = 0; i < oldndevs + ndevs; i++)
2038			nvlist_free(newdevs[i]);
2039		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
2040	} else {
2041		/*
2042		 * Generate a new dev list.
2043		 */
2044		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
2045		    KM_SLEEP) == 0);
2046		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
2047		    devs, ndevs) == 0);
2048	}
2049}
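
/*
 * Example of the merge above (hypothetical lists): if sav_config already
 * holds spares [A, B] and the caller passes in [C], the resulting array is
 * [A, B, C].  Every element is duplicated via nvlist_dup(), so the
 * caller's devs[] array remains owned by the caller.
 */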
2050
2051/*
2052 * Stop and drop level 2 ARC devices
2053 */
2054void
2055spa_l2cache_drop(spa_t *spa)
2056{
2057	vdev_t *vd;
2058	int i;
2059	spa_aux_vdev_t *sav = &spa->spa_l2cache;
2060
2061	for (i = 0; i < sav->sav_count; i++) {
2062		uint64_t pool;
2063
2064		vd = sav->sav_vdevs[i];
2065		ASSERT(vd != NULL);
2066
2067		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
2068		    pool != 0ULL && l2arc_vdev_present(vd))
2069			l2arc_remove_vdev(vd);
2070		if (vd->vdev_isl2cache)
2071			spa_l2cache_remove(vd);
2072		vdev_clear_stats(vd);
2073		(void) vdev_close(vd);
2074	}
2075}
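
/*
 * Note the teardown order above: each device is removed from the ARC's
 * view first (l2arc_remove_vdev()), then from the spa's aux bookkeeping
 * (spa_l2cache_remove()), and only then are its stats cleared and the
 * vdev closed, presumably so no stage ever operates on a device the ARC
 * still considers present.
 */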
2076
2077/*
2078 * Pool Creation
2079 */
2080int
2081spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
2082    const char *history_str, nvlist_t *zplprops)
2083{
2084	spa_t *spa;
2085	char *altroot = NULL;
2086	vdev_t *rvd;
2087	dsl_pool_t *dp;
2088	dmu_tx_t *tx;
2089	int c, error = 0;
2090	uint64_t txg = TXG_INITIAL;
2091	nvlist_t **spares, **l2cache;
2092	uint_t nspares, nl2cache;
2093	uint64_t version;
2094
2095	/*
2096	 * If this pool already exists, return failure.
2097	 */
2098	mutex_enter(&spa_namespace_lock);
2099	if (spa_lookup(pool) != NULL) {
2100		mutex_exit(&spa_namespace_lock);
2101		return (EEXIST);
2102	}
2103
2104	/*
2105	 * Allocate a new spa_t structure.
2106	 */
2107	(void) nvlist_lookup_string(props,
2108	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2109	spa = spa_add(pool, altroot);
2110	spa_activate(spa, spa_mode_global);
2111
2112	spa->spa_uberblock.ub_txg = txg - 1;
2113
2114	if (props && (error = spa_prop_validate(spa, props))) {
2115		spa_deactivate(spa);
2116		spa_remove(spa);
2117		mutex_exit(&spa_namespace_lock);
2118		return (error);
2119	}
2120
2121	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
2122	    &version) != 0)
2123		version = SPA_VERSION;
2124	ASSERT(version <= SPA_VERSION);
2125	spa->spa_uberblock.ub_version = version;
2126	spa->spa_ubsync = spa->spa_uberblock;
2127
2128	/*
2129	 * Create "The Godfather" zio to hold all async IOs
2130	 */
2131	spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
2132	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
2133
2134	/*
2135	 * Create the root vdev.
2136	 */
2137	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2138
2139	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
2140
2141	ASSERT(error != 0 || rvd != NULL);
2142	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
2143
2144	if (error == 0 && !zfs_allocatable_devs(nvroot))
2145		error = EINVAL;
2146
2147	if (error == 0 &&
2148	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
2149	    (error = spa_validate_aux(spa, nvroot, txg,
2150	    VDEV_ALLOC_ADD)) == 0) {
2151		for (c = 0; c < rvd->vdev_children; c++)
2152			vdev_init(rvd->vdev_child[c], txg);
2153		vdev_config_dirty(rvd);
2154	}
2155
2156	spa_config_exit(spa, SCL_ALL, FTAG);
2157
2158	if (error != 0) {
2159		spa_unload(spa);
2160		spa_deactivate(spa);
2161		spa_remove(spa);
2162		mutex_exit(&spa_namespace_lock);
2163		return (error);
2164	}
2165
2166	/*
2167	 * Get the list of spares, if specified.
2168	 */
2169	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2170	    &spares, &nspares) == 0) {
2171		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
2172		    KM_SLEEP) == 0);
2173		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2174		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2175		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2176		spa_load_spares(spa);
2177		spa_config_exit(spa, SCL_ALL, FTAG);
2178		spa->spa_spares.sav_sync = B_TRUE;
2179	}
2180
2181	/*
2182	 * Get the list of level 2 cache devices, if specified.
2183	 */
2184	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2185	    &l2cache, &nl2cache) == 0) {
2186		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2187		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2188		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2189		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2190		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2191		spa_load_l2cache(spa);
2192		spa_config_exit(spa, SCL_ALL, FTAG);
2193		spa->spa_l2cache.sav_sync = B_TRUE;
2194	}
2195
2196	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
2197	spa->spa_meta_objset = dp->dp_meta_objset;
2198
2199	tx = dmu_tx_create_assigned(dp, txg);
2200
2201	/*
2202	 * Create the pool config object.
2203	 */
2204	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
2205	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
2206	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
2207
2208	if (zap_add(spa->spa_meta_objset,
2209	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
2210	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
2211		cmn_err(CE_PANIC, "failed to add pool config");
2212	}
2213
2214	/* Newly created pools with the right version are always deflated. */
2215	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
2216		spa->spa_deflate = TRUE;
2217		if (zap_add(spa->spa_meta_objset,
2218		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
2219		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
2220			cmn_err(CE_PANIC, "failed to add deflate");
2221		}
2222	}
2223
2224	/*
2225	 * Create the deferred-free bplist object.  Turn off compression
2226	 * because sync-to-convergence takes longer if the blocksize
2227	 * keeps changing.
2228	 */
2229	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
2230	    1 << 14, tx);
2231	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
2232	    ZIO_COMPRESS_OFF, tx);
2233
2234	if (zap_add(spa->spa_meta_objset,
2235	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
2236	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
2237		cmn_err(CE_PANIC, "failed to add bplist");
2238	}
2239
2240	/*
2241	 * Create the pool's history object.
2242	 */
2243	if (version >= SPA_VERSION_ZPOOL_HISTORY)
2244		spa_history_create_obj(spa, tx);
2245
2246	/*
2247	 * Set pool properties.
2248	 */
2249	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
2250	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2251	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
2252	if (props != NULL) {
2253		spa_configfile_set(spa, props, B_FALSE);
2254		spa_sync_props(spa, props, CRED(), tx);
2255	}
2256
2257	dmu_tx_commit(tx);
2258
2259	spa->spa_sync_on = B_TRUE;
2260	txg_sync_start(spa->spa_dsl_pool);
2261
2262	/*
2263	 * We explicitly wait for the first transaction to complete so that our
2264	 * bean counters are appropriately updated.
2265	 */
2266	txg_wait_synced(spa->spa_dsl_pool, txg);
2267
2268	spa_config_sync(spa, B_FALSE, B_TRUE);
2269
2270	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
2271		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
2272
2273	spa->spa_minref = refcount_count(&spa->spa_refcount);
2274
2275	mutex_exit(&spa_namespace_lock);
2276
2277	return (0);
2278}
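
/*
 * Caller sketch (hypothetical; make_root_vdev() is an illustrative helper
 * and "tank" an illustrative pool name, neither taken from this file):
 *
 *	nvlist_t *nvroot = make_root_vdev(...);
 *	int err = spa_create("tank", nvroot, NULL, NULL, NULL);
 *	if (err == EEXIST)
 *		... a pool named "tank" already exists ...
 */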
2279
2280#ifdef _KERNEL
2281/*
2282 * Build a "root" vdev for a top level vdev read in from a rootpool
2283 * device label.
2284 */
2285static void
2286spa_build_rootpool_config(nvlist_t *config)
2287{
2288	nvlist_t *nvtop, *nvroot;
2289	uint64_t pgid;
2290
2291	/*
2292	 * Look up this pool's top-level vdev and pool guid.
2293	 */
2294	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
2295	    == 0);
2296	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
2297	    == 0);
2298
2299	/*
2300	 * Put this pool's top-level vdevs into a root vdev.
2301	 */
2302	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2303	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
2304	    == 0);
2305	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
2306	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
2307	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2308	    &nvtop, 1) == 0);
2309
2310	/*
2311	 * Replace the existing vdev_tree with the new root vdev in
2312	 * this pool's configuration (remove the old, add the new).
2313	 */
2314	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2315	nvlist_free(nvroot);
2316}
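
/*
 * Illustrative before/after for the transformation above (the path is
 * hypothetical).  A root disk's label config names the top-level vdev
 * directly:
 *
 *	vdev_tree: { type: "disk", path: "/dev/dsk/c0t0d0s0", ... }
 *
 * and afterwards that vdev is the sole child of a synthetic root:
 *
 *	vdev_tree: { type: "root", id: 0, guid: <pool guid>,
 *	    children: [ { type: "disk", path: "/dev/dsk/c0t0d0s0", ... } ] }
 */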
2317
2318/*
2319 * Get the root pool information from the root disk, then import the root pool
2320 * at system boot time.
2321 */
2322extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
2323
2324int
2325spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
2326    uint64_t *besttxg)
2327{
2328	nvlist_t *config;
2329	uint64_t txg;
2330	int error;
2331
2332	if ((error = vdev_disk_read_rootlabel(devpath, devid, &config)) != 0)
2333		return (error);
2334
2335	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2336
2337	if (bestconf != NULL)
2338		*bestconf = config;
2339	else
2340		nvlist_free(config);
2341	*besttxg = txg;
2342	return (0);
2343}
2344
2345boolean_t
2346spa_rootdev_validate(nvlist_t *nv)
2347{
2348	uint64_t ival;
2349
2350	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2351	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2352	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2353		return (B_FALSE);
2354
2355	return (B_TRUE);
2356}
2357
2358
2359/*
2360 * Given the boot device's physical path or devid, check if the device
2361 * is in a valid state.  If so, return the configuration from the vdev
2362 * label.
2363 */
2364int
2365spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
2366{
2367	nvlist_t *conf = NULL;
2368	uint64_t txg = 0;
2369	nvlist_t *nvtop, **child;
2370	char *type;
2371	char *bootpath = NULL;
2372	uint_t children, c;
2373	char *tmp;
2374	int error;
2375
2376	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
2377		*tmp = '\0';
2378	if ((error = spa_check_rootconf(devpath, devid, &conf, &txg)) != 0) {
2379		cmn_err(CE_NOTE, "error reading device label");
2380		return (error);
2381	}
2382	if (txg == 0) {
2383		cmn_err(CE_NOTE, "this device is detached");
2384		nvlist_free(conf);
2385		return (EINVAL);
2386	}
2387
2388	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
2389	    &nvtop) == 0);
2390	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);
2391
2392	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2393		if (spa_rootdev_validate(nvtop)) {
2394			goto out;
2395		} else {
2396			nvlist_free(conf);
2397			return (EINVAL);
2398		}
2399	}
2400
2401	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);
2402
2403	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
2404	    &child, &children) == 0);
2405
2406	/*
2407	 * Go through the vdevs in the mirror to see if the given device
2408	 * has the most recent txg. Only the device with the most
2409	 * recent txg has valid information and should be booted.
2410	 */
2411	for (c = 0; c < children; c++) {
2412		char *cdevid, *cpath;
2413		uint64_t tmptxg;
2414
2415		cpath = NULL;
2416		cdevid = NULL;
2417		(void) nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
2418		    &cpath);
2419		(void) nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID,
2420		    &cdevid);
2421		if (cpath == NULL && cdevid == NULL)
2422			return (EINVAL);
2423		if ((spa_check_rootconf(cpath, cdevid, NULL,
2424		    &tmptxg) == 0) && (tmptxg > txg)) {
2425			txg = tmptxg;
2426			VERIFY(nvlist_lookup_string(child[c],
2427			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
2428		}
2429	}
2430
2431	/* If a better device was found, the one we booted from is out of date. */
2432	if (bootpath) {
2433		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
2434		return (EINVAL);
2435	}
2436out:
2437	*bestconf = conf;
2438	return (0);
2439}
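
/*
 * Example of the mirror scan above (hypothetical txgs): if we boot from a
 * two-way mirrored root whose boot disk's label is at txg 100 while its
 * sibling was last updated at txg 120, the sibling wins; we print its path
 * via cmn_err() and return EINVAL rather than boot from stale data.
 */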
2440
2441/*
2442 * Import a root pool.
2443 *
2444 * For x86, devpath_list will consist of the devid and/or physpath name of
2445 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
2446 * The GRUB "findroot" command will return the vdev we should boot.
2447 *
2448 * For SPARC, devpath_list consists of the physpath name of the booting device,
2449 * regardless of whether the rootpool is a single-device or a mirrored pool.
2450 * e.g.
2451 *	"/pci@1f,0/ide@d/disk@0,0:a"
2452 */
2453int
2454spa_import_rootpool(char *devpath, char *devid)
2455{
2456	nvlist_t *conf = NULL;
2457	char *pname;
2458	int error;
2459	spa_t *spa;
2460
2461	/*
2462	 * Get the vdev pathname and configuration from the most
2463	 * recently updated vdev (highest txg).
2464	 */
2465	if ((error = spa_get_rootconf(devpath, devid, &conf)) != 0)
2466		goto msg_out;
2467
2468	/*
2469	 * Add type "root" vdev to the config.
2470	 */
2471	spa_build_rootpool_config(conf);
2472
2473	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2474
2475	mutex_enter(&spa_namespace_lock);
2476	if ((spa = spa_lookup(pname)) != NULL) {
2477		/*
2478		 * Remove the existing root pool from the namespace so that we
2479		 * can replace it with the correct config we just read in.
2480		 */
2481		spa_remove(spa);
2482	}
2483
2484	spa = spa_add(pname, NULL);
2485
2486	spa->spa_is_root = B_TRUE;
2487	VERIFY(nvlist_dup(conf, &spa->spa_config, 0) == 0);
2488	mutex_exit(&spa_namespace_lock);
2489
2490	nvlist_free(conf);
2491	return (0);
2492
2493msg_out:
2494	cmn_err(CE_NOTE, "\n"
2495	    "  ***************************************************  \n"
2496	    "  *  This device is not bootable!                   *  \n"
2497	    "  *  It is either offlined or detached or faulted.  *  \n"
2498	    "  *  Please try to boot from a different device.    *  \n"
2499	    "  ***************************************************  ");
2500
2501	return (error);
2502}
2503#endif	/* _KERNEL */
2504
2505/*
2506 * Take a pool and insert it into the namespace as if it had been loaded at
2507 * boot.
2508 */
2509int
2510spa_import_verbatim(const char *pool, nvlist_t *config, nvlist_t *props)
2511{
2512	spa_t *spa;
2513	char *altroot = NULL;
2514
2515	mutex_enter(&spa_namespace_lock);
2516	if (spa_lookup(pool) != NULL) {
2517		mutex_exit(&spa_namespace_lock);
2518		return (EEXIST);
2519	}
2520
2521	(void) nvlist_lookup_string(props,
2522	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2523	spa = spa_add(pool, altroot);
2524
2525	VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
2526
2527	if (props != NULL)
2528		spa_configfile_set(spa, props, B_FALSE);
2529
2530	spa_config_sync(spa, B_FALSE, B_TRUE);
2531
2532	mutex_exit(&spa_namespace_lock);
2533
2534	return (0);
2535}
2536
2537/*
2538 * Import a non-root pool into the system.
2539 */
2540int
2541spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2542{
2543	spa_t *spa;
2544	char *altroot = NULL;
2545	int error;
2546	nvlist_t *nvroot;
2547	nvlist_t **spares, **l2cache;
2548	uint_t nspares, nl2cache;
2549
2550	/*
2551	 * If a pool with this name exists, return failure.
2552	 */
2553	mutex_enter(&spa_namespace_lock);
2554	if ((spa = spa_lookup(pool)) != NULL) {
2555		mutex_exit(&spa_namespace_lock);
2556		return (EEXIST);
2557	}
2558
2559	/*
2560	 * Create and initialize the spa structure.
2561	 */
2562	(void) nvlist_lookup_string(props,
2563	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2564	spa = spa_add(pool, altroot);
2565	spa_activate(spa, spa_mode_global);
2566
2567	/*
2568	 * Don't start async tasks until we know everything is healthy.
2569	 */
2570	spa_async_suspend(spa);
2571
2572	/*
2573	 * Pass off the heavy lifting to spa_load().  Pass TRUE for mosconfig
2574	 * because the user-supplied config is actually the one to trust when
2575	 * doing an import.
2576	 */
2577	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);
2578
2579	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2580	/*
2581	 * Toss any existing sparelist, as it doesn't have any validity
2582	 * anymore, and conflicts with spa_has_spare().
2583	 */
2584	if (spa->spa_spares.sav_config) {
2585		nvlist_free(spa->spa_spares.sav_config);
2586		spa->spa_spares.sav_config = NULL;
2587		spa_load_spares(spa);
2588	}
2589	if (spa->spa_l2cache.sav_config) {
2590		nvlist_free(spa->spa_l2cache.sav_config);
2591		spa->spa_l2cache.sav_config = NULL;
2592		spa_load_l2cache(spa);
2593	}
2594
2595	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2596	    &nvroot) == 0);
2597	if (error == 0)
2598		error = spa_validate_aux(spa, nvroot, -1ULL,
2599		    VDEV_ALLOC_SPARE);
2600	if (error == 0)
2601		error = spa_validate_aux(spa, nvroot, -1ULL,
2602		    VDEV_ALLOC_L2CACHE);
2603	spa_config_exit(spa, SCL_ALL, FTAG);
2604
2605	if (props != NULL)
2606		spa_configfile_set(spa, props, B_FALSE);
2607
2608	if (error != 0 || (props && spa_writeable(spa) &&
2609	    (error = spa_prop_set(spa, props)))) {
2610		spa_unload(spa);
2611		spa_deactivate(spa);
2612		spa_remove(spa);
2613		mutex_exit(&spa_namespace_lock);
2614		return (error);
2615	}
2616
2617	spa_async_resume(spa);
2618
2619	/*
2620	 * Override any spares and level 2 cache devices as specified by
2621	 * the user, as these may have correct device names/devids, etc.
2622	 */
2623	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2624	    &spares, &nspares) == 0) {
2625		if (spa->spa_spares.sav_config)
2626			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
2627			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
2628		else
2629			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
2630			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2631		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2632		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2633		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2634		spa_load_spares(spa);
2635		spa_config_exit(spa, SCL_ALL, FTAG);
2636		spa->spa_spares.sav_sync = B_TRUE;
2637	}
2638	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2639	    &l2cache, &nl2cache) == 0) {
2640		if (spa->spa_l2cache.sav_config)
2641			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
2642			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
2643		else
2644			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2645			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2646		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2647		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2648		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2649		spa_load_l2cache(spa);
2650		spa_config_exit(spa, SCL_ALL, FTAG);
2651		spa->spa_l2cache.sav_sync = B_TRUE;
2652	}
2653
2654	if (spa_writeable(spa)) {
2655		/*
2656		 * Update the config cache to include the newly-imported pool.
2657		 */
2658		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, B_FALSE);
2659	}
2660
2661	mutex_exit(&spa_namespace_lock);
2662
2663	return (0);
2664}
2665
2666
2667/*
2668 * This (illegal) pool name is used when temporarily importing a spa_t in order
2669 * to get the vdev stats associated with the imported devices.
2670 */
2671#define	TRYIMPORT_NAME	"$import"
2672
2673nvlist_t *
2674spa_tryimport(nvlist_t *tryconfig)
2675{
2676	nvlist_t *config = NULL;
2677	char *poolname;
2678	spa_t *spa;
2679	uint64_t state;
2680	int error;
2681
2682	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
2683		return (NULL);
2684
2685	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
2686		return (NULL);
2687
2688	/*
2689	 * Create and initialize the spa structure.
2690	 */
2691	mutex_enter(&spa_namespace_lock);
2692	spa = spa_add(TRYIMPORT_NAME, NULL);
2693	spa_activate(spa, FREAD);
2694
2695	/*
2696	 * Pass off the heavy lifting to spa_load().
2697	 * Pass TRUE for mosconfig because the user-supplied config
2698	 * is actually the one to trust when doing an import.
2699	 */
2700	error = spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
2701
2702	/*
2703	 * If 'tryconfig' was at least parsable, return the current config.
2704	 */
2705	if (spa->spa_root_vdev != NULL) {
2706		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2707		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
2708		    poolname) == 0);
2709		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2710		    state) == 0);
2711		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2712		    spa->spa_uberblock.ub_timestamp) == 0);
2713
2714		/*
2715		 * If the bootfs property exists on this pool then we
2716		 * copy it out so that external consumers can tell which
2717		 * pools are bootable.
2718		 */
2719		if ((!error || error == EEXIST) && spa->spa_bootfs) {
2720			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2721
2722			/*
2723			 * We have to play games with the name since the
2724			 * pool was opened as TRYIMPORT_NAME.
2725			 */
2726			if (dsl_dsobj_to_dsname(spa_name(spa),
2727			    spa->spa_bootfs, tmpname) == 0) {
2728				char *cp;
2729				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2730
2731				cp = strchr(tmpname, '/');
2732				if (cp == NULL) {
2733					(void) strlcpy(dsname, tmpname,
2734					    MAXPATHLEN);
2735				} else {
2736					(void) snprintf(dsname, MAXPATHLEN,
2737					    "%s/%s", poolname, ++cp);
2738				}
2739				VERIFY(nvlist_add_string(config,
2740				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
2741				kmem_free(dsname, MAXPATHLEN);
2742			}
2743			kmem_free(tmpname, MAXPATHLEN);
2744		}
2745
2746		/*
2747		 * Add the list of hot spares and level 2 cache devices.
2748		 */
2749		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
2750		spa_add_spares(spa, config);
2751		spa_add_l2cache(spa, config);
2752		spa_config_exit(spa, SCL_CONFIG, FTAG);
2753	}
2754
2755	spa_unload(spa);
2756	spa_deactivate(spa);
2757	spa_remove(spa);
2758	mutex_exit(&spa_namespace_lock);
2759
2760	return (config);
2761}
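
/*
 * The config returned above (or NULL on failure) is allocated here by
 * spa_config_generate(); the caller is expected to free it with
 * nvlist_free() once it has extracted what it needs.
 */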
2762
2763/*
2764 * Pool export/destroy
2765 *
2766 * The act of destroying or exporting a pool is very simple.  We make sure
2767 * there is no more pending I/O and that all references to the pool are gone.
2768 * Then we update the pool state and sync all the labels to disk, removing
2769 * the configuration from the cache afterwards.  If the 'hardforce' flag is
2770 * set, we don't sync the labels or remove the configuration cache.
2771 */
2772static int
2773spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
2774    boolean_t force, boolean_t hardforce)
2775{
2776	spa_t *spa;
2777
2778	if (oldconfig)
2779		*oldconfig = NULL;
2780
2781	if (!(spa_mode_global & FWRITE))
2782		return (EROFS);
2783
2784	mutex_enter(&spa_namespace_lock);
2785	if ((spa = spa_lookup(pool)) == NULL) {
2786		mutex_exit(&spa_namespace_lock);
2787		return (ENOENT);
2788	}
2789
2790	/*
2791	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
2792	 * reacquire the namespace lock, and see if we can export.
2793	 */
2794	spa_open_ref(spa, FTAG);
2795	mutex_exit(&spa_namespace_lock);
2796	spa_async_suspend(spa);
2797	mutex_enter(&spa_namespace_lock);
2798	spa_close(spa, FTAG);
2799
2800	/*
2801	 * The pool will be in core if it's openable,
2802	 * in which case we can modify its state.
2803	 */
2804	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
2805		/*
2806		 * Objsets may be open only because they're dirty, so we
2807		 * have to force it to sync before checking spa_refcnt.
2808		 */
2809		txg_wait_synced(spa->spa_dsl_pool, 0);
2810
2811		/*
2812		 * A pool cannot be exported or destroyed if there are active
2813		 * references.  If we are resetting a pool, allow references by
2814		 * fault injection handlers.
2815		 */
2816		if (!spa_refcount_zero(spa) ||
2817		    (spa->spa_inject_ref != 0 &&
2818		    new_state != POOL_STATE_UNINITIALIZED)) {
2819			spa_async_resume(spa);
2820			mutex_exit(&spa_namespace_lock);
2821			return (EBUSY);
2822		}
2823
2824		/*
2825		 * A pool cannot be exported if it has an active shared spare.
2826		 * This is to prevent other pools stealing the active spare
2827		 * from an exported pool.  If the user really wants to, such a
2828		 * pool can still be forcibly exported.
2829		 */
2830		if (!force && new_state == POOL_STATE_EXPORTED &&
2831		    spa_has_active_shared_spare(spa)) {
2832			spa_async_resume(spa);
2833			mutex_exit(&spa_namespace_lock);
2834			return (EXDEV);
2835		}
2836
2837		/*
2838		 * We want this to be reflected on every label,
2839		 * so mark them all dirty.  spa_unload() will do the
2840		 * final sync that pushes these changes out.
2841		 */
2842		if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
2843			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2844			spa->spa_state = new_state;
2845			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
2846			vdev_config_dirty(spa->spa_root_vdev);
2847			spa_config_exit(spa, SCL_ALL, FTAG);
2848		}
2849	}
2850
2851	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
2852
2853	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
2854		spa_unload(spa);
2855		spa_deactivate(spa);
2856	}
2857
2858	if (oldconfig && spa->spa_config)
2859		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
2860
2861	if (new_state != POOL_STATE_UNINITIALIZED) {
2862		if (!hardforce)
2863			spa_config_sync(spa, B_TRUE, B_TRUE);
2864		spa_remove(spa);
2865	}
2866	mutex_exit(&spa_namespace_lock);
2867
2868	return (0);
2869}
2870
2871/*
2872 * Destroy a storage pool.
2873 */
2874int
2875spa_destroy(char *pool)
2876{
2877	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
2878	    B_FALSE, B_FALSE));
2879}
2880
2881/*
2882 * Export a storage pool.
2883 */
2884int
2885spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
2886    boolean_t hardforce)
2887{
2888	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
2889	    force, hardforce));
2890}
2891
2892/*
2893 * Similar to spa_export(), this unloads the spa_t without actually removing it
2894 * from the namespace in any way.
2895 */
2896int
2897spa_reset(char *pool)
2898{
2899	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
2900	    B_FALSE, B_FALSE));
2901}
2902
2903/*
2904 * ==========================================================================
2905 * Device manipulation
2906 * ==========================================================================
2907 */
2908
2909/*
2910 * Add a device to a storage pool.
2911 */
2912int
2913spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
2914{
2915	uint64_t txg;
2916	int error;
2917	vdev_t *rvd = spa->spa_root_vdev;
2918	vdev_t *vd, *tvd;
2919	nvlist_t **spares, **l2cache;
2920	uint_t nspares, nl2cache;
2921
2922	txg = spa_vdev_enter(spa);
2923
2924	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
2925	    VDEV_ALLOC_ADD)) != 0)
2926		return (spa_vdev_exit(spa, NULL, txg, error));
2927
2928	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
2929
2930	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
2931	    &nspares) != 0)
2932		nspares = 0;
2933
2934	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
2935	    &nl2cache) != 0)
2936		nl2cache = 0;
2937
2938	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
2939		return (spa_vdev_exit(spa, vd, txg, EINVAL));
2940
2941	if (vd->vdev_children != 0 &&
2942	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
2943		return (spa_vdev_exit(spa, vd, txg, error));
2944
2945	/*
2946	 * We must validate the spares and l2cache devices after checking the
2947	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
2948	 */
2949	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
2950		return (spa_vdev_exit(spa, vd, txg, error));
2951
2952	/*
2953	 * Transfer each new top-level vdev from vd to rvd.
2954	 */
2955	for (int c = 0; c < vd->vdev_children; c++) {
2956		tvd = vd->vdev_child[c];
2957		vdev_remove_child(vd, tvd);
2958		tvd->vdev_id = rvd->vdev_children;
2959		vdev_add_child(rvd, tvd);
2960		vdev_config_dirty(tvd);
2961	}
2962
2963	if (nspares != 0) {
2964		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
2965		    ZPOOL_CONFIG_SPARES);
2966		spa_load_spares(spa);
2967		spa->spa_spares.sav_sync = B_TRUE;
2968	}
2969
2970	if (nl2cache != 0) {
2971		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
2972		    ZPOOL_CONFIG_L2CACHE);
2973		spa_load_l2cache(spa);
2974		spa->spa_l2cache.sav_sync = B_TRUE;
2975	}
2976
2977	/*
2978	 * We have to be careful when adding new vdevs to an existing pool.
2979	 * If other threads start allocating from these vdevs before we
2980	 * sync the config cache, and we lose power, then upon reboot we may
2981	 * fail to open the pool because there are DVAs that the config cache
2982	 * can't translate.  Therefore, we first add the vdevs without
2983	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
2984	 * and then let spa_config_update() initialize the new metaslabs.
2985	 *
2986	 * spa_load() checks for added-but-not-initialized vdevs, so that
2987	 * if we lose power at any point in this sequence, the remaining
2988	 * steps will be completed the next time we load the pool.
2989	 */
2990	(void) spa_vdev_exit(spa, vd, txg, 0);
2991
2992	mutex_enter(&spa_namespace_lock);
2993	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2994	mutex_exit(&spa_namespace_lock);
2995
2996	return (0);
2997}
2998
2999/*
3000 * Attach a device to a mirror.  The arguments are the path to any device
3001 * in the mirror, and the nvroot for the new device.  If the path specifies
3002 * a device that is not mirrored, we automatically insert the mirror vdev.
3003 *
3004 * If 'replacing' is specified, the new device is intended to replace the
3005 * existing device; in this case the two devices are made into their own
3006 * mirror using the 'replacing' vdev, which is functionally identical to
3007 * the mirror vdev (it actually reuses all the same ops) but has a few
3008 * extra rules: you can't attach to it after it's been created, and upon
3009 * completion of resilvering, the first disk (the one being replaced)
3010 * is automatically detached.
3011 */
3012int
3013spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
3014{
3015	uint64_t txg, open_txg;
3016	vdev_t *rvd = spa->spa_root_vdev;
3017	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
3018	vdev_ops_t *pvops;
3019	dmu_tx_t *tx;
3020	char *oldvdpath, *newvdpath;
3021	int newvd_isspare;
3022	int error;
3023
3024	txg = spa_vdev_enter(spa);
3025
3026	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
3027
3028	if (oldvd == NULL)
3029		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3030
3031	if (!oldvd->vdev_ops->vdev_op_leaf)
3032		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3033
3034	pvd = oldvd->vdev_parent;
3035
3036	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
3037	    VDEV_ALLOC_ADD)) != 0)
3038		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
3039
3040	if (newrootvd->vdev_children != 1)
3041		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3042
3043	newvd = newrootvd->vdev_child[0];
3044
3045	if (!newvd->vdev_ops->vdev_op_leaf)
3046		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
3047
3048	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
3049		return (spa_vdev_exit(spa, newrootvd, txg, error));
3050
3051	/*
3052	 * Spares can't replace logs
3053	 */
3054	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
3055		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3056
3057	if (!replacing) {
3058		/*
3059		 * For attach, the only allowable parent is a mirror or the root
3060		 * vdev.
3061		 */
3062		if (pvd->vdev_ops != &vdev_mirror_ops &&
3063		    pvd->vdev_ops != &vdev_root_ops)
3064			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3065
3066		pvops = &vdev_mirror_ops;
3067	} else {
3068		/*
3069		 * Active hot spares can only be replaced by inactive hot
3070		 * spares.
3071		 */
3072		if (pvd->vdev_ops == &vdev_spare_ops &&
3073		    pvd->vdev_child[1] == oldvd &&
3074		    !spa_has_spare(spa, newvd->vdev_guid))
3075			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3076
3077		/*
3078		 * If the source is a hot spare, and the parent isn't already a
3079		 * spare, then we want to create a new hot spare.  Otherwise, we
3080		 * want to create a replacing vdev.  The user is not allowed to
3081		 * attach to a spared vdev child unless the 'isspare' state is
3082		 * the same (spare replaces spare, non-spare replaces
3083		 * non-spare).
3084		 */
3085		if (pvd->vdev_ops == &vdev_replacing_ops)
3086			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3087		else if (pvd->vdev_ops == &vdev_spare_ops &&
3088		    newvd->vdev_isspare != oldvd->vdev_isspare)
3089			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
3090		else if (pvd->vdev_ops != &vdev_spare_ops &&
3091		    newvd->vdev_isspare)
3092			pvops = &vdev_spare_ops;
3093		else
3094			pvops = &vdev_replacing_ops;
3095	}
3096
3097	/*
3098	 * Compare the new device size with the replaceable/attachable
3099	 * device size.
3100	 */
3101	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
3102		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
3103
3104	/*
3105	 * The new device cannot have a higher alignment requirement
3106	 * than the top-level vdev.
3107	 */
3108	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
3109		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
3110
3111	/*
3112	 * If this is an in-place replacement, update oldvd's path and devid
3113	 * to make it distinguishable from newvd, and unopenable from now on.
3114	 */
3115	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
3116		spa_strfree(oldvd->vdev_path);
3117		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
3118		    KM_SLEEP);
3119		(void) sprintf(oldvd->vdev_path, "%s/%s",
3120		    newvd->vdev_path, "old");
3121		if (oldvd->vdev_devid != NULL) {
3122			spa_strfree(oldvd->vdev_devid);
3123			oldvd->vdev_devid = NULL;
3124		}
3125	}
3126
3127	/*
3128	 * If the parent is not a mirror, or if we're replacing, insert the new
3129	 * mirror/replacing/spare vdev above oldvd.
3130	 */
3131	if (pvd->vdev_ops != pvops)
3132		pvd = vdev_add_parent(oldvd, pvops);
3133
3134	ASSERT(pvd->vdev_top->vdev_parent == rvd);
3135	ASSERT(pvd->vdev_ops == pvops);
3136	ASSERT(oldvd->vdev_parent == pvd);
3137
3138	/*
3139	 * Extract the new device from its root and add it to pvd.
3140	 */
3141	vdev_remove_child(newrootvd, newvd);
3142	newvd->vdev_id = pvd->vdev_children;
3143	vdev_add_child(pvd, newvd);
3144
3145	/*
3146	 * If newvd is smaller than oldvd, but larger than its rsize,
3147	 * the addition of newvd may have decreased our parent's asize.
3148	 */
3149	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
3150
3151	tvd = newvd->vdev_top;
3152	ASSERT(pvd->vdev_top == tvd);
3153	ASSERT(tvd->vdev_parent == rvd);
3154
3155	vdev_config_dirty(tvd);
3156
3157	/*
3158	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
3159	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
3160	 */
3161	open_txg = txg + TXG_CONCURRENT_STATES - 1;
3162
3163	vdev_dtl_dirty(newvd, DTL_MISSING,
3164	    TXG_INITIAL, open_txg - TXG_INITIAL + 1);
3165
3166	if (newvd->vdev_isspare) {
3167		spa_spare_activate(newvd);
3168		spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
3169	}
3170
3171	oldvdpath = spa_strdup(oldvd->vdev_path);
3172	newvdpath = spa_strdup(newvd->vdev_path);
3173	newvd_isspare = newvd->vdev_isspare;
3174
3175	/*
3176	 * Mark newvd's DTL dirty in this txg.
3177	 */
3178	vdev_dirty(tvd, VDD_DTL, newvd, txg);
3179
3180	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
3181
3182	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
3183	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
3184		spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
3185		    CRED(),  "%s vdev=%s %s vdev=%s",
3186		    replacing && newvd_isspare ? "spare in" :
3187		    replacing ? "replace" : "attach", newvdpath,
3188		    replacing ? "for" : "to", oldvdpath);
3189		dmu_tx_commit(tx);
3190	} else {
3191		dmu_tx_abort(tx);
3192	}
3193
3194	spa_strfree(oldvdpath);
3195	spa_strfree(newvdpath);
3196
3197	/*
3198	 * Kick off a resilver to update newvd.
3199	 */
3200	VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);
3201
3202	return (0);
3203}
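
/*
 * Illustrative vdev-tree transformations performed above (A is the
 * existing leaf, B the new device):
 *
 *	attach to plain disk A:    A            --> mirror(A, B)
 *	attach to mirror(A, X):    mirror(A, X) --> mirror(A, X, B)
 *	replace disk A:            A            --> replacing(A, B)
 *	spare in for disk A:       A            --> spare(A, B)
 *
 * In the replacing case, the device being replaced is detached
 * automatically once B has fully resilvered (see
 * spa_vdev_resilver_done()).
 */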
3204
3205/*
3206 * Detach a device from a mirror or replacing vdev.
3207 * If 'replace_done' is specified, only detach if the parent
3208 * is a replacing vdev.
3209 */
3210int
3211spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
3212{
3213	uint64_t txg;
3214	int error;
3215	vdev_t *rvd = spa->spa_root_vdev;
3216	vdev_t *vd, *pvd, *cvd, *tvd;
3217	boolean_t unspare = B_FALSE;
3218	uint64_t unspare_guid;
3219	size_t len;
3220
3221	txg = spa_vdev_enter(spa);
3222
3223	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3224
3225	if (vd == NULL)
3226		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3227
3228	if (!vd->vdev_ops->vdev_op_leaf)
3229		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3230
3231	pvd = vd->vdev_parent;
3232
3233	/*
3234	 * If the parent/child relationship is not as expected, don't do it.
3235	 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
3236	 * vdev that's replacing B with C.  The user's intent in replacing
3237	 * is to go from M(A,B) to M(A,C).  If the user decides to cancel
3238	 * the replace by detaching C, the expected behavior is to end up
3239	 * M(A,B).  But suppose that right after deciding to detach C,
3240	 * the replacement of B completes.  We would have M(A,C), and then
3241	 * ask to detach C, which would leave us with just A -- not what
3242	 * the user wanted.  To prevent this, we make sure that the
3243	 * parent/child relationship hasn't changed -- in this example,
3244	 * that C's parent is still the replacing vdev R.
3245	 */
3246	if (pvd->vdev_guid != pguid && pguid != 0)
3247		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3248
3249	/*
3250	 * If replace_done is specified, only remove this device if it's
3251	 * the first child of a replacing vdev.  For the 'spare' vdev, either
3252	 * disk can be removed.
3253	 */
3254	if (replace_done) {
3255		if (pvd->vdev_ops == &vdev_replacing_ops) {
3256			if (vd->vdev_id != 0)
3257				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3258		} else if (pvd->vdev_ops != &vdev_spare_ops) {
3259			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3260		}
3261	}
3262
3263	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
3264	    spa_version(spa) >= SPA_VERSION_SPARES);
3265
3266	/*
3267	 * Only mirror, replacing, and spare vdevs support detach.
3268	 */
3269	if (pvd->vdev_ops != &vdev_replacing_ops &&
3270	    pvd->vdev_ops != &vdev_mirror_ops &&
3271	    pvd->vdev_ops != &vdev_spare_ops)
3272		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3273
3274	/*
3275	 * If this device has the only valid copy of some data,
3276	 * we cannot safely detach it.
3277	 */
3278	if (vdev_dtl_required(vd))
3279		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3280
3281	ASSERT(pvd->vdev_children >= 2);
3282
3283	/*
3284	 * If we are detaching the second disk from a replacing vdev, then
3285	 * check to see if we changed the original vdev's path to have "/old"
3286	 * at the end in spa_vdev_attach().  If so, undo that change now.
3287	 */
3288	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
3289	    pvd->vdev_child[0]->vdev_path != NULL &&
3290	    pvd->vdev_child[1]->vdev_path != NULL) {
3291		ASSERT(pvd->vdev_child[1] == vd);
3292		cvd = pvd->vdev_child[0];
3293		len = strlen(vd->vdev_path);
3294		if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
3295		    strcmp(cvd->vdev_path + len, "/old") == 0) {
3296			spa_strfree(cvd->vdev_path);
3297			cvd->vdev_path = spa_strdup(vd->vdev_path);
3298		}
3299	}
3300
3301	/*
3302	 * If we are detaching the original disk from a spare, then it implies
3303	 * that the spare should become a real disk, and be removed from the
3304	 * active spare list for the pool.
3305	 */
3306	if (pvd->vdev_ops == &vdev_spare_ops &&
3307	    vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare)
3308		unspare = B_TRUE;
3309
3310	/*
3311	 * Erase the disk labels so the disk can be used for other things.
3312	 * This must be done after all other error cases are handled,
3313	 * but before we disembowel vd (so we can still do I/O to it).
3314	 * But if we can't do it, don't treat the error as fatal --
3315	 * it may be that the unwritability of the disk is the reason
3316	 * it's being detached!
3317	 */
3318	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
3319
3320	/*
3321	 * Remove vd from its parent and compact the parent's children.
3322	 */
3323	vdev_remove_child(pvd, vd);
3324	vdev_compact_children(pvd);
3325
3326	/*
3327	 * Remember one of the remaining children so we can get tvd below.
3328	 */
3329	cvd = pvd->vdev_child[0];
3330
3331	/*
3332	 * If we need to remove the remaining child from the list of hot spares,
3333	 * do it now, marking the vdev as no longer a spare in the process.
3334	 * We must do this before vdev_remove_parent(), because that can
3335	 * change the GUID if it creates a new toplevel GUID.  For a similar
3336	 * reason, we must remove the spare now, in the same txg as the detach;
3337	 * otherwise someone could attach a new sibling, change the GUID, and
3338	 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
3339	 */
3340	if (unspare) {
3341		ASSERT(cvd->vdev_isspare);
3342		spa_spare_remove(cvd);
3343		unspare_guid = cvd->vdev_guid;
3344		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3345	}
3346
3347	/*
3348	 * If the parent mirror/replacing vdev only has one child,
3349	 * the parent is no longer needed.  Remove it from the tree.
3350	 */
3351	if (pvd->vdev_children == 1)
3352		vdev_remove_parent(cvd);
3353
3354	/*
3355	 * We don't set tvd until now because the parent we just removed
3356	 * may have been the previous top-level vdev.
3357	 */
3358	tvd = cvd->vdev_top;
3359	ASSERT(tvd->vdev_parent == rvd);
3360
3361	/*
3362	 * Reevaluate the parent vdev state.
3363	 */
3364	vdev_propagate_state(cvd);
3365
3366	/*
3367	 * If the device we just detached was smaller than the others, it may be
3368	 * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
3369	 * can't fail because the existing metaslabs are already in core, so
3370	 * there's nothing to read from disk.
3371	 */
3372	VERIFY(vdev_metaslab_init(tvd, txg) == 0);
3373
3374	vdev_config_dirty(tvd);
3375
3376	/*
3377	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
3378	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
3379	 * But first make sure we're not on any *other* txg's DTL list, to
3380	 * prevent vd from being accessed after it's freed.
3381	 */
3382	for (int t = 0; t < TXG_SIZE; t++)
3383		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
3384	vd->vdev_detached = B_TRUE;
3385	vdev_dirty(tvd, VDD_DTL, vd, txg);
3386
3387	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
3388
3389	error = spa_vdev_exit(spa, vd, txg, 0);
3390
3391	/*
3392	 * If this was the removal of the original device in a hot spare vdev,
3393	 * then we want to go through and remove the device from the hot spare
3394	 * list of every other pool.
3395	 */
3396	if (unspare) {
3397		spa_t *myspa = spa;
3398		spa = NULL;
3399		mutex_enter(&spa_namespace_lock);
3400		while ((spa = spa_next(spa)) != NULL) {
3401			if (spa->spa_state != POOL_STATE_ACTIVE)
3402				continue;
3403			if (spa == myspa)
3404				continue;
3405			spa_open_ref(spa, FTAG);
3406			mutex_exit(&spa_namespace_lock);
3407			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3408			mutex_enter(&spa_namespace_lock);
3409			spa_close(spa, FTAG);
3410		}
3411		mutex_exit(&spa_namespace_lock);
3412	}
3413
3414	return (error);
3415}
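
/*
 * Companion examples for the detach path (B is the device being
 * detached):
 *
 *	mirror(A, B)    --> A	(the one-child mirror is collapsed)
 *	replacing(A, B) --> A	(cancels an in-progress replace)
 *	spare(A, B)     --> A	(B becomes an available spare again)
 *
 * Detaching A from spare(A, B) instead promotes B to a real disk and,
 * via the unspare loop above, removes its guid from the spare list of
 * every other active pool.
 */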
3416
3417static nvlist_t *
3418spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
3419{
3420	for (int i = 0; i < count; i++) {
3421		uint64_t guid;
3422
3423		VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3424		    &guid) == 0);
3425
3426		if (guid == target_guid)
3427			return (nvpp[i]);
3428	}
3429
3430	return (NULL);
3431}
3432
3433static void
3434spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3435	nvlist_t *dev_to_remove)
3436{
3437	nvlist_t **newdev = NULL;
3438
3439	if (count > 1)
3440		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3441
3442	for (int i = 0, j = 0; i < count; i++) {
3443		if (dev[i] == dev_to_remove)
3444			continue;
3445		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3446	}
3447
3448	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3449	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3450
3451	for (int i = 0; i < count - 1; i++)
3452		nvlist_free(newdev[i]);
3453
3454	if (count > 1)
3455		kmem_free(newdev, (count - 1) * sizeof (void *));
3456}
3457
3458/*
3459 * Remove a device from the pool.  Currently, this supports removing only hot
3460 * spares and level 2 ARC devices.
3461 */
3462int
3463spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3464{
3465	vdev_t *vd;
3466	nvlist_t **spares, **l2cache, *nv;
3467	uint_t nspares, nl2cache;
3468	uint64_t txg = 0;
3469	int error = 0;
3470	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
3471
3472	if (!locked)
3473		txg = spa_vdev_enter(spa);
3474
3475	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3476
3477	if (spa->spa_spares.sav_vdevs != NULL &&
3478	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3479	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
3480	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
3481		/*
3482		 * Only remove the hot spare if it's not currently in use
3483		 * in this pool.
3484		 */
3485		if (vd == NULL || unspare) {
3486			spa_vdev_remove_aux(spa->spa_spares.sav_config,
3487			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
3488			spa_load_spares(spa);
3489			spa->spa_spares.sav_sync = B_TRUE;
3490		} else {
3491			error = EBUSY;
3492		}
3493	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
3494	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3495	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
3496	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
3497		/*
3498		 * Cache devices can always be removed.
3499		 */
3500		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
3501		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
3502		spa_load_l2cache(spa);
3503		spa->spa_l2cache.sav_sync = B_TRUE;
3504	} else if (vd != NULL) {
3505		/*
3506		 * Normal vdevs cannot be removed (yet).
3507		 */
3508		error = ENOTSUP;
3509	} else {
3510		/*
3511		 * There is no vdev of any kind with the specified guid.
3512		 */
3513		error = ENOENT;
3514	}
3515
3516	if (!locked)
3517		return (spa_vdev_exit(spa, NULL, txg, error));
3518
3519	return (error);
3520}
3521
3522/*
3523 * Find any device that's done replacing, or a vdev marked 'unspare' that's
3524 * currently spared, so we can detach it.
3525 */
3526static vdev_t *
3527spa_vdev_resilver_done_hunt(vdev_t *vd)
3528{
3529	vdev_t *newvd, *oldvd;
3530	int c;
3531
3532	for (c = 0; c < vd->vdev_children; c++) {
3533		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3534		if (oldvd != NULL)
3535			return (oldvd);
3536	}
3537
3538	/*
3539	 * Check for a completed replacement.
3540	 */
3541	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3542		oldvd = vd->vdev_child[0];
3543		newvd = vd->vdev_child[1];
3544
3545		if (vdev_dtl_empty(newvd, DTL_MISSING) &&
3546		    !vdev_dtl_required(oldvd))
3547			return (oldvd);
3548	}
3549
3550	/*
3551	 * Check for a completed resilver with the 'unspare' flag set.
3552	 */
3553	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
3554		newvd = vd->vdev_child[0];
3555		oldvd = vd->vdev_child[1];
3556
3557		if (newvd->vdev_unspare &&
3558		    vdev_dtl_empty(newvd, DTL_MISSING) &&
3559		    !vdev_dtl_required(oldvd)) {
3560			newvd->vdev_unspare = 0;
3561			return (oldvd);
3562		}
3563	}
3564
3565	return (NULL);
3566}
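
/*
 * Note the child-ordering conventions relied on above: in a replacing
 * vdev, child 0 is the device being replaced and child 1 its replacement;
 * in a spare vdev, child 0 occupies the original device's slot and
 * child 1 is the hot spare.  That is why the two checks index the
 * children differently.
 */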
3567
3568static void
3569spa_vdev_resilver_done(spa_t *spa)
3570{
3571	vdev_t *vd, *pvd, *ppvd;
3572	uint64_t guid, sguid, pguid, ppguid;
3573
3574	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3575
3576	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3577		pvd = vd->vdev_parent;
3578		ppvd = pvd->vdev_parent;
3579		guid = vd->vdev_guid;
3580		pguid = pvd->vdev_guid;
3581		ppguid = ppvd->vdev_guid;
3582		sguid = 0;
3583		/*
3584		 * If we have just finished replacing a hot spared device, then
3585		 * we need to detach the parent's first child (the original hot
3586		 * spare) as well.
3587		 */
3588		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
3589			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
3590			ASSERT(ppvd->vdev_children == 2);
3591			sguid = ppvd->vdev_child[1]->vdev_guid;
3592		}
3593		spa_config_exit(spa, SCL_ALL, FTAG);
3594		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
3595			return;
3596		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
3597			return;
3598		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3599	}
3600
3601	spa_config_exit(spa, SCL_ALL, FTAG);
3602}
3603
3604/*
3605 * Update the stored path or FRU for this vdev.  Dirty the vdev configuration,
3606 * relying on spa_vdev_enter/exit() to synchronize the labels and cache.
3607 */
3608int
3609spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
3610    boolean_t ispath)
3611{
3612	vdev_t *vd;
3613	uint64_t txg;
3614
3615	txg = spa_vdev_enter(spa);
3616
3617	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
3618		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
3619
3620	if (!vd->vdev_ops->vdev_op_leaf)
3621		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3622
3623	if (ispath) {
3624		spa_strfree(vd->vdev_path);
3625		vd->vdev_path = spa_strdup(value);
3626	} else {
3627		if (vd->vdev_fru != NULL)
3628			spa_strfree(vd->vdev_fru);
3629		vd->vdev_fru = spa_strdup(value);
3630	}
3631
3632	vdev_config_dirty(vd->vdev_top);
3633
3634	return (spa_vdev_exit(spa, NULL, txg, 0));
3635}
3636
3637int
3638spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3639{
3640	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
3641}
3642
3643int
3644spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
3645{
3646	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
3647}
3648
3649/*
3650 * ==========================================================================
3651 * SPA Scrubbing
3652 * ==========================================================================
3653 */
3654
3655int
3656spa_scrub(spa_t *spa, pool_scrub_type_t type)
3657{
3658	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3659
3660	if ((uint_t)type >= POOL_SCRUB_TYPES)
3661		return (ENOTSUP);
3662
3663	/*
3664	 * If a resilver was requested, but there is no DTL on a
3665	 * writeable leaf device, we have nothing to do.
3666	 */
3667	if (type == POOL_SCRUB_RESILVER &&
3668	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3669		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3670		return (0);
3671	}
3672
3673	if (type == POOL_SCRUB_EVERYTHING &&
3674	    spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3675	    spa->spa_dsl_pool->dp_scrub_isresilver)
3676		return (EBUSY);
3677
3678	if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3679		return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3680	} else if (type == POOL_SCRUB_NONE) {
3681		return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
3682	} else {
3683		return (EINVAL);
3684	}
3685}
3686
3687/*
3688 * ==========================================================================
3689 * SPA async task processing
3690 * ==========================================================================
3691 */
3692
3693static void
3694spa_async_remove(spa_t *spa, vdev_t *vd)
3695{
3696	if (vd->vdev_remove_wanted) {
3697		vd->vdev_remove_wanted = 0;
3698		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
3699		vdev_clear(spa, vd);
3700		vdev_state_dirty(vd->vdev_top);
3701	}
3702
3703	for (int c = 0; c < vd->vdev_children; c++)
3704		spa_async_remove(spa, vd->vdev_child[c]);
3705}
3706
3707static void
3708spa_async_probe(spa_t *spa, vdev_t *vd)
3709{
3710	if (vd->vdev_probe_wanted) {
3711		vd->vdev_probe_wanted = 0;
3712		vdev_reopen(vd);	/* vdev_open() does the actual probe */
3713	}
3714
3715	for (int c = 0; c < vd->vdev_children; c++)
3716		spa_async_probe(spa, vd->vdev_child[c]);
3717}
3718
static void
spa_async_thread(spa_t *spa)
{
	int tasks;

	ASSERT(spa->spa_sync_on);

	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks = 0;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if the config needs to be updated.
	 */
	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
		mutex_enter(&spa_namespace_lock);
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
		mutex_exit(&spa_namespace_lock);
	}

	/*
	 * See if any devices need to be marked REMOVED.
	 */
	if (tasks & SPA_ASYNC_REMOVE) {
		spa_vdev_state_enter(spa);
		spa_async_remove(spa, spa->spa_root_vdev);
		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
		for (int i = 0; i < spa->spa_spares.sav_count; i++)
			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	/*
	 * See if any devices need to be probed.
	 */
	if (tasks & SPA_ASYNC_PROBE) {
		spa_vdev_state_enter(spa);
		spa_async_probe(spa, spa->spa_root_vdev);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_RESILVER_DONE)
		spa_vdev_resilver_done(spa);

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER)
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}

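/*
 * Disable async task dispatch and wait for any in-flight async thread
 * to finish.  Suspensions nest: each spa_async_suspend() must be
 * balanced by a matching spa_async_resume().
 */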
void
spa_async_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_suspended++;
	while (spa->spa_async_thread != NULL)
		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
	mutex_exit(&spa->spa_async_lock);
}

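/*
 * Drop one level of async suspension.  Any tasks that accumulated while
 * suspended are not dispatched here; they wait for the next call to
 * spa_async_dispatch().
 */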
void
spa_async_resume(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	ASSERT(spa->spa_async_suspended != 0);
	spa->spa_async_suspended--;
	mutex_exit(&spa->spa_async_lock);
}

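/*
 * Spawn the async thread if there is work to do and nothing prevents it:
 * tasks are pending, we are not suspended, no async thread is already
 * running, and the root filesystem is writeable.
 */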
static void
spa_async_dispatch(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
	    spa->spa_async_thread == NULL &&
	    rootdir != NULL && !vn_is_readonly(rootdir))
		spa->spa_async_thread = thread_create(NULL, 0,
		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&spa->spa_async_lock);
}

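/*
 * Latch a task request in spa_async_tasks; it will be serviced the next
 * time spa_async_dispatch() runs, at the end of spa_sync().  For example,
 * spa_scrub() above requests resilver-completion handling with:
 *
 *	spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
 */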
void
spa_async_request(spa_t *spa, int task)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_tasks |= task;
	mutex_exit(&spa->spa_async_lock);
}

/*
 * ==========================================================================
 * SPA syncing routines
 * ==========================================================================
 */

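/*
 * Free every block on the deferred-free bplist (all of which were born
 * before this txg), then vacate the list.
 */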
static void
spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
{
	bplist_t *bpl = &spa->spa_sync_bplist;
	dmu_tx_t *tx;
	blkptr_t blk;
	uint64_t itor = 0;
	zio_t *zio;
	int error;
	uint8_t c = 1;

	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	while (bplist_iterate(bpl, &itor, &blk) == 0) {
		ASSERT(blk.blk_birth < txg);
		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED));
	}

	error = zio_wait(zio);
	ASSERT3U(error, ==, 0);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	bplist_vacate(bpl, tx);

	/*
	 * Pre-dirty the first block so we sync to convergence faster.
	 * (Usually only the first block is needed.)
	 */
	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
	dmu_tx_commit(tx);
}

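/*
 * Pack 'nv' as XDR, write it to packed-nvlist object 'obj' in whole
 * SPA_CONFIG_BLOCKSIZE blocks, and record the packed size in the
 * object's bonus buffer.
 */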
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
	char *packed = NULL;
	size_t bufsize;
	size_t nvsize = 0;
	dmu_buf_t *db;

	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);

	/*
	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
	 * information.  This avoids the dbuf_will_dirty() path and
	 * saves us a pre-read to get data we don't actually care about.
	 */
	bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
	packed = kmem_alloc(bufsize, KM_SLEEP);

	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);
	bzero(packed + nvsize, bufsize - nvsize);

	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);

	kmem_free(packed, bufsize);

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = nvsize;
	dmu_buf_rele(db, FTAG);
}

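/*
 * Sync an auxiliary vdev array (spares or L2ARC devices) to the MOS:
 * allocate the backing packed-nvlist object on first use, generate a
 * config nvlist for each device, and write the result out through
 * spa_sync_nvlist().
 */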
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
    const char *config, const char *entry)
{
	nvlist_t *nvroot;
	nvlist_t **list;
	int i;

	if (!sav->sav_sync)
		return;

	/*
	 * Update the MOS nvlist describing the list of available devices.
	 * spa_validate_aux() will have already made sure this nvlist is
	 * valid and the vdevs are labeled appropriately.
	 */
	if (sav->sav_object == 0) {
		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
		    &sav->sav_object, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	if (sav->sav_count == 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
	} else {
		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
		for (i = 0; i < sav->sav_count; i++)
			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
			    B_FALSE, B_FALSE, B_TRUE);
		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
		    sav->sav_count) == 0);
		for (i = 0; i < sav->sav_count; i++)
			nvlist_free(list[i]);
		kmem_free(list, sav->sav_count * sizeof (void *));
	}

	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
	nvlist_free(nvroot);

	sav->sav_sync = B_FALSE;
}

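/*
 * If the vdev configuration is dirty, regenerate it for this txg and
 * write it to the MOS config object.  The new config is held in
 * spa_config_syncing until spa_sync() sees it safely on disk.
 */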
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
	nvlist_t *config;

	if (list_is_empty(&spa->spa_config_dirty_list))
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	config = spa_config_generate(spa, spa->spa_root_vdev,
	    dmu_tx_get_txg(tx), B_FALSE);

	spa_config_exit(spa, SCL_STATE, FTAG);

	if (spa->spa_config_syncing)
		nvlist_free(spa->spa_config_syncing);
	spa->spa_config_syncing = config;

	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}

/*
 * Set zpool properties.
 */
static void
spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	spa_t *spa = arg1;
	objset_t *mos = spa->spa_meta_objset;
	nvlist_t *nvp = arg2;
	nvpair_t *elem;
	uint64_t intval;
	char *strval;
	zpool_prop_t prop;
	const char *propname;
	zprop_type_t proptype;

	mutex_enter(&spa->spa_props_lock);

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPOOL_PROP_VERSION:
			/*
			 * Only set version for non-zpool-creation cases
			 * (set/import). spa_create() needs special care
			 * for version setting.
			 */
			if (tx->tx_txg != TXG_INITIAL) {
				VERIFY(nvpair_value_uint64(elem,
				    &intval) == 0);
				ASSERT(intval <= SPA_VERSION);
				ASSERT(intval >= spa_version(spa));
				spa->spa_uberblock.ub_version = intval;
				vdev_config_dirty(spa->spa_root_vdev);
			}
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property. It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'cachefile' is also a non-persistent property.
			 */
			break;
		default:
			/*
			 * Set pool property values in the poolprops mos object.
			 */
			if (spa->spa_pool_props_object == 0) {
				objset_t *mos = spa->spa_meta_objset;

				VERIFY((spa->spa_pool_props_object =
				    zap_create(mos, DMU_OT_POOL_PROPS,
				    DMU_OT_NONE, 0, tx)) > 0);

				VERIFY(zap_update(mos,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    8, 1, &spa->spa_pool_props_object, tx)
				    == 0);
			}

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				VERIFY(nvpair_value_string(elem, &strval) == 0);
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx) == 0);

			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				VERIFY(nvpair_value_uint64(elem, &intval) == 0);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY(zpool_prop_index_to_string(
					    prop, intval, &unused) == 0);
				}
				VERIFY(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx) == 0);
			} else {
				ASSERT(0); /* not allowed */
			}

			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			default:
				break;
			}
		}

		/* log internal history if this is not a zpool create */
		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
		    tx->tx_txg != TXG_INITIAL) {
			spa_history_internal_log(LOG_POOL_PROPSET,
			    spa, tx, cr, "%s %lld %s",
			    nvpair_name(elem), intval, spa_name(spa));
		}
	}

	mutex_exit(&spa->spa_props_lock);
}

/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bplist_t *bpl = &spa->spa_sync_bplist;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int dirty_vdevs;
	int error;

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	/*
	 * If there are any pending vdev state changes, convert them
	 * into config changes that go out with this transaction group.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	while (list_head(&spa->spa_state_dirty_list) != NULL) {
		/*
		 * We need the write lock here because, for aux vdevs,
		 * calling vdev_config_dirty() modifies sav_config.
		 * This is ugly and will become unnecessary when we
		 * eliminate the aux vdev wart by integrating all vdevs
		 * into the root vdev tree.
		 */
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
			vdev_state_clean(vd);
			vdev_config_dirty(vd);
		}
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		int i;

		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	/*
	 * If anything has changed in this txg, push the deferred frees
	 * from the previous txg.  If not, leave them alone so that we
	 * don't generate work on an otherwise idle system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg))
		spa_sync_deferred_frees(spa, txg);

	/*
	 * Iterate to convergence.
	 */
	do {
		spa->spa_sync_pass++;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		dirty_vdevs = 0;
		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
		    txg)) != NULL) {
			vdev_sync(vd, txg);
			dirty_vdevs++;
		}

		bplist_sync(bpl, tx);
	} while (dirty_vdevs);

	bplist_close(bpl);

	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for a complete description).
	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
	 */
	for (;;) {
		/*
		 * We hold SCL_STATE to prevent vdev open/close/etc.
		 * while we're attempting to write the vdev labels.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

		if (list_is_empty(&spa->spa_config_dirty_list)) {
			vdev_t *svd[SPA_DVAS_PER_BP];
			int svdcount = 0;
			int children = rvd->vdev_children;
			int c0 = spa_get_random(children);
			int c;

			for (c = 0; c < children; c++) {
				vd = rvd->vdev_child[(c0 + c) % children];
				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
					continue;
				svd[svdcount++] = vd;
				if (svdcount == SPA_DVAS_PER_BP)
					break;
			}
			error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
			if (error != 0)
				error = vdev_config_sync(svd, svdcount, txg,
				    B_TRUE);
		} else {
			error = vdev_config_sync(rvd->vdev_child,
			    rvd->vdev_children, txg, B_FALSE);
			if (error != 0)
				error = vdev_config_sync(rvd->vdev_child,
				    rvd->vdev_children, txg, B_TRUE);
		}

		spa_config_exit(spa, SCL_STATE, FTAG);

		if (error == 0)
			break;
		zio_suspend(spa, NULL);
		zio_resume_wait(spa);
	}
	dmu_tx_commit(tx);

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Clean up the ZIL records for the synced txg.
	 */
	dsl_pool_zil_clean(dp);

	/*
	 * Update usable space statistics.
	 */
	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
	    TXG_CLEAN(txg))) != NULL)
		vdev_sync_done(vd, txg);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
	ASSERT(bpl->bpl_queue == NULL);

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
}

/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}

/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

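/*
 * Look up a vdev by guid.  The search covers the root vdev tree and,
 * if 'aux' is set, the L2ARC and spare device lists as well.
 */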
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (aux) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}

		for (i = 0; i < spa->spa_spares.sav_count; i++) {
			vd = spa->spa_spares.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

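/*
 * Raise the pool's on-disk version.  The new version takes effect by
 * dirtying the vdev config and waiting for the resulting txg to sync.
 */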
void
spa_upgrade(spa_t *spa, uint64_t version)
{
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * pool from a future version would not have been openable, the
	 * current version should never exceed SPA_VERSION.
	 */
	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
	ASSERT(version >= spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}

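/*
 * Determine whether 'guid' names one of this pool's spares, either an
 * active spare or one still pending addition.
 */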
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare
 * and once as a replacing vdev.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t		*ev;
	sysevent_attr_list_t	*attr = NULL;
	sysevent_value_t	value;
	sysevent_id_t		eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
