spa.c revision 00504c01522ac1118e1553c547be8c646c2e57a0
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 * This file contains all the routines used when modifying on-disk SPA state.
29 * This includes opening, importing, destroying, and exporting a pool, and
30 * syncing a pool.
31 */
32
33#include <sys/zfs_context.h>
34#include <sys/fm/fs/zfs.h>
35#include <sys/spa_impl.h>
36#include <sys/zio.h>
37#include <sys/zio_checksum.h>
38#include <sys/zio_compress.h>
39#include <sys/dmu.h>
40#include <sys/dmu_tx.h>
41#include <sys/zap.h>
42#include <sys/zil.h>
43#include <sys/vdev_impl.h>
44#include <sys/metaslab.h>
45#include <sys/uberblock_impl.h>
46#include <sys/txg.h>
47#include <sys/avl.h>
48#include <sys/dmu_traverse.h>
49#include <sys/dmu_objset.h>
50#include <sys/unique.h>
51#include <sys/dsl_pool.h>
52#include <sys/dsl_dataset.h>
53#include <sys/dsl_dir.h>
54#include <sys/dsl_prop.h>
55#include <sys/dsl_synctask.h>
56#include <sys/fs/zfs.h>
57#include <sys/arc.h>
58#include <sys/callb.h>
59#include <sys/systeminfo.h>
60#include <sys/sunddi.h>
61#include <sys/spa_boot.h>
62
63#include "zfs_prop.h"
64#include "zfs_comutil.h"
65
66int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
67	/*	ISSUE	INTR					*/
68	{	1,	1	},	/* ZIO_TYPE_NULL	*/
69	{	1,	8	},	/* ZIO_TYPE_READ	*/
70	{	8,	1	},	/* ZIO_TYPE_WRITE	*/
71	{	1,	1	},	/* ZIO_TYPE_FREE	*/
72	{	1,	1	},	/* ZIO_TYPE_CLAIM	*/
73	{	1,	1	},	/* ZIO_TYPE_IOCTL	*/
74};
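/*
 * The thread counts above put the extra threads where each zio type
 * does its heavy lifting: read completions (checksum verification,
 * decompression) run in interrupt context, hence eight INTR threads,
 * while compression and checksum generation make issue the expensive
 * leg for writes, hence eight ISSUE threads.  The remaining types are
 * low-volume and make do with a single thread each.
 */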
75
76static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
77static boolean_t spa_has_active_shared_spare(spa_t *spa);
78
79/*
80 * ==========================================================================
81 * SPA properties routines
82 * ==========================================================================
83 */
84
85/*
86 * Add a (source=src, propname=propval) list to an nvlist.
87 */
88static void
89spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
90    uint64_t intval, zprop_source_t src)
91{
92	const char *propname = zpool_prop_to_name(prop);
93	nvlist_t *propval;
94
95	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
96	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
97
98	if (strval != NULL)
99		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
100	else
101		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
102
103	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
104	nvlist_free(propval);
105}
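/*
 * For reference, a sketch of the entry this builds: nvl gains a nested
 * nvlist keyed by the property name, carrying both the value and where
 * it came from:
 *
 *	"<propname>" -> { ZPROP_SOURCE -> src,
 *			  ZPROP_VALUE  -> strval or intval }
 */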
106
107/*
108 * Get property values from the spa configuration.
109 */
110static void
111spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
112{
113	uint64_t size = spa_get_space(spa);
114	uint64_t used = spa_get_alloc(spa);
115	uint64_t cap, version;
116	zprop_source_t src = ZPROP_SRC_NONE;
117	spa_config_dirent_t *dp;
118
119	ASSERT(MUTEX_HELD(&spa->spa_props_lock));
120
121	/*
122	 * readonly properties
123	 */
124	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
125	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
126	spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
127	spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);
128
129	cap = (size == 0) ? 0 : (used * 100 / size);
130	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
131
132	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
133	spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
134	    spa->spa_root_vdev->vdev_state, src);
135
136	/*
137	 * settable properties that are not stored in the pool property object.
138	 */
139	version = spa_version(spa);
140	if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
141		src = ZPROP_SRC_DEFAULT;
142	else
143		src = ZPROP_SRC_LOCAL;
144	spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
145
146	if (spa->spa_root != NULL)
147		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
148		    0, ZPROP_SRC_LOCAL);
149
150	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
151		if (dp->scd_path == NULL) {
152			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
153			    "none", 0, ZPROP_SRC_LOCAL);
154		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
155			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
156			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
157		}
158	}
159}
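/*
 * (The capacity computed above is an integer percentage, rounded down;
 * a nearly empty pool reports 0% until at least 1/100th of its space
 * is in use.)
 */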
160
161/*
162 * Get zpool property values.
163 */
164int
165spa_prop_get(spa_t *spa, nvlist_t **nvp)
166{
167	zap_cursor_t zc;
168	zap_attribute_t za;
169	objset_t *mos = spa->spa_meta_objset;
170	int err;
171
172	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
173
174	mutex_enter(&spa->spa_props_lock);
175
176	/*
177	 * Get properties from the spa config.
178	 */
179	spa_prop_get_config(spa, nvp);
180
181	/* If no pool property object, no more props to get. */
182	if (spa->spa_pool_props_object == 0) {
183		mutex_exit(&spa->spa_props_lock);
184		return (0);
185	}
186
187	/*
188	 * Get properties from the MOS pool property object.
189	 */
190	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
191	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
192	    zap_cursor_advance(&zc)) {
193		uint64_t intval = 0;
194		char *strval = NULL;
195		zprop_source_t src = ZPROP_SRC_DEFAULT;
196		zpool_prop_t prop;
197
198		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
199			continue;
200
201		switch (za.za_integer_length) {
202		case 8:
203			/* integer property */
204			if (za.za_first_integer !=
205			    zpool_prop_default_numeric(prop))
206				src = ZPROP_SRC_LOCAL;
207
208			if (prop == ZPOOL_PROP_BOOTFS) {
209				dsl_pool_t *dp;
210				dsl_dataset_t *ds = NULL;
211
212				dp = spa_get_dsl(spa);
213				rw_enter(&dp->dp_config_rwlock, RW_READER);
214				if (err = dsl_dataset_hold_obj(dp,
215				    za.za_first_integer, FTAG, &ds)) {
216					rw_exit(&dp->dp_config_rwlock);
217					break;
218				}
219
220				strval = kmem_alloc(
221				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
222				    KM_SLEEP);
223				dsl_dataset_name(ds, strval);
224				dsl_dataset_rele(ds, FTAG);
225				rw_exit(&dp->dp_config_rwlock);
226			} else {
227				strval = NULL;
228				intval = za.za_first_integer;
229			}
230
231			spa_prop_add_list(*nvp, prop, strval, intval, src);
232
233			if (strval != NULL)
234				kmem_free(strval,
235				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
236
237			break;
238
239		case 1:
240			/* string property */
241			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
242			err = zap_lookup(mos, spa->spa_pool_props_object,
243			    za.za_name, 1, za.za_num_integers, strval);
244			if (err) {
245				kmem_free(strval, za.za_num_integers);
246				break;
247			}
248			spa_prop_add_list(*nvp, prop, strval, 0, src);
249			kmem_free(strval, za.za_num_integers);
250			break;
251
252		default:
253			break;
254		}
255	}
256	zap_cursor_fini(&zc);
257	mutex_exit(&spa->spa_props_lock);
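	/*
	 * Control falls through to the label below: err holds the result
	 * of the last zap_cursor_retrieve(), which is ENOENT once the
	 * cursor has simply been exhausted.
	 */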
258out:
259	if (err && err != ENOENT) {
260		nvlist_free(*nvp);
261		*nvp = NULL;
262		return (err);
263	}
264
265	return (0);
266}
267
268/*
269 * Validate the given pool properties nvlist and modify the list
270 * for the property values to be set.
271 */
272static int
273spa_prop_validate(spa_t *spa, nvlist_t *props)
274{
275	nvpair_t *elem;
276	int error = 0, reset_bootfs = 0;
277	uint64_t objnum;
278
279	elem = NULL;
280	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
281		zpool_prop_t prop;
282		char *propname, *strval;
283		uint64_t intval;
284		objset_t *os;
285		char *slash;
286
287		propname = nvpair_name(elem);
288
289		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
290			return (EINVAL);
291
292		switch (prop) {
293		case ZPOOL_PROP_VERSION:
294			error = nvpair_value_uint64(elem, &intval);
295			if (!error &&
296			    (intval < spa_version(spa) || intval > SPA_VERSION))
297				error = EINVAL;
298			break;
299
300		case ZPOOL_PROP_DELEGATION:
301		case ZPOOL_PROP_AUTOREPLACE:
302		case ZPOOL_PROP_LISTSNAPS:
303			error = nvpair_value_uint64(elem, &intval);
304			if (!error && intval > 1)
305				error = EINVAL;
306			break;
307
308		case ZPOOL_PROP_BOOTFS:
309			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
310				error = ENOTSUP;
311				break;
312			}
313
314			/*
315			 * Make sure the vdev config is bootable
316			 */
317			if (!vdev_is_bootable(spa->spa_root_vdev)) {
318				error = ENOTSUP;
319				break;
320			}
321
322			reset_bootfs = 1;
323
324			error = nvpair_value_string(elem, &strval);
325
326			if (!error) {
327				uint64_t compress;
328
329				if (strval == NULL || strval[0] == '\0') {
330					objnum = zpool_prop_default_numeric(
331					    ZPOOL_PROP_BOOTFS);
332					break;
333				}
334
335				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
336				    DS_MODE_USER | DS_MODE_READONLY, &os))
337					break;
338
339				/* We don't support gzip bootable datasets */
340				if ((error = dsl_prop_get_integer(strval,
341				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
342				    &compress, NULL)) == 0 &&
343				    !BOOTFS_COMPRESS_VALID(compress)) {
344					error = ENOTSUP;
345				} else {
346					objnum = dmu_objset_id(os);
347				}
348				dmu_objset_close(os);
349			}
350			break;
351
352		case ZPOOL_PROP_FAILUREMODE:
353			error = nvpair_value_uint64(elem, &intval);
354			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
355			    intval > ZIO_FAILURE_MODE_PANIC))
356				error = EINVAL;
357
358			/*
359			 * This is a special case which only occurs when
360			 * the pool has completely failed. This allows
361			 * the user to change the in-core failmode property
362			 * without syncing it out to disk (I/Os might
363			 * currently be blocked). We do this by returning
364			 * EIO to the caller (spa_prop_set) to trick it
365			 * into thinking we encountered a property validation
366			 * error.
367			 */
368			if (!error && spa_suspended(spa)) {
369				spa->spa_failmode = intval;
370				error = EIO;
371			}
372			break;
373
374		case ZPOOL_PROP_CACHEFILE:
375			if ((error = nvpair_value_string(elem, &strval)) != 0)
376				break;
377
378			if (strval[0] == '\0')
379				break;
380
381			if (strcmp(strval, "none") == 0)
382				break;
383
384			if (strval[0] != '/') {
385				error = EINVAL;
386				break;
387			}
388
389			slash = strrchr(strval, '/');
390			ASSERT(slash != NULL);
391
392			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
393			    strcmp(slash, "/..") == 0)
394				error = EINVAL;
395			break;
396		}
397
398		if (error)
399			break;
400	}
401
402	if (!error && reset_bootfs) {
403		error = nvlist_remove(props,
404		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
405
406		if (!error) {
407			error = nvlist_add_uint64(props,
408			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
409		}
410	}
411
412	return (error);
413}
414
415int
416spa_prop_set(spa_t *spa, nvlist_t *nvp)
417{
418	int error;
419
420	if ((error = spa_prop_validate(spa, nvp)) != 0)
421		return (error);
422
423	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
424	    spa, nvp, 3));
425}
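/*
 * In the dsl_sync_task_do() call above, the NULL checkfunc means no
 * separate validation runs in syncing context (spa_prop_validate()
 * already ran in open context), and the trailing 3 is the conservative
 * blocks-modified estimate used to charge the sync task's transaction.
 */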
426
427/*
428 * If the bootfs property value is dsobj, clear it.
429 */
430void
431spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
432{
433	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
434		VERIFY(zap_remove(spa->spa_meta_objset,
435		    spa->spa_pool_props_object,
436		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
437		spa->spa_bootfs = 0;
438	}
439}
440
441/*
442 * ==========================================================================
443 * SPA state manipulation (open/create/destroy/import/export)
444 * ==========================================================================
445 */
446
447static int
448spa_error_entry_compare(const void *a, const void *b)
449{
450	spa_error_entry_t *sa = (spa_error_entry_t *)a;
451	spa_error_entry_t *sb = (spa_error_entry_t *)b;
452	int ret;
453
454	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
455	    sizeof (zbookmark_t));
456
457	if (ret < 0)
458		return (-1);
459	else if (ret > 0)
460		return (1);
461	else
462		return (0);
463}
464
465/*
466 * Utility function which retrieves copies of the current error logs and
467 * re-initializes them in the process.
468 */
469void
470spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
471{
472	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
473
474	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
475	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
476
477	avl_create(&spa->spa_errlist_scrub,
478	    spa_error_entry_compare, sizeof (spa_error_entry_t),
479	    offsetof(spa_error_entry_t, se_avl));
480	avl_create(&spa->spa_errlist_last,
481	    spa_error_entry_compare, sizeof (spa_error_entry_t),
482	    offsetof(spa_error_entry_t, se_avl));
483}
484
485/*
486 * Activate an uninitialized pool.
487 */
488static void
489spa_activate(spa_t *spa)
490{
491	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
492
493	spa->spa_state = POOL_STATE_ACTIVE;
494
495	spa->spa_normal_class = metaslab_class_create();
496	spa->spa_log_class = metaslab_class_create();
497
498	for (int t = 0; t < ZIO_TYPES; t++) {
499		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
500			spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
501			    zio_taskq_threads[t][q], maxclsyspri, 50,
502			    INT_MAX, TASKQ_PREPOPULATE);
503		}
504	}
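	/*
	 * (In the taskq_create() calls above, 50 and INT_MAX are the
	 * minalloc/maxalloc bounds on task entries, and TASKQ_PREPOPULATE
	 * pre-allocates the minimum up front.)
	 */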
505
506	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
507	    offsetof(vdev_t, vdev_config_dirty_node));
508	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
509	    offsetof(vdev_t, vdev_state_dirty_node));
510
511	txg_list_create(&spa->spa_vdev_txg_list,
512	    offsetof(struct vdev, vdev_txg_node));
513
514	avl_create(&spa->spa_errlist_scrub,
515	    spa_error_entry_compare, sizeof (spa_error_entry_t),
516	    offsetof(spa_error_entry_t, se_avl));
517	avl_create(&spa->spa_errlist_last,
518	    spa_error_entry_compare, sizeof (spa_error_entry_t),
519	    offsetof(spa_error_entry_t, se_avl));
520}
521
522/*
523 * Opposite of spa_activate().
524 */
525static void
526spa_deactivate(spa_t *spa)
527{
528	ASSERT(spa->spa_sync_on == B_FALSE);
529	ASSERT(spa->spa_dsl_pool == NULL);
530	ASSERT(spa->spa_root_vdev == NULL);
531
532	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
533
534	txg_list_destroy(&spa->spa_vdev_txg_list);
535
536	list_destroy(&spa->spa_config_dirty_list);
537	list_destroy(&spa->spa_state_dirty_list);
538
539	for (int t = 0; t < ZIO_TYPES; t++) {
540		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
541			taskq_destroy(spa->spa_zio_taskq[t][q]);
542			spa->spa_zio_taskq[t][q] = NULL;
543		}
544	}
545
546	metaslab_class_destroy(spa->spa_normal_class);
547	spa->spa_normal_class = NULL;
548
549	metaslab_class_destroy(spa->spa_log_class);
550	spa->spa_log_class = NULL;
551
552	/*
553	 * If this was part of an import or the open otherwise failed, we may
554	 * still have errors left in the queues.  Empty them just in case.
555	 */
556	spa_errlog_drain(spa);
557
558	avl_destroy(&spa->spa_errlist_scrub);
559	avl_destroy(&spa->spa_errlist_last);
560
561	spa->spa_state = POOL_STATE_UNINITIALIZED;
562}
563
564/*
565 * Verify a pool configuration, and construct the vdev tree appropriately.  This
566 * will create all the necessary vdevs in the appropriate layout, with each vdev
567 * in the CLOSED state.  This will prep the pool before open/creation/import.
568 * All vdev validation is done by the vdev_alloc() routine.
569 */
570static int
571spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
572    uint_t id, int atype)
573{
574	nvlist_t **child;
575	uint_t c, children;
576	int error;
577
578	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
579		return (error);
580
581	if ((*vdp)->vdev_ops->vdev_op_leaf)
582		return (0);
583
584	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
585	    &child, &children);
586
587	if (error == ENOENT)
588		return (0);
589
590	if (error) {
591		vdev_free(*vdp);
592		*vdp = NULL;
593		return (EINVAL);
594	}
595
596	for (c = 0; c < children; c++) {
597		vdev_t *vd;
598		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
599		    atype)) != 0) {
600			vdev_free(*vdp);
601			*vdp = NULL;
602			return (error);
603		}
604	}
605
606	ASSERT(*vdp != NULL);
607
608	return (0);
609}
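/*
 * For reference, a sketch of the nvlist tree spa_config_parse() walks
 * for a hypothetical two-way mirror pool:
 *
 *	root
 *	    mirror			<- interior: recurse over its
 *		disk c0t0d0s0		   ZPOOL_CONFIG_CHILDREN array
 *		disk c0t1d0s0		<- leaves terminate the recursion
 */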
610
611/*
612 * Opposite of spa_load().
613 */
614static void
615spa_unload(spa_t *spa)
616{
617	int i;
618
619	ASSERT(MUTEX_HELD(&spa_namespace_lock));
620
621	/*
622	 * Stop async tasks.
623	 */
624	spa_async_suspend(spa);
625
626	/*
627	 * Stop syncing.
628	 */
629	if (spa->spa_sync_on) {
630		txg_sync_stop(spa->spa_dsl_pool);
631		spa->spa_sync_on = B_FALSE;
632	}
633
634	/*
635	 * Wait for any outstanding async I/O to complete.
636	 */
637	mutex_enter(&spa->spa_async_root_lock);
638	while (spa->spa_async_root_count != 0)
639		cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock);
640	mutex_exit(&spa->spa_async_root_lock);
641
642	/*
643	 * Drop and purge level 2 cache
644	 */
645	spa_l2cache_drop(spa);
646
647	/*
648	 * Close the dsl pool.
649	 */
650	if (spa->spa_dsl_pool) {
651		dsl_pool_close(spa->spa_dsl_pool);
652		spa->spa_dsl_pool = NULL;
653	}
654
655	/*
656	 * Close all vdevs.
657	 */
658	if (spa->spa_root_vdev)
659		vdev_free(spa->spa_root_vdev);
660	ASSERT(spa->spa_root_vdev == NULL);
661
662	for (i = 0; i < spa->spa_spares.sav_count; i++)
663		vdev_free(spa->spa_spares.sav_vdevs[i]);
664	if (spa->spa_spares.sav_vdevs) {
665		kmem_free(spa->spa_spares.sav_vdevs,
666		    spa->spa_spares.sav_count * sizeof (void *));
667		spa->spa_spares.sav_vdevs = NULL;
668	}
669	if (spa->spa_spares.sav_config) {
670		nvlist_free(spa->spa_spares.sav_config);
671		spa->spa_spares.sav_config = NULL;
672	}
673	spa->spa_spares.sav_count = 0;
674
675	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
676		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
677	if (spa->spa_l2cache.sav_vdevs) {
678		kmem_free(spa->spa_l2cache.sav_vdevs,
679		    spa->spa_l2cache.sav_count * sizeof (void *));
680		spa->spa_l2cache.sav_vdevs = NULL;
681	}
682	if (spa->spa_l2cache.sav_config) {
683		nvlist_free(spa->spa_l2cache.sav_config);
684		spa->spa_l2cache.sav_config = NULL;
685	}
686	spa->spa_l2cache.sav_count = 0;
687
688	spa->spa_async_suspended = 0;
689}
690
691/*
692 * Load (or re-load) the current list of vdevs describing the active spares for
693 * this pool.  When this is called, we have some form of basic information in
694 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
695 * then re-generate a more complete list including status information.
696 */
697static void
698spa_load_spares(spa_t *spa)
699{
700	nvlist_t **spares;
701	uint_t nspares;
702	int i;
703	vdev_t *vd, *tvd;
704
705	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
706
707	/*
708	 * First, close and free any existing spare vdevs.
709	 */
710	for (i = 0; i < spa->spa_spares.sav_count; i++) {
711		vd = spa->spa_spares.sav_vdevs[i];
712
713		/* Undo the call to spa_spare_activate() below */
714		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
715		    B_FALSE)) != NULL && tvd->vdev_isspare)
716			spa_spare_remove(tvd);
717		vdev_close(vd);
718		vdev_free(vd);
719	}
720
721	if (spa->spa_spares.sav_vdevs)
722		kmem_free(spa->spa_spares.sav_vdevs,
723		    spa->spa_spares.sav_count * sizeof (void *));
724
725	if (spa->spa_spares.sav_config == NULL)
726		nspares = 0;
727	else
728		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
729		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
730
731	spa->spa_spares.sav_count = (int)nspares;
732	spa->spa_spares.sav_vdevs = NULL;
733
734	if (nspares == 0)
735		return;
736
737	/*
738	 * Construct the array of vdevs, opening them to get status in the
739	 * process.  For each spare, there are potentially two different vdev_t
740	 * structures associated with it: one in the list of spares (used only
741	 * for basic validation purposes) and one in the active vdev
742	 * configuration (if it's spared in).  During this phase we open and
743	 * validate each vdev on the spare list.  If the vdev also exists in the
744	 * active configuration, then we also mark this vdev as an active spare.
745	 */
746	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
747	    KM_SLEEP);
748	for (i = 0; i < spa->spa_spares.sav_count; i++) {
749		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
750		    VDEV_ALLOC_SPARE) == 0);
751		ASSERT(vd != NULL);
752
753		spa->spa_spares.sav_vdevs[i] = vd;
754
755		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
756		    B_FALSE)) != NULL) {
757			if (!tvd->vdev_isspare)
758				spa_spare_add(tvd);
759
760			/*
761			 * We only mark the spare active if we were successfully
762			 * able to load the vdev.  Otherwise, importing a pool
763			 * with a bad active spare would result in strange
764			 * behavior, because multiple pools would think the spare
765			 * is actively in use.
766			 *
767			 * There is a vulnerability here to an equally bizarre
768			 * circumstance, where a dead active spare is later
769			 * brought back to life (onlined or otherwise).  Given
770			 * the rarity of this scenario, and the extra complexity
771			 * it adds, we ignore the possibility.
772			 */
773			if (!vdev_is_dead(tvd))
774				spa_spare_activate(tvd);
775		}
776
777		vd->vdev_top = vd;
778
779		if (vdev_open(vd) != 0)
780			continue;
781
782		if (vdev_validate_aux(vd) == 0)
783			spa_spare_add(vd);
784	}
785
786	/*
787	 * Recompute the stashed list of spares, with status information
788	 * this time.
789	 */
790	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
791	    DATA_TYPE_NVLIST_ARRAY) == 0);
792
793	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
794	    KM_SLEEP);
795	for (i = 0; i < spa->spa_spares.sav_count; i++)
796		spares[i] = vdev_config_generate(spa,
797		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
798	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
799	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
800	for (i = 0; i < spa->spa_spares.sav_count; i++)
801		nvlist_free(spares[i]);
802	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
803}
804
805/*
806 * Load (or re-load) the current list of vdevs describing the active l2cache for
807 * this pool.  When this is called, we have some form of basic information in
808 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
809 * then re-generate a more complete list including status information.
810 * Devices which are already active have their details maintained, and are
811 * not re-opened.
812 */
813static void
814spa_load_l2cache(spa_t *spa)
815{
816	nvlist_t **l2cache;
817	uint_t nl2cache;
818	int i, j, oldnvdevs;
819	uint64_t guid, size;
820	vdev_t *vd, **oldvdevs, **newvdevs;
821	spa_aux_vdev_t *sav = &spa->spa_l2cache;
822
823	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
824
825	if (sav->sav_config != NULL) {
826		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
827		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
828		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
829	} else {
830		nl2cache = 0;
831	}
832
833	oldvdevs = sav->sav_vdevs;
834	oldnvdevs = sav->sav_count;
835	sav->sav_vdevs = NULL;
836	sav->sav_count = 0;
837
838	/*
839	 * Process new nvlist of vdevs.
840	 */
841	for (i = 0; i < nl2cache; i++) {
842		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
843		    &guid) == 0);
844
845		newvdevs[i] = NULL;
846		for (j = 0; j < oldnvdevs; j++) {
847			vd = oldvdevs[j];
848			if (vd != NULL && guid == vd->vdev_guid) {
849				/*
850				 * Retain previous vdev for add/remove ops.
851				 */
852				newvdevs[i] = vd;
853				oldvdevs[j] = NULL;
854				break;
855			}
856		}
857
858		if (newvdevs[i] == NULL) {
859			/*
860			 * Create new vdev
861			 */
862			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
863			    VDEV_ALLOC_L2CACHE) == 0);
864			ASSERT(vd != NULL);
865			newvdevs[i] = vd;
866
867			/*
868			 * Commit this vdev as an l2cache device,
869			 * even if it fails to open.
870			 */
871			spa_l2cache_add(vd);
872
873			vd->vdev_top = vd;
874			vd->vdev_aux = sav;
875
876			spa_l2cache_activate(vd);
877
878			if (vdev_open(vd) != 0)
879				continue;
880
881			(void) vdev_validate_aux(vd);
882
883			if (!vdev_is_dead(vd)) {
884				size = vdev_get_rsize(vd);
885				l2arc_add_vdev(spa, vd,
886				    VDEV_LABEL_START_SIZE,
887				    size - VDEV_LABEL_START_SIZE);
888			}
889		}
890	}
891
892	/*
893	 * Purge vdevs that were dropped
894	 */
895	for (i = 0; i < oldnvdevs; i++) {
896		uint64_t pool;
897
898		vd = oldvdevs[i];
899		if (vd != NULL) {
900			if ((spa_mode & FWRITE) &&
901			    spa_l2cache_exists(vd->vdev_guid, &pool) &&
902			    pool != 0ULL &&
903			    l2arc_vdev_present(vd)) {
904				l2arc_remove_vdev(vd);
905			}
906			(void) vdev_close(vd);
907			spa_l2cache_remove(vd);
908		}
909	}
910
911	if (oldvdevs)
912		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
913
914	if (sav->sav_config == NULL)
915		goto out;
916
917	sav->sav_vdevs = newvdevs;
918	sav->sav_count = (int)nl2cache;
919
920	/*
921	 * Recompute the stashed list of l2cache devices, with status
922	 * information this time.
923	 */
924	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
925	    DATA_TYPE_NVLIST_ARRAY) == 0);
926
927	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
928	for (i = 0; i < sav->sav_count; i++)
929		l2cache[i] = vdev_config_generate(spa,
930		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
931	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
932	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
933out:
934	for (i = 0; i < sav->sav_count; i++)
935		nvlist_free(l2cache[i]);
936	if (sav->sav_count)
937		kmem_free(l2cache, sav->sav_count * sizeof (void *));
938}
939
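/*
 * Read a packed nvlist out of the MOS: the object's bonus buffer holds
 * the packed size, the object body holds the packed bytes, and
 * nvlist_unpack() expands the result into *value.
 */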
940static int
941load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
942{
943	dmu_buf_t *db;
944	char *packed = NULL;
945	size_t nvsize = 0;
946	int error;
947	*value = NULL;
948
949	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
950	nvsize = *(uint64_t *)db->db_data;
951	dmu_buf_rele(db, FTAG);
952
953	packed = kmem_alloc(nvsize, KM_SLEEP);
954	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
955	if (error == 0)
956		error = nvlist_unpack(packed, nvsize, value, 0);
957	kmem_free(packed, nvsize);
958
959	return (error);
960}
961
962/*
963 * Check whether the given vdev could not be opened, in which case we post a
964 * sysevent to notify the autoreplace code that the device has been removed.
965 */
966static void
967spa_check_removed(vdev_t *vd)
968{
969	int c;
970
971	for (c = 0; c < vd->vdev_children; c++)
972		spa_check_removed(vd->vdev_child[c]);
973
974	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
975		zfs_post_autoreplace(vd->vdev_spa, vd);
976		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
977	}
978}
979
980/*
981 * Check for missing log devices
982 */
983int
984spa_check_logs(spa_t *spa)
985{
986	switch (spa->spa_log_state) {
987	case SPA_LOG_MISSING:
988		/* need to recheck in case slog has been restored */
989	case SPA_LOG_UNKNOWN:
990		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
991		    DS_FIND_CHILDREN)) {
992			spa->spa_log_state = SPA_LOG_MISSING;
993			return (1);
994		}
995		break;
996
997	case SPA_LOG_CLEAR:
998		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
999		    DS_FIND_CHILDREN);
1000		break;
1001	}
1002	spa->spa_log_state = SPA_LOG_GOOD;
1003	return (0);
1004}
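/*
 * (A nonzero return from spa_check_logs() means an intent log device is
 * known to be missing; spa_load() below turns that into ENXIO with
 * VDEV_AUX_BAD_LOG.)
 */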
1005
1006/*
1007 * Load an existing storage pool, using the pool's builtin spa_config as a
1008 * source of configuration information.
1009 */
1010static int
1011spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
1012{
1013	int error = 0;
1014	nvlist_t *nvroot = NULL;
1015	vdev_t *rvd;
1016	uberblock_t *ub = &spa->spa_uberblock;
1017	uint64_t config_cache_txg = spa->spa_config_txg;
1018	uint64_t pool_guid;
1019	uint64_t version;
1020	uint64_t autoreplace = 0;
1021	char *ereport = FM_EREPORT_ZFS_POOL;
1022
1023	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1024
1025	spa->spa_load_state = state;
1026
1027	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
1028	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
1029		error = EINVAL;
1030		goto out;
1031	}
1032
1033	/*
1034	 * Versioning wasn't explicitly added to the label until later, so if
1035 * it's not present, treat it as the initial version.
1036	 */
1037	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
1038		version = SPA_VERSION_INITIAL;
1039
1040	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
1041	    &spa->spa_config_txg);
1042
1043	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
1044	    spa_guid_exists(pool_guid, 0)) {
1045		error = EEXIST;
1046		goto out;
1047	}
1048
1049	spa->spa_load_guid = pool_guid;
1050
1051	/*
1052	 * Parse the configuration into a vdev tree.  We explicitly set the
1053	 * value that will be returned by spa_version() since parsing the
1054	 * configuration requires knowing the version number.
1055	 */
1056	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1057	spa->spa_ubsync.ub_version = version;
1058	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
1059	spa_config_exit(spa, SCL_ALL, FTAG);
1060
1061	if (error != 0)
1062		goto out;
1063
1064	ASSERT(spa->spa_root_vdev == rvd);
1065	ASSERT(spa_guid(spa) == pool_guid);
1066
1067	/*
1068	 * Try to open all vdevs, loading each label in the process.
1069	 */
1070	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1071	error = vdev_open(rvd);
1072	spa_config_exit(spa, SCL_ALL, FTAG);
1073	if (error != 0)
1074		goto out;
1075
1076	/*
1077	 * Validate the labels for all leaf vdevs.  We need to grab the config
1078	 * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER.
1079	 */
1080	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1081	error = vdev_validate(rvd);
1082	spa_config_exit(spa, SCL_ALL, FTAG);
1083
1084	if (error != 0)
1085		goto out;
1086
1087	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1088		error = ENXIO;
1089		goto out;
1090	}
1091
1092	/*
1093	 * Find the best uberblock.
1094	 */
1095	vdev_uberblock_load(NULL, rvd, ub);
1096
1097	/*
1098	 * If we weren't able to find a single valid uberblock, return failure.
1099	 */
1100	if (ub->ub_txg == 0) {
1101		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1102		    VDEV_AUX_CORRUPT_DATA);
1103		error = ENXIO;
1104		goto out;
1105	}
1106
1107	/*
1108	 * If the pool is newer than the code, we can't open it.
1109	 */
1110	if (ub->ub_version > SPA_VERSION) {
1111		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1112		    VDEV_AUX_VERSION_NEWER);
1113		error = ENOTSUP;
1114		goto out;
1115	}
1116
1117	/*
1118	 * If the vdev guid sum doesn't match the uberblock, we have an
1119	 * incomplete configuration.
1120	 */
1121	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
1122		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1123		    VDEV_AUX_BAD_GUID_SUM);
1124		error = ENXIO;
1125		goto out;
1126	}
1127
1128	/*
1129	 * Initialize internal SPA structures.
1130	 */
1131	spa->spa_state = POOL_STATE_ACTIVE;
1132	spa->spa_ubsync = spa->spa_uberblock;
1133	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
1134	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
1135	if (error) {
1136		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1137		    VDEV_AUX_CORRUPT_DATA);
1138		goto out;
1139	}
1140	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
1141
1142	if (zap_lookup(spa->spa_meta_objset,
1143	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
1144	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
1145		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1146		    VDEV_AUX_CORRUPT_DATA);
1147		error = EIO;
1148		goto out;
1149	}
1150
1151	if (!mosconfig) {
1152		nvlist_t *newconfig;
1153		uint64_t hostid;
1154
1155		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
1156			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1157			    VDEV_AUX_CORRUPT_DATA);
1158			error = EIO;
1159			goto out;
1160		}
1161
1162		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
1163		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
1164			char *hostname;
1165			unsigned long myhostid = 0;
1166
1167			VERIFY(nvlist_lookup_string(newconfig,
1168			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
1169
1170			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
1171			if (hostid != 0 && myhostid != 0 &&
1172			    (unsigned long)hostid != myhostid) {
1173				cmn_err(CE_WARN, "pool '%s' could not be "
1174				    "loaded as it was last accessed by "
1175				    "another system (host: %s hostid: 0x%lx). "
1176				    "See: http://www.sun.com/msg/ZFS-8000-EY",
1177				    spa_name(spa), hostname,
1178				    (unsigned long)hostid);
1179				error = EBADF;
1180				goto out;
1181			}
1182		}
1183
1184		spa_config_set(spa, newconfig);
1185		spa_unload(spa);
1186		spa_deactivate(spa);
1187		spa_activate(spa);
1188
1189		return (spa_load(spa, newconfig, state, B_TRUE));
1190	}
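	/*
	 * From here on the load runs against the config read back from
	 * the MOS: the !mosconfig pass above re-invokes spa_load() exactly
	 * once with mosconfig == B_TRUE, replacing the label-derived
	 * config with the pool's own authoritative copy.
	 */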
1191
1192	if (zap_lookup(spa->spa_meta_objset,
1193	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
1194	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
1195		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1196		    VDEV_AUX_CORRUPT_DATA);
1197		error = EIO;
1198		goto out;
1199	}
1200
1201	/*
1202	 * Load the bit that tells us to use the new accounting function
1203	 * (raid-z deflation).  If we have an older pool, this will not
1204	 * be present.
1205	 */
1206	error = zap_lookup(spa->spa_meta_objset,
1207	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
1208	    sizeof (uint64_t), 1, &spa->spa_deflate);
1209	if (error != 0 && error != ENOENT) {
1210		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1211		    VDEV_AUX_CORRUPT_DATA);
1212		error = EIO;
1213		goto out;
1214	}
1215
1216	/*
1217	 * Load the persistent error log.  If we have an older pool, this will
1218	 * not be present.
1219	 */
1220	error = zap_lookup(spa->spa_meta_objset,
1221	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
1222	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
1223	if (error != 0 && error != ENOENT) {
1224		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1225		    VDEV_AUX_CORRUPT_DATA);
1226		error = EIO;
1227		goto out;
1228	}
1229
1230	error = zap_lookup(spa->spa_meta_objset,
1231	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
1232	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
1233	if (error != 0 && error != ENOENT) {
1234		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1235		    VDEV_AUX_CORRUPT_DATA);
1236		error = EIO;
1237		goto out;
1238	}
1239
1240	/*
1241	 * Load the history object.  If we have an older pool, this
1242	 * will not be present.
1243	 */
1244	error = zap_lookup(spa->spa_meta_objset,
1245	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
1246	    sizeof (uint64_t), 1, &spa->spa_history);
1247	if (error != 0 && error != ENOENT) {
1248		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1249		    VDEV_AUX_CORRUPT_DATA);
1250		error = EIO;
1251		goto out;
1252	}
1253
1254	/*
1255	 * Load any hot spares for this pool.
1256	 */
1257	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1258	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
1259	if (error != 0 && error != ENOENT) {
1260		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1261		    VDEV_AUX_CORRUPT_DATA);
1262		error = EIO;
1263		goto out;
1264	}
1265	if (error == 0) {
1266		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
1267		if (load_nvlist(spa, spa->spa_spares.sav_object,
1268		    &spa->spa_spares.sav_config) != 0) {
1269			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1270			    VDEV_AUX_CORRUPT_DATA);
1271			error = EIO;
1272			goto out;
1273		}
1274
1275		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1276		spa_load_spares(spa);
1277		spa_config_exit(spa, SCL_ALL, FTAG);
1278	}
1279
1280	/*
1281	 * Load any level 2 ARC devices for this pool.
1282	 */
1283	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1284	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
1285	    &spa->spa_l2cache.sav_object);
1286	if (error != 0 && error != ENOENT) {
1287		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1288		    VDEV_AUX_CORRUPT_DATA);
1289		error = EIO;
1290		goto out;
1291	}
1292	if (error == 0) {
1293		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
1294		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
1295		    &spa->spa_l2cache.sav_config) != 0) {
1296			vdev_set_state(rvd, B_TRUE,
1297			    VDEV_STATE_CANT_OPEN,
1298			    VDEV_AUX_CORRUPT_DATA);
1299			error = EIO;
1300			goto out;
1301		}
1302
1303		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1304		spa_load_l2cache(spa);
1305		spa_config_exit(spa, SCL_ALL, FTAG);
1306	}
1307
1308	if (spa_check_logs(spa)) {
1309		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1310		    VDEV_AUX_BAD_LOG);
1311		error = ENXIO;
1312		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
1313		goto out;
1314	}
1315
1316
1317	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
1318
1319	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
1320	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);
1321
1322	if (error && error != ENOENT) {
1323		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
1324		    VDEV_AUX_CORRUPT_DATA);
1325		error = EIO;
1326		goto out;
1327	}
1328
1329	if (error == 0) {
1330		(void) zap_lookup(spa->spa_meta_objset,
1331		    spa->spa_pool_props_object,
1332		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
1333		    sizeof (uint64_t), 1, &spa->spa_bootfs);
1334		(void) zap_lookup(spa->spa_meta_objset,
1335		    spa->spa_pool_props_object,
1336		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
1337		    sizeof (uint64_t), 1, &autoreplace);
1338		(void) zap_lookup(spa->spa_meta_objset,
1339		    spa->spa_pool_props_object,
1340		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
1341		    sizeof (uint64_t), 1, &spa->spa_delegation);
1342		(void) zap_lookup(spa->spa_meta_objset,
1343		    spa->spa_pool_props_object,
1344		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
1345		    sizeof (uint64_t), 1, &spa->spa_failmode);
1346	}
1347
1348	/*
1349	 * If the 'autoreplace' property is set, then post a resource notifying
1350	 * the ZFS DE that it should not issue any faults for unopenable
1351	 * devices.  We also iterate over the vdevs, and post a sysevent for any
1352	 * unopenable vdevs so that the normal autoreplace handler can take
1353	 * over.
1354	 */
1355	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
1356		spa_check_removed(spa->spa_root_vdev);
1357
1358	/*
1359	 * Load the vdev state for all toplevel vdevs.
1360	 */
1361	vdev_load(rvd);
1362
1363	/*
1364	 * Propagate the leaf DTLs we just loaded all the way up the tree.
1365	 */
1366	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1367	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
1368	spa_config_exit(spa, SCL_ALL, FTAG);
1369
1370	/*
1371	 * Check the state of the root vdev.  If it can't be opened, it
1372	 * indicates one or more toplevel vdevs are faulted.
1373	 */
1374	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
1375		error = ENXIO;
1376		goto out;
1377	}
1378
1379	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
1380		dmu_tx_t *tx;
1381		int need_update = B_FALSE;
1382		int c;
1383
1384		/*
1385		 * Claim log blocks that haven't been committed yet.
1386		 * This must all happen in a single txg.
1387		 */
1388		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
1389		    spa_first_txg(spa));
1390		(void) dmu_objset_find(spa_name(spa),
1391		    zil_claim, tx, DS_FIND_CHILDREN);
1392		dmu_tx_commit(tx);
1393
1394		spa->spa_sync_on = B_TRUE;
1395		txg_sync_start(spa->spa_dsl_pool);
1396
1397		/*
1398		 * Wait for all claims to sync.
1399		 */
1400		txg_wait_synced(spa->spa_dsl_pool, 0);
1401
1402		/*
1403		 * If the config cache is stale, or we have uninitialized
1404		 * metaslabs (see spa_vdev_add()), then update the config.
1405		 */
1406		if (config_cache_txg != spa->spa_config_txg ||
1407		    state == SPA_LOAD_IMPORT)
1408			need_update = B_TRUE;
1409
1410		for (c = 0; c < rvd->vdev_children; c++)
1411			if (rvd->vdev_child[c]->vdev_ms_array == 0)
1412				need_update = B_TRUE;
1413
1414		/*
1415		 * Update the config cache asynchronously in case we're the
1416		 * root pool, in which case the config cache isn't writable yet.
1417		 */
1418		if (need_update)
1419			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
1420	}
1421
1422	error = 0;
1423out:
1424	spa->spa_minref = refcount_count(&spa->spa_refcount);
1425	if (error && error != EBADF)
1426		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
1427	spa->spa_load_state = SPA_LOAD_NONE;
1428	spa->spa_ena = 0;
1429
1430	return (error);
1431}
1432
1433/*
1434 * Pool Open/Import
1435 *
1436 * The import case is identical to an open except that the configuration is sent
1437 * down from userland, instead of grabbed from the configuration cache.  For the
1438 * case of an open, the pool configuration will exist in the
1439 * POOL_STATE_UNINITIALIZED state.
1440 *
1441 * The stats information (gen/count/ustats) is used to gather vdev statistics at
1442 * the same time we open the pool, without having to keep around the spa_t in some
1443 * ambiguous state.
1444 */
1445static int
1446spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
1447{
1448	spa_t *spa;
1449	int error;
1450	int locked = B_FALSE;
1451
1452	*spapp = NULL;
1453
1454	/*
1455	 * As disgusting as this is, we need to support recursive calls to this
1456	 * function because dsl_dir_open() is called during spa_load(), and ends
1457	 * up calling spa_open() again.  The real fix is to figure out how to
1458	 * avoid dsl_dir_open() calling this in the first place.
1459	 */
1460	if (mutex_owner(&spa_namespace_lock) != curthread) {
1461		mutex_enter(&spa_namespace_lock);
1462		locked = B_TRUE;
1463	}
1464
1465	if ((spa = spa_lookup(pool)) == NULL) {
1466		if (locked)
1467			mutex_exit(&spa_namespace_lock);
1468		return (ENOENT);
1469	}
1470	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
1471
1472		spa_activate(spa);
1473
1474		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);
1475
1476		if (error == EBADF) {
1477			/*
1478			 * If vdev_validate() returns failure (indicated by
1479			 * EBADF), it means that one of the vdevs indicates
1480			 * that the pool has been exported or destroyed.  If
1481			 * this is the case, the config cache is out of sync and
1482			 * we should remove the pool from the namespace.
1483			 */
1484			spa_unload(spa);
1485			spa_deactivate(spa);
1486			spa_config_sync(spa, B_TRUE, B_TRUE);
1487			spa_remove(spa);
1488			if (locked)
1489				mutex_exit(&spa_namespace_lock);
1490			return (ENOENT);
1491		}
1492
1493		if (error) {
1494			/*
1495			 * We can't open the pool, but we still have useful
1496			 * information: the state of each vdev after the
1497			 * attempted vdev_open().  Return this to the user.
1498			 */
1499			if (config != NULL && spa->spa_root_vdev != NULL)
1500				*config = spa_config_generate(spa, NULL, -1ULL,
1501				    B_TRUE);
1502			spa_unload(spa);
1503			spa_deactivate(spa);
1504			spa->spa_last_open_failed = B_TRUE;
1505			if (locked)
1506				mutex_exit(&spa_namespace_lock);
1507			*spapp = NULL;
1508			return (error);
1509		} else {
1510			spa->spa_last_open_failed = B_FALSE;
1511		}
1512	}
1513
1514	spa_open_ref(spa, tag);
1515
1516	if (locked)
1517		mutex_exit(&spa_namespace_lock);
1518
1519	*spapp = spa;
1520
1521	if (config != NULL)
1522		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
1523
1524	return (0);
1525}
1526
1527int
1528spa_open(const char *name, spa_t **spapp, void *tag)
1529{
1530	return (spa_open_common(name, spapp, tag, NULL));
1531}
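/*
 * A minimal usage sketch for spa_open() (error handling elided; "tank"
 * is a hypothetical pool name):
 *
 *	spa_t *spa;
 *
 *	if (spa_open("tank", &spa, FTAG) == 0) {
 *		... inspect or operate on the open pool ...
 *		spa_close(spa, FTAG);
 *	}
 */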
1532
1533/*
1534 * Lookup the given spa_t, incrementing the inject count in the process,
1535 * preventing it from being exported or destroyed.
1536 */
1537spa_t *
1538spa_inject_addref(char *name)
1539{
1540	spa_t *spa;
1541
1542	mutex_enter(&spa_namespace_lock);
1543	if ((spa = spa_lookup(name)) == NULL) {
1544		mutex_exit(&spa_namespace_lock);
1545		return (NULL);
1546	}
1547	spa->spa_inject_ref++;
1548	mutex_exit(&spa_namespace_lock);
1549
1550	return (spa);
1551}
1552
1553void
1554spa_inject_delref(spa_t *spa)
1555{
1556	mutex_enter(&spa_namespace_lock);
1557	spa->spa_inject_ref--;
1558	mutex_exit(&spa_namespace_lock);
1559}
1560
1561/*
1562 * Add spare device information to the nvlist.
1563 */
1564static void
1565spa_add_spares(spa_t *spa, nvlist_t *config)
1566{
1567	nvlist_t **spares;
1568	uint_t i, nspares;
1569	nvlist_t *nvroot;
1570	uint64_t guid;
1571	vdev_stat_t *vs;
1572	uint_t vsc;
1573	uint64_t pool;
1574
1575	if (spa->spa_spares.sav_count == 0)
1576		return;
1577
1578	VERIFY(nvlist_lookup_nvlist(config,
1579	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1580	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1581	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1582	if (nspares != 0) {
1583		VERIFY(nvlist_add_nvlist_array(nvroot,
1584		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
1585		VERIFY(nvlist_lookup_nvlist_array(nvroot,
1586		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
1587
1588		/*
1589		 * Go through and find any spares which have since been
1590		 * repurposed as an active spare.  If this is the case, update
1591		 * their status appropriately.
1592		 */
1593		for (i = 0; i < nspares; i++) {
1594			VERIFY(nvlist_lookup_uint64(spares[i],
1595			    ZPOOL_CONFIG_GUID, &guid) == 0);
1596			if (spa_spare_exists(guid, &pool, NULL) &&
1597			    pool != 0ULL) {
1598				VERIFY(nvlist_lookup_uint64_array(
1599				    spares[i], ZPOOL_CONFIG_STATS,
1600				    (uint64_t **)&vs, &vsc) == 0);
1601				vs->vs_state = VDEV_STATE_CANT_OPEN;
1602				vs->vs_aux = VDEV_AUX_SPARED;
1603			}
1604		}
1605	}
1606}
1607
1608/*
1609 * Add l2cache device information to the nvlist, including vdev stats.
1610 */
1611static void
1612spa_add_l2cache(spa_t *spa, nvlist_t *config)
1613{
1614	nvlist_t **l2cache;
1615	uint_t i, j, nl2cache;
1616	nvlist_t *nvroot;
1617	uint64_t guid;
1618	vdev_t *vd;
1619	vdev_stat_t *vs;
1620	uint_t vsc;
1621
1622	if (spa->spa_l2cache.sav_count == 0)
1623		return;
1624
1625	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1626
1627	VERIFY(nvlist_lookup_nvlist(config,
1628	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1629	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
1630	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1631	if (nl2cache != 0) {
1632		VERIFY(nvlist_add_nvlist_array(nvroot,
1633		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1634		VERIFY(nvlist_lookup_nvlist_array(nvroot,
1635		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
1636
1637		/*
1638		 * Update level 2 cache device stats.
1639		 */
1640
1641		for (i = 0; i < nl2cache; i++) {
1642			VERIFY(nvlist_lookup_uint64(l2cache[i],
1643			    ZPOOL_CONFIG_GUID, &guid) == 0);
1644
1645			vd = NULL;
1646			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
1647				if (guid ==
1648				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
1649					vd = spa->spa_l2cache.sav_vdevs[j];
1650					break;
1651				}
1652			}
1653			ASSERT(vd != NULL);
1654
1655			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
1656			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
1657			vdev_get_stats(vd, vs);
1658		}
1659	}
1660
1661	spa_config_exit(spa, SCL_CONFIG, FTAG);
1662}
1663
1664int
1665spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
1666{
1667	int error;
1668	spa_t *spa;
1669
1670	*config = NULL;
1671	error = spa_open_common(name, &spa, FTAG, config);
1672
1673	if (spa && *config != NULL) {
1674		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
1675		    spa_get_errlog_size(spa)) == 0);
1676
1677		if (spa_suspended(spa))
1678			VERIFY(nvlist_add_uint64(*config,
1679			    ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0);
1680
1681		spa_add_spares(spa, *config);
1682		spa_add_l2cache(spa, *config);
1683	}
1684
1685	/*
1686	 * We want to get the alternate root even for faulted pools, so we cheat
1687	 * and call spa_lookup() directly.
1688	 */
1689	if (altroot) {
1690		if (spa == NULL) {
1691			mutex_enter(&spa_namespace_lock);
1692			spa = spa_lookup(name);
1693			if (spa)
1694				spa_altroot(spa, altroot, buflen);
1695			else
1696				altroot[0] = '\0';
1697			spa = NULL;
1698			mutex_exit(&spa_namespace_lock);
1699		} else {
1700			spa_altroot(spa, altroot, buflen);
1701		}
1702	}
1703
1704	if (spa != NULL)
1705		spa_close(spa, FTAG);
1706
1707	return (error);
1708}
1709
1710/*
1711 * Validate that the auxiliary device array is well formed.  We must have an
1712 * array of nvlists, each of which describes a valid leaf vdev.  If this is an
1713 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
1714 * specified, as long as they are well-formed.
1715 */
1716static int
1717spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
1718    spa_aux_vdev_t *sav, const char *config, uint64_t version,
1719    vdev_labeltype_t label)
1720{
1721	nvlist_t **dev;
1722	uint_t i, ndev;
1723	vdev_t *vd;
1724	int error;
1725
1726	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1727
1728	/*
1729	 * It's acceptable to have no devs specified.
1730	 */
1731	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
1732		return (0);
1733
1734	if (ndev == 0)
1735		return (EINVAL);
1736
1737	/*
1738	 * Make sure the pool is formatted with a version that supports this
1739	 * device type.
1740	 */
1741	if (spa_version(spa) < version)
1742		return (ENOTSUP);
1743
1744	/*
1745	 * Set the pending device list so we correctly handle device in-use
1746	 * checking.
1747	 */
1748	sav->sav_pending = dev;
1749	sav->sav_npending = ndev;
1750
1751	for (i = 0; i < ndev; i++) {
1752		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
1753		    mode)) != 0)
1754			goto out;
1755
1756		if (!vd->vdev_ops->vdev_op_leaf) {
1757			vdev_free(vd);
1758			error = EINVAL;
1759			goto out;
1760		}
1761
1762		/*
1763		 * The L2ARC currently only supports disk devices in
1764		 * kernel context.  For user-level testing, we allow it.
1765		 */
1766#ifdef _KERNEL
1767		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
1768		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
1769			error = ENOTBLK;
1770			goto out;
1771		}
1772#endif
1773		vd->vdev_top = vd;
1774
1775		if ((error = vdev_open(vd)) == 0 &&
1776		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
1777			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
1778			    vd->vdev_guid) == 0);
1779		}
1780
1781		vdev_free(vd);
1782
1783		if (error &&
1784		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
1785			goto out;
1786		else
1787			error = 0;
1788	}
1789
1790out:
1791	sav->sav_pending = NULL;
1792	sav->sav_npending = 0;
1793	return (error);
1794}
1795
1796static int
1797spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
1798{
1799	int error;
1800
1801	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1802
1803	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1804	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
1805	    VDEV_LABEL_SPARE)) != 0) {
1806		return (error);
1807	}
1808
1809	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
1810	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
1811	    VDEV_LABEL_L2CACHE));
1812}
1813
1814static void
1815spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
1816    const char *config)
1817{
1818	int i;
1819
1820	if (sav->sav_config != NULL) {
1821		nvlist_t **olddevs;
1822		uint_t oldndevs;
1823		nvlist_t **newdevs;
1824
1825		/*
1826		 * Generate a new dev list by concatenating with the
1827		 * current dev list.
1828		 */
1829		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
1830		    &olddevs, &oldndevs) == 0);
1831
1832		newdevs = kmem_alloc(sizeof (void *) *
1833		    (ndevs + oldndevs), KM_SLEEP);
1834		for (i = 0; i < oldndevs; i++)
1835			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
1836			    KM_SLEEP) == 0);
1837		for (i = 0; i < ndevs; i++)
1838			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
1839			    KM_SLEEP) == 0);
1840
1841		VERIFY(nvlist_remove(sav->sav_config, config,
1842		    DATA_TYPE_NVLIST_ARRAY) == 0);
1843
1844		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
1845		    config, newdevs, ndevs + oldndevs) == 0);
1846		for (i = 0; i < oldndevs + ndevs; i++)
1847			nvlist_free(newdevs[i]);
1848		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
1849	} else {
1850		/*
1851		 * Generate a new dev list.
1852		 */
1853		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
1854		    KM_SLEEP) == 0);
1855		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
1856		    devs, ndevs) == 0);
1857	}
1858}
1859
1860/*
1861 * Stop and drop level 2 ARC devices
1862 */
1863void
1864spa_l2cache_drop(spa_t *spa)
1865{
1866	vdev_t *vd;
1867	int i;
1868	spa_aux_vdev_t *sav = &spa->spa_l2cache;
1869
1870	for (i = 0; i < sav->sav_count; i++) {
1871		uint64_t pool;
1872
1873		vd = sav->sav_vdevs[i];
1874		ASSERT(vd != NULL);
1875
1876		if ((spa_mode & FWRITE) &&
1877		    spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
1878		    l2arc_vdev_present(vd)) {
1879			l2arc_remove_vdev(vd);
1880		}
1881		if (vd->vdev_isl2cache)
1882			spa_l2cache_remove(vd);
1883		vdev_clear_stats(vd);
1884		(void) vdev_close(vd);
1885	}
1886}
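/*
 * (Dropping l2cache vdevs only discards cached copies of pool data; the
 * L2ARC contents are not persistent here, so nothing is lost beyond
 * read-cache warmth.)
 */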
1887
1888/*
1889 * Pool Creation
1890 */
1891int
1892spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
1893    const char *history_str, nvlist_t *zplprops)
1894{
1895	spa_t *spa;
1896	char *altroot = NULL;
1897	vdev_t *rvd;
1898	dsl_pool_t *dp;
1899	dmu_tx_t *tx;
1900	int c, error = 0;
1901	uint64_t txg = TXG_INITIAL;
1902	nvlist_t **spares, **l2cache;
1903	uint_t nspares, nl2cache;
1904	uint64_t version;
1905
1906	/*
1907	 * If this pool already exists, return failure.
1908	 */
1909	mutex_enter(&spa_namespace_lock);
1910	if (spa_lookup(pool) != NULL) {
1911		mutex_exit(&spa_namespace_lock);
1912		return (EEXIST);
1913	}
1914
1915	/*
1916	 * Allocate a new spa_t structure.
1917	 */
1918	(void) nvlist_lookup_string(props,
1919	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
1920	spa = spa_add(pool, altroot);
1921	spa_activate(spa);
1922
1923	spa->spa_uberblock.ub_txg = txg - 1;
1924
1925	if (props && (error = spa_prop_validate(spa, props))) {
1926		spa_unload(spa);
1927		spa_deactivate(spa);
1928		spa_remove(spa);
1929		mutex_exit(&spa_namespace_lock);
1930		return (error);
1931	}
1932
1933	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
1934	    &version) != 0)
1935		version = SPA_VERSION;
1936	ASSERT(version <= SPA_VERSION);
1937	spa->spa_uberblock.ub_version = version;
1938	spa->spa_ubsync = spa->spa_uberblock;
1939
1940	/*
1941	 * Create the root vdev.
1942	 */
1943	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1944
1945	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
1946
1947	ASSERT(error != 0 || rvd != NULL);
1948	ASSERT(error != 0 || spa->spa_root_vdev == rvd);
1949
1950	if (error == 0 && !zfs_allocatable_devs(nvroot))
1951		error = EINVAL;
1952
1953	if (error == 0 &&
1954	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
1955	    (error = spa_validate_aux(spa, nvroot, txg,
1956	    VDEV_ALLOC_ADD)) == 0) {
1957		for (c = 0; c < rvd->vdev_children; c++)
1958			vdev_init(rvd->vdev_child[c], txg);
1959		vdev_config_dirty(rvd);
1960	}
1961
1962	spa_config_exit(spa, SCL_ALL, FTAG);
1963
1964	if (error != 0) {
1965		spa_unload(spa);
1966		spa_deactivate(spa);
1967		spa_remove(spa);
1968		mutex_exit(&spa_namespace_lock);
1969		return (error);
1970	}
1971
1972	/*
1973	 * Get the list of spares, if specified.
1974	 */
1975	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1976	    &spares, &nspares) == 0) {
1977		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
1978		    KM_SLEEP) == 0);
1979		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
1980		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
1981		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1982		spa_load_spares(spa);
1983		spa_config_exit(spa, SCL_ALL, FTAG);
1984		spa->spa_spares.sav_sync = B_TRUE;
1985	}
1986
1987	/*
1988	 * Get the list of level 2 cache devices, if specified.
1989	 */
1990	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1991	    &l2cache, &nl2cache) == 0) {
1992		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
1993		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
1994		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
1995		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
1996		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1997		spa_load_l2cache(spa);
1998		spa_config_exit(spa, SCL_ALL, FTAG);
1999		spa->spa_l2cache.sav_sync = B_TRUE;
2000	}
2001
2002	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
2003	spa->spa_meta_objset = dp->dp_meta_objset;
2004
2005	tx = dmu_tx_create_assigned(dp, txg);
2006
2007	/*
2008	 * Create the pool config object.
2009	 */
2010	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
2011	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
2012	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
2013
2014	if (zap_add(spa->spa_meta_objset,
2015	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
2016	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
2017		cmn_err(CE_PANIC, "failed to add pool config");
2018	}
2019
2020	/* Pools at or above SPA_VERSION_RAIDZ_DEFLATE are always deflated. */
2021	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
2022		spa->spa_deflate = TRUE;
2023		if (zap_add(spa->spa_meta_objset,
2024		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
2025		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
2026			cmn_err(CE_PANIC, "failed to add deflate");
2027		}
2028	}
2029
2030	/*
2031	 * Create the deferred-free bplist object.  Turn off compression
2032	 * because sync-to-convergence takes longer if the blocksize
2033	 * keeps changing.
2034	 */
2035	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
2036	    1 << 14, tx);
2037	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
2038	    ZIO_COMPRESS_OFF, tx);
2039
2040	if (zap_add(spa->spa_meta_objset,
2041	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
2042	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
2043		cmn_err(CE_PANIC, "failed to add bplist");
2044	}
2045
2046	/*
2047	 * Create the pool's history object.
2048	 */
2049	if (version >= SPA_VERSION_ZPOOL_HISTORY)
2050		spa_history_create_obj(spa, tx);
2051
2052	/*
2053	 * Set pool properties.
2054	 */
2055	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
2056	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
2057	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
2058	if (props)
2059		spa_sync_props(spa, props, CRED(), tx);
2060
2061	dmu_tx_commit(tx);
2062
2063	spa->spa_sync_on = B_TRUE;
2064	txg_sync_start(spa->spa_dsl_pool);
2065
2066	/*
2067	 * We explicitly wait for the first transaction to complete so that our
2068	 * bean counters are appropriately updated.
2069	 */
2070	txg_wait_synced(spa->spa_dsl_pool, txg);
2071
2072	spa_config_sync(spa, B_FALSE, B_TRUE);
2073
2074	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
2075		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
2076
2077	mutex_exit(&spa_namespace_lock);
2078
2079	spa->spa_minref = refcount_count(&spa->spa_refcount);
2080
2081	return (0);
2082}
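
/*
 * Illustrative caller (hypothetical, not taken from this codebase): the
 * ioctl path builds an nvroot nvlist describing the requested vdevs and
 * then does something like:
 *
 *	error = spa_create("tank", nvroot, props, history_str, zplprops);
 *
 * On success the pool is live: its first txg has synced and the on-disk
 * config cache already describes it.
 */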
2083
2084/*
2085 * Import the given pool into the system.  We set up the necessary spa_t and
2086 * then call spa_load() to do the dirty work.
2087 */
2088static int
2089spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
2090    boolean_t isroot, boolean_t allowfaulted)
2091{
2092	spa_t *spa;
2093	char *altroot = NULL;
2094	int error, loaderr;
2095	nvlist_t *nvroot;
2096	nvlist_t **spares, **l2cache;
2097	uint_t nspares, nl2cache;
2098
2099	/*
2100	 * If a pool with this name exists, return failure.
2101	 */
2102	mutex_enter(&spa_namespace_lock);
2103	if ((spa = spa_lookup(pool)) != NULL) {
2104		if (isroot) {
2105			/*
2106			 * Remove the existing root pool from the
2107			 * namespace so that we can replace it with
2108			 * the correct config we just read in.
2109			 */
2110			ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
2111			spa_remove(spa);
2112		} else {
2113			mutex_exit(&spa_namespace_lock);
2114			return (EEXIST);
2115		}
2116	}
2117
2118	/*
2119	 * Create and initialize the spa structure.
2120	 */
2121	(void) nvlist_lookup_string(props,
2122	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
2123	spa = spa_add(pool, altroot);
2124	spa_activate(spa);
2125
2126	if (allowfaulted)
2127		spa->spa_import_faulted = B_TRUE;
2128	spa->spa_is_root = isroot;
2129
2130	/*
2131	 * Pass off the heavy lifting to spa_load().
2132	 * Pass TRUE for mosconfig (unless this is a root pool) because
2133	 * the user-supplied config is actually the one to trust when
2134	 * doing an import.
2135	 */
2136	loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot);
2137
2138	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2139	/*
2140	 * Toss any existing sparelist, as it doesn't have any validity anymore,
2141	 * and conflicts with spa_has_spare().
2142	 */
2143	if (!isroot && spa->spa_spares.sav_config) {
2144		nvlist_free(spa->spa_spares.sav_config);
2145		spa->spa_spares.sav_config = NULL;
2146		spa_load_spares(spa);
2147	}
2148	if (!isroot && spa->spa_l2cache.sav_config) {
2149		nvlist_free(spa->spa_l2cache.sav_config);
2150		spa->spa_l2cache.sav_config = NULL;
2151		spa_load_l2cache(spa);
2152	}
2153
2154	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2155	    &nvroot) == 0);
2156	if (error == 0)
2157		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
2158	if (error == 0)
2159		error = spa_validate_aux(spa, nvroot, -1ULL,
2160		    VDEV_ALLOC_L2CACHE);
2161	spa_config_exit(spa, SCL_ALL, FTAG);
2162
2163	if (error != 0 || (props && (error = spa_prop_set(spa, props)))) {
2164		if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
2165			/*
2166			 * If we failed to load the pool, but 'allowfaulted' is
2167			 * set, then manually set the config as if the config
2168			 * passed in was specified in the cache file.
2169			 */
2170			error = 0;
2171			spa->spa_import_faulted = B_FALSE;
2172			if (spa->spa_config == NULL)
2173				spa->spa_config = spa_config_generate(spa,
2174				    NULL, -1ULL, B_TRUE);
2175			spa_unload(spa);
2176			spa_deactivate(spa);
2177			spa_config_sync(spa, B_FALSE, B_TRUE);
2178		} else {
2179			spa_unload(spa);
2180			spa_deactivate(spa);
2181			spa_remove(spa);
2182		}
2183		mutex_exit(&spa_namespace_lock);
2184		return (error);
2185	}
2186
2187	/*
2188	 * Override any spares and level 2 cache devices as specified by
2189	 * the user, as these may have correct device names/devids, etc.
2190	 */
2191	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
2192	    &spares, &nspares) == 0) {
2193		if (spa->spa_spares.sav_config)
2194			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
2195			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
2196		else
2197			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
2198			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2199		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
2200		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
2201		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2202		spa_load_spares(spa);
2203		spa_config_exit(spa, SCL_ALL, FTAG);
2204		spa->spa_spares.sav_sync = B_TRUE;
2205	}
2206	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
2207	    &l2cache, &nl2cache) == 0) {
2208		if (spa->spa_l2cache.sav_config)
2209			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
2210			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
2211		else
2212			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
2213			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
2214		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
2215		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
2216		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2217		spa_load_l2cache(spa);
2218		spa_config_exit(spa, SCL_ALL, FTAG);
2219		spa->spa_l2cache.sav_sync = B_TRUE;
2220	}
2221
2222	if (spa_mode & FWRITE) {
2223		/*
2224		 * Update the config cache to include the newly-imported pool.
2225		 */
2226		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot);
2227	}
2228
2229	spa->spa_import_faulted = B_FALSE;
2230	mutex_exit(&spa_namespace_lock);
2231
2232	return (0);
2233}
2234
2235#ifdef _KERNEL
2236/*
2237 * Build a "root" vdev for a top level vdev read in from a rootpool
2238 * device label.
2239 */
2240static void
2241spa_build_rootpool_config(nvlist_t *config)
2242{
2243	nvlist_t *nvtop, *nvroot;
2244	uint64_t pgid;
2245
2246	/*
2247	 * Add this top-level vdev to the child array.
2248	 */
2249	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
2250	    == 0);
2251	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
2252	    == 0);
2253
2254	/*
2255	 * Put this pool's top-level vdevs into a root vdev.
2256	 */
2257	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2258	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
2259	    == 0);
2260	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
2261	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
2262	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2263	    &nvtop, 1) == 0);
2264
2265	/*
2266	 * Replace the existing vdev_tree with the new root vdev in
2267	 * this pool's configuration (remove the old, add the new).
2268	 */
2269	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
2270	nvlist_free(nvroot);
2271}
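
/*
 * Sketch of the vdev_tree produced above (derived from the nvlist calls
 * in this function):
 *
 *	type='root', id=0, guid=<pool guid>
 *	    children[0] = <top-level vdev read from the device label>
 */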
2272
2273/*
2274 * Get the root pool information from the root disk, then import the root pool
2275 * during the system boot up time.
2276 */
2277extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
2278
2279int
2280spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
2281    uint64_t *besttxg)
2282{
2283	nvlist_t *config;
2284	uint64_t txg;
2285	int error;
2286
2287	if ((error = vdev_disk_read_rootlabel(devpath, devid, &config)) != 0)
2288		return (error);
2289
2290	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
2291
2292	if (bestconf != NULL)
2293		*bestconf = config;
2294	else
2295		nvlist_free(config);
2296	*besttxg = txg;
2297	return (0);
2298}
2299
2300boolean_t
2301spa_rootdev_validate(nvlist_t *nv)
2302{
2303	uint64_t ival;
2304
2305	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2306	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2307	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2308		return (B_FALSE);
2309
2310	return (B_TRUE);
2311}
2312
2313
2314/*
2315 * Given the boot device's physical path or devid, check if the device
2316 * is in a valid state.  If so, return the configuration from the vdev
2317 * label.
2318 */
2319int
2320spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
2321{
2322	nvlist_t *conf = NULL;
2323	uint64_t txg = 0;
2324	nvlist_t *nvtop, **child;
2325	char *type;
2326	char *bootpath = NULL;
2327	uint_t children, c;
2328	char *tmp;
2329	int error;
2330
2331	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
2332		*tmp = '\0';
2333	if ((error = spa_check_rootconf(devpath, devid, &conf, &txg)) != 0) {
2334		cmn_err(CE_NOTE, "error reading device label");
2335		return (error);
2336	}
2337	if (txg == 0) {
2338		cmn_err(CE_NOTE, "this device is detached");
2339		nvlist_free(conf);
2340		return (EINVAL);
2341	}
2342
2343	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
2344	    &nvtop) == 0);
2345	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);
2346
2347	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2348		if (spa_rootdev_validate(nvtop)) {
2349			goto out;
2350		} else {
2351			nvlist_free(conf);
2352			return (EINVAL);
2353		}
2354	}
2355
2356	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);
2357
2358	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
2359	    &child, &children) == 0);
2360
2361	/*
2362	 * Go through the vdevs in the mirror to see if the given device
2363	 * has the most recent txg.  Only the device with the most
2364	 * recent txg has valid information and should be booted.
2365	 */
2366	for (c = 0; c < children; c++) {
2367		char *cdevid, *cpath;
2368		uint64_t tmptxg;
2369
2370		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
2371		    &cpath) != 0)
2372			return (EINVAL);
2373		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID,
2374		    &cdevid) != 0)
2375			return (EINVAL);
2376		if ((spa_check_rootconf(cpath, cdevid, NULL,
2377		    &tmptxg) == 0) && (tmptxg > txg)) {
2378			txg = tmptxg;
2379			VERIFY(nvlist_lookup_string(child[c],
2380			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
2381		}
2382	}
2383
2384	/* If a sibling has a newer txg, the device we booted from is stale. */
2385	if (bootpath) {
2386		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
2387		return (EINVAL);
2388	}
2389out:
2390	*bestconf = conf;
2391	return (0);
2392}
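
/*
 * Worked example (hypothetical txgs): if the label on the boot device
 * shows txg 100 but its mirror sibling shows txg 102, the loop above
 * records the sibling's path in 'bootpath', so we print that path and
 * return EINVAL rather than boot from stale data.
 */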
2393
2394/*
2395 * Import a root pool.
2396 *
2397 * For x86, devpath_list will consist of the devid and/or physpath name
2398 * of the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
2399 * The GRUB "findroot" command will return the vdev we should boot from.
2400 *
2401 * For SPARC, devpath_list consists of the physpath name of the booting
2402 * device, whether the root pool is a single-device pool or a mirrored pool.
2403 * e.g.
2404 *	"/pci@1f,0/ide@d/disk@0,0:a"
2405 */
2406int
2407spa_import_rootpool(char *devpath, char *devid)
2408{
2409	nvlist_t *conf = NULL;
2410	char *pname;
2411	int error;
2412
2413	/*
2414	 * Get the vdev pathname and configuration from the most
2415	 * recently updated vdev (highest txg).
2416	 */
2417	if ((error = spa_get_rootconf(devpath, devid, &conf)) != 0)
2418		goto msg_out;
2419
2420	/*
2421	 * Add type "root" vdev to the config.
2422	 */
2423	spa_build_rootpool_config(conf);
2424
2425	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
2426
2427	/*
2428	 * We specify 'allowfaulted' for this to be treated like spa_open()
2429	 * instead of spa_import().  This prevents us from marking vdevs as
2430	 * persistently unavailable, and generates FMA ereports as if it were a
2431	 * pool open, not import.
2432	 */
2433	error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
2434	ASSERT(error != EEXIST);
2435
2436	nvlist_free(conf);
2437	return (error);
2438
2439msg_out:
2440	cmn_err(CE_NOTE, "\n"
2441	    "  ***************************************************  \n"
2442	    "  *  This device is not bootable!                   *  \n"
2443	    "  *  It is either offlined or detached or faulted.  *  \n"
2444	    "  *  Please try to boot from a different device.    *  \n"
2445	    "  ***************************************************  ");
2446
2447	return (error);
2448}
2449#endif
2450
2451/*
2452 * Import a non-root pool into the system.
2453 */
2454int
2455spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2456{
2457	return (spa_import_common(pool, config, props, B_FALSE, B_FALSE));
2458}
2459
2460int
2461spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props)
2462{
2463	return (spa_import_common(pool, config, props, B_FALSE, B_TRUE));
2464}
2465
2466
2467/*
2468 * This (illegal) pool name is used when temporarily importing a spa_t in order
2469 * to get the vdev stats associated with the imported devices.
2470 */
2471#define	TRYIMPORT_NAME	"$import"
2472
2473nvlist_t *
2474spa_tryimport(nvlist_t *tryconfig)
2475{
2476	nvlist_t *config = NULL;
2477	char *poolname;
2478	spa_t *spa;
2479	uint64_t state;
2480
2481	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
2482		return (NULL);
2483
2484	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
2485		return (NULL);
2486
2487	/*
2488	 * Create and initialize the spa structure.
2489	 */
2490	mutex_enter(&spa_namespace_lock);
2491	spa = spa_add(TRYIMPORT_NAME, NULL);
2492	spa_activate(spa);
2493
2494	/*
2495	 * Pass off the heavy lifting to spa_load().
2496	 * Pass TRUE for mosconfig because the user-supplied config
2497	 * is actually the one to trust when doing an import.
2498	 */
2499	(void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
2500
2501	/*
2502	 * If 'tryconfig' was at least parsable, return the current config.
2503	 */
2504	if (spa->spa_root_vdev != NULL) {
2505		config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2506		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
2507		    poolname) == 0);
2508		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2509		    state) == 0);
2510		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2511		    spa->spa_uberblock.ub_timestamp) == 0);
2512
2513		/*
2514		 * If the bootfs property exists on this pool then we
2515		 * copy it out so that external consumers can tell which
2516		 * pools are bootable.
2517		 */
2518		if (spa->spa_bootfs) {
2519			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2520
2521			/*
2522			 * We have to play games with the name since the
2523			 * pool was opened as TRYIMPORT_NAME.
2524			 */
2525			if (dsl_dsobj_to_dsname(spa_name(spa),
2526			    spa->spa_bootfs, tmpname) == 0) {
2527				char *cp;
2528				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2529
2530				cp = strchr(tmpname, '/');
2531				if (cp == NULL) {
2532					(void) strlcpy(dsname, tmpname,
2533					    MAXPATHLEN);
2534				} else {
2535					(void) snprintf(dsname, MAXPATHLEN,
2536					    "%s/%s", poolname, ++cp);
2537				}
2538				VERIFY(nvlist_add_string(config,
2539				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
2540				kmem_free(dsname, MAXPATHLEN);
2541			}
2542			kmem_free(tmpname, MAXPATHLEN);
2543		}
2544
2545		/*
2546		 * Add the list of hot spares and level 2 cache devices.
2547		 */
2548		spa_add_spares(spa, config);
2549		spa_add_l2cache(spa, config);
2550	}
2551
2552	spa_unload(spa);
2553	spa_deactivate(spa);
2554	spa_remove(spa);
2555	mutex_exit(&spa_namespace_lock);
2556
2557	return (config);
2558}
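
/*
 * Hypothetical usage: an import scan assembles a candidate config from
 * on-disk labels and calls spa_tryimport() to obtain a cleaned-up config
 * for display; the temporary "$import" spa_t never outlives the call.
 */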
2559
2560/*
2561 * Pool export/destroy
2562 *
2563 * The act of destroying or exporting a pool is very simple.  We make sure there
2564 * is no more pending I/O and any references to the pool are gone.  Then, we
2565 * update the pool state and sync all the labels to disk, removing the
2566 * configuration from the cache afterwards.
2567 */
2568static int
2569spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
2570    boolean_t force)
2571{
2572	spa_t *spa;
2573
2574	if (oldconfig)
2575		*oldconfig = NULL;
2576
2577	if (!(spa_mode & FWRITE))
2578		return (EROFS);
2579
2580	mutex_enter(&spa_namespace_lock);
2581	if ((spa = spa_lookup(pool)) == NULL) {
2582		mutex_exit(&spa_namespace_lock);
2583		return (ENOENT);
2584	}
2585
2586	/*
2587	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
2588	 * reacquire the namespace lock, and see if we can export.
2589	 */
2590	spa_open_ref(spa, FTAG);
2591	mutex_exit(&spa_namespace_lock);
2592	spa_async_suspend(spa);
2593	mutex_enter(&spa_namespace_lock);
2594	spa_close(spa, FTAG);
2595
2596	/*
2597	 * The pool will be in core if it's openable,
2598	 * in which case we can modify its state.
2599	 */
2600	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
2601		/*
2602		 * Objsets may be open only because they're dirty, so we
2603		 * have to force it to sync before checking spa_refcount.
2604		 */
2605		txg_wait_synced(spa->spa_dsl_pool, 0);
2606
2607		/*
2608		 * A pool cannot be exported or destroyed if there are active
2609		 * references.  If we are resetting a pool, allow references by
2610		 * fault injection handlers.
2611		 */
2612		if (!spa_refcount_zero(spa) ||
2613		    (spa->spa_inject_ref != 0 &&
2614		    new_state != POOL_STATE_UNINITIALIZED)) {
2615			spa_async_resume(spa);
2616			mutex_exit(&spa_namespace_lock);
2617			return (EBUSY);
2618		}
2619
2620		/*
2621		 * A pool cannot be exported if it has an active shared spare.
2622		 * This is to prevent other pools from stealing the active
2623		 * spare from an exported pool.  The user can still force
2624		 * the export, at their own risk.
2625		 */
2626		if (!force && new_state == POOL_STATE_EXPORTED &&
2627		    spa_has_active_shared_spare(spa)) {
2628			spa_async_resume(spa);
2629			mutex_exit(&spa_namespace_lock);
2630			return (EXDEV);
2631		}
2632
2633		/*
2634		 * We want this to be reflected on every label,
2635		 * so mark them all dirty.  spa_unload() will do the
2636		 * final sync that pushes these changes out.
2637		 */
2638		if (new_state != POOL_STATE_UNINITIALIZED) {
2639			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2640			spa->spa_state = new_state;
2641			spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
2642			vdev_config_dirty(spa->spa_root_vdev);
2643			spa_config_exit(spa, SCL_ALL, FTAG);
2644		}
2645	}
2646
2647	spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
2648
2649	if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
2650		spa_unload(spa);
2651		spa_deactivate(spa);
2652	}
2653
2654	if (oldconfig && spa->spa_config)
2655		VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
2656
2657	if (new_state != POOL_STATE_UNINITIALIZED) {
2658		spa_config_sync(spa, B_TRUE, B_TRUE);
2659		spa_remove(spa);
2660	}
2661	mutex_exit(&spa_namespace_lock);
2662
2663	return (0);
2664}
2665
2666/*
2667 * Destroy a storage pool.
2668 */
2669int
2670spa_destroy(char *pool)
2671{
2672	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, B_FALSE));
2673}
2674
2675/*
2676 * Export a storage pool.
2677 */
2678int
2679spa_export(char *pool, nvlist_t **oldconfig, boolean_t force)
2680{
2681	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, force));
2682}
2683
2684/*
2685 * Similar to spa_export(), this unloads the spa_t without actually removing it
2686 * from the namespace in any way.
2687 */
2688int
2689spa_reset(char *pool)
2690{
2691	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
2692	    B_FALSE));
2693}
2694
2695/*
2696 * ==========================================================================
2697 * Device manipulation
2698 * ==========================================================================
2699 */
2700
2701/*
2702 * Add a device to a storage pool.
2703 */
2704int
2705spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
2706{
2707	uint64_t txg;
2708	int c, error;
2709	vdev_t *rvd = spa->spa_root_vdev;
2710	vdev_t *vd, *tvd;
2711	nvlist_t **spares, **l2cache;
2712	uint_t nspares, nl2cache;
2713
2714	txg = spa_vdev_enter(spa);
2715
2716	if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
2717	    VDEV_ALLOC_ADD)) != 0)
2718		return (spa_vdev_exit(spa, NULL, txg, error));
2719
2720	spa->spa_pending_vdev = vd;	/* spa_vdev_exit() will clear this */
2721
2722	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
2723	    &nspares) != 0)
2724		nspares = 0;
2725
2726	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
2727	    &nl2cache) != 0)
2728		nl2cache = 0;
2729
2730	if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
2731		return (spa_vdev_exit(spa, vd, txg, EINVAL));
2732
2733	if (vd->vdev_children != 0 &&
2734	    (error = vdev_create(vd, txg, B_FALSE)) != 0)
2735		return (spa_vdev_exit(spa, vd, txg, error));
2736
2737	/*
2738	 * We must validate the spares and l2cache devices after checking the
2739	 * children.  Otherwise, vdev_inuse() will blindly overwrite the spare.
2740	 */
2741	if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
2742		return (spa_vdev_exit(spa, vd, txg, error));
2743
2744	/*
2745	 * Transfer each new top-level vdev from vd to rvd.
2746	 */
2747	for (c = 0; c < vd->vdev_children; c++) {
2748		tvd = vd->vdev_child[c];
2749		vdev_remove_child(vd, tvd);
2750		tvd->vdev_id = rvd->vdev_children;
2751		vdev_add_child(rvd, tvd);
2752		vdev_config_dirty(tvd);
2753	}
2754
2755	if (nspares != 0) {
2756		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
2757		    ZPOOL_CONFIG_SPARES);
2758		spa_load_spares(spa);
2759		spa->spa_spares.sav_sync = B_TRUE;
2760	}
2761
2762	if (nl2cache != 0) {
2763		spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
2764		    ZPOOL_CONFIG_L2CACHE);
2765		spa_load_l2cache(spa);
2766		spa->spa_l2cache.sav_sync = B_TRUE;
2767	}
2768
2769	/*
2770	 * We have to be careful when adding new vdevs to an existing pool.
2771	 * If other threads start allocating from these vdevs before we
2772	 * sync the config cache, and we lose power, then upon reboot we may
2773	 * fail to open the pool because there are DVAs that the config cache
2774	 * can't translate.  Therefore, we first add the vdevs without
2775	 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
2776	 * and then let spa_config_update() initialize the new metaslabs.
2777	 *
2778	 * spa_load() checks for added-but-not-initialized vdevs, so that
2779	 * if we lose power at any point in this sequence, the remaining
2780	 * steps will be completed the next time we load the pool.
2781	 */
2782	(void) spa_vdev_exit(spa, vd, txg, 0);
2783
2784	mutex_enter(&spa_namespace_lock);
2785	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2786	mutex_exit(&spa_namespace_lock);
2787
2788	return (0);
2789}
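
/*
 * Hypothetical example: adding a new mirror to an existing pool arrives
 * here with nvroot describing one new top-level vdev; the loop above
 * grafts it in as rvd->vdev_child[rvd->vdev_children - 1], and its
 * metaslabs are initialized by the spa_config_update() call.
 */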
2790
2791/*
2792 * Attach a device to a mirror.  The arguments are the guid of any device
2793 * in the mirror, and the nvroot for the new device.  If the guid names
2794 * a device that is not mirrored, we automatically insert the mirror vdev.
2795 *
2796 * If 'replacing' is specified, the new device is intended to replace the
2797 * existing device; in this case the two devices are made into their own
2798 * mirror using the 'replacing' vdev, which is functionally identical to
2799 * the mirror vdev (it actually reuses all the same ops) but has a few
2800 * extra rules: you can't attach to it after it's been created, and upon
2801 * completion of resilvering, the first disk (the one being replaced)
2802 * is automatically detached.
2803 */
2804int
2805spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
2806{
2807	uint64_t txg, open_txg;
2808	vdev_t *rvd = spa->spa_root_vdev;
2809	vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
2810	vdev_ops_t *pvops;
2811	dmu_tx_t *tx;
2812	char *oldvdpath, *newvdpath;
2813	int newvd_isspare;
2814	int error;
2815
2816	txg = spa_vdev_enter(spa);
2817
2818	oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
2819
2820	if (oldvd == NULL)
2821		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
2822
2823	if (!oldvd->vdev_ops->vdev_op_leaf)
2824		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
2825
2826	pvd = oldvd->vdev_parent;
2827
2828	if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
2829	    VDEV_ALLOC_ADD)) != 0)
2830		return (spa_vdev_exit(spa, NULL, txg, EINVAL));
2831
2832	if (newrootvd->vdev_children != 1)
2833		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2834
2835	newvd = newrootvd->vdev_child[0];
2836
2837	if (!newvd->vdev_ops->vdev_op_leaf)
2838		return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2839
2840	if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
2841		return (spa_vdev_exit(spa, newrootvd, txg, error));
2842
2843	/*
2844	 * Spares can't replace logs
2845	 */
2846	if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
2847		return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2848
2849	if (!replacing) {
2850		/*
2851		 * For attach, the only allowable parent is a mirror or the root
2852		 * vdev.
2853		 */
2854		if (pvd->vdev_ops != &vdev_mirror_ops &&
2855		    pvd->vdev_ops != &vdev_root_ops)
2856			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2857
2858		pvops = &vdev_mirror_ops;
2859	} else {
2860		/*
2861		 * Active hot spares can only be replaced by inactive hot
2862		 * spares.
2863		 */
2864		if (pvd->vdev_ops == &vdev_spare_ops &&
2865		    pvd->vdev_child[1] == oldvd &&
2866		    !spa_has_spare(spa, newvd->vdev_guid))
2867			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2868
2869		/*
2870		 * If the source is a hot spare, and the parent isn't already a
2871		 * spare, then we want to create a new hot spare.  Otherwise, we
2872		 * want to create a replacing vdev.  The user is not allowed to
2873		 * attach to a spared vdev child unless the 'isspare' state is
2874		 * the same (spare replaces spare, non-spare replaces
2875		 * non-spare).
2876		 */
2877		if (pvd->vdev_ops == &vdev_replacing_ops)
2878			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2879		else if (pvd->vdev_ops == &vdev_spare_ops &&
2880		    newvd->vdev_isspare != oldvd->vdev_isspare)
2881			return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2882		else if (pvd->vdev_ops != &vdev_spare_ops &&
2883		    newvd->vdev_isspare)
2884			pvops = &vdev_spare_ops;
2885		else
2886			pvops = &vdev_replacing_ops;
2887	}
2888
2889	/*
2890	 * Compare the new device size with the replaceable/attachable
2891	 * device size.
2892	 */
2893	if (newvd->vdev_psize < vdev_get_rsize(oldvd))
2894		return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
2895
2896	/*
2897	 * The new device cannot have a higher alignment requirement
2898	 * than the top-level vdev.
2899	 */
2900	if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
2901		return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
2902
2903	/*
2904	 * If this is an in-place replacement, update oldvd's path and devid
2905	 * to make it distinguishable from newvd, and unopenable from now on.
2906	 */
2907	if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
2908		spa_strfree(oldvd->vdev_path);
2909		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
2910		    KM_SLEEP);
2911		(void) sprintf(oldvd->vdev_path, "%s/%s",
2912		    newvd->vdev_path, "old");
2913		if (oldvd->vdev_devid != NULL) {
2914			spa_strfree(oldvd->vdev_devid);
2915			oldvd->vdev_devid = NULL;
2916		}
2917	}
2918
2919	/*
2920	 * If the parent is not a mirror, or if we're replacing, insert the new
2921	 * mirror/replacing/spare vdev above oldvd.
2922	 */
2923	if (pvd->vdev_ops != pvops)
2924		pvd = vdev_add_parent(oldvd, pvops);
2925
2926	ASSERT(pvd->vdev_top->vdev_parent == rvd);
2927	ASSERT(pvd->vdev_ops == pvops);
2928	ASSERT(oldvd->vdev_parent == pvd);
2929
2930	/*
2931	 * Extract the new device from its root and add it to pvd.
2932	 */
2933	vdev_remove_child(newrootvd, newvd);
2934	newvd->vdev_id = pvd->vdev_children;
2935	vdev_add_child(pvd, newvd);
2936
2937	/*
2938	 * If newvd is smaller than oldvd, but larger than its rsize,
2939	 * the addition of newvd may have decreased our parent's asize.
2940	 */
2941	pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
2942
2943	tvd = newvd->vdev_top;
2944	ASSERT(pvd->vdev_top == tvd);
2945	ASSERT(tvd->vdev_parent == rvd);
2946
2947	vdev_config_dirty(tvd);
2948
2949	/*
2950	 * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
2951	 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
2952	 */
2953	open_txg = txg + TXG_CONCURRENT_STATES - 1;
2954
2955	mutex_enter(&newvd->vdev_dtl_lock);
2956	space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
2957	    open_txg - TXG_INITIAL + 1);
2958	mutex_exit(&newvd->vdev_dtl_lock);
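	/*
	 * Worked example (assuming TXG_CONCURRENT_STATES == 3 and
	 * TXG_INITIAL == 4): for txg == 100, open_txg == 102, so the
	 * entry added above starts at txg 4 and spans 99 txgs, covering
	 * [4, 102] -- everything newvd could possibly be missing.
	 */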
2959
2960	if (newvd->vdev_isspare)
2961		spa_spare_activate(newvd);
2962	oldvdpath = spa_strdup(oldvd->vdev_path);
2963	newvdpath = spa_strdup(newvd->vdev_path);
2964	newvd_isspare = newvd->vdev_isspare;
2965
2966	/*
2967	 * Mark newvd's DTL dirty in this txg.
2968	 */
2969	vdev_dirty(tvd, VDD_DTL, newvd, txg);
2970
2971	(void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
2972
2973	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
2974	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
2975		spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
2976		    CRED(),  "%s vdev=%s %s vdev=%s",
2977		    replacing && newvd_isspare ? "spare in" :
2978		    replacing ? "replace" : "attach", newvdpath,
2979		    replacing ? "for" : "to", oldvdpath);
2980		dmu_tx_commit(tx);
2981	} else {
2982		dmu_tx_abort(tx);
2983	}
2984
2985	spa_strfree(oldvdpath);
2986	spa_strfree(newvdpath);
2987
2988	/*
2989	 * Kick off a resilver to update newvd.
2990	 */
2991	VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);
2992
2993	return (0);
2994}
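
/*
 * Hypothetical examples of the two modes described above:
 *
 *	spa_vdev_attach(spa, guid, nvroot, 0)	attach: oldvd gains a
 *		mirror sibling (a mirror vdev is interposed if needed).
 *	spa_vdev_attach(spa, guid, nvroot, 1)	replace: a 'replacing'
 *		vdev is interposed and oldvd detaches once resilvering
 *		completes.
 */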
2995
2996/*
2997 * Detach a device from a mirror or replacing vdev.
2998 * If 'replace_done' is specified, only detach if the parent
2999 * is a replacing vdev.
3000 */
3001int
3002spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
3003{
3004	uint64_t txg;
3005	int c, t, error;
3006	vdev_t *rvd = spa->spa_root_vdev;
3007	vdev_t *vd, *pvd, *cvd, *tvd;
3008	boolean_t unspare = B_FALSE;
3009	uint64_t unspare_guid;
3010	size_t len;
3011
3012	txg = spa_vdev_enter(spa);
3013
3014	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3015
3016	if (vd == NULL)
3017		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3018
3019	if (!vd->vdev_ops->vdev_op_leaf)
3020		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3021
3022	pvd = vd->vdev_parent;
3023
3024	/*
3025	 * If replace_done is specified, only remove this device if it's
3026	 * the first child of a replacing vdev.  For the 'spare' vdev, either
3027	 * disk can be removed.
3028	 */
3029	if (replace_done) {
3030		if (pvd->vdev_ops == &vdev_replacing_ops) {
3031			if (vd->vdev_id != 0)
3032				return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3033		} else if (pvd->vdev_ops != &vdev_spare_ops) {
3034			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3035		}
3036	}
3037
3038	ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
3039	    spa_version(spa) >= SPA_VERSION_SPARES);
3040
3041	/*
3042	 * Only mirror, replacing, and spare vdevs support detach.
3043	 */
3044	if (pvd->vdev_ops != &vdev_replacing_ops &&
3045	    pvd->vdev_ops != &vdev_mirror_ops &&
3046	    pvd->vdev_ops != &vdev_spare_ops)
3047		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3048
3049	/*
3050	 * If there's only one replica, you can't detach it.
3051	 */
3052	if (pvd->vdev_children <= 1)
3053		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3054
3055	/*
3056	 * If all siblings have non-empty DTLs, this device may have the only
3057	 * valid copy of the data, which means we cannot safely detach it.
3058	 *
3059	 * XXX -- as in the vdev_offline() case, we really want a more
3060	 * precise DTL check.
3061	 */
3062	for (c = 0; c < pvd->vdev_children; c++) {
3063		uint64_t dirty;
3064
3065		cvd = pvd->vdev_child[c];
3066		if (cvd == vd)
3067			continue;
3068		if (vdev_is_dead(cvd))
3069			continue;
3070		mutex_enter(&cvd->vdev_dtl_lock);
3071		dirty = cvd->vdev_dtl_map.sm_space |
3072		    cvd->vdev_dtl_scrub.sm_space;
3073		mutex_exit(&cvd->vdev_dtl_lock);
3074		if (!dirty)
3075			break;
3076	}
3077
3078	if (c == pvd->vdev_children)
3079		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3080
3081	/*
3082	 * If we are detaching the second disk from a replacing vdev, then
3083	 * check to see if we changed the original vdev's path to have "/old"
3084	 * at the end in spa_vdev_attach().  If so, undo that change now.
3085	 */
3086	if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
3087	    pvd->vdev_child[0]->vdev_path != NULL &&
3088	    pvd->vdev_child[1]->vdev_path != NULL) {
3089		ASSERT(pvd->vdev_child[1] == vd);
3090		cvd = pvd->vdev_child[0];
3091		len = strlen(vd->vdev_path);
3092		if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
3093		    strcmp(cvd->vdev_path + len, "/old") == 0) {
3094			spa_strfree(cvd->vdev_path);
3095			cvd->vdev_path = spa_strdup(vd->vdev_path);
3096		}
3097	}
3098
3099	/*
3100	 * If we are detaching the original disk from a spare, then it implies
3101	 * that the spare should become a real disk, and be removed from the
3102	 * active spare list for the pool.
3103	 */
3104	if (pvd->vdev_ops == &vdev_spare_ops &&
3105	    vd->vdev_id == 0)
3106		unspare = B_TRUE;
3107
3108	/*
3109	 * Erase the disk labels so the disk can be used for other things.
3110	 * This must be done after all other error cases are handled,
3111	 * but before we disembowel vd (so we can still do I/O to it).
3112	 * But if we can't do it, don't treat the error as fatal --
3113	 * it may be that the unwritability of the disk is the reason
3114	 * it's being detached!
3115	 */
3116	error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
3117
3118	/*
3119	 * Remove vd from its parent and compact the parent's children.
3120	 */
3121	vdev_remove_child(pvd, vd);
3122	vdev_compact_children(pvd);
3123
3124	/*
3125	 * Remember one of the remaining children so we can get tvd below.
3126	 */
3127	cvd = pvd->vdev_child[0];
3128
3129	/*
3130	 * If we need to remove the remaining child from the list of hot spares,
3131	 * do it now, marking the vdev as no longer a spare in the process.  We
3132	 * must do this before vdev_remove_parent(), because that can change the
3133	 * GUID if it creates a new toplevel GUID.
3134	 */
3135	if (unspare) {
3136		ASSERT(cvd->vdev_isspare);
3137		spa_spare_remove(cvd);
3138		unspare_guid = cvd->vdev_guid;
3139	}
3140
3141	/*
3142	 * If the parent mirror/replacing vdev only has one child,
3143	 * the parent is no longer needed.  Remove it from the tree.
3144	 */
3145	if (pvd->vdev_children == 1)
3146		vdev_remove_parent(cvd);
3147
3148	/*
3149	 * We don't set tvd until now because the parent we just removed
3150	 * may have been the previous top-level vdev.
3151	 */
3152	tvd = cvd->vdev_top;
3153	ASSERT(tvd->vdev_parent == rvd);
3154
3155	/*
3156	 * Reevaluate the parent vdev state.
3157	 */
3158	vdev_propagate_state(cvd);
3159
3160	/*
3161	 * If the device we just detached was smaller than the others, it may be
3162	 * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
3163	 * can't fail because the existing metaslabs are already in core, so
3164	 * there's nothing to read from disk.
3165	 */
3166	VERIFY(vdev_metaslab_init(tvd, txg) == 0);
3167
3168	vdev_config_dirty(tvd);
3169
3170	/*
3171	 * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
3172	 * vd->vdev_detached is set and free vd's DTL object in syncing context.
3173	 * But first make sure we're not on any *other* txg's DTL list, to
3174	 * prevent vd from being accessed after it's freed.
3175	 */
3176	for (t = 0; t < TXG_SIZE; t++)
3177		(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
3178	vd->vdev_detached = B_TRUE;
3179	vdev_dirty(tvd, VDD_DTL, vd, txg);
3180
3181	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
3182
3183	error = spa_vdev_exit(spa, vd, txg, 0);
3184
3185	/*
3186	 * If this was the removal of the original device in a hot spare vdev,
3187	 * then we want to go through and remove the device from the hot spare
3188	 * list of every other pool.
3189	 */
3190	if (unspare) {
3191		spa = NULL;
3192		mutex_enter(&spa_namespace_lock);
3193		while ((spa = spa_next(spa)) != NULL) {
3194			if (spa->spa_state != POOL_STATE_ACTIVE)
3195				continue;
3196			spa_open_ref(spa, FTAG);
3197			mutex_exit(&spa_namespace_lock);
3198			(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3199			mutex_enter(&spa_namespace_lock);
3200			spa_close(spa, FTAG);
3201		}
3202		mutex_exit(&spa_namespace_lock);
3203	}
3204
3205	return (error);
3206}
3207
3208static nvlist_t *
3209spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
3210{
3211	for (int i = 0; i < count; i++) {
3212		uint64_t guid;
3213
3214		VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3215		    &guid) == 0);
3216
3217		if (guid == target_guid)
3218			return (nvpp[i]);
3219	}
3220
3221	return (NULL);
3222}
3223
3224static void
3225spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3226	nvlist_t *dev_to_remove)
3227{
3228	nvlist_t **newdev = NULL;
3229
3230	if (count > 1)
3231		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3232
3233	for (int i = 0, j = 0; i < count; i++) {
3234		if (dev[i] == dev_to_remove)
3235			continue;
3236		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3237	}
3238
3239	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3240	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3241
3242	for (int i = 0; i < count - 1; i++)
3243		nvlist_free(newdev[i]);
3244
3245	if (count > 1)
3246		kmem_free(newdev, (count - 1) * sizeof (void *));
3247}
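
/*
 * Worked example: removing one of three spares copies the two survivors
 * into newdev[] and swaps the named array in 'config' for a two-entry
 * array.  When count == 1, newdev stays NULL and the array is replaced
 * with a zero-length one, leaving the device list empty.
 */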
3248
3249/*
3250 * Remove a device from the pool.  Currently, this supports removing only hot
3251 * spares and level 2 ARC devices.
3252 */
3253int
3254spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3255{
3256	vdev_t *vd;
3257	nvlist_t **spares, **l2cache, *nv;
3258	uint_t nspares, nl2cache;
3259	uint64_t txg;
3260	int error = 0;
3261
3262	txg = spa_vdev_enter(spa);
3263
3264	vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3265
3266	if (spa->spa_spares.sav_vdevs != NULL &&
3267	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3268	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
3269	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
3270		/*
3271		 * Only remove the hot spare if it's not currently in use
3272		 * in this pool.
3273		 */
3274		if (vd == NULL || unspare) {
3275			spa_vdev_remove_aux(spa->spa_spares.sav_config,
3276			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
3277			spa_load_spares(spa);
3278			spa->spa_spares.sav_sync = B_TRUE;
3279		} else {
3280			error = EBUSY;
3281		}
3282	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
3283	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3284	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
3285	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
3286		/*
3287		 * Cache devices can always be removed.
3288		 */
3289		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
3290		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
3291		spa_load_l2cache(spa);
3292		spa->spa_l2cache.sav_sync = B_TRUE;
3293	} else if (vd != NULL) {
3294		/*
3295		 * Normal vdevs cannot be removed (yet).
3296		 */
3297		error = ENOTSUP;
3298	} else {
3299		/*
3300		 * There is no vdev of any kind with the specified guid.
3301		 */
3302		error = ENOENT;
3303	}
3304
3305	return (spa_vdev_exit(spa, NULL, txg, error));
3306}
3307
3308/*
3309 * Find any device that's done replacing, or a vdev marked 'unspare' that's
3310 * currently spared, so we can detach it.
3311 */
3312static vdev_t *
3313spa_vdev_resilver_done_hunt(vdev_t *vd)
3314{
3315	vdev_t *newvd, *oldvd;
3316	int c;
3317
3318	for (c = 0; c < vd->vdev_children; c++) {
3319		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3320		if (oldvd != NULL)
3321			return (oldvd);
3322	}
3323
3324	/*
3325	 * Check for a completed replacement.
3326	 */
3327	if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3328		oldvd = vd->vdev_child[0];
3329		newvd = vd->vdev_child[1];
3330
3331		mutex_enter(&newvd->vdev_dtl_lock);
3332		if (newvd->vdev_dtl_map.sm_space == 0 &&
3333		    newvd->vdev_dtl_scrub.sm_space == 0) {
3334			mutex_exit(&newvd->vdev_dtl_lock);
3335			return (oldvd);
3336		}
3337		mutex_exit(&newvd->vdev_dtl_lock);
3338	}
3339
3340	/*
3341	 * Check for a completed resilver with the 'unspare' flag set.
3342	 */
3343	if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
3344		newvd = vd->vdev_child[0];
3345		oldvd = vd->vdev_child[1];
3346
3347		mutex_enter(&newvd->vdev_dtl_lock);
3348		if (newvd->vdev_unspare &&
3349		    newvd->vdev_dtl_map.sm_space == 0 &&
3350		    newvd->vdev_dtl_scrub.sm_space == 0) {
3351			newvd->vdev_unspare = 0;
3352			mutex_exit(&newvd->vdev_dtl_lock);
3353			return (oldvd);
3354		}
3355		mutex_exit(&newvd->vdev_dtl_lock);
3356	}
3357
3358	return (NULL);
3359}
3360
3361static void
3362spa_vdev_resilver_done(spa_t *spa)
3363{
3364	vdev_t *vd;
3365	vdev_t *pvd;
3366	uint64_t guid;
3367	uint64_t pguid = 0;
3368
3369	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3370
3371	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3372		guid = vd->vdev_guid;
3373		/*
3374		 * If we have just finished replacing a hot spared device, then
3375		 * we need to detach the parent's first child (the original hot
3376		 * spare) as well.
3377		 */
3378		pvd = vd->vdev_parent;
3379		if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3380		    pvd->vdev_id == 0) {
3381			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
3382			ASSERT(pvd->vdev_parent->vdev_children == 2);
3383			pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
3384		}
3385		spa_config_exit(spa, SCL_CONFIG, FTAG);
3386		if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
3387			return;
3388		if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
3389			return;
3390		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3391	}
3392
3393	spa_config_exit(spa, SCL_CONFIG, FTAG);
3394}
3395
3396/*
3397 * Update the stored path for this vdev.  Dirty the vdev configuration, relying
3398 * on spa_vdev_enter/exit() to synchronize the labels and cache.
3399 */
3400int
3401spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3402{
3403	vdev_t *vd;
3404	uint64_t txg;
3405
3406	txg = spa_vdev_enter(spa);
3407
3408	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) {
3409		/*
3410		 * Determine if this is a reference to a hot spare device.  If
3411		 * it is, update the path manually as there is no associated
3412		 * vdev_t that can be synced to disk.
3413		 */
3414		nvlist_t **spares;
3415		uint_t i, nspares;
3416
3417		if (spa->spa_spares.sav_config != NULL) {
3418			VERIFY(nvlist_lookup_nvlist_array(
3419			    spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
3420			    &spares, &nspares) == 0);
3421			for (i = 0; i < nspares; i++) {
3422				uint64_t theguid;
3423				VERIFY(nvlist_lookup_uint64(spares[i],
3424				    ZPOOL_CONFIG_GUID, &theguid) == 0);
3425				if (theguid == guid) {
3426					VERIFY(nvlist_add_string(spares[i],
3427					    ZPOOL_CONFIG_PATH, newpath) == 0);
3428					spa_load_spares(spa);
3429					spa->spa_spares.sav_sync = B_TRUE;
3430					return (spa_vdev_exit(spa, NULL, txg,
3431					    0));
3432				}
3433			}
3434		}
3435
3436		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
3437	}
3438
3439	if (!vd->vdev_ops->vdev_op_leaf)
3440		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3441
3442	spa_strfree(vd->vdev_path);
3443	vd->vdev_path = spa_strdup(newpath);
3444
3445	vdev_config_dirty(vd->vdev_top);
3446
3447	return (spa_vdev_exit(spa, NULL, txg, 0));
3448}
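
/*
 * Hypothetical usage: after a disk has been recabled, a caller might do
 *
 *	(void) spa_vdev_setpath(spa, guid, "/dev/dsk/c3t0d0s0");
 *
 * and the new path reaches the labels via the config sync triggered by
 * spa_vdev_exit().
 */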
3449
3450/*
3451 * ==========================================================================
3452 * SPA Scrubbing
3453 * ==========================================================================
3454 */
3455
3456int
3457spa_scrub(spa_t *spa, pool_scrub_type_t type)
3458{
3459	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3460
3461	if ((uint_t)type >= POOL_SCRUB_TYPES)
3462		return (ENOTSUP);
3463
3464	/*
3465	 * If a resilver was requested, but there is no DTL on a
3466	 * writeable leaf device, we have nothing to do.
3467	 */
3468	if (type == POOL_SCRUB_RESILVER &&
3469	    !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3470		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3471		return (0);
3472	}
3473
3474	if (type == POOL_SCRUB_EVERYTHING &&
3475	    spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3476	    spa->spa_dsl_pool->dp_scrub_isresilver)
3477		return (EBUSY);
3478
3479	if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3480		return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3481	} else if (type == POOL_SCRUB_NONE) {
3482		return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
3483	} else {
3484		return (EINVAL);
3485	}
3486}
3487
3488/*
3489 * ==========================================================================
3490 * SPA async task processing
3491 * ==========================================================================
3492 */
3493
3494static void
3495spa_async_remove(spa_t *spa, vdev_t *vd)
3496{
3497	if (vd->vdev_remove_wanted) {
3498		vd->vdev_remove_wanted = 0;
3499		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
3500		vdev_clear(spa, vd);
3501		vdev_state_dirty(vd->vdev_top);
3502	}
3503
3504	for (int c = 0; c < vd->vdev_children; c++)
3505		spa_async_remove(spa, vd->vdev_child[c]);
3506}
3507
3508static void
3509spa_async_probe(spa_t *spa, vdev_t *vd)
3510{
3511	if (vd->vdev_probe_wanted) {
3512		vd->vdev_probe_wanted = 0;
3513		vdev_reopen(vd);	/* vdev_open() does the actual probe */
3514	}
3515
3516	for (int c = 0; c < vd->vdev_children; c++)
3517		spa_async_probe(spa, vd->vdev_child[c]);
3518}
3519
3520static void
3521spa_async_thread(spa_t *spa)
3522{
3523	int tasks;
3524
3525	ASSERT(spa->spa_sync_on);
3526
3527	mutex_enter(&spa->spa_async_lock);
3528	tasks = spa->spa_async_tasks;
3529	spa->spa_async_tasks = 0;
3530	mutex_exit(&spa->spa_async_lock);
3531
3532	/*
3533	 * See if the config needs to be updated.
3534	 */
3535	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
3536		mutex_enter(&spa_namespace_lock);
3537		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3538		mutex_exit(&spa_namespace_lock);
3539	}
3540
3541	/*
3542	 * See if any devices need to be marked REMOVED.
3543	 */
3544	if (tasks & SPA_ASYNC_REMOVE) {
3545		spa_vdev_state_enter(spa);
3546		spa_async_remove(spa, spa->spa_root_vdev);
3547		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
3548			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
3549		for (int i = 0; i < spa->spa_spares.sav_count; i++)
3550			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
3551		(void) spa_vdev_state_exit(spa, NULL, 0);
3552	}
3553
3554	/*
3555	 * See if any devices need to be probed.
3556	 */
3557	if (tasks & SPA_ASYNC_PROBE) {
3558		spa_vdev_state_enter(spa);
3559		spa_async_probe(spa, spa->spa_root_vdev);
3560		(void) spa_vdev_state_exit(spa, NULL, 0);
3561	}
3562
3563	/*
3564	 * If any devices are done replacing, detach them.
3565	 */
3566	if (tasks & SPA_ASYNC_RESILVER_DONE)
3567		spa_vdev_resilver_done(spa);
3568
3569	/*
3570	 * Kick off a resilver.
3571	 */
3572	if (tasks & SPA_ASYNC_RESILVER)
3573		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);
3574
3575	/*
3576	 * Let the world know that we're done.
3577	 */
3578	mutex_enter(&spa->spa_async_lock);
3579	spa->spa_async_thread = NULL;
3580	cv_broadcast(&spa->spa_async_cv);
3581	mutex_exit(&spa->spa_async_lock);
3582	thread_exit();
3583}
3584
3585void
3586spa_async_suspend(spa_t *spa)
3587{
3588	mutex_enter(&spa->spa_async_lock);
3589	spa->spa_async_suspended++;
3590	while (spa->spa_async_thread != NULL)
3591		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
3592	mutex_exit(&spa->spa_async_lock);
3593}
3594
3595void
3596spa_async_resume(spa_t *spa)
3597{
3598	mutex_enter(&spa->spa_async_lock);
3599	ASSERT(spa->spa_async_suspended != 0);
3600	spa->spa_async_suspended--;
3601	mutex_exit(&spa->spa_async_lock);
3602}
3603
3604static void
3605spa_async_dispatch(spa_t *spa)
3606{
3607	mutex_enter(&spa->spa_async_lock);
3608	if (spa->spa_async_tasks && !spa->spa_async_suspended &&
3609	    spa->spa_async_thread == NULL &&
3610	    rootdir != NULL && !vn_is_readonly(rootdir))
3611		spa->spa_async_thread = thread_create(NULL, 0,
3612		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
3613	mutex_exit(&spa->spa_async_lock);
3614}
3615
3616void
3617spa_async_request(spa_t *spa, int task)
3618{
3619	mutex_enter(&spa->spa_async_lock);
3620	spa->spa_async_tasks |= task;
3621	mutex_exit(&spa->spa_async_lock);
3622}
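
/*
 * Sketch of the async task model implemented above: producers set task
 * bits with spa_async_request(); spa_async_dispatch() (assumed to be
 * called from the sync path) spawns spa_async_thread() to drain them;
 * spa_async_suspend()/spa_async_resume() let operations such as export
 * fence out the worker.  For example, spa_sync_props() requests
 * SPA_ASYNC_CONFIG_UPDATE when 'cachefile' changes.
 */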
3623
3624/*
3625 * ==========================================================================
3626 * SPA syncing routines
3627 * ==========================================================================
3628 */
3629
3630static void
3631spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
3632{
3633	bplist_t *bpl = &spa->spa_sync_bplist;
3634	dmu_tx_t *tx;
3635	blkptr_t blk;
3636	uint64_t itor = 0;
3637	zio_t *zio;
3638	int error;
3639	uint8_t c = 1;
3640
3641	zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
3642
3643	while (bplist_iterate(bpl, &itor, &blk) == 0) {
3644		ASSERT(blk.blk_birth < txg);
3645		zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL,
3646		    ZIO_FLAG_MUSTSUCCEED));
3647	}
3648
3649	error = zio_wait(zio);
3650	ASSERT3U(error, ==, 0);
3651
3652	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3653	bplist_vacate(bpl, tx);
3654
3655	/*
3656	 * Pre-dirty the first block so we sync to convergence faster.
3657	 * (Usually only the first block is needed.)
3658	 */
3659	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
3660	dmu_tx_commit(tx);
3661}
3662
3663static void
3664spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3665{
3666	char *packed = NULL;
3667	size_t bufsize;
3668	size_t nvsize = 0;
3669	dmu_buf_t *db;
3670
3671	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3672
3673	/*
3674	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
3675	 * information.  This avoids the dbuf_will_dirty() path and
3676	 * saves us a pre-read to get data we don't actually care about.
3677	 */
3678	bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
3679	packed = kmem_alloc(bufsize, KM_SLEEP);
3680
3681	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3682	    KM_SLEEP) == 0);
3683	bzero(packed + nvsize, bufsize - nvsize);
3684
3685	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
3686
3687	kmem_free(packed, bufsize);
3688
3689	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3690	dmu_buf_will_dirty(db, tx);
3691	*(uint64_t *)db->db_data = nvsize;
3692	dmu_buf_rele(db, FTAG);
3693}
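
/*
 * On-disk layout produced above: the object's data holds the XDR-packed
 * nvlist, zero-padded out to a multiple of SPA_CONFIG_BLOCKSIZE, while
 * the 8-byte bonus buffer records the unpadded size so a reader knows
 * how many bytes to unpack.
 */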
3694
3695static void
3696spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3697    const char *config, const char *entry)
3698{
3699	nvlist_t *nvroot;
3700	nvlist_t **list;
3701	int i;
3702
3703	if (!sav->sav_sync)
3704		return;
3705
3706	/*
3707	 * Update the MOS nvlist describing the list of available devices.
3708	 * spa_validate_aux() will have already made sure this nvlist is
3709	 * valid and the vdevs are labeled appropriately.
3710	 */
3711	if (sav->sav_object == 0) {
3712		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3713		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3714		    sizeof (uint64_t), tx);
3715		VERIFY(zap_update(spa->spa_meta_objset,
3716		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3717		    &sav->sav_object, tx) == 0);
3718	}
3719
3720	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3721	if (sav->sav_count == 0) {
3722		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
3723	} else {
3724		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3725		for (i = 0; i < sav->sav_count; i++)
3726			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
3727			    B_FALSE, B_FALSE, B_TRUE);
3728		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
3729		    sav->sav_count) == 0);
3730		for (i = 0; i < sav->sav_count; i++)
3731			nvlist_free(list[i]);
3732		kmem_free(list, sav->sav_count * sizeof (void *));
3733	}
3734
3735	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
3736	nvlist_free(nvroot);
3737
3738	sav->sav_sync = B_FALSE;
3739}
3740
3741static void
3742spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
3743{
3744	nvlist_t *config;
3745
3746	if (list_is_empty(&spa->spa_config_dirty_list))
3747		return;
3748
3749	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3750
3751	config = spa_config_generate(spa, spa->spa_root_vdev,
3752	    dmu_tx_get_txg(tx), B_FALSE);
3753
3754	spa_config_exit(spa, SCL_STATE, FTAG);
3755
3756	if (spa->spa_config_syncing)
3757		nvlist_free(spa->spa_config_syncing);
3758	spa->spa_config_syncing = config;
3759
3760	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
3761}
3762
3763/*
3764 * Set zpool properties.
3765 */
3766static void
3767spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3768{
3769	spa_t *spa = arg1;
3770	objset_t *mos = spa->spa_meta_objset;
3771	nvlist_t *nvp = arg2;
3772	nvpair_t *elem;
3773	uint64_t intval;
3774	char *strval;
3775	zpool_prop_t prop;
3776	const char *propname;
3777	zprop_type_t proptype;
3778	spa_config_dirent_t *dp;
3779
3780	mutex_enter(&spa->spa_props_lock);
3781
3782	elem = NULL;
3783	while ((elem = nvlist_next_nvpair(nvp, elem))) {
3784		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
3785		case ZPOOL_PROP_VERSION:
3786			/*
3787			 * Only set version for non-zpool-creation cases
3788			 * (set/import). spa_create() needs special care
3789			 * for version setting.
3790			 */
3791			if (tx->tx_txg != TXG_INITIAL) {
3792				VERIFY(nvpair_value_uint64(elem,
3793				    &intval) == 0);
3794				ASSERT(intval <= SPA_VERSION);
3795				ASSERT(intval >= spa_version(spa));
3796				spa->spa_uberblock.ub_version = intval;
3797				vdev_config_dirty(spa->spa_root_vdev);
3798			}
3799			break;
3800
3801		case ZPOOL_PROP_ALTROOT:
3802			/*
3803			 * 'altroot' is a non-persistent property. It should
3804			 * have been set temporarily at creation or import time.
3805			 */
3806			ASSERT(spa->spa_root != NULL);
3807			break;
3808
3809		case ZPOOL_PROP_CACHEFILE:
3810			/*
3811			 * 'cachefile' is a non-persistent property, but we
3812			 * post an async request so that the config cache
3813			 * will be updated.
3814			 */
3815			VERIFY(nvpair_value_string(elem, &strval) == 0);
3816
3817			dp = kmem_alloc(sizeof (spa_config_dirent_t), KM_SLEEP);
3818
3819			if (strval[0] == '\0')
3820				dp->scd_path = spa_strdup(spa_config_path);
3821			else if (strcmp(strval, "none") == 0)
3822				dp->scd_path = NULL;
3823			else
3824				dp->scd_path = spa_strdup(strval);
3825
3826			list_insert_head(&spa->spa_config_list, dp);
3827			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
3828			break;
3829		default:
3830			/*
3831			 * Set pool property values in the poolprops mos object.
3832			 */
3833			if (spa->spa_pool_props_object == 0) {
3836				VERIFY((spa->spa_pool_props_object =
3837				    zap_create(mos, DMU_OT_POOL_PROPS,
3838				    DMU_OT_NONE, 0, tx)) > 0);
3839
3840				VERIFY(zap_update(mos,
3841				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
3842				    8, 1, &spa->spa_pool_props_object, tx)
3843				    == 0);
3844			}
3845
3846			/* normalize the property name */
3847			propname = zpool_prop_to_name(prop);
3848			proptype = zpool_prop_get_type(prop);
3849
3850			if (nvpair_type(elem) == DATA_TYPE_STRING) {
3851				ASSERT(proptype == PROP_TYPE_STRING);
3852				VERIFY(nvpair_value_string(elem, &strval) == 0);
3853				VERIFY(zap_update(mos,
3854				    spa->spa_pool_props_object, propname,
3855				    1, strlen(strval) + 1, strval, tx) == 0);
3856
3857			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
3858				VERIFY(nvpair_value_uint64(elem, &intval) == 0);
3859
3860				if (proptype == PROP_TYPE_INDEX) {
3861					const char *unused;
3862					VERIFY(zpool_prop_index_to_string(
3863					    prop, intval, &unused) == 0);
3864				}
3865				VERIFY(zap_update(mos,
3866				    spa->spa_pool_props_object, propname,
3867				    8, 1, &intval, tx) == 0);
3868			} else {
3869				ASSERT(0); /* not allowed */
3870			}
3871
3872			switch (prop) {
3873			case ZPOOL_PROP_DELEGATION:
3874				spa->spa_delegation = intval;
3875				break;
3876			case ZPOOL_PROP_BOOTFS:
3877				spa->spa_bootfs = intval;
3878				break;
3879			case ZPOOL_PROP_FAILUREMODE:
3880				spa->spa_failmode = intval;
3881				break;
3882			default:
3883				break;
3884			}
3885		}
3886
3887		/* log internal history if this is not a zpool create */
3888		if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
3889		    tx->tx_txg != TXG_INITIAL) {
3890			spa_history_internal_log(LOG_POOL_PROPSET,
3891			    spa, tx, cr, "%s %lld %s",
3892			    nvpair_name(elem), intval, spa_name(spa));
3893		}
3894	}
3895
3896	mutex_exit(&spa->spa_props_lock);
3897}
3898
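/*
 * Editor's sketch (not original code): the nvlist handed to
 * spa_sync_props() maps property names to bare values.  For example, a
 * caller could request failmode=continue like this, handing the list to
 * spa_prop_set():
 */
#if 0
	nvlist_t *props;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(props,
	    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
	    ZIO_FAILURE_MODE_CONTINUE) == 0);
	(void) spa_prop_set(spa, props);
	nvlist_free(props);
#endif
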
3899/*
3900 * Sync the specified transaction group.  New blocks may be dirtied as
3901 * part of the process, so we iterate until it converges.
3902 */
3903void
3904spa_sync(spa_t *spa, uint64_t txg)
3905{
3906	dsl_pool_t *dp = spa->spa_dsl_pool;
3907	objset_t *mos = spa->spa_meta_objset;
3908	bplist_t *bpl = &spa->spa_sync_bplist;
3909	vdev_t *rvd = spa->spa_root_vdev;
3910	vdev_t *vd;
3911	dmu_tx_t *tx;
3912	int dirty_vdevs;
3913	int error;
3914
3915	/*
3916	 * Lock out configuration changes.
3917	 */
3918	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3919
3920	spa->spa_syncing_txg = txg;
3921	spa->spa_sync_pass = 0;
3922
3923	/*
3924	 * If there are any pending vdev state changes, convert them
3925	 * into config changes that go out with this transaction group.
3926	 */
3927	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3928	while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
3929		vdev_state_clean(vd);
3930		vdev_config_dirty(vd);
3931	}
3932	spa_config_exit(spa, SCL_STATE, FTAG);
3933
3934	VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
3935
3936	tx = dmu_tx_create_assigned(dp, txg);
3937
3938	/*
3939	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
3940	 * set spa_deflate if we have no raid-z vdevs.
3941	 */
3942	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
3943	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
3944		int i;
3945
3946		for (i = 0; i < rvd->vdev_children; i++) {
3947			vd = rvd->vdev_child[i];
3948			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
3949				break;
3950		}
3951		if (i == rvd->vdev_children) {
3952			spa->spa_deflate = TRUE;
3953			VERIFY(0 == zap_add(spa->spa_meta_objset,
3954			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
3955			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
3956		}
3957	}
3958
3959	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
3960	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
3961		dsl_pool_create_origin(dp, tx);
3962
3963		/* Keeping the origin open increases spa_minref */
3964		spa->spa_minref += 3;
3965	}
3966
3967	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
3968	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
3969		dsl_pool_upgrade_clones(dp, tx);
3970	}
3971
3972	/*
3973	 * If anything has changed in this txg, push the deferred frees
3974	 * from the previous txg.  If not, leave them alone so that we
3975	 * don't generate work on an otherwise idle system.
3976	 */
3977	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
3978	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
3979	    !txg_list_empty(&dp->dp_sync_tasks, txg))
3980		spa_sync_deferred_frees(spa, txg);
3981
3982	/*
3983	 * Iterate to convergence.
3984	 */
3985	do {
3986		spa->spa_sync_pass++;
3987
3988		spa_sync_config_object(spa, tx);
3989		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
3990		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
3991		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
3992		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
3993		spa_errlog_sync(spa, txg);
3994		dsl_pool_sync(dp, txg);
3995
3996		dirty_vdevs = 0;
3997		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
		    txg)) != NULL) {
3998			vdev_sync(vd, txg);
3999			dirty_vdevs++;
4000		}
4001
4002		bplist_sync(bpl, tx);
4003	} while (dirty_vdevs);
4004
4005	bplist_close(bpl);
4006
4007	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
4008
4009	/*
4010	 * Rewrite the vdev configuration (which includes the uberblock)
4011	 * to commit the transaction group.
4012	 *
4013	 * If there are no dirty vdevs, we sync the uberblock to a few
4014	 * random top-level vdevs that are known to be visible in the
4015	 * config cache (see spa_vdev_add() for a complete description).
4016	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
4017	 */
4018	for (;;) {
4019		/*
4020		 * We hold SCL_STATE to prevent vdev open/close/etc.
4021		 * while we're attempting to write the vdev labels.
4022		 */
4023		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4024
4025		if (list_is_empty(&spa->spa_config_dirty_list)) {
4026			vdev_t *svd[SPA_DVAS_PER_BP];
4027			int svdcount = 0;
4028			int children = rvd->vdev_children;
4029			int c0 = spa_get_random(children);
4030			int c;
4031
4032			for (c = 0; c < children; c++) {
4033				vd = rvd->vdev_child[(c0 + c) % children];
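				/* Skip vdevs with no metaslab array and log devices. */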
4034				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
4035					continue;
4036				svd[svdcount++] = vd;
4037				if (svdcount == SPA_DVAS_PER_BP)
4038					break;
4039			}
4040			error = vdev_config_sync(svd, svdcount, txg);
4041		} else {
4042			error = vdev_config_sync(rvd->vdev_child,
4043			    rvd->vdev_children, txg);
4044		}
4045
4046		spa_config_exit(spa, SCL_STATE, FTAG);
4047
4048		if (error == 0)
4049			break;
4050		zio_suspend(spa, NULL);
4051		zio_resume_wait(spa);
4052	}
4053	dmu_tx_commit(tx);
4054
4055	/*
4056	 * Clear the dirty config list.
4057	 */
4058	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
4059		vdev_config_clean(vd);
4060
4061	/*
4062	 * Now that the new config has synced transactionally,
4063	 * let it become visible to the config cache.
4064	 */
4065	if (spa->spa_config_syncing != NULL) {
4066		spa_config_set(spa, spa->spa_config_syncing);
4067		spa->spa_config_txg = txg;
4068		spa->spa_config_syncing = NULL;
4069	}
4070
4071	spa->spa_ubsync = spa->spa_uberblock;
4072
4073	/*
4074	 * Clean up the ZIL records for the synced txg.
4075	 */
4076	dsl_pool_zil_clean(dp);
4077
4078	/*
4079	 * Update usable space statistics.
4080	 */
4081	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list,
	    TXG_CLEAN(txg))) != NULL)
4082		vdev_sync_done(vd, txg);
4083
4084	/*
4085	 * It had better be the case that we didn't dirty anything
4086	 * since vdev_config_sync().
4087	 */
4088	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4089	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4090	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4091	ASSERT(bpl->bpl_queue == NULL);
4092
4093	spa_config_exit(spa, SCL_CONFIG, FTAG);
4094
4095	/*
4096	 * If any async tasks have been requested, kick them off.
4097	 */
4098	spa_async_dispatch(spa);
4099}
4100
4101/*
4102 * Sync all pools.  We don't want to hold the namespace lock across these
4103 * operations, so we take a reference on the spa_t and drop the lock during the
4104 * sync.
4105 */
4106void
4107spa_sync_allpools(void)
4108{
4109	spa_t *spa = NULL;
4110	mutex_enter(&spa_namespace_lock);
4111	while ((spa = spa_next(spa)) != NULL) {
4112		if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
4113			continue;
4114		spa_open_ref(spa, FTAG);
4115		mutex_exit(&spa_namespace_lock);
4116		txg_wait_synced(spa_get_dsl(spa), 0);
4117		mutex_enter(&spa_namespace_lock);
4118		spa_close(spa, FTAG);
4119	}
4120	mutex_exit(&spa_namespace_lock);
4121}
4122
4123/*
4124 * ==========================================================================
4125 * Miscellaneous routines
4126 * ==========================================================================
4127 */
4128
4129/*
4130 * Remove all pools in the system.
4131 */
4132void
4133spa_evict_all(void)
4134{
4135	spa_t *spa;
4136
4137	/*
4138	 * Remove all cached state.  All pools should be closed now,
4139	 * so every spa in the AVL tree should be unreferenced.
4140	 */
4141	mutex_enter(&spa_namespace_lock);
4142	while ((spa = spa_next(NULL)) != NULL) {
4143		/*
4144		 * Stop async tasks.  The async thread may need to detach
4145		 * a device that's been replaced, which requires grabbing
4146		 * spa_namespace_lock, so we must drop it here.
4147		 */
4148		spa_open_ref(spa, FTAG);
4149		mutex_exit(&spa_namespace_lock);
4150		spa_async_suspend(spa);
4151		mutex_enter(&spa_namespace_lock);
4152		spa_close(spa, FTAG);
4153
4154		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4155			spa_unload(spa);
4156			spa_deactivate(spa);
4157		}
4158		spa_remove(spa);
4159	}
4160	mutex_exit(&spa_namespace_lock);
4161}
4162
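/*
 * Find the vdev with the given guid anywhere in the pool.  If 'l2cache'
 * is set, also search the cache devices, which live outside the root
 * vdev tree.
 */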
4163vdev_t *
4164spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
4165{
4166	vdev_t *vd;
4167	int i;
4168
4169	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4170		return (vd);
4171
4172	if (l2cache) {
4173		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4174			vd = spa->spa_l2cache.sav_vdevs[i];
4175			if (vd->vdev_guid == guid)
4176				return (vd);
4177		}
4178	}
4179
4180	return (NULL);
4181}
4182
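/*
 * Raise the pool's on-disk SPA version, dirty the root vdev config so
 * the new version is written out, and wait for it to sync.
 */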
4183void
4184spa_upgrade(spa_t *spa, uint64_t version)
4185{
4186	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4187
4188	/*
4189	 * This should only be called for a non-faulted pool, and since a
4190	 * pool with a future (unsupported) version could never have been
4191	 * opened, the current version can never exceed SPA_VERSION here.
4192	 */
4193	ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4194	ASSERT(version >= spa->spa_uberblock.ub_version);
4195
4196	spa->spa_uberblock.ub_version = version;
4197	vdev_config_dirty(spa->spa_root_vdev);
4198
4199	spa_config_exit(spa, SCL_ALL, FTAG);
4200
4201	txg_wait_synced(spa_get_dsl(spa), 0);
4202}
4203
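/*
 * Determine whether the given guid is one of this pool's spares,
 * including spares whose addition has not yet synced out.
 */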
4204boolean_t
4205spa_has_spare(spa_t *spa, uint64_t guid)
4206{
4207	int i;
4208	uint64_t spareguid;
4209	spa_aux_vdev_t *sav = &spa->spa_spares;
4210
4211	for (i = 0; i < sav->sav_count; i++)
4212		if (sav->sav_vdevs[i]->vdev_guid == guid)
4213			return (B_TRUE);
4214
4215	for (i = 0; i < sav->sav_npending; i++) {
4216		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4217		    &spareguid) == 0 && spareguid == guid)
4218			return (B_TRUE);
4219	}
4220
4221	return (B_FALSE);
4222}
4223
4224/*
4225 * Check if a pool has an active shared spare device.
4226 * Note: an active spare's reference count is 2 (as a spare and as a
 * replacing vdev), so a count above 2 means it is shared with another pool.
4227 */
4228static boolean_t
4229spa_has_active_shared_spare(spa_t *spa)
4230{
4231	int i, refcnt;
4232	uint64_t pool;
4233	spa_aux_vdev_t *sav = &spa->spa_spares;
4234
4235	for (i = 0; i < sav->sav_count; i++) {
4236		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
4237		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
4238		    refcnt > 2)
4239			return (B_TRUE);
4240	}
4241
4242	return (B_FALSE);
4243}
4244
4245/*
4246 * Post a sysevent corresponding to the given event.  The 'name' must be one of
4247 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
4248 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
4249 * in the userland libzpool, as we don't want consumers to misinterpret ztest
4250 * or zdb as real changes.
4251 */
4252void
4253spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
4254{
4255#ifdef _KERNEL
4256	sysevent_t		*ev;
4257	sysevent_attr_list_t	*attr = NULL;
4258	sysevent_value_t	value;
4259	sysevent_id_t		eid;
4260
4261	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
4262	    SE_SLEEP);
4263
4264	value.value_type = SE_DATA_TYPE_STRING;
4265	value.value.sv_string = spa_name(spa);
4266	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
4267		goto done;
4268
4269	value.value_type = SE_DATA_TYPE_UINT64;
4270	value.value.sv_uint64 = spa_guid(spa);
4271	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
4272		goto done;
4273
4274	if (vd) {
4275		value.value_type = SE_DATA_TYPE_UINT64;
4276		value.value.sv_uint64 = vd->vdev_guid;
4277		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
4278		    SE_SLEEP) != 0)
4279			goto done;
4280
4281		if (vd->vdev_path) {
4282			value.value_type = SE_DATA_TYPE_STRING;
4283			value.value.sv_string = vd->vdev_path;
4284			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
4285			    &value, SE_SLEEP) != 0)
4286				goto done;
4287		}
4288	}
4289
4290	if (sysevent_attach_attributes(ev, attr) != 0)
4291		goto done;
4292	attr = NULL;
4293
4294	(void) log_sysevent(ev, SE_SLEEP, &eid);
4295
4296done:
4297	if (attr)
4298		sysevent_free_attr(attr);
4299	sysevent_free(ev);
4300#endif
4301}
4302
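/*
 * Editor's note (not original code): a typical call posts one of the
 * EC_ZFS events, e.g.
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
 *
 * where ESC_ZFS_VDEV_REMOVE is assumed to be among the event
 * definitions in sys/sysevent/eventdefs.h.
 */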