/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>

int zio_taskq_threads = 8;

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}
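/*
 * Note on the comparator above: AVL comparators must return exactly -1, 0,
 * or +1, so the raw bcmp() result on the two bookmarks is normalized before
 * being handed back to the AVL code.
 */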
/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;

	spa->spa_normal_class = metaslab_class_create();

	for (t = 0; t < ZIO_TYPES; t++) {
		spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
		spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
		    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
		    TASKQ_PREPOPULATE);
	}

	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_config_lock.scl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&spa->spa_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
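/*
 * Note: the issue and interrupt taskqs created in spa_activate() are sized
 * by the zio_taskq_threads tunable above.  Illustrative only (the real
 * dispatch paths live in zio.c): a ZIO is handed to the queue matching its
 * type, roughly
 *
 *	(void) taskq_dispatch(spa->spa_zio_issue_taskq[ZIO_TYPE_WRITE],
 *	    func, arg, TQ_SLEEP);
 *
 * where 'func' and 'arg' stand in for the actual zio execution callback.
 */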
/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	int t;

	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_dirty_list);

	rw_destroy(&spa->spa_traverse_lock);

	for (t = 0; t < ZIO_TYPES; t++) {
		taskq_destroy(spa->spa_zio_issue_taskq[t]);
		taskq_destroy(spa->spa_zio_intr_taskq[t]);
		spa->spa_zio_issue_taskq[t] = NULL;
		spa->spa_zio_intr_taskq[t] = NULL;
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
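/*
 * Illustrative only: spa_config_parse() consumes an nvlist shaped like the
 * ZPOOL_CONFIG_VDEV_TREE element of a pool config, e.g.
 *
 *	type='root'
 *	    children[0]: type='mirror'
 *		children[0]: type='disk', path='/dev/dsk/c0t0d0s0'
 *		children[1]: type='disk', path='/dev/dsk/c0t1d0s0'
 *
 * Interior vdevs carry a ZPOOL_CONFIG_CHILDREN array; leaf vdevs do not,
 * which is what terminates the recursion.  The device paths above are
 * hypothetical.
 */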
/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding prefetch I/O to complete.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa_config_exit(spa, FTAG);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_nspares; i++)
		vdev_free(spa->spa_spares[i]);
	if (spa->spa_spares) {
		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
		spa->spa_spares = NULL;
	}
	if (spa->spa_sparelist) {
		nvlist_free(spa->spa_sparelist);
		spa->spa_sparelist = NULL;
	}

	spa->spa_async_suspended = 0;
}
/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_sparelist'.  We parse this into vdevs, try to open them, and then
 * re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_nspares; i++) {
		vd = spa->spa_spares[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL &&
		    tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares)
		kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));

	if (spa->spa_sparelist == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_nspares = (int)nspares;
	spa->spa_spares = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in).  During this phase we open and
	 * validate each vdev on the spare list.  If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
	for (i = 0; i < spa->spa_nspares; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		if (vdev_open(vd) != 0)
			continue;

		vd->vdev_top = vd;
		(void) vdev_validate_spare(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
	for (i = 0; i < spa->spa_nspares; i++)
		spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
		    B_TRUE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
	    spares, spa->spa_nspares) == 0);
	for (i = 0; i < spa->spa_nspares; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_nspares * sizeof (void *));
}
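/*
 * The net effect of spa_load_spares() is that each entry in spa_sparelist
 * carries current status: the regenerated nvlists produced by
 * vdev_config_generate() above (with both boolean arguments B_TRUE) fold in
 * the per-vdev stats and spare state that are later handed back to userland.
 */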
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
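/*
 * Illustrative use of load_nvlist(), mirroring the call in spa_load()
 * below; the caller owns the returned nvlist:
 *
 *	nvlist_t *newconfig;
 *
 *	if (load_nvlist(spa, spa->spa_config_object, &newconfig) == 0) {
 *		... consume newconfig ...
 *		nvlist_free(newconfig);
 *	}
 */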
/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	zio_t *zio;

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = ZFS_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree.  We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	if (vdev_open(rvd) != 0) {
		error = ENXIO;
		goto out;
	}
	/*
	 * Validate the labels for all leaf vdevs.  We need to grab the config
	 * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
	 * flag.
	 */
	spa_config_enter(spa, RW_READER, FTAG);
	error = vdev_validate(rvd);
	spa_config_exit(spa, FTAG);

	if (error != 0) {
		error = EBADF;
		goto out;
	}

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	bzero(ub, sizeof (uberblock_t));

	zio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	vdev_uberblock_load(zio, rvd, ub);
	error = zio_wait(zio);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > ZFS_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}
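	/*
	 * Note on the guid-sum check above: each vdev's vdev_guid_sum is
	 * the sum (mod 2^64) of its own guid and those of all its
	 * descendants, so comparing the root's sum against ub_guid_sum is
	 * a cheap way to detect a missing or extra device.  The mismatch
	 * is only treated as fatal once we're working from the trusted MOS
	 * config ('mosconfig' is true).
	 */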
	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation).  If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
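	/*
	 * The DMU_POOL_DEFLATE lookup above and the lookups that follow
	 * share a pattern: each of these MOS objects was introduced after
	 * pools already existed in the field, so ENOENT simply means an
	 * older pool without that feature, while any other error is
	 * treated as corruption.
	 */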
	/*
	 * Load the persistent error log.  If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object.  If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares_object,
		    &spa->spa_sparelist) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
	}

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZFS_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
	}
	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, FTAG);

	/*
	 * Check the state of the root vdev.  If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;
		int c;

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa->spa_name,
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}

	error = 0;
out:
	if (error && error != EBADF)
		zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;
	spa->spa_ena = 0;

	return (error);
}
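/*
 * Summary of the spa_load() failure modes above: EINVAL (unparsable config),
 * EEXIST (guid collision on import), ENXIO (vdevs or uberblock unavailable),
 * EBADF (labels say the pool was exported or destroyed), ENOTSUP (on-disk
 * version newer than this code), and EIO (MOS corruption).  EBADF is
 * special-cased by spa_open_common() below.
 */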
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache.  For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int loaded = B_FALSE;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again.  The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() fails with EBADF, one of the
			 * vdevs indicates that the pool has been exported or
			 * destroyed.  If this is the case, the config cache is
			 * out of sync and we should remove the pool from the
			 * namespace.
			 */
			zfs_post_ok(spa, NULL);
			spa_unload(spa);
			spa_deactivate(spa);
			spa_remove(spa);
			spa_config_sync();
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}
		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open().  Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL) {
				spa_config_enter(spa, RW_READER, FTAG);
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
				spa_config_exit(spa, FTAG);
			}
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		} else {
			zfs_post_ok(spa, NULL);
			spa->spa_last_open_failed = B_FALSE;
		}

		loaded = B_TRUE;
	}

	spa_open_ref(spa, tag);
	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL) {
		spa_config_enter(spa, RW_READER, FTAG);
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
		spa_config_exit(spa, FTAG);
	}

	/*
	 * If we just loaded the pool, resilver anything that's out of date.
	 */
	if (loaded && (spa_mode & FWRITE))
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}
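/*
 * Illustrative only: the canonical open/close pattern for consumers of
 * spa_open().  FTAG identifies the reference holder for debugging:
 *
 *	spa_t *spa;
 *	int error;
 *
 *	if ((error = spa_open("tank", &spa, FTAG)) != 0)
 *		return (error);
 *	... operate on the pool ...
 *	spa_close(spa, FTAG);
 */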
/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_nspares == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare.  If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool) && pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		spa_add_spares(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}
/*
 * Validate that the 'spares' array is well formed.  We must have an array of
 * nvlists, each of which describes a valid leaf vdev.  If this is an import
 * (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be specified,
 * as long as they are well-formed.
 */
static int
spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	nvlist_t **spares;
	uint_t i, nspares;
	vdev_t *vd;
	int error;

	/*
	 * It's acceptable to have no spares specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return (0);

	if (nspares == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports hot
	 * spares.
	 */
	if (spa_version(spa) < ZFS_VERSION_SPARES)
		return (ENOTSUP);

	/*
	 * Set the pending spare list so we correctly handle device in-use
	 * checking.
	 */
	spa->spa_pending_spares = spares;
	spa->spa_pending_nspares = nspares;

	for (i = 0; i < nspares; i++) {
		if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg,
		    VDEV_LABEL_SPARE)) == 0) {
			VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error && mode != VDEV_ALLOC_SPARE)
			goto out;
		else
			error = 0;
	}

out:
	spa->spa_pending_spares = NULL;
	spa->spa_pending_nspares = 0;
	return (error);
}
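/*
 * spa_validate_spares() is called from spa_create() and spa_import() in this
 * file, with modes VDEV_ALLOC_ADD and VDEV_ALLOC_SPARE respectively; as the
 * loop above shows, open/label failures are fatal in the former mode but
 * tolerated in the latter.
 */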
/*
 * Pool Creation
 */
int
spa_create(const char *pool, nvlist_t *nvroot, const char *altroot)
{
	spa_t *spa;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares;
	uint_t nspares;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	spa->spa_uberblock.ub_txg = txg - 1;
	spa->spa_uberblock.ub_version = ZFS_VERSION;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, RW_WRITER, FTAG);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && rvd->vdev_children == 0)
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_spares(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_sync_spares = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, 1 << 14,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools are always deflated. */
	spa->spa_deflate = TRUE;
	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add deflate");
	}
	/*
	 * Create the deferred-free bplist object.  Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	spa_history_create_obj(spa, tx);

	dmu_tx_commit(tx);

	spa->spa_bootfs = zfs_prop_default_numeric(ZFS_PROP_BOOTFS);
	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (0);
}
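/*
 * Illustrative only: a minimal caller of spa_create().  Building the vdev
 * tree nvlist is normally libzfs's job; the per-leaf nvlists are elided:
 *
 *	nvlist_t *nvroot;
 *	int error;
 *
 *	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	... add a ZPOOL_CONFIG_CHILDREN array describing the leaves ...
 *	error = spa_create("tank", nvroot, NULL);
 *	nvlist_free(nvroot);
 */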
/*
 * Import the given pool into the system.  We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
int
spa_import(const char *pool, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	int error;
	nvlist_t *nvroot;
	nvlist_t **spares;
	uint_t nspares;

	if (!(spa_mode & FWRITE))
		return (EROFS);

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Create and initialize the spa structure.
	 */
	spa = spa_add(pool, altroot);
	spa_activate(spa);

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig because the user-supplied config
	 * is actually the one to trust when doing an import.
	 */
	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);

	spa_config_enter(spa, RW_WRITER, FTAG);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity anymore,
	 * and conflicts with spa_has_spare().
	 */
	if (spa->spa_sparelist) {
		nvlist_free(spa->spa_sparelist);
		spa->spa_sparelist = NULL;
		spa_load_spares(spa);
	}

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_spares(spa, nvroot, -1ULL,
		    VDEV_ALLOC_SPARE);
	spa_config_exit(spa, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Override any spares as specified by the user, as these may have
	 * correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_sparelist)
			VERIFY(nvlist_remove(spa->spa_sparelist,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_sparelist,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, RW_WRITER, FTAG);
		spa_load_spares(spa);
		spa_config_exit(spa, FTAG);
		spa->spa_sync_spares = B_TRUE;
	}

	/*
	 * Update the config cache to include the newly-imported pool.
	 */
	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);

	mutex_exit(&spa_namespace_lock);

	/*
	 * Resilver anything that's out of date.
	 */
	if (spa_mode & FWRITE)
		VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}
1317fa9e4066Sahrens /*
1318fa9e4066Sahrens  * This (illegal) pool name is used when temporarily importing a spa_t in order
1319fa9e4066Sahrens  * to get the vdev stats associated with the imported devices.
1320fa9e4066Sahrens  */
1321fa9e4066Sahrens #define	TRYIMPORT_NAME	"$import"
1322fa9e4066Sahrens
1323fa9e4066Sahrens nvlist_t *
1324fa9e4066Sahrens spa_tryimport(nvlist_t *tryconfig)
1325fa9e4066Sahrens {
1326fa9e4066Sahrens nvlist_t *config = NULL;
1327fa9e4066Sahrens char *poolname;
1328fa9e4066Sahrens spa_t *spa;
1329fa9e4066Sahrens uint64_t state;
1330fa9e4066Sahrens
1331fa9e4066Sahrens if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
1332fa9e4066Sahrens return (NULL);
1333fa9e4066Sahrens
1334fa9e4066Sahrens if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
1335fa9e4066Sahrens return (NULL);
1336fa9e4066Sahrens
1337fa9e4066Sahrens /*
13380373e76bSbonwick  * Create and initialize the spa structure.
1339fa9e4066Sahrens  */
13400373e76bSbonwick mutex_enter(&spa_namespace_lock);
13410373e76bSbonwick spa = spa_add(TRYIMPORT_NAME, NULL);
1342fa9e4066Sahrens spa_activate(spa);
1343fa9e4066Sahrens
1344fa9e4066Sahrens /*
13450373e76bSbonwick  * Pass off the heavy lifting to spa_load().
1346ecc2d604Sbonwick  * Pass TRUE for mosconfig because the user-supplied config
1347ecc2d604Sbonwick  * is actually the one to trust when doing an import.
1348fa9e4066Sahrens  */
1349ecc2d604Sbonwick (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
1350fa9e4066Sahrens
1351fa9e4066Sahrens /*
1352fa9e4066Sahrens  * If 'tryconfig' was at least parsable, return the current config.
1353fa9e4066Sahrens  */
1354fa9e4066Sahrens if (spa->spa_root_vdev != NULL) {
13550373e76bSbonwick spa_config_enter(spa, RW_READER, FTAG);
1356fa9e4066Sahrens config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
13570373e76bSbonwick spa_config_exit(spa, FTAG);
1358fa9e4066Sahrens VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
1359fa9e4066Sahrens poolname) == 0);
1360fa9e4066Sahrens VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1361fa9e4066Sahrens state) == 0);
136299653d4eSeschrock
136399653d4eSeschrock /*
136499653d4eSeschrock  * Add the list of hot spares.
136599653d4eSeschrock  */
136699653d4eSeschrock spa_add_spares(spa, config);
1367fa9e4066Sahrens }
1368fa9e4066Sahrens
1369fa9e4066Sahrens spa_unload(spa);
1370fa9e4066Sahrens spa_deactivate(spa);
1371fa9e4066Sahrens spa_remove(spa);
1372fa9e4066Sahrens mutex_exit(&spa_namespace_lock);
1373fa9e4066Sahrens
1374fa9e4066Sahrens return (config);
1375fa9e4066Sahrens }
1376fa9e4066Sahrens
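/*
 * Illustrative sketch (hypothetical): a caller of spa_tryimport().  The
 * returned config is a freshly allocated nvlist that the caller owns and
 * must release with nvlist_free().
 */
#if 0
static void
example_tryimport(nvlist_t *tryconfig)
{
	nvlist_t *config = spa_tryimport(tryconfig);
	uint64_t state;

	if (config == NULL)
		return;		/* tryconfig wasn't even parsable */

	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0);
	/* ... examine the vdev tree and stats, then clean up ... */
	nvlist_free(config);
}
#endif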
1377fa9e4066Sahrens /*
1378fa9e4066Sahrens  * Pool export/destroy
1379fa9e4066Sahrens  *
1380fa9e4066Sahrens  * The act of destroying or exporting a pool is very simple.  We make sure there
1381fa9e4066Sahrens  * is no more pending I/O and any references to the pool are gone.  Then, we
1382fa9e4066Sahrens  * update the pool state and sync all the labels to disk, removing the
1383fa9e4066Sahrens  * configuration from the cache afterwards.
1384fa9e4066Sahrens  */
1385fa9e4066Sahrens static int
138644cd46caSbillm spa_export_common(char *pool, int new_state, nvlist_t **oldconfig)
1387fa9e4066Sahrens {
1388fa9e4066Sahrens spa_t *spa;
1389fa9e4066Sahrens
139044cd46caSbillm if (oldconfig)
139144cd46caSbillm *oldconfig = NULL;
139244cd46caSbillm
1393fa9e4066Sahrens if (!(spa_mode & FWRITE))
1394fa9e4066Sahrens return (EROFS);
1395fa9e4066Sahrens
1396fa9e4066Sahrens mutex_enter(&spa_namespace_lock);
1397fa9e4066Sahrens if ((spa = spa_lookup(pool)) == NULL) {
1398fa9e4066Sahrens mutex_exit(&spa_namespace_lock);
1399fa9e4066Sahrens return (ENOENT);
1400fa9e4066Sahrens }
1401fa9e4066Sahrens
1402ea8dc4b6Seschrock /*
1403ea8dc4b6Seschrock  * Put a hold on the pool, drop the namespace lock, stop async tasks,
1404ea8dc4b6Seschrock  * reacquire the namespace lock, and see if we can export.
1405ea8dc4b6Seschrock  */
1406ea8dc4b6Seschrock spa_open_ref(spa, FTAG);
1407ea8dc4b6Seschrock mutex_exit(&spa_namespace_lock);
1408ea8dc4b6Seschrock spa_async_suspend(spa);
1409ea8dc4b6Seschrock mutex_enter(&spa_namespace_lock);
1410ea8dc4b6Seschrock spa_close(spa, FTAG);
1411ea8dc4b6Seschrock
1412fa9e4066Sahrens /*
1413fa9e4066Sahrens  * The pool will be in core if it's openable,
1414fa9e4066Sahrens  * in which case we can modify its state.
1415fa9e4066Sahrens  */
1416fa9e4066Sahrens if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
1417fa9e4066Sahrens /*
1418fa9e4066Sahrens  * Objsets may be open only because they're dirty, so we
1419fa9e4066Sahrens  * have to force it to sync before checking spa_refcnt.
1420fa9e4066Sahrens  */
1421fa9e4066Sahrens spa_scrub_suspend(spa);
1422fa9e4066Sahrens txg_wait_synced(spa->spa_dsl_pool, 0);
1423fa9e4066Sahrens
1424ea8dc4b6Seschrock /*
1425ea8dc4b6Seschrock  * A pool cannot be exported or destroyed if there are active
1426ea8dc4b6Seschrock  * references.  If we are resetting a pool, allow references by
1427ea8dc4b6Seschrock  * fault injection handlers.
1428ea8dc4b6Seschrock  */
1429ea8dc4b6Seschrock if (!spa_refcount_zero(spa) ||
1430ea8dc4b6Seschrock (spa->spa_inject_ref != 0 &&
1431ea8dc4b6Seschrock new_state != POOL_STATE_UNINITIALIZED)) {
1432fa9e4066Sahrens spa_scrub_resume(spa);
1433ea8dc4b6Seschrock spa_async_resume(spa);
1434fa9e4066Sahrens mutex_exit(&spa_namespace_lock);
1435fa9e4066Sahrens return (EBUSY);
1436fa9e4066Sahrens }
1437fa9e4066Sahrens
1438fa9e4066Sahrens spa_scrub_resume(spa);
1439fa9e4066Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
1440fa9e4066Sahrens
1441fa9e4066Sahrens /*
1442fa9e4066Sahrens  * We want this to be reflected on every label,
1443fa9e4066Sahrens  * so mark them all dirty.  spa_unload() will do the
1444fa9e4066Sahrens  * final sync that pushes these changes out.
1445fa9e4066Sahrens  */
1446ea8dc4b6Seschrock if (new_state != POOL_STATE_UNINITIALIZED) {
14475dabedeeSbonwick spa_config_enter(spa, RW_WRITER, FTAG);
1448ea8dc4b6Seschrock spa->spa_state = new_state;
14490373e76bSbonwick spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
1450ea8dc4b6Seschrock vdev_config_dirty(spa->spa_root_vdev);
14515dabedeeSbonwick spa_config_exit(spa, FTAG);
1452ea8dc4b6Seschrock }
1453fa9e4066Sahrens }
1454fa9e4066Sahrens
1455fa9e4066Sahrens if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
1456fa9e4066Sahrens spa_unload(spa);
1457fa9e4066Sahrens spa_deactivate(spa);
1458fa9e4066Sahrens }
1459fa9e4066Sahrens
146044cd46caSbillm if (oldconfig && spa->spa_config)
146144cd46caSbillm VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
146244cd46caSbillm
1463ea8dc4b6Seschrock if (new_state != POOL_STATE_UNINITIALIZED) {
1464ea8dc4b6Seschrock spa_remove(spa);
1465ea8dc4b6Seschrock spa_config_sync();
1466ea8dc4b6Seschrock }
1467fa9e4066Sahrens mutex_exit(&spa_namespace_lock);
1468fa9e4066Sahrens
1469fa9e4066Sahrens return (0);
1470fa9e4066Sahrens }
1471fa9e4066Sahrens
1472fa9e4066Sahrens /*
1473fa9e4066Sahrens  * Destroy a storage pool.
1474fa9e4066Sahrens  */
1475fa9e4066Sahrens int
1476fa9e4066Sahrens spa_destroy(char *pool)
1477fa9e4066Sahrens {
147844cd46caSbillm return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL));
1479fa9e4066Sahrens }
1480fa9e4066Sahrens
1481fa9e4066Sahrens /*
1482fa9e4066Sahrens  * Export a storage pool.
1483fa9e4066Sahrens  */
1484fa9e4066Sahrens int
148544cd46caSbillm spa_export(char *pool, nvlist_t **oldconfig)
1486fa9e4066Sahrens {
148744cd46caSbillm return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig));
1488fa9e4066Sahrens }
1489fa9e4066Sahrens
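/*
 * Illustrative sketch (hypothetical): an export/re-import round trip.
 * spa_export() hands back the last known config through 'oldconfig',
 * which can later be passed to spa_import().
 */
#if 0
static int
example_export_reimport(char *pool)
{
	nvlist_t *oldconfig = NULL;
	int error;

	if ((error = spa_export(pool, &oldconfig)) != 0)
		return (error);		/* e.g. EBUSY if still referenced */

	error = spa_import(pool, oldconfig, NULL);
	nvlist_free(oldconfig);
	return (error);
}
#endif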
1490ea8dc4b6Seschrock /*
1491ea8dc4b6Seschrock  * Similar to spa_export(), this unloads the spa_t without actually removing it
1492ea8dc4b6Seschrock  * from the namespace in any way.
1493ea8dc4b6Seschrock  */
1494ea8dc4b6Seschrock int
1495ea8dc4b6Seschrock spa_reset(char *pool)
1496ea8dc4b6Seschrock {
149744cd46caSbillm return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL));
1498ea8dc4b6Seschrock }
1499ea8dc4b6Seschrock
1500ea8dc4b6Seschrock
1501fa9e4066Sahrens /*
1502fa9e4066Sahrens  * ==========================================================================
1503fa9e4066Sahrens  * Device manipulation
1504fa9e4066Sahrens  * ==========================================================================
1505fa9e4066Sahrens  */
1506fa9e4066Sahrens
1507fa9e4066Sahrens /*
1508fa9e4066Sahrens  * Add capacity to a storage pool.
1509fa9e4066Sahrens  */
1510fa9e4066Sahrens int
1511fa9e4066Sahrens spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
1512fa9e4066Sahrens {
1513fa9e4066Sahrens uint64_t txg;
15140373e76bSbonwick int c, error;
1515fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev;
15160e34b6a7Sbonwick vdev_t *vd, *tvd;
151799653d4eSeschrock nvlist_t **spares;
151899653d4eSeschrock uint_t i, nspares;
1519fa9e4066Sahrens
1520fa9e4066Sahrens txg = spa_vdev_enter(spa);
1521fa9e4066Sahrens
152299653d4eSeschrock if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
152399653d4eSeschrock VDEV_ALLOC_ADD)) != 0)
152499653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, error));
1525fa9e4066Sahrens
152639c23413Seschrock spa->spa_pending_vdev = vd;
152799653d4eSeschrock
152899653d4eSeschrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
152999653d4eSeschrock &spares, &nspares) != 0)
153099653d4eSeschrock nspares = 0;
153199653d4eSeschrock
153239c23413Seschrock if (vd->vdev_children == 0 && nspares == 0) {
153339c23413Seschrock spa->spa_pending_vdev = NULL;
1534fa9e4066Sahrens return (spa_vdev_exit(spa, vd, txg, EINVAL));
153539c23413Seschrock }
1536fa9e4066Sahrens
153799653d4eSeschrock if (vd->vdev_children != 0) {
153839c23413Seschrock if ((error = vdev_create(vd, txg, B_FALSE)) != 0) {
153939c23413Seschrock spa->spa_pending_vdev = NULL;
154099653d4eSeschrock return (spa_vdev_exit(spa, vd, txg, error));
154199653d4eSeschrock }
154299653d4eSeschrock }
154399653d4eSeschrock
154439c23413Seschrock /*
154539c23413Seschrock  * We must validate the spares after checking the children.  Otherwise,
154639c23413Seschrock  * vdev_inuse() will blindly overwrite the spare.
154739c23413Seschrock  */
154839c23413Seschrock if ((error = spa_validate_spares(spa, nvroot, txg,
154939c23413Seschrock VDEV_ALLOC_ADD)) != 0) {
155039c23413Seschrock spa->spa_pending_vdev = NULL;
155139c23413Seschrock return (spa_vdev_exit(spa, vd, txg, error));
155239c23413Seschrock }
155339c23413Seschrock
155439c23413Seschrock spa->spa_pending_vdev = NULL;
155539c23413Seschrock
155639c23413Seschrock /*
155739c23413Seschrock  * Transfer each new top-level vdev from vd to rvd.
155839c23413Seschrock  */
155939c23413Seschrock for (c = 0; c < vd->vdev_children; c++) {
156039c23413Seschrock tvd = vd->vdev_child[c];
156139c23413Seschrock vdev_remove_child(vd, tvd);
156239c23413Seschrock tvd->vdev_id = rvd->vdev_children;
156339c23413Seschrock vdev_add_child(rvd, tvd);
156439c23413Seschrock vdev_config_dirty(tvd);
156539c23413Seschrock }
156639c23413Seschrock
156799653d4eSeschrock if (nspares != 0) {
156899653d4eSeschrock if (spa->spa_sparelist != NULL) {
156999653d4eSeschrock nvlist_t **oldspares;
157099653d4eSeschrock uint_t oldnspares;
157199653d4eSeschrock nvlist_t **newspares;
157299653d4eSeschrock
157399653d4eSeschrock VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
157499653d4eSeschrock ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0);
157599653d4eSeschrock
157699653d4eSeschrock newspares = kmem_alloc(sizeof (void *) *
157799653d4eSeschrock (nspares + oldnspares), KM_SLEEP);
157899653d4eSeschrock for (i = 0; i < oldnspares; i++)
157999653d4eSeschrock VERIFY(nvlist_dup(oldspares[i],
158099653d4eSeschrock &newspares[i], KM_SLEEP) == 0);
158199653d4eSeschrock for (i = 0; i < nspares; i++)
158299653d4eSeschrock VERIFY(nvlist_dup(spares[i],
158399653d4eSeschrock &newspares[i + oldnspares],
158499653d4eSeschrock KM_SLEEP) == 0);
158599653d4eSeschrock
158699653d4eSeschrock VERIFY(nvlist_remove(spa->spa_sparelist,
158799653d4eSeschrock ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
158899653d4eSeschrock
158999653d4eSeschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
159099653d4eSeschrock ZPOOL_CONFIG_SPARES, newspares,
159199653d4eSeschrock nspares + oldnspares) == 0);
159299653d4eSeschrock for (i = 0; i < oldnspares + nspares; i++)
159399653d4eSeschrock nvlist_free(newspares[i]);
159499653d4eSeschrock kmem_free(newspares, (oldnspares + nspares) *
159599653d4eSeschrock sizeof (void *));
159699653d4eSeschrock } else {
159799653d4eSeschrock VERIFY(nvlist_alloc(&spa->spa_sparelist,
159899653d4eSeschrock NV_UNIQUE_NAME, KM_SLEEP) == 0);
159999653d4eSeschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist,
160099653d4eSeschrock ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
160199653d4eSeschrock }
160299653d4eSeschrock
160399653d4eSeschrock spa_load_spares(spa);
160499653d4eSeschrock spa->spa_sync_spares = B_TRUE;
1605fa9e4066Sahrens }
1606fa9e4066Sahrens
1607fa9e4066Sahrens /*
16080e34b6a7Sbonwick  * We have to be careful when adding new vdevs to an existing pool.
16090e34b6a7Sbonwick  * If other threads start allocating from these vdevs before we
16100e34b6a7Sbonwick  * sync the config cache, and we lose power, then upon reboot we may
16110e34b6a7Sbonwick  * fail to open the pool because there are DVAs that the config cache
16120e34b6a7Sbonwick  * can't translate.  Therefore, we first add the vdevs without
16130e34b6a7Sbonwick  * initializing metaslabs; sync the config cache (via spa_vdev_exit());
16140373e76bSbonwick  * and then let spa_config_update() initialize the new metaslabs.
16150e34b6a7Sbonwick  *
16160e34b6a7Sbonwick  * spa_load() checks for added-but-not-initialized vdevs, so that
16170e34b6a7Sbonwick  * if we lose power at any point in this sequence, the remaining
16180e34b6a7Sbonwick  * steps will be completed the next time we load the pool.
16190e34b6a7Sbonwick  */
16200373e76bSbonwick (void) spa_vdev_exit(spa, vd, txg, 0);
16210e34b6a7Sbonwick
16220373e76bSbonwick mutex_enter(&spa_namespace_lock);
16230373e76bSbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
16240373e76bSbonwick mutex_exit(&spa_namespace_lock);
1625fa9e4066Sahrens
16260373e76bSbonwick return (0);
1627fa9e4066Sahrens }
1628fa9e4066Sahrens
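/*
 * Illustrative sketch (hypothetical, with assumptions): the overall shape
 * of the nvroot consumed by spa_vdev_add().  In practice the fully
 * specified child vdev nvlists (type, path, devid, ...) are constructed
 * in userland by libzfs; only the root wrapper is shown here.
 */
#if 0
static int
example_vdev_add(spa_t *spa, nvlist_t **children, uint_t nchildren)
{
	nvlist_t *nvroot;
	int error;

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    children, nchildren) == 0);

	error = spa_vdev_add(spa, nvroot);
	nvlist_free(nvroot);
	return (error);
}
#endif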
1629fa9e4066Sahrens /*
1630fa9e4066Sahrens  * Attach a device to a mirror.  The arguments are the path to any device
1631fa9e4066Sahrens  * in the mirror, and the nvroot for the new device.  If the path specifies
1632fa9e4066Sahrens  * a device that is not mirrored, we automatically insert the mirror vdev.
1633fa9e4066Sahrens  *
1634fa9e4066Sahrens  * If 'replacing' is specified, the new device is intended to replace the
1635fa9e4066Sahrens  * existing device; in this case the two devices are made into their own
1636fa9e4066Sahrens  * mirror using the 'replacing' vdev, which is functionally identical to
1637fa9e4066Sahrens  * the mirror vdev (it actually reuses all the same ops) but has a few
1638fa9e4066Sahrens  * extra rules: you can't attach to it after it's been created, and upon
1639fa9e4066Sahrens  * completion of resilvering, the first disk (the one being replaced)
1640fa9e4066Sahrens  * is automatically detached.
1641fa9e4066Sahrens  */
1642fa9e4066Sahrens int
1643ea8dc4b6Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
1644fa9e4066Sahrens {
1645fa9e4066Sahrens uint64_t txg, open_txg;
1646fa9e4066Sahrens int error;
1647fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev;
1648fa9e4066Sahrens vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
164999653d4eSeschrock vdev_ops_t *pvops;
1650fa9e4066Sahrens
1651fa9e4066Sahrens txg = spa_vdev_enter(spa);
1652fa9e4066Sahrens
1653ea8dc4b6Seschrock oldvd = vdev_lookup_by_guid(rvd, guid);
1654fa9e4066Sahrens
1655fa9e4066Sahrens if (oldvd == NULL)
1656fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV));
1657fa9e4066Sahrens
16580e34b6a7Sbonwick if (!oldvd->vdev_ops->vdev_op_leaf)
16590e34b6a7Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
16600e34b6a7Sbonwick
1661fa9e4066Sahrens pvd = oldvd->vdev_parent;
1662fa9e4066Sahrens
166399653d4eSeschrock if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
166499653d4eSeschrock VDEV_ALLOC_ADD)) != 0 || newrootvd->vdev_children != 1)
1665fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
1666fa9e4066Sahrens
1667fa9e4066Sahrens newvd = newrootvd->vdev_child[0];
1668fa9e4066Sahrens
1669fa9e4066Sahrens if (!newvd->vdev_ops->vdev_op_leaf)
1670fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
1671fa9e4066Sahrens
167299653d4eSeschrock if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
1673fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, error));
1674fa9e4066Sahrens
167599653d4eSeschrock if (!replacing) {
167699653d4eSeschrock /*
167799653d4eSeschrock  * For attach, the only allowable parent is a mirror or the root
167899653d4eSeschrock  * vdev.
167999653d4eSeschrock  */
168099653d4eSeschrock if (pvd->vdev_ops != &vdev_mirror_ops &&
168199653d4eSeschrock pvd->vdev_ops != &vdev_root_ops)
168299653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
168399653d4eSeschrock
168499653d4eSeschrock pvops = &vdev_mirror_ops;
168599653d4eSeschrock } else {
168699653d4eSeschrock /*
168799653d4eSeschrock  * Active hot spares can only be replaced by inactive hot
168899653d4eSeschrock  * spares.
168999653d4eSeschrock  */
169099653d4eSeschrock if (pvd->vdev_ops == &vdev_spare_ops &&
169199653d4eSeschrock pvd->vdev_child[1] == oldvd &&
169299653d4eSeschrock !spa_has_spare(spa, newvd->vdev_guid))
169399653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
169499653d4eSeschrock
169599653d4eSeschrock /*
169699653d4eSeschrock  * If the source is a hot spare, and the parent isn't already a
169799653d4eSeschrock  * spare, then we want to create a new hot spare.  Otherwise, we
169839c23413Seschrock  * want to create a replacing vdev.  The user is not allowed to
169939c23413Seschrock  * attach to a spared vdev child unless the 'isspare' state is
170039c23413Seschrock  * the same (spare replaces spare, non-spare replaces
170139c23413Seschrock  * non-spare).
170299653d4eSeschrock  */
170399653d4eSeschrock if (pvd->vdev_ops == &vdev_replacing_ops)
170499653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
170539c23413Seschrock else if (pvd->vdev_ops == &vdev_spare_ops &&
170639c23413Seschrock newvd->vdev_isspare != oldvd->vdev_isspare)
170739c23413Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
170899653d4eSeschrock else if (pvd->vdev_ops != &vdev_spare_ops &&
170999653d4eSeschrock newvd->vdev_isspare)
171099653d4eSeschrock pvops = &vdev_spare_ops;
171199653d4eSeschrock else
171299653d4eSeschrock pvops = &vdev_replacing_ops;
171399653d4eSeschrock }
171499653d4eSeschrock
17152a79c5feSlling /*
17162a79c5feSlling  * Compare the new device size with the replaceable/attachable
17172a79c5feSlling  * device size.
17182a79c5feSlling  */
17192a79c5feSlling if (newvd->vdev_psize < vdev_get_rsize(oldvd))
1720fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
1721fa9e4066Sahrens
1722ecc2d604Sbonwick /*
1723ecc2d604Sbonwick  * The new device cannot have a higher alignment requirement
1724ecc2d604Sbonwick  * than the top-level vdev.
1725ecc2d604Sbonwick  */
1726ecc2d604Sbonwick if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
1727fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
1728fa9e4066Sahrens
1729fa9e4066Sahrens /*
1730fa9e4066Sahrens  * If this is an in-place replacement, update oldvd's path and devid
1731fa9e4066Sahrens  * to make it distinguishable from newvd, and unopenable from now on.
1732fa9e4066Sahrens  */
1733fa9e4066Sahrens if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
1734fa9e4066Sahrens spa_strfree(oldvd->vdev_path);
1735fa9e4066Sahrens oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
1736fa9e4066Sahrens KM_SLEEP);
1737fa9e4066Sahrens (void) sprintf(oldvd->vdev_path, "%s/%s",
1738fa9e4066Sahrens newvd->vdev_path, "old");
1739fa9e4066Sahrens if (oldvd->vdev_devid != NULL) {
1740fa9e4066Sahrens spa_strfree(oldvd->vdev_devid);
1741fa9e4066Sahrens oldvd->vdev_devid = NULL;
1742fa9e4066Sahrens }
1743fa9e4066Sahrens }
1744fa9e4066Sahrens
1745fa9e4066Sahrens /*
174699653d4eSeschrock  * If the parent is not a mirror, or if we're replacing, insert the new
174799653d4eSeschrock  * mirror/replacing/spare vdev above oldvd.
1748fa9e4066Sahrens  */
1749fa9e4066Sahrens if (pvd->vdev_ops != pvops)
1750fa9e4066Sahrens pvd = vdev_add_parent(oldvd, pvops);
1751fa9e4066Sahrens
1752fa9e4066Sahrens ASSERT(pvd->vdev_top->vdev_parent == rvd);
1753fa9e4066Sahrens ASSERT(pvd->vdev_ops == pvops);
1754fa9e4066Sahrens ASSERT(oldvd->vdev_parent == pvd);
1755fa9e4066Sahrens
1756fa9e4066Sahrens /*
1757fa9e4066Sahrens  * Extract the new device from its root and add it to pvd.
1758fa9e4066Sahrens  */
1759fa9e4066Sahrens vdev_remove_child(newrootvd, newvd);
1760fa9e4066Sahrens newvd->vdev_id = pvd->vdev_children;
1761fa9e4066Sahrens vdev_add_child(pvd, newvd);
1762fa9e4066Sahrens
1763ea8dc4b6Seschrock /*
1764ea8dc4b6Seschrock  * If newvd is smaller than oldvd, but larger than its rsize,
1765ea8dc4b6Seschrock  * the addition of newvd may have decreased our parent's asize.
1766ea8dc4b6Seschrock  */
1767ea8dc4b6Seschrock pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
1768ea8dc4b6Seschrock
1769fa9e4066Sahrens tvd = newvd->vdev_top;
1770fa9e4066Sahrens ASSERT(pvd->vdev_top == tvd);
1771fa9e4066Sahrens ASSERT(tvd->vdev_parent == rvd);
1772fa9e4066Sahrens
1773fa9e4066Sahrens vdev_config_dirty(tvd);
1774fa9e4066Sahrens
1775fa9e4066Sahrens /*
1776fa9e4066Sahrens  * Set newvd's DTL to [TXG_INITIAL, open_txg].  It will propagate
1777fa9e4066Sahrens  * upward when spa_vdev_exit() calls vdev_dtl_reassess().
1778fa9e4066Sahrens  */
1779fa9e4066Sahrens open_txg = txg + TXG_CONCURRENT_STATES - 1;
1780fa9e4066Sahrens
1781fa9e4066Sahrens mutex_enter(&newvd->vdev_dtl_lock);
1782fa9e4066Sahrens space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
1783fa9e4066Sahrens open_txg - TXG_INITIAL + 1);
1784fa9e4066Sahrens mutex_exit(&newvd->vdev_dtl_lock);
1785fa9e4066Sahrens
178639c23413Seschrock if (newvd->vdev_isspare)
178739c23413Seschrock spa_spare_activate(newvd);
1788ea8dc4b6Seschrock
1789fa9e4066Sahrens /*
1790fa9e4066Sahrens  * Mark newvd's DTL dirty in this txg.
1791fa9e4066Sahrens  */
1792ecc2d604Sbonwick vdev_dirty(tvd, VDD_DTL, newvd, txg);
1793fa9e4066Sahrens
1794fa9e4066Sahrens (void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
1795fa9e4066Sahrens
1796fa9e4066Sahrens /*
1797fa9e4066Sahrens  * Kick off a resilver to update newvd.
1798fa9e4066Sahrens  */
1799fa9e4066Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
1800fa9e4066Sahrens
1801fa9e4066Sahrens return (0);
1802fa9e4066Sahrens }
1803fa9e4066Sahrens
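/*
 * Illustrative sketch (hypothetical wrappers): the two modes of
 * spa_vdev_attach().  'guid' names the existing leaf vdev and 'nvroot'
 * must describe exactly one new leaf device.
 */
#if 0
static int
example_mirror_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot)
{
	/* replacing == 0: the new device becomes a mirror sibling */
	return (spa_vdev_attach(spa, guid, nvroot, 0));
}

static int
example_replace(spa_t *spa, uint64_t guid, nvlist_t *nvroot)
{
	/* replacing == 1: the old device is detached after resilvering */
	return (spa_vdev_attach(spa, guid, nvroot, 1));
}
#endif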
1804fa9e4066Sahrens /*
1805fa9e4066Sahrens  * Detach a device from a mirror or replacing vdev.
1806fa9e4066Sahrens  * If 'replace_done' is specified, only detach if the parent
1807fa9e4066Sahrens  * is a replacing vdev.
1808fa9e4066Sahrens  */
1809fa9e4066Sahrens int
1810ea8dc4b6Seschrock spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
1811fa9e4066Sahrens {
1812fa9e4066Sahrens uint64_t txg;
1813fa9e4066Sahrens int c, t, error;
1814fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev;
1815fa9e4066Sahrens vdev_t *vd, *pvd, *cvd, *tvd;
181699653d4eSeschrock boolean_t unspare = B_FALSE;
181799653d4eSeschrock uint64_t unspare_guid;
1818fa9e4066Sahrens
1819fa9e4066Sahrens txg = spa_vdev_enter(spa);
1820fa9e4066Sahrens
1821ea8dc4b6Seschrock vd = vdev_lookup_by_guid(rvd, guid);
1822fa9e4066Sahrens
1823fa9e4066Sahrens if (vd == NULL)
1824fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV));
1825fa9e4066Sahrens
18260e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf)
18270e34b6a7Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
18280e34b6a7Sbonwick
1829fa9e4066Sahrens pvd = vd->vdev_parent;
1830fa9e4066Sahrens
1831fa9e4066Sahrens /*
1832fa9e4066Sahrens  * If replace_done is specified, only remove this device if it's
183399653d4eSeschrock  * the first child of a replacing vdev.  For the 'spare' vdev, either
183499653d4eSeschrock  * disk can be removed.
183599653d4eSeschrock  */
183699653d4eSeschrock if (replace_done) {
183799653d4eSeschrock if (pvd->vdev_ops == &vdev_replacing_ops) {
183899653d4eSeschrock if (vd->vdev_id != 0)
183999653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
184099653d4eSeschrock } else if (pvd->vdev_ops != &vdev_spare_ops) {
184199653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
184299653d4eSeschrock }
184399653d4eSeschrock }
184499653d4eSeschrock
184599653d4eSeschrock ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
184699653d4eSeschrock spa_version(spa) >= ZFS_VERSION_SPARES);
1847fa9e4066Sahrens
1848fa9e4066Sahrens /*
184999653d4eSeschrock  * Only mirror, replacing, and spare vdevs support detach.
1850fa9e4066Sahrens  */
1851fa9e4066Sahrens if (pvd->vdev_ops != &vdev_replacing_ops &&
185299653d4eSeschrock pvd->vdev_ops != &vdev_mirror_ops &&
185399653d4eSeschrock pvd->vdev_ops != &vdev_spare_ops)
1854fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
1855fa9e4066Sahrens
1856fa9e4066Sahrens /*
1857fa9e4066Sahrens  * If there's only one replica, you can't detach it.
1858fa9e4066Sahrens  */
1859fa9e4066Sahrens if (pvd->vdev_children <= 1)
1860fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY));
1861fa9e4066Sahrens
1862fa9e4066Sahrens /*
1863fa9e4066Sahrens  * If all siblings have non-empty DTLs, this device may have the only
1864fa9e4066Sahrens  * valid copy of the data, which means we cannot safely detach it.
1865fa9e4066Sahrens  *
1866fa9e4066Sahrens  * XXX -- as in the vdev_offline() case, we really want a more
1867fa9e4066Sahrens  * precise DTL check.
1868fa9e4066Sahrens  */
1869fa9e4066Sahrens for (c = 0; c < pvd->vdev_children; c++) {
1870fa9e4066Sahrens uint64_t dirty;
1871fa9e4066Sahrens
1872fa9e4066Sahrens cvd = pvd->vdev_child[c];
1873fa9e4066Sahrens if (cvd == vd)
1874fa9e4066Sahrens continue;
1875fa9e4066Sahrens if (vdev_is_dead(cvd))
1876fa9e4066Sahrens continue;
1877fa9e4066Sahrens mutex_enter(&cvd->vdev_dtl_lock);
1878fa9e4066Sahrens dirty = cvd->vdev_dtl_map.sm_space |
1879fa9e4066Sahrens cvd->vdev_dtl_scrub.sm_space;
1880fa9e4066Sahrens mutex_exit(&cvd->vdev_dtl_lock);
1881fa9e4066Sahrens if (!dirty)
1882fa9e4066Sahrens break;
1883fa9e4066Sahrens }
188499653d4eSeschrock
188599653d4eSeschrock /*
188699653d4eSeschrock  * If we are a replacing or spare vdev, then we can always detach the
188799653d4eSeschrock  * latter child, as that is how one cancels the operation.
188899653d4eSeschrock  */
188999653d4eSeschrock if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) &&
189099653d4eSeschrock c == pvd->vdev_children)
1891fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY));
1892fa9e4066Sahrens
189399653d4eSeschrock /*
189499653d4eSeschrock  * If we are detaching the original disk from a spare, then it implies
189599653d4eSeschrock  * that the spare should become a real disk, and be removed from the
189699653d4eSeschrock  * active spare list for the pool.
189799653d4eSeschrock  */
189899653d4eSeschrock if (pvd->vdev_ops == &vdev_spare_ops &&
189999653d4eSeschrock vd->vdev_id == 0)
190099653d4eSeschrock unspare = B_TRUE;
190199653d4eSeschrock
1902fa9e4066Sahrens /*
1903fa9e4066Sahrens  * Erase the disk labels so the disk can be used for other things.
1904fa9e4066Sahrens  * This must be done after all other error cases are handled,
1905fa9e4066Sahrens  * but before we disembowel vd (so we can still do I/O to it).
1906fa9e4066Sahrens  * But if we can't do it, don't treat the error as fatal --
1907fa9e4066Sahrens  * it may be that the unwritability of the disk is the reason
1908fa9e4066Sahrens  * it's being detached!
1909fa9e4066Sahrens  */
191039c23413Seschrock error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
1911fa9e4066Sahrens
1912fa9e4066Sahrens /*
1913fa9e4066Sahrens  * Remove vd from its parent and compact the parent's children.
1914fa9e4066Sahrens  */
1915fa9e4066Sahrens vdev_remove_child(pvd, vd);
1916fa9e4066Sahrens vdev_compact_children(pvd);
1917fa9e4066Sahrens
1918fa9e4066Sahrens /*
1919fa9e4066Sahrens  * Remember one of the remaining children so we can get tvd below.
1920fa9e4066Sahrens  */
1921fa9e4066Sahrens cvd = pvd->vdev_child[0];
1922fa9e4066Sahrens
192399653d4eSeschrock /*
192499653d4eSeschrock  * If we need to remove the remaining child from the list of hot spares,
192599653d4eSeschrock  * do it now, marking the vdev as no longer a spare in the process.  We
192699653d4eSeschrock  * must do this before vdev_remove_parent(), because that can change the
192799653d4eSeschrock  * GUID if it creates a new toplevel GUID.
192899653d4eSeschrock  */
192999653d4eSeschrock if (unspare) {
193099653d4eSeschrock ASSERT(cvd->vdev_isspare);
193139c23413Seschrock spa_spare_remove(cvd);
193299653d4eSeschrock unspare_guid = cvd->vdev_guid;
193399653d4eSeschrock }
193499653d4eSeschrock
1935fa9e4066Sahrens /*
1936fa9e4066Sahrens  * If the parent mirror/replacing vdev only has one child,
1937fa9e4066Sahrens  * the parent is no longer needed.  Remove it from the tree.
1938fa9e4066Sahrens  */
1939fa9e4066Sahrens if (pvd->vdev_children == 1)
1940fa9e4066Sahrens vdev_remove_parent(cvd);
1941fa9e4066Sahrens
1942fa9e4066Sahrens /*
1943fa9e4066Sahrens  * We don't set tvd until now because the parent we just removed
1944fa9e4066Sahrens  * may have been the previous top-level vdev.
1945fa9e4066Sahrens  */
1946fa9e4066Sahrens tvd = cvd->vdev_top;
1947fa9e4066Sahrens ASSERT(tvd->vdev_parent == rvd);
1948fa9e4066Sahrens
1949fa9e4066Sahrens /*
195039c23413Seschrock  * Reevaluate the parent vdev state.
1951fa9e4066Sahrens  */
195239c23413Seschrock vdev_propagate_state(cvd->vdev_parent);
1953fa9e4066Sahrens
1954fa9e4066Sahrens /*
195539c23413Seschrock  * If the device we just detached was smaller than the others, it may be
195639c23413Seschrock  * possible to add metaslabs (i.e. grow the pool).  vdev_metaslab_init()
195739c23413Seschrock  * can't fail because the existing metaslabs are already in core, so
195839c23413Seschrock  * there's nothing to read from disk.
1959fa9e4066Sahrens  */
1960ecc2d604Sbonwick VERIFY(vdev_metaslab_init(tvd, txg) == 0);
1961fa9e4066Sahrens
1962fa9e4066Sahrens vdev_config_dirty(tvd);
1963fa9e4066Sahrens
1964fa9e4066Sahrens /*
196539c23413Seschrock  * Mark vd's DTL as dirty in this txg.  vdev_dtl_sync() will see that
196639c23413Seschrock  * vd->vdev_detached is set and free vd's DTL object in syncing context.
196739c23413Seschrock  * But first make sure we're not on any *other* txg's DTL list, to
196839c23413Seschrock  * prevent vd from being accessed after it's freed.
1969fa9e4066Sahrens  */
1970fa9e4066Sahrens for (t = 0; t < TXG_SIZE; t++)
1971fa9e4066Sahrens (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
1972ecc2d604Sbonwick vd->vdev_detached = B_TRUE;
1973ecc2d604Sbonwick vdev_dirty(tvd, VDD_DTL, vd, txg);
1974fa9e4066Sahrens
197599653d4eSeschrock error = spa_vdev_exit(spa, vd, txg, 0);
197699653d4eSeschrock
197799653d4eSeschrock /*
197839c23413Seschrock  * If this was the removal of the original device in a hot spare vdev,
197939c23413Seschrock  * then we want to go through and remove the device from the hot spare
198039c23413Seschrock  * list of every other pool.
198199653d4eSeschrock  */
198299653d4eSeschrock if (unspare) {
198399653d4eSeschrock spa = NULL;
198499653d4eSeschrock mutex_enter(&spa_namespace_lock);
198599653d4eSeschrock while ((spa = spa_next(spa)) != NULL) {
198699653d4eSeschrock if (spa->spa_state != POOL_STATE_ACTIVE)
198799653d4eSeschrock continue;
198899653d4eSeschrock
198999653d4eSeschrock (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
199099653d4eSeschrock }
199199653d4eSeschrock mutex_exit(&spa_namespace_lock);
199299653d4eSeschrock }
199399653d4eSeschrock
199499653d4eSeschrock return (error);
199599653d4eSeschrock }
199699653d4eSeschrock
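/*
 * Illustrative sketch (hypothetical): cancelling an in-progress
 * replacement.  Per the comments above, detaching the latter child of a
 * replacing vdev (the new device) is how the operation is cancelled.
 */
#if 0
static int
example_cancel_replace(spa_t *spa, uint64_t newvd_guid)
{
	/* replace_done == 0 permits detaching either child */
	return (spa_vdev_detach(spa, newvd_guid, 0));
}
#endif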
199799653d4eSeschrock /*
199899653d4eSeschrock  * Remove a device from the pool.  Currently, this supports removing only hot
199999653d4eSeschrock  * spares.
200099653d4eSeschrock  */
200199653d4eSeschrock int
200299653d4eSeschrock spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
200399653d4eSeschrock {
200499653d4eSeschrock vdev_t *vd;
200599653d4eSeschrock nvlist_t **spares, *nv, **newspares;
200699653d4eSeschrock uint_t i, j, nspares;
200799653d4eSeschrock int ret = 0;
200899653d4eSeschrock
200999653d4eSeschrock spa_config_enter(spa, RW_WRITER, FTAG);
201099653d4eSeschrock
201199653d4eSeschrock vd = spa_lookup_by_guid(spa, guid);
201299653d4eSeschrock
201399653d4eSeschrock nv = NULL;
201499653d4eSeschrock if (spa->spa_spares != NULL &&
201599653d4eSeschrock nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
201699653d4eSeschrock &spares, &nspares) == 0) {
201799653d4eSeschrock for (i = 0; i < nspares; i++) {
201899653d4eSeschrock uint64_t theguid;
201999653d4eSeschrock
202099653d4eSeschrock VERIFY(nvlist_lookup_uint64(spares[i],
202199653d4eSeschrock ZPOOL_CONFIG_GUID, &theguid) == 0);
202299653d4eSeschrock if (theguid == guid) {
202399653d4eSeschrock nv = spares[i];
202499653d4eSeschrock break;
202599653d4eSeschrock }
202699653d4eSeschrock }
202799653d4eSeschrock }
202899653d4eSeschrock
202999653d4eSeschrock /*
203099653d4eSeschrock  * We only support removing a hot spare, and only if it's not currently
203199653d4eSeschrock  * in use in this pool.
203299653d4eSeschrock  */
203399653d4eSeschrock if (nv == NULL && vd == NULL) {
203499653d4eSeschrock ret = ENOENT;
203599653d4eSeschrock goto out;
203699653d4eSeschrock }
203799653d4eSeschrock
203899653d4eSeschrock if (nv == NULL && vd != NULL) {
203999653d4eSeschrock ret = ENOTSUP;
204099653d4eSeschrock goto out;
204199653d4eSeschrock }
204299653d4eSeschrock
204399653d4eSeschrock if (!unspare && nv != NULL && vd != NULL) {
204499653d4eSeschrock ret = EBUSY;
204599653d4eSeschrock goto out;
204699653d4eSeschrock }
204799653d4eSeschrock
204899653d4eSeschrock if (nspares == 1) {
204999653d4eSeschrock newspares = NULL;
205099653d4eSeschrock } else {
205199653d4eSeschrock newspares = kmem_alloc((nspares - 1) * sizeof (void *),
205299653d4eSeschrock KM_SLEEP);
205399653d4eSeschrock for (i = 0, j = 0; i < nspares; i++) {
205499653d4eSeschrock if (spares[i] != nv)
205599653d4eSeschrock VERIFY(nvlist_dup(spares[i],
205699653d4eSeschrock &newspares[j++], KM_SLEEP) == 0);
205799653d4eSeschrock }
205899653d4eSeschrock }
205999653d4eSeschrock
206099653d4eSeschrock VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
206199653d4eSeschrock DATA_TYPE_NVLIST_ARRAY) == 0);
206299653d4eSeschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
206399653d4eSeschrock newspares, nspares - 1) == 0);
206499653d4eSeschrock for (i = 0; i < nspares - 1; i++)
206599653d4eSeschrock nvlist_free(newspares[i]);
206699653d4eSeschrock kmem_free(newspares, (nspares - 1) * sizeof (void *));
206799653d4eSeschrock spa_load_spares(spa);
206899653d4eSeschrock spa->spa_sync_spares = B_TRUE;
206999653d4eSeschrock
207099653d4eSeschrock out:
207199653d4eSeschrock spa_config_exit(spa, FTAG);
207299653d4eSeschrock
207399653d4eSeschrock return (ret);
2074fa9e4066Sahrens }
2075fa9e4066Sahrens
2076fa9e4066Sahrens /*
2077ea8dc4b6Seschrock  * Find any device that's done replacing, so we can detach it.
2078fa9e4066Sahrens  */
2079ea8dc4b6Seschrock static vdev_t *
2080ea8dc4b6Seschrock spa_vdev_replace_done_hunt(vdev_t *vd)
2081fa9e4066Sahrens {
2082ea8dc4b6Seschrock vdev_t *newvd, *oldvd;
2083fa9e4066Sahrens int c;
2084fa9e4066Sahrens
2085ea8dc4b6Seschrock for (c = 0; c < vd->vdev_children; c++) {
2086ea8dc4b6Seschrock oldvd = spa_vdev_replace_done_hunt(vd->vdev_child[c]);
2087ea8dc4b6Seschrock if (oldvd != NULL)
2088ea8dc4b6Seschrock return (oldvd);
2089ea8dc4b6Seschrock }
2090fa9e4066Sahrens
2091fa9e4066Sahrens if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
2092ea8dc4b6Seschrock oldvd = vd->vdev_child[0];
2093ea8dc4b6Seschrock newvd = vd->vdev_child[1];
2094ea8dc4b6Seschrock
2095ea8dc4b6Seschrock mutex_enter(&newvd->vdev_dtl_lock);
2096ea8dc4b6Seschrock if (newvd->vdev_dtl_map.sm_space == 0 &&
2097ea8dc4b6Seschrock newvd->vdev_dtl_scrub.sm_space == 0) {
2098ea8dc4b6Seschrock mutex_exit(&newvd->vdev_dtl_lock);
2099ea8dc4b6Seschrock return (oldvd);
2100fa9e4066Sahrens }
2101ea8dc4b6Seschrock mutex_exit(&newvd->vdev_dtl_lock);
2102fa9e4066Sahrens }
2103ea8dc4b6Seschrock
2104ea8dc4b6Seschrock return (NULL);
2105fa9e4066Sahrens }
2106fa9e4066Sahrens
2107ea8dc4b6Seschrock static void
2108fa9e4066Sahrens spa_vdev_replace_done(spa_t *spa)
2109fa9e4066Sahrens {
2110ea8dc4b6Seschrock vdev_t *vd;
211199653d4eSeschrock vdev_t *pvd;
2112ea8dc4b6Seschrock uint64_t guid;
211399653d4eSeschrock uint64_t pguid = 0;
2114ea8dc4b6Seschrock
2115ea8dc4b6Seschrock spa_config_enter(spa, RW_READER, FTAG);
2116ea8dc4b6Seschrock
2117ea8dc4b6Seschrock while ((vd = spa_vdev_replace_done_hunt(spa->spa_root_vdev)) != NULL) {
2118ea8dc4b6Seschrock guid = vd->vdev_guid;
211999653d4eSeschrock /*
212099653d4eSeschrock  * If we have just finished replacing a hot spared device, then
212199653d4eSeschrock  * we need to detach the parent's first child (the original hot
212299653d4eSeschrock  * spare) as well.
212399653d4eSeschrock  */
212499653d4eSeschrock pvd = vd->vdev_parent;
212599653d4eSeschrock if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
212699653d4eSeschrock pvd->vdev_id == 0) {
212799653d4eSeschrock ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
212899653d4eSeschrock ASSERT(pvd->vdev_parent->vdev_children == 2);
212999653d4eSeschrock pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
213099653d4eSeschrock }
2131ea8dc4b6Seschrock spa_config_exit(spa, FTAG);
2132ea8dc4b6Seschrock if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
2133ea8dc4b6Seschrock return;
213499653d4eSeschrock if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
213599653d4eSeschrock return;
2136ea8dc4b6Seschrock spa_config_enter(spa, RW_READER, FTAG);
2137fa9e4066Sahrens }
2138fa9e4066Sahrens
2139ea8dc4b6Seschrock spa_config_exit(spa, FTAG);
2140fa9e4066Sahrens }
2141fa9e4066Sahrens
2142c67d9675Seschrock /*
2143c67d9675Seschrock  * Update the stored path for this vdev.  Dirty the vdev configuration, relying
2144c67d9675Seschrock  * on spa_vdev_enter/exit() to synchronize the labels and cache.
2145c67d9675Seschrock  */
2146c67d9675Seschrock int
2147c67d9675Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
2148c67d9675Seschrock {
2149c67d9675Seschrock vdev_t *rvd, *vd;
2150c67d9675Seschrock uint64_t txg;
2151c67d9675Seschrock
2152c67d9675Seschrock rvd = spa->spa_root_vdev;
2153c67d9675Seschrock
2154c67d9675Seschrock txg = spa_vdev_enter(spa);
2155c67d9675Seschrock
215699653d4eSeschrock if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
215799653d4eSeschrock /*
215899653d4eSeschrock  * Determine if this is a reference to a hot spare.  In that
215999653d4eSeschrock  * case, update the path as stored in the spare list.
216099653d4eSeschrock  */
216199653d4eSeschrock nvlist_t **spares;
216299653d4eSeschrock uint_t i, nspares;
216399653d4eSeschrock if (spa->spa_sparelist != NULL) {
216499653d4eSeschrock VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
216599653d4eSeschrock ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
216699653d4eSeschrock for (i = 0; i < nspares; i++) {
216799653d4eSeschrock uint64_t theguid;
216899653d4eSeschrock VERIFY(nvlist_lookup_uint64(spares[i],
216999653d4eSeschrock ZPOOL_CONFIG_GUID, &theguid) == 0);
217099653d4eSeschrock if (theguid == guid)
217199653d4eSeschrock break;
217299653d4eSeschrock }
217399653d4eSeschrock
217499653d4eSeschrock if (i == nspares)
217599653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT));
217699653d4eSeschrock
217799653d4eSeschrock VERIFY(nvlist_add_string(spares[i],
217899653d4eSeschrock ZPOOL_CONFIG_PATH, newpath) == 0);
217999653d4eSeschrock spa_load_spares(spa);
218099653d4eSeschrock spa->spa_sync_spares = B_TRUE;
218199653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, 0));
218299653d4eSeschrock } else {
218399653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT));
218499653d4eSeschrock }
218599653d4eSeschrock }
2186c67d9675Seschrock
21870e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf)
21880e34b6a7Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
21890e34b6a7Sbonwick
2190c67d9675Seschrock spa_strfree(vd->vdev_path);
2191c67d9675Seschrock vd->vdev_path = spa_strdup(newpath);
2192c67d9675Seschrock
2193c67d9675Seschrock vdev_config_dirty(vd->vdev_top);
2194c67d9675Seschrock
2195c67d9675Seschrock return (spa_vdev_exit(spa, NULL, txg, 0));
2196c67d9675Seschrock }
2197c67d9675Seschrock
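/*
 * Illustrative sketch (hypothetical): updating a vdev's stored path after
 * its device node moved.  The path shown is an example only; the same
 * call also handles hot spares, as implemented above.
 */
#if 0
static int
example_fix_path(spa_t *spa, uint64_t guid)
{
	return (spa_vdev_setpath(spa, guid, "/dev/dsk/c2t0d0s0"));
}
#endif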
2198fa9e4066Sahrens /*
2199fa9e4066Sahrens  * ==========================================================================
2200fa9e4066Sahrens  * SPA Scrubbing
2201fa9e4066Sahrens  * ==========================================================================
2202fa9e4066Sahrens  */
2203fa9e4066Sahrens
2204fa9e4066Sahrens static void
2205fa9e4066Sahrens spa_scrub_io_done(zio_t *zio)
2206fa9e4066Sahrens {
2207fa9e4066Sahrens spa_t *spa = zio->io_spa;
2208fa9e4066Sahrens
2209ad23a2dbSjohansen zio_data_buf_free(zio->io_data, zio->io_size);
2210fa9e4066Sahrens
2211fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock);
2212ea8dc4b6Seschrock if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
221344cd46caSbillm vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev;
2214ea8dc4b6Seschrock spa->spa_scrub_errors++;
2215fa9e4066Sahrens mutex_enter(&vd->vdev_stat_lock);
2216fa9e4066Sahrens vd->vdev_stat.vs_scrub_errors++;
2217fa9e4066Sahrens mutex_exit(&vd->vdev_stat_lock);
2218fa9e4066Sahrens }
221905b2b3b8Smishra
222005b2b3b8Smishra if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight)
2221ea8dc4b6Seschrock cv_broadcast(&spa->spa_scrub_io_cv);
222205b2b3b8Smishra
222305b2b3b8Smishra ASSERT(spa->spa_scrub_inflight >= 0);
222405b2b3b8Smishra
2225ea8dc4b6Seschrock mutex_exit(&spa->spa_scrub_lock);
2226fa9e4066Sahrens }
2227fa9e4066Sahrens
2228fa9e4066Sahrens static void
2229ea8dc4b6Seschrock spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags,
2230ea8dc4b6Seschrock zbookmark_t *zb)
2231fa9e4066Sahrens {
2232fa9e4066Sahrens size_t size = BP_GET_LSIZE(bp);
223305b2b3b8Smishra void *data;
2234fa9e4066Sahrens
2235fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock);
223605b2b3b8Smishra /*
223705b2b3b8Smishra  * Do not give too much work to vdev(s).
223805b2b3b8Smishra  */
223905b2b3b8Smishra while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) {
224005b2b3b8Smishra cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
224105b2b3b8Smishra }
2242fa9e4066Sahrens spa->spa_scrub_inflight++;
2243fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock);
2244fa9e4066Sahrens
224505b2b3b8Smishra data = zio_data_buf_alloc(size);
224605b2b3b8Smishra
2247ea8dc4b6Seschrock if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
2248ea8dc4b6Seschrock flags |= ZIO_FLAG_SPECULATIVE;	/* intent log block */
2249ea8dc4b6Seschrock
2250d80c45e0Sbonwick flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
2251ea8dc4b6Seschrock
2252fa9e4066Sahrens zio_nowait(zio_read(NULL, spa, bp, data, size,
2253ea8dc4b6Seschrock spa_scrub_io_done, NULL, priority, flags, zb));
2254fa9e4066Sahrens }
2255fa9e4066Sahrens
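/*
 * Illustrative sketch (hypothetical helpers): the generic shape of the
 * inflight throttle used by spa_scrub_io_start()/spa_scrub_io_done()
 * above -- a counting limiter built from a mutex and a condition
 * variable.  The names throttle_enter/throttle_exit are assumptions.
 */
#if 0
static void
throttle_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	spa->spa_scrub_inflight++;
	mutex_exit(&spa->spa_scrub_lock);
}

static void
throttle_exit(spa_t *spa)
{
	mutex_enter(&spa->spa_scrub_lock);
	if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight)
		cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}
#endif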
2256fa9e4066Sahrens /* ARGSUSED */
2257fa9e4066Sahrens static int
2258fa9e4066Sahrens spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a)
2259fa9e4066Sahrens {
2260fa9e4066Sahrens blkptr_t *bp = &bc->bc_blkptr;
226144cd46caSbillm vdev_t *vd = spa->spa_root_vdev;
226244cd46caSbillm dva_t *dva = bp->blk_dva;
226344cd46caSbillm int needs_resilver = B_FALSE;
226444cd46caSbillm int d;
2265fa9e4066Sahrens
226644cd46caSbillm if (bc->bc_errno) {
2267fa9e4066Sahrens /*
2268fa9e4066Sahrens  * We can't scrub this block, but we can continue to scrub
2269fa9e4066Sahrens  * the rest of the pool.  Note the error and move along.
2270fa9e4066Sahrens  */
2271fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock);
2272fa9e4066Sahrens spa->spa_scrub_errors++;
2273fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock);
2274fa9e4066Sahrens
227544cd46caSbillm mutex_enter(&vd->vdev_stat_lock);
227644cd46caSbillm vd->vdev_stat.vs_scrub_errors++;
227744cd46caSbillm mutex_exit(&vd->vdev_stat_lock);
2278fa9e4066Sahrens
2279fa9e4066Sahrens return (ERESTART);
2280fa9e4066Sahrens }
2281fa9e4066Sahrens
2282fa9e4066Sahrens ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg);
2283fa9e4066Sahrens
228444cd46caSbillm for (d = 0; d < BP_GET_NDVAS(bp); d++) {
228544cd46caSbillm vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]));
2286fa9e4066Sahrens
228744cd46caSbillm ASSERT(vd != NULL);
228844cd46caSbillm
228944cd46caSbillm /*
229044cd46caSbillm  * Keep track of how much data we've examined so that
229144cd46caSbillm  * zpool(1M) status can make useful progress reports.
229244cd46caSbillm  */
229344cd46caSbillm mutex_enter(&vd->vdev_stat_lock);
229444cd46caSbillm vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]);
229544cd46caSbillm mutex_exit(&vd->vdev_stat_lock);
229644cd46caSbillm
229744cd46caSbillm if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) {
229844cd46caSbillm if (DVA_GET_GANG(&dva[d])) {
229944cd46caSbillm /*
230044cd46caSbillm  * Gang members may be spread across multiple
230144cd46caSbillm  * vdevs, so the best we can do is look at the
230244cd46caSbillm  * pool-wide DTL.
230344cd46caSbillm  * XXX -- it would be better to change our
230444cd46caSbillm  * allocation policy to ensure that this can't
230544cd46caSbillm  * happen.
230644cd46caSbillm  */
230744cd46caSbillm vd = spa->spa_root_vdev;
230844cd46caSbillm }
230944cd46caSbillm if (vdev_dtl_contains(&vd->vdev_dtl_map,
231044cd46caSbillm bp->blk_birth, 1))
231144cd46caSbillm needs_resilver = B_TRUE;
2312fa9e4066Sahrens }
231344cd46caSbillm }
231444cd46caSbillm
231544cd46caSbillm if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING)
2316fa9e4066Sahrens spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB,
2317ea8dc4b6Seschrock ZIO_FLAG_SCRUB, &bc->bc_bookmark);
231844cd46caSbillm else if (needs_resilver)
231944cd46caSbillm spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER,
232044cd46caSbillm ZIO_FLAG_RESILVER, &bc->bc_bookmark);
2321fa9e4066Sahrens
2322fa9e4066Sahrens return (0);
2323fa9e4066Sahrens }
2324fa9e4066Sahrens
2325fa9e4066Sahrens static void
2326fa9e4066Sahrens spa_scrub_thread(spa_t *spa)
2327fa9e4066Sahrens {
2328fa9e4066Sahrens callb_cpr_t cprinfo;
2329fa9e4066Sahrens traverse_handle_t *th = spa->spa_scrub_th;
2330fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev;
2331fa9e4066Sahrens pool_scrub_type_t scrub_type = spa->spa_scrub_type;
2332fa9e4066Sahrens int error = 0;
2333fa9e4066Sahrens boolean_t complete;
2334fa9e4066Sahrens
2335fa9e4066Sahrens CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG);
2336fa9e4066Sahrens
2337f0aa80d4Sbonwick /*
2338f0aa80d4Sbonwick  * If we're restarting due to a snapshot create/delete,
2339f0aa80d4Sbonwick  * wait for that to complete.
2340f0aa80d4Sbonwick  */
2341f0aa80d4Sbonwick txg_wait_synced(spa_get_dsl(spa), 0);
2342f0aa80d4Sbonwick
2343ea8dc4b6Seschrock dprintf("start %s mintxg=%llu maxtxg=%llu\n",
2344ea8dc4b6Seschrock scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub",
"resilver" : "scrub", 2345ea8dc4b6Seschrock spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg); 2346ea8dc4b6Seschrock 2347ea8dc4b6Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 2348ea8dc4b6Seschrock vdev_reopen(rvd); /* purge all vdev caches */ 2349fa9e4066Sahrens vdev_config_dirty(rvd); /* rewrite all disk labels */ 2350fa9e4066Sahrens vdev_scrub_stat_update(rvd, scrub_type, B_FALSE); 2351ea8dc4b6Seschrock spa_config_exit(spa, FTAG); 2352fa9e4066Sahrens 2353fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2354fa9e4066Sahrens spa->spa_scrub_errors = 0; 2355fa9e4066Sahrens spa->spa_scrub_active = 1; 2356ea8dc4b6Seschrock ASSERT(spa->spa_scrub_inflight == 0); 2357fa9e4066Sahrens 2358fa9e4066Sahrens while (!spa->spa_scrub_stop) { 2359fa9e4066Sahrens CALLB_CPR_SAFE_BEGIN(&cprinfo); 2360ea8dc4b6Seschrock while (spa->spa_scrub_suspended) { 2361fa9e4066Sahrens spa->spa_scrub_active = 0; 2362fa9e4066Sahrens cv_broadcast(&spa->spa_scrub_cv); 2363fa9e4066Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2364fa9e4066Sahrens spa->spa_scrub_active = 1; 2365fa9e4066Sahrens } 2366fa9e4066Sahrens CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock); 2367fa9e4066Sahrens 2368fa9e4066Sahrens if (spa->spa_scrub_restart_txg != 0) 2369fa9e4066Sahrens break; 2370fa9e4066Sahrens 2371fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2372fa9e4066Sahrens error = traverse_more(th); 2373fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2374fa9e4066Sahrens if (error != EAGAIN) 2375fa9e4066Sahrens break; 2376fa9e4066Sahrens } 2377fa9e4066Sahrens 2378fa9e4066Sahrens while (spa->spa_scrub_inflight) 2379fa9e4066Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2380fa9e4066Sahrens 23815dabedeeSbonwick spa->spa_scrub_active = 0; 23825dabedeeSbonwick cv_broadcast(&spa->spa_scrub_cv); 23835dabedeeSbonwick 23845dabedeeSbonwick mutex_exit(&spa->spa_scrub_lock); 23855dabedeeSbonwick 23865dabedeeSbonwick spa_config_enter(spa, RW_WRITER, FTAG); 23875dabedeeSbonwick 23885dabedeeSbonwick mutex_enter(&spa->spa_scrub_lock); 23895dabedeeSbonwick 23905dabedeeSbonwick /* 23915dabedeeSbonwick * Note: we check spa_scrub_restart_txg under both spa_scrub_lock 23925dabedeeSbonwick * AND the spa config lock to synchronize with any config changes 23935dabedeeSbonwick * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit(). 23945dabedeeSbonwick */ 2395fa9e4066Sahrens if (spa->spa_scrub_restart_txg != 0) 2396fa9e4066Sahrens error = ERESTART; 2397fa9e4066Sahrens 2398ea8dc4b6Seschrock if (spa->spa_scrub_stop) 2399ea8dc4b6Seschrock error = EINTR; 2400ea8dc4b6Seschrock 2401fa9e4066Sahrens /* 2402ea8dc4b6Seschrock * Even if there were uncorrectable errors, we consider the scrub 2403ea8dc4b6Seschrock * completed. The downside is that if there is a transient error during 2404ea8dc4b6Seschrock * a resilver, we won't resilver the data properly to the target. But 2405ea8dc4b6Seschrock * if the damage is permanent (more likely) we will resilver forever, 2406ea8dc4b6Seschrock * which isn't really acceptable. Since there is enough information for 2407ea8dc4b6Seschrock * the user to know what has failed and why, this seems like a more 2408ea8dc4b6Seschrock * tractable approach. 2409fa9e4066Sahrens */ 2410ea8dc4b6Seschrock complete = (error == 0); 2411fa9e4066Sahrens 2412ea8dc4b6Seschrock dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n", 2413ea8dc4b6Seschrock scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub", 2414fa9e4066Sahrens spa->spa_scrub_maxtxg, complete ? 
"done" : "FAILED", 2415fa9e4066Sahrens error, spa->spa_scrub_errors, spa->spa_scrub_stop); 2416fa9e4066Sahrens 2417fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2418fa9e4066Sahrens 2419fa9e4066Sahrens /* 2420fa9e4066Sahrens * If the scrub/resilver completed, update all DTLs to reflect this. 2421fa9e4066Sahrens * Whether it succeeded or not, vacate all temporary scrub DTLs. 2422fa9e4066Sahrens */ 2423fa9e4066Sahrens vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1, 2424fa9e4066Sahrens complete ? spa->spa_scrub_maxtxg : 0, B_TRUE); 2425fa9e4066Sahrens vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete); 2426ea8dc4b6Seschrock spa_errlog_rotate(spa); 24275dabedeeSbonwick 2428ea8dc4b6Seschrock spa_config_exit(spa, FTAG); 2429fa9e4066Sahrens 2430fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2431fa9e4066Sahrens 2432ea8dc4b6Seschrock /* 2433ea8dc4b6Seschrock * We may have finished replacing a device. 2434ea8dc4b6Seschrock * Let the async thread assess this and handle the detach. 2435ea8dc4b6Seschrock */ 2436ea8dc4b6Seschrock spa_async_request(spa, SPA_ASYNC_REPLACE_DONE); 2437fa9e4066Sahrens 2438fa9e4066Sahrens /* 2439fa9e4066Sahrens * If we were told to restart, our final act is to start a new scrub. 2440fa9e4066Sahrens */ 2441fa9e4066Sahrens if (error == ERESTART) 2442ea8dc4b6Seschrock spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ? 2443ea8dc4b6Seschrock SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB); 2444fa9e4066Sahrens 2445ea8dc4b6Seschrock spa->spa_scrub_type = POOL_SCRUB_NONE; 2446ea8dc4b6Seschrock spa->spa_scrub_active = 0; 2447ea8dc4b6Seschrock spa->spa_scrub_thread = NULL; 2448ea8dc4b6Seschrock cv_broadcast(&spa->spa_scrub_cv); 2449fa9e4066Sahrens CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */ 2450fa9e4066Sahrens thread_exit(); 2451fa9e4066Sahrens } 2452fa9e4066Sahrens 2453fa9e4066Sahrens void 2454fa9e4066Sahrens spa_scrub_suspend(spa_t *spa) 2455fa9e4066Sahrens { 2456fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2457ea8dc4b6Seschrock spa->spa_scrub_suspended++; 2458fa9e4066Sahrens while (spa->spa_scrub_active) { 2459fa9e4066Sahrens cv_broadcast(&spa->spa_scrub_cv); 2460fa9e4066Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2461fa9e4066Sahrens } 2462fa9e4066Sahrens while (spa->spa_scrub_inflight) 2463fa9e4066Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2464fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2465fa9e4066Sahrens } 2466fa9e4066Sahrens 2467fa9e4066Sahrens void 2468fa9e4066Sahrens spa_scrub_resume(spa_t *spa) 2469fa9e4066Sahrens { 2470fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2471ea8dc4b6Seschrock ASSERT(spa->spa_scrub_suspended != 0); 2472ea8dc4b6Seschrock if (--spa->spa_scrub_suspended == 0) 2473fa9e4066Sahrens cv_broadcast(&spa->spa_scrub_cv); 2474fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2475fa9e4066Sahrens } 2476fa9e4066Sahrens 2477fa9e4066Sahrens void 2478fa9e4066Sahrens spa_scrub_restart(spa_t *spa, uint64_t txg) 2479fa9e4066Sahrens { 2480fa9e4066Sahrens /* 2481fa9e4066Sahrens * Something happened (e.g. snapshot create/delete) that means 2482fa9e4066Sahrens * we must restart any in-progress scrubs. The itinerary will 2483fa9e4066Sahrens * fix this properly. 
2484fa9e4066Sahrens  */
2485fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock);
2486fa9e4066Sahrens spa->spa_scrub_restart_txg = txg;
2487fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock);
2488fa9e4066Sahrens }
2489fa9e4066Sahrens
2490ea8dc4b6Seschrock int
2491ea8dc4b6Seschrock spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force)
2492fa9e4066Sahrens {
2493fa9e4066Sahrens space_seg_t *ss;
2494fa9e4066Sahrens uint64_t mintxg, maxtxg;
2495fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev;
2496fa9e4066Sahrens
2497fa9e4066Sahrens if ((uint_t)type >= POOL_SCRUB_TYPES)
2498fa9e4066Sahrens return (ENOTSUP);
2499fa9e4066Sahrens
2500ea8dc4b6Seschrock mutex_enter(&spa->spa_scrub_lock);
2501ea8dc4b6Seschrock
2502fa9e4066Sahrens /*
2503fa9e4066Sahrens  * If there's a scrub or resilver already in progress, stop it.
2504fa9e4066Sahrens  */
2505fa9e4066Sahrens while (spa->spa_scrub_thread != NULL) {
2506fa9e4066Sahrens /*
2507fa9e4066Sahrens  * Don't stop a resilver unless forced.
2508fa9e4066Sahrens  */
2509ea8dc4b6Seschrock if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) {
2510ea8dc4b6Seschrock mutex_exit(&spa->spa_scrub_lock);
2511fa9e4066Sahrens return (EBUSY);
2512ea8dc4b6Seschrock }
2513fa9e4066Sahrens spa->spa_scrub_stop = 1;
2514fa9e4066Sahrens cv_broadcast(&spa->spa_scrub_cv);
2515fa9e4066Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
2516fa9e4066Sahrens }
2517fa9e4066Sahrens
2518fa9e4066Sahrens /*
2519fa9e4066Sahrens  * Terminate the previous traverse.
2520fa9e4066Sahrens  */
2521fa9e4066Sahrens if (spa->spa_scrub_th != NULL) {
2522fa9e4066Sahrens traverse_fini(spa->spa_scrub_th);
2523fa9e4066Sahrens spa->spa_scrub_th = NULL;
2524fa9e4066Sahrens }
2525fa9e4066Sahrens
2526ea8dc4b6Seschrock if (rvd == NULL) {
2527ea8dc4b6Seschrock ASSERT(spa->spa_scrub_stop == 0);
2528ea8dc4b6Seschrock ASSERT(spa->spa_scrub_type == type);
2529ea8dc4b6Seschrock ASSERT(spa->spa_scrub_restart_txg == 0);
2530ea8dc4b6Seschrock mutex_exit(&spa->spa_scrub_lock);
2531ea8dc4b6Seschrock return (0);
2532ea8dc4b6Seschrock }
2533fa9e4066Sahrens
2534fa9e4066Sahrens mintxg = TXG_INITIAL - 1;
2535fa9e4066Sahrens maxtxg = spa_last_synced_txg(spa) + 1;
2536fa9e4066Sahrens
2537ea8dc4b6Seschrock mutex_enter(&rvd->vdev_dtl_lock);
2538fa9e4066Sahrens
2539ea8dc4b6Seschrock if (rvd->vdev_dtl_map.sm_space == 0) {
2540ea8dc4b6Seschrock /*
2541ea8dc4b6Seschrock  * The pool-wide DTL is empty.
2542ecc2d604Sbonwick  * If this is a resilver, there's nothing to do except
2543ecc2d604Sbonwick  * check whether any in-progress replacements have completed.
2544ea8dc4b6Seschrock  */
2545ecc2d604Sbonwick if (type == POOL_SCRUB_RESILVER) {
2546ea8dc4b6Seschrock type = POOL_SCRUB_NONE;
2547ecc2d604Sbonwick spa_async_request(spa, SPA_ASYNC_REPLACE_DONE);
2548ecc2d604Sbonwick }
2549ea8dc4b6Seschrock } else {
2550ea8dc4b6Seschrock /*
2551ea8dc4b6Seschrock  * The pool-wide DTL is non-empty.
2552ea8dc4b6Seschrock  * If this is a normal scrub, upgrade to a resilver instead.
2553ea8dc4b6Seschrock  */
2554ea8dc4b6Seschrock if (type == POOL_SCRUB_EVERYTHING)
2555ea8dc4b6Seschrock type = POOL_SCRUB_RESILVER;
2556ea8dc4b6Seschrock }
2557fa9e4066Sahrens
2558ea8dc4b6Seschrock if (type == POOL_SCRUB_RESILVER) {
2559fa9e4066Sahrens /*
2560fa9e4066Sahrens  * Determine the resilvering boundaries.
2561fa9e4066Sahrens  *
2562fa9e4066Sahrens  * Note: (mintxg, maxtxg) is an open interval,
2563fa9e4066Sahrens  * i.e. mintxg and maxtxg themselves are not included.
2564fa9e4066Sahrens  *
2565fa9e4066Sahrens  * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1
2566fa9e4066Sahrens  * so we don't claim to resilver a txg that's still changing.
2567fa9e4066Sahrens  */
2568fa9e4066Sahrens ss = avl_first(&rvd->vdev_dtl_map.sm_root);
2569ea8dc4b6Seschrock mintxg = ss->ss_start - 1;
2570fa9e4066Sahrens ss = avl_last(&rvd->vdev_dtl_map.sm_root);
2571ea8dc4b6Seschrock maxtxg = MIN(ss->ss_end, maxtxg);
2572fa9e4066Sahrens }
2573fa9e4066Sahrens
2574ea8dc4b6Seschrock mutex_exit(&rvd->vdev_dtl_lock);
2575ea8dc4b6Seschrock
2576ea8dc4b6Seschrock spa->spa_scrub_stop = 0;
2577ea8dc4b6Seschrock spa->spa_scrub_type = type;
2578ea8dc4b6Seschrock spa->spa_scrub_restart_txg = 0;
2579ea8dc4b6Seschrock
2580ea8dc4b6Seschrock if (type != POOL_SCRUB_NONE) {
2581ea8dc4b6Seschrock spa->spa_scrub_mintxg = mintxg;
2582fa9e4066Sahrens spa->spa_scrub_maxtxg = maxtxg;
2583fa9e4066Sahrens spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL,
25840373e76bSbonwick ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL,
25850373e76bSbonwick ZIO_FLAG_CANFAIL);
2586fa9e4066Sahrens traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg);
2587fa9e4066Sahrens spa->spa_scrub_thread = thread_create(NULL, 0,
2588fa9e4066Sahrens spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri);
2589fa9e4066Sahrens }
2590fa9e4066Sahrens
2591ea8dc4b6Seschrock mutex_exit(&spa->spa_scrub_lock);
2592ea8dc4b6Seschrock
2593fa9e4066Sahrens return (0);
2594fa9e4066Sahrens }
2595fa9e4066Sahrens
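/*
 * Illustrative sketch (hypothetical callers) of spa_scrub(): starting a
 * scrub, and stopping whatever scrub/resilver is running.  Without
 * 'force', spa_scrub() refuses (EBUSY) to interrupt a resilver.
 */
#if 0
static int
example_start_scrub(spa_t *spa)
{
	return (spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_FALSE));
}

static int
example_stop_scrub(spa_t *spa)
{
	return (spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE));
}
#endif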
2645ea8dc4b6Seschrock */ 2646ea8dc4b6Seschrock if (tasks & SPA_ASYNC_REOPEN) 2647ea8dc4b6Seschrock spa_async_reopen(spa); 2648ea8dc4b6Seschrock 2649ea8dc4b6Seschrock /* 2650ea8dc4b6Seschrock * If any devices are done replacing, detach them. 2651ea8dc4b6Seschrock */ 2652ea8dc4b6Seschrock if (tasks & SPA_ASYNC_REPLACE_DONE) 2653fa9e4066Sahrens spa_vdev_replace_done(spa); 2654fa9e4066Sahrens 2655ea8dc4b6Seschrock /* 2656ea8dc4b6Seschrock * Kick off a scrub. 2657ea8dc4b6Seschrock */ 2658ea8dc4b6Seschrock if (tasks & SPA_ASYNC_SCRUB) 2659ea8dc4b6Seschrock VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0); 2660ea8dc4b6Seschrock 2661ea8dc4b6Seschrock /* 2662ea8dc4b6Seschrock * Kick off a resilver. 2663ea8dc4b6Seschrock */ 2664ea8dc4b6Seschrock if (tasks & SPA_ASYNC_RESILVER) 2665ea8dc4b6Seschrock VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 2666ea8dc4b6Seschrock 2667ea8dc4b6Seschrock /* 2668ea8dc4b6Seschrock * Let the world know that we're done. 2669ea8dc4b6Seschrock */ 2670ea8dc4b6Seschrock mutex_enter(&spa->spa_async_lock); 2671ea8dc4b6Seschrock spa->spa_async_thread = NULL; 2672ea8dc4b6Seschrock cv_broadcast(&spa->spa_async_cv); 2673ea8dc4b6Seschrock mutex_exit(&spa->spa_async_lock); 2674ea8dc4b6Seschrock thread_exit(); 2675ea8dc4b6Seschrock } 2676ea8dc4b6Seschrock 2677ea8dc4b6Seschrock void 2678ea8dc4b6Seschrock spa_async_suspend(spa_t *spa) 2679ea8dc4b6Seschrock { 2680ea8dc4b6Seschrock mutex_enter(&spa->spa_async_lock); 2681ea8dc4b6Seschrock spa->spa_async_suspended++; 2682ea8dc4b6Seschrock while (spa->spa_async_thread != NULL) 2683ea8dc4b6Seschrock cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 2684ea8dc4b6Seschrock mutex_exit(&spa->spa_async_lock); 2685ea8dc4b6Seschrock } 2686ea8dc4b6Seschrock 2687ea8dc4b6Seschrock void 2688ea8dc4b6Seschrock spa_async_resume(spa_t *spa) 2689ea8dc4b6Seschrock { 2690ea8dc4b6Seschrock mutex_enter(&spa->spa_async_lock); 2691ea8dc4b6Seschrock ASSERT(spa->spa_async_suspended != 0); 2692ea8dc4b6Seschrock spa->spa_async_suspended--; 2693ea8dc4b6Seschrock mutex_exit(&spa->spa_async_lock); 2694ea8dc4b6Seschrock } 2695ea8dc4b6Seschrock 2696ea8dc4b6Seschrock static void 2697ea8dc4b6Seschrock spa_async_dispatch(spa_t *spa) 2698ea8dc4b6Seschrock { 2699ea8dc4b6Seschrock mutex_enter(&spa->spa_async_lock); 2700ea8dc4b6Seschrock if (spa->spa_async_tasks && !spa->spa_async_suspended && 27010373e76bSbonwick spa->spa_async_thread == NULL && 27020373e76bSbonwick rootdir != NULL && !vn_is_readonly(rootdir)) 2703ea8dc4b6Seschrock spa->spa_async_thread = thread_create(NULL, 0, 2704ea8dc4b6Seschrock spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 2705ea8dc4b6Seschrock mutex_exit(&spa->spa_async_lock); 2706ea8dc4b6Seschrock } 2707ea8dc4b6Seschrock 2708ea8dc4b6Seschrock void 2709ea8dc4b6Seschrock spa_async_request(spa_t *spa, int task) 2710ea8dc4b6Seschrock { 2711ea8dc4b6Seschrock mutex_enter(&spa->spa_async_lock); 2712ea8dc4b6Seschrock spa->spa_async_tasks |= task; 2713ea8dc4b6Seschrock mutex_exit(&spa->spa_async_lock); 2714fa9e4066Sahrens } 2715fa9e4066Sahrens 2716fa9e4066Sahrens /* 2717fa9e4066Sahrens * ========================================================================== 2718fa9e4066Sahrens * SPA syncing routines 2719fa9e4066Sahrens * ========================================================================== 2720fa9e4066Sahrens */ 2721fa9e4066Sahrens 2722fa9e4066Sahrens static void 2723fa9e4066Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg) 2724fa9e4066Sahrens { 2725fa9e4066Sahrens bplist_t *bpl = &spa->spa_sync_bplist; 
2726fa9e4066Sahrens dmu_tx_t *tx; 2727fa9e4066Sahrens blkptr_t blk; 2728fa9e4066Sahrens uint64_t itor = 0; 2729fa9e4066Sahrens zio_t *zio; 2730fa9e4066Sahrens int error; 2731fa9e4066Sahrens uint8_t c = 1; 2732fa9e4066Sahrens 2733fa9e4066Sahrens zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD); 2734fa9e4066Sahrens 2735fa9e4066Sahrens while (bplist_iterate(bpl, &itor, &blk) == 0) 2736fa9e4066Sahrens zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL)); 2737fa9e4066Sahrens 2738fa9e4066Sahrens error = zio_wait(zio); 2739fa9e4066Sahrens ASSERT3U(error, ==, 0); 2740fa9e4066Sahrens 2741fa9e4066Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2742fa9e4066Sahrens bplist_vacate(bpl, tx); 2743fa9e4066Sahrens 2744fa9e4066Sahrens /* 2745fa9e4066Sahrens * Pre-dirty the first block so we sync to convergence faster. 2746fa9e4066Sahrens * (Usually only the first block is needed.) 2747fa9e4066Sahrens */ 2748fa9e4066Sahrens dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); 2749fa9e4066Sahrens dmu_tx_commit(tx); 2750fa9e4066Sahrens } 2751fa9e4066Sahrens 2752fa9e4066Sahrens static void 275399653d4eSeschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 2754fa9e4066Sahrens { 2755fa9e4066Sahrens char *packed = NULL; 2756fa9e4066Sahrens size_t nvsize = 0; 2757fa9e4066Sahrens dmu_buf_t *db; 2758fa9e4066Sahrens 275999653d4eSeschrock VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 2760fa9e4066Sahrens 2761fa9e4066Sahrens packed = kmem_alloc(nvsize, KM_SLEEP); 2762fa9e4066Sahrens 276399653d4eSeschrock VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 2764ea8dc4b6Seschrock KM_SLEEP) == 0); 2765fa9e4066Sahrens 276699653d4eSeschrock dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx); 2767fa9e4066Sahrens 2768fa9e4066Sahrens kmem_free(packed, nvsize); 2769fa9e4066Sahrens 277099653d4eSeschrock VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 2771fa9e4066Sahrens dmu_buf_will_dirty(db, tx); 2772fa9e4066Sahrens *(uint64_t *)db->db_data = nvsize; 2773ea8dc4b6Seschrock dmu_buf_rele(db, FTAG); 2774fa9e4066Sahrens } 2775fa9e4066Sahrens 277699653d4eSeschrock static void 277799653d4eSeschrock spa_sync_spares(spa_t *spa, dmu_tx_t *tx) 277899653d4eSeschrock { 277999653d4eSeschrock nvlist_t *nvroot; 278099653d4eSeschrock nvlist_t **spares; 278199653d4eSeschrock int i; 278299653d4eSeschrock 278399653d4eSeschrock if (!spa->spa_sync_spares) 278499653d4eSeschrock return; 278599653d4eSeschrock 278699653d4eSeschrock /* 278799653d4eSeschrock * Update the MOS nvlist describing the list of available spares. 278899653d4eSeschrock * spa_validate_spares() will have already made sure this nvlist is 278999653d4eSeschrock * valid and the vdevs are labelled appropriately. 
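	 * The spares list is stored as a packed nvlist in a single DMU
	 * object, referenced from the pool directory by DMU_POOL_SPARES.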
279099653d4eSeschrock */ 279199653d4eSeschrock if (spa->spa_spares_object == 0) { 279299653d4eSeschrock spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset, 279399653d4eSeschrock DMU_OT_PACKED_NVLIST, 1 << 14, 279499653d4eSeschrock DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 279599653d4eSeschrock VERIFY(zap_update(spa->spa_meta_objset, 279699653d4eSeschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES, 279799653d4eSeschrock sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0); 279899653d4eSeschrock } 279999653d4eSeschrock 280099653d4eSeschrock VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 280199653d4eSeschrock if (spa->spa_nspares == 0) { 280299653d4eSeschrock VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 280399653d4eSeschrock NULL, 0) == 0); 280499653d4eSeschrock } else { 280599653d4eSeschrock spares = kmem_alloc(spa->spa_nspares * sizeof (void *), 280699653d4eSeschrock KM_SLEEP); 280799653d4eSeschrock for (i = 0; i < spa->spa_nspares; i++) 280899653d4eSeschrock spares[i] = vdev_config_generate(spa, 280999653d4eSeschrock spa->spa_spares[i], B_FALSE, B_TRUE); 281099653d4eSeschrock VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 281199653d4eSeschrock spares, spa->spa_nspares) == 0); 281299653d4eSeschrock for (i = 0; i < spa->spa_nspares; i++) 281399653d4eSeschrock nvlist_free(spares[i]); 281499653d4eSeschrock kmem_free(spares, spa->spa_nspares * sizeof (void *)); 281599653d4eSeschrock } 281699653d4eSeschrock 281799653d4eSeschrock spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx); 281806eeb2adSek nvlist_free(nvroot); 281999653d4eSeschrock 282099653d4eSeschrock spa->spa_sync_spares = B_FALSE; 282199653d4eSeschrock } 282299653d4eSeschrock 282399653d4eSeschrock static void 282499653d4eSeschrock spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 282599653d4eSeschrock { 282699653d4eSeschrock nvlist_t *config; 282799653d4eSeschrock 282899653d4eSeschrock if (list_is_empty(&spa->spa_dirty_list)) 282999653d4eSeschrock return; 283099653d4eSeschrock 283199653d4eSeschrock config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE); 283299653d4eSeschrock 283399653d4eSeschrock if (spa->spa_config_syncing) 283499653d4eSeschrock nvlist_free(spa->spa_config_syncing); 283599653d4eSeschrock spa->spa_config_syncing = config; 283699653d4eSeschrock 283799653d4eSeschrock spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 283899653d4eSeschrock } 283999653d4eSeschrock 2840*b1b8ab34Slling static void 2841*b1b8ab34Slling spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx) 2842*b1b8ab34Slling { 2843*b1b8ab34Slling spa_t *spa = arg1; 2844*b1b8ab34Slling nvlist_t *nvp = arg2; 2845*b1b8ab34Slling nvpair_t *nvpair; 2846*b1b8ab34Slling objset_t *mos = spa->spa_meta_objset; 2847*b1b8ab34Slling uint64_t zapobj; 2848*b1b8ab34Slling 2849*b1b8ab34Slling mutex_enter(&spa->spa_props_lock); 2850*b1b8ab34Slling if (spa->spa_pool_props_object == 0) { 2851*b1b8ab34Slling zapobj = zap_create(mos, DMU_OT_POOL_PROPS, DMU_OT_NONE, 0, tx); 2852*b1b8ab34Slling VERIFY(zapobj > 0); 2853*b1b8ab34Slling 2854*b1b8ab34Slling spa->spa_pool_props_object = zapobj; 2855*b1b8ab34Slling 2856*b1b8ab34Slling VERIFY(zap_update(mos, DMU_POOL_DIRECTORY_OBJECT, 2857*b1b8ab34Slling DMU_POOL_PROPS, 8, 1, 2858*b1b8ab34Slling &spa->spa_pool_props_object, tx) == 0); 2859*b1b8ab34Slling } 2860*b1b8ab34Slling mutex_exit(&spa->spa_props_lock); 2861*b1b8ab34Slling 2862*b1b8ab34Slling nvpair = NULL; 2863*b1b8ab34Slling while ((nvpair = nvlist_next_nvpair(nvp, nvpair))) { 
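		/*
		 * ZFS_PROP_BOOTFS is currently the only pool property
		 * synced here; any other property in nvp is ignored.
		 */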
2864*b1b8ab34Slling switch (zpool_name_to_prop(nvpair_name(nvpair))) { 2865*b1b8ab34Slling case ZFS_PROP_BOOTFS: 2866*b1b8ab34Slling VERIFY(nvlist_lookup_uint64(nvp, 2867*b1b8ab34Slling nvpair_name(nvpair), &spa->spa_bootfs) == 0); 2868*b1b8ab34Slling VERIFY(zap_update(mos, 2869*b1b8ab34Slling spa->spa_pool_props_object, 2870*b1b8ab34Slling zpool_prop_to_name(ZFS_PROP_BOOTFS), 8, 1, 2871*b1b8ab34Slling &spa->spa_bootfs, tx) == 0); 2872*b1b8ab34Slling break; 2873*b1b8ab34Slling } 2874*b1b8ab34Slling } 2875*b1b8ab34Slling } 2876*b1b8ab34Slling 2877fa9e4066Sahrens /* 2878fa9e4066Sahrens * Sync the specified transaction group. New blocks may be dirtied as 2879fa9e4066Sahrens * part of the process, so we iterate until it converges. 2880fa9e4066Sahrens */ 2881fa9e4066Sahrens void 2882fa9e4066Sahrens spa_sync(spa_t *spa, uint64_t txg) 2883fa9e4066Sahrens { 2884fa9e4066Sahrens dsl_pool_t *dp = spa->spa_dsl_pool; 2885fa9e4066Sahrens objset_t *mos = spa->spa_meta_objset; 2886fa9e4066Sahrens bplist_t *bpl = &spa->spa_sync_bplist; 28870373e76bSbonwick vdev_t *rvd = spa->spa_root_vdev; 2888fa9e4066Sahrens vdev_t *vd; 2889fa9e4066Sahrens dmu_tx_t *tx; 2890fa9e4066Sahrens int dirty_vdevs; 2891fa9e4066Sahrens 2892fa9e4066Sahrens /* 2893fa9e4066Sahrens * Lock out configuration changes. 2894fa9e4066Sahrens */ 2895ea8dc4b6Seschrock spa_config_enter(spa, RW_READER, FTAG); 2896fa9e4066Sahrens 2897fa9e4066Sahrens spa->spa_syncing_txg = txg; 2898fa9e4066Sahrens spa->spa_sync_pass = 0; 2899fa9e4066Sahrens 2900ea8dc4b6Seschrock VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); 2901fa9e4066Sahrens 290299653d4eSeschrock tx = dmu_tx_create_assigned(dp, txg); 290399653d4eSeschrock 290499653d4eSeschrock /* 290599653d4eSeschrock * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg, 290699653d4eSeschrock * set spa_deflate if we have no raid-z vdevs. 290799653d4eSeschrock */ 290899653d4eSeschrock if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE && 290999653d4eSeschrock spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) { 291099653d4eSeschrock int i; 291199653d4eSeschrock 291299653d4eSeschrock for (i = 0; i < rvd->vdev_children; i++) { 291399653d4eSeschrock vd = rvd->vdev_child[i]; 291499653d4eSeschrock if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 291599653d4eSeschrock break; 291699653d4eSeschrock } 291799653d4eSeschrock if (i == rvd->vdev_children) { 291899653d4eSeschrock spa->spa_deflate = TRUE; 291999653d4eSeschrock VERIFY(0 == zap_add(spa->spa_meta_objset, 292099653d4eSeschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 292199653d4eSeschrock sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 292299653d4eSeschrock } 292399653d4eSeschrock } 292499653d4eSeschrock 2925fa9e4066Sahrens /* 2926fa9e4066Sahrens * If anything has changed in this txg, push the deferred frees 2927fa9e4066Sahrens * from the previous txg. If not, leave them alone so that we 2928fa9e4066Sahrens * don't generate work on an otherwise idle system. 2929fa9e4066Sahrens */ 2930fa9e4066Sahrens if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 29311615a317Sek !txg_list_empty(&dp->dp_dirty_dirs, txg) || 29321615a317Sek !txg_list_empty(&dp->dp_sync_tasks, txg)) 2933fa9e4066Sahrens spa_sync_deferred_frees(spa, txg); 2934fa9e4066Sahrens 2935fa9e4066Sahrens /* 2936fa9e4066Sahrens * Iterate to convergence. 
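	 * Each pass syncs the config object, the spares list, the error
	 * log, and the DSL pool; we keep looping until a pass finds no
	 * more dirty vdevs.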
2937fa9e4066Sahrens 	 */
2938fa9e4066Sahrens 	do {
2939fa9e4066Sahrens 		spa->spa_sync_pass++;
2940fa9e4066Sahrens 
2941fa9e4066Sahrens 		spa_sync_config_object(spa, tx);
294299653d4eSeschrock 		spa_sync_spares(spa, tx);
2943ea8dc4b6Seschrock 		spa_errlog_sync(spa, txg);
2944fa9e4066Sahrens 		dsl_pool_sync(dp, txg);
2945fa9e4066Sahrens 
2946fa9e4066Sahrens 		dirty_vdevs = 0;
2947fa9e4066Sahrens 		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
2948fa9e4066Sahrens 			vdev_sync(vd, txg);
2949fa9e4066Sahrens 			dirty_vdevs++;
2950fa9e4066Sahrens 		}
2951fa9e4066Sahrens 
2952fa9e4066Sahrens 		bplist_sync(bpl, tx);
2953fa9e4066Sahrens 	} while (dirty_vdevs);
2954fa9e4066Sahrens 
2955fa9e4066Sahrens 	bplist_close(bpl);
2956fa9e4066Sahrens 
2957fa9e4066Sahrens 	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
2958fa9e4066Sahrens 
2959fa9e4066Sahrens 	/*
2960fa9e4066Sahrens 	 * Rewrite the vdev configuration (which includes the uberblock)
2961fa9e4066Sahrens 	 * to commit the transaction group.
29620373e76bSbonwick 	 *
29630373e76bSbonwick 	 * If there are any dirty vdevs, sync the uberblock to all vdevs.
29640373e76bSbonwick 	 * Otherwise, pick a random top-level vdev that's known to be
29650373e76bSbonwick 	 * visible in the config cache (see spa_vdev_add() for details).
29660373e76bSbonwick 	 * If the write fails, try the next vdev until we've tried them all.
29670373e76bSbonwick 	 */
29680373e76bSbonwick 	if (!list_is_empty(&spa->spa_dirty_list)) {
29690373e76bSbonwick 		VERIFY(vdev_config_sync(rvd, txg) == 0);
29700373e76bSbonwick 	} else {
29710373e76bSbonwick 		int children = rvd->vdev_children;
29720373e76bSbonwick 		int c0 = spa_get_random(children);
29730373e76bSbonwick 		int c;
29740373e76bSbonwick 
29750373e76bSbonwick 		for (c = 0; c < children; c++) {
29760373e76bSbonwick 			vd = rvd->vdev_child[(c0 + c) % children];
29770373e76bSbonwick 			if (vd->vdev_ms_array == 0)
29780373e76bSbonwick 				continue;
29790373e76bSbonwick 			if (vdev_config_sync(vd, txg) == 0)
29800373e76bSbonwick 				break;
29810373e76bSbonwick 		}
29820373e76bSbonwick 		if (c == children)
29830373e76bSbonwick 			VERIFY(vdev_config_sync(rvd, txg) == 0);
29840373e76bSbonwick 	}
29850373e76bSbonwick 
298699653d4eSeschrock 	dmu_tx_commit(tx);
298799653d4eSeschrock 
29880373e76bSbonwick 	/*
29890373e76bSbonwick 	 * Clear the dirty config list.
2990fa9e4066Sahrens 	 */
29910373e76bSbonwick 	while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
29920373e76bSbonwick 		vdev_config_clean(vd);
29930373e76bSbonwick 
29940373e76bSbonwick 	/*
29950373e76bSbonwick 	 * Now that the new config has synced transactionally,
29960373e76bSbonwick 	 * let it become visible to the config cache.
29970373e76bSbonwick 	 */
29980373e76bSbonwick 	if (spa->spa_config_syncing != NULL) {
29990373e76bSbonwick 		spa_config_set(spa, spa->spa_config_syncing);
30000373e76bSbonwick 		spa->spa_config_txg = txg;
30010373e76bSbonwick 		spa->spa_config_syncing = NULL;
30020373e76bSbonwick 	}
3003fa9e4066Sahrens 
3004fa9e4066Sahrens 	/*
3005fa9e4066Sahrens 	 * Make a stable copy of the fully synced uberblock.
3006fa9e4066Sahrens 	 * We use this as the root for pool traversals.
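	 * The scrub is suspended while spa_ubsync is updated so that no
	 * traversal can start against a partially copied uberblock.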
3007fa9e4066Sahrens */ 3008fa9e4066Sahrens spa->spa_traverse_wanted = 1; /* tells traverse_more() to stop */ 3009fa9e4066Sahrens 3010fa9e4066Sahrens spa_scrub_suspend(spa); /* stop scrubbing and finish I/Os */ 3011fa9e4066Sahrens 3012fa9e4066Sahrens rw_enter(&spa->spa_traverse_lock, RW_WRITER); 3013fa9e4066Sahrens spa->spa_traverse_wanted = 0; 3014fa9e4066Sahrens spa->spa_ubsync = spa->spa_uberblock; 3015fa9e4066Sahrens rw_exit(&spa->spa_traverse_lock); 3016fa9e4066Sahrens 3017fa9e4066Sahrens spa_scrub_resume(spa); /* resume scrub with new ubsync */ 3018fa9e4066Sahrens 3019fa9e4066Sahrens /* 3020fa9e4066Sahrens * Clean up the ZIL records for the synced txg. 3021fa9e4066Sahrens */ 3022fa9e4066Sahrens dsl_pool_zil_clean(dp); 3023fa9e4066Sahrens 3024fa9e4066Sahrens /* 3025fa9e4066Sahrens * Update usable space statistics. 3026fa9e4066Sahrens */ 3027fa9e4066Sahrens while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 3028fa9e4066Sahrens vdev_sync_done(vd, txg); 3029fa9e4066Sahrens 3030fa9e4066Sahrens /* 3031fa9e4066Sahrens * It had better be the case that we didn't dirty anything 303299653d4eSeschrock * since vdev_config_sync(). 3033fa9e4066Sahrens */ 3034fa9e4066Sahrens ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 3035fa9e4066Sahrens ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 3036fa9e4066Sahrens ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 3037fa9e4066Sahrens ASSERT(bpl->bpl_queue == NULL); 3038fa9e4066Sahrens 3039ea8dc4b6Seschrock spa_config_exit(spa, FTAG); 3040ea8dc4b6Seschrock 3041ea8dc4b6Seschrock /* 3042ea8dc4b6Seschrock * If any async tasks have been requested, kick them off. 3043ea8dc4b6Seschrock */ 3044ea8dc4b6Seschrock spa_async_dispatch(spa); 3045fa9e4066Sahrens } 3046fa9e4066Sahrens 3047fa9e4066Sahrens /* 3048fa9e4066Sahrens * Sync all pools. We don't want to hold the namespace lock across these 3049fa9e4066Sahrens * operations, so we take a reference on the spa_t and drop the lock during the 3050fa9e4066Sahrens * sync. 3051fa9e4066Sahrens */ 3052fa9e4066Sahrens void 3053fa9e4066Sahrens spa_sync_allpools(void) 3054fa9e4066Sahrens { 3055fa9e4066Sahrens spa_t *spa = NULL; 3056fa9e4066Sahrens mutex_enter(&spa_namespace_lock); 3057fa9e4066Sahrens while ((spa = spa_next(spa)) != NULL) { 3058fa9e4066Sahrens if (spa_state(spa) != POOL_STATE_ACTIVE) 3059fa9e4066Sahrens continue; 3060fa9e4066Sahrens spa_open_ref(spa, FTAG); 3061fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 3062fa9e4066Sahrens txg_wait_synced(spa_get_dsl(spa), 0); 3063fa9e4066Sahrens mutex_enter(&spa_namespace_lock); 3064fa9e4066Sahrens spa_close(spa, FTAG); 3065fa9e4066Sahrens } 3066fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 3067fa9e4066Sahrens } 3068fa9e4066Sahrens 3069fa9e4066Sahrens /* 3070fa9e4066Sahrens * ========================================================================== 3071fa9e4066Sahrens * Miscellaneous routines 3072fa9e4066Sahrens * ========================================================================== 3073fa9e4066Sahrens */ 3074fa9e4066Sahrens 3075fa9e4066Sahrens /* 3076fa9e4066Sahrens * Remove all pools in the system. 3077fa9e4066Sahrens */ 3078fa9e4066Sahrens void 3079fa9e4066Sahrens spa_evict_all(void) 3080fa9e4066Sahrens { 3081fa9e4066Sahrens spa_t *spa; 3082fa9e4066Sahrens 3083fa9e4066Sahrens /* 3084fa9e4066Sahrens * Remove all cached state. All pools should be closed now, 3085fa9e4066Sahrens * so every spa in the AVL tree should be unreferenced. 
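	 * Note that spa_next(NULL) restarts from the head of the namespace
	 * AVL tree on each iteration, since spa_remove() below deletes the
	 * current entry.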
3086fa9e4066Sahrens 	 */
3087fa9e4066Sahrens 	mutex_enter(&spa_namespace_lock);
3088fa9e4066Sahrens 	while ((spa = spa_next(NULL)) != NULL) {
3089fa9e4066Sahrens 		/*
3090ea8dc4b6Seschrock 		 * Stop async tasks. The async thread may need to detach
3091ea8dc4b6Seschrock 		 * a device that's been replaced, which requires grabbing
3092ea8dc4b6Seschrock 		 * spa_namespace_lock, so we must drop it here.
3093fa9e4066Sahrens 		 */
3094fa9e4066Sahrens 		spa_open_ref(spa, FTAG);
3095fa9e4066Sahrens 		mutex_exit(&spa_namespace_lock);
3096ea8dc4b6Seschrock 		spa_async_suspend(spa);
3097fa9e4066Sahrens 		VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
3098fa9e4066Sahrens 		mutex_enter(&spa_namespace_lock);
3099fa9e4066Sahrens 		spa_close(spa, FTAG);
3100fa9e4066Sahrens 
3101fa9e4066Sahrens 		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
3102fa9e4066Sahrens 			spa_unload(spa);
3103fa9e4066Sahrens 			spa_deactivate(spa);
3104fa9e4066Sahrens 		}
3105fa9e4066Sahrens 		spa_remove(spa);
3106fa9e4066Sahrens 	}
3107fa9e4066Sahrens 	mutex_exit(&spa_namespace_lock);
3108fa9e4066Sahrens }
3109ea8dc4b6Seschrock 
3110ea8dc4b6Seschrock vdev_t *
3111ea8dc4b6Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid)
3112ea8dc4b6Seschrock {
3113ea8dc4b6Seschrock 	return (vdev_lookup_by_guid(spa->spa_root_vdev, guid));
3114ea8dc4b6Seschrock }
3115eaca9bbdSeschrock 
3116eaca9bbdSeschrock void
3117eaca9bbdSeschrock spa_upgrade(spa_t *spa)
3118eaca9bbdSeschrock {
3119eaca9bbdSeschrock 	spa_config_enter(spa, RW_WRITER, FTAG);
3120eaca9bbdSeschrock 
3121eaca9bbdSeschrock 	/*
3122eaca9bbdSeschrock 	 * This should only be called for a non-faulted pool, and since a
3123eaca9bbdSeschrock 	 * pool at a future (unsupported) version would have been unopenable
3124eaca9bbdSeschrock 	 * in the first place, ub_version can never exceed ZFS_VERSION here.
3125eaca9bbdSeschrock 	 */
3126eaca9bbdSeschrock 	ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION);
3127eaca9bbdSeschrock 
3128eaca9bbdSeschrock 	spa->spa_uberblock.ub_version = ZFS_VERSION;
3129eaca9bbdSeschrock 	vdev_config_dirty(spa->spa_root_vdev);
3130eaca9bbdSeschrock 
3131eaca9bbdSeschrock 	spa_config_exit(spa, FTAG);
313299653d4eSeschrock 
313399653d4eSeschrock 	txg_wait_synced(spa_get_dsl(spa), 0);
313499653d4eSeschrock }
313599653d4eSeschrock 
313699653d4eSeschrock boolean_t
313799653d4eSeschrock spa_has_spare(spa_t *spa, uint64_t guid)
313899653d4eSeschrock {
313999653d4eSeschrock 	int i;
314039c23413Seschrock 	uint64_t spareguid;
314199653d4eSeschrock 
314299653d4eSeschrock 	for (i = 0; i < spa->spa_nspares; i++)
314399653d4eSeschrock 		if (spa->spa_spares[i]->vdev_guid == guid)
314499653d4eSeschrock 			return (B_TRUE);
314599653d4eSeschrock 
314639c23413Seschrock 	for (i = 0; i < spa->spa_pending_nspares; i++) {
314739c23413Seschrock 		if (nvlist_lookup_uint64(spa->spa_pending_spares[i],
314839c23413Seschrock 		    ZPOOL_CONFIG_GUID, &spareguid) == 0 &&
314939c23413Seschrock 		    spareguid == guid)
315039c23413Seschrock 			return (B_TRUE);
315139c23413Seschrock 	}
315239c23413Seschrock 
315339c23413Seschrock 	return (B_FALSE);
3154eaca9bbdSeschrock }
3155*b1b8ab34Slling 
3156*b1b8ab34Slling int
3157*b1b8ab34Slling spa_set_props(spa_t *spa, nvlist_t *nvp)
3158*b1b8ab34Slling {
3159*b1b8ab34Slling 	return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
3160*b1b8ab34Slling 	    spa, nvp, 3));
3161*b1b8ab34Slling }
3162*b1b8ab34Slling 
3163*b1b8ab34Slling int
3164*b1b8ab34Slling spa_get_props(spa_t *spa, nvlist_t **nvp)
3165*b1b8ab34Slling {
3166*b1b8ab34Slling 	zap_cursor_t zc;
3167*b1b8ab34Slling 	zap_attribute_t za;
3168*b1b8ab34Slling 	objset_t *mos = spa->spa_meta_objset;
3169*b1b8ab34Slling 	zfs_source_t src;
3170*b1b8ab34Slling zfs_prop_t prop; 3171*b1b8ab34Slling nvlist_t *propval; 3172*b1b8ab34Slling uint64_t value; 3173*b1b8ab34Slling int err; 3174*b1b8ab34Slling 3175*b1b8ab34Slling VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3176*b1b8ab34Slling 3177*b1b8ab34Slling mutex_enter(&spa->spa_props_lock); 3178*b1b8ab34Slling /* If no props object, then just return empty nvlist */ 3179*b1b8ab34Slling if (spa->spa_pool_props_object == 0) { 3180*b1b8ab34Slling mutex_exit(&spa->spa_props_lock); 3181*b1b8ab34Slling return (0); 3182*b1b8ab34Slling } 3183*b1b8ab34Slling 3184*b1b8ab34Slling for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); 3185*b1b8ab34Slling (err = zap_cursor_retrieve(&zc, &za)) == 0; 3186*b1b8ab34Slling zap_cursor_advance(&zc)) { 3187*b1b8ab34Slling 3188*b1b8ab34Slling if ((prop = zpool_name_to_prop(za.za_name)) == ZFS_PROP_INVAL) 3189*b1b8ab34Slling continue; 3190*b1b8ab34Slling 3191*b1b8ab34Slling VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3192*b1b8ab34Slling switch (za.za_integer_length) { 3193*b1b8ab34Slling case 8: 3194*b1b8ab34Slling if (zfs_prop_default_numeric(prop) == 3195*b1b8ab34Slling za.za_first_integer) 3196*b1b8ab34Slling src = ZFS_SRC_DEFAULT; 3197*b1b8ab34Slling else 3198*b1b8ab34Slling src = ZFS_SRC_LOCAL; 3199*b1b8ab34Slling value = za.za_first_integer; 3200*b1b8ab34Slling 3201*b1b8ab34Slling if (prop == ZFS_PROP_BOOTFS) { 3202*b1b8ab34Slling dsl_pool_t *dp; 3203*b1b8ab34Slling dsl_dataset_t *ds = NULL; 3204*b1b8ab34Slling char strval[MAXPATHLEN]; 3205*b1b8ab34Slling 3206*b1b8ab34Slling dp = spa_get_dsl(spa); 3207*b1b8ab34Slling rw_enter(&dp->dp_config_rwlock, RW_READER); 3208*b1b8ab34Slling if ((err = dsl_dataset_open_obj(dp, 3209*b1b8ab34Slling za.za_first_integer, NULL, DS_MODE_NONE, 3210*b1b8ab34Slling FTAG, &ds)) != 0) { 3211*b1b8ab34Slling rw_exit(&dp->dp_config_rwlock); 3212*b1b8ab34Slling break; 3213*b1b8ab34Slling } 3214*b1b8ab34Slling dsl_dataset_name(ds, strval); 3215*b1b8ab34Slling dsl_dataset_close(ds, DS_MODE_NONE, FTAG); 3216*b1b8ab34Slling rw_exit(&dp->dp_config_rwlock); 3217*b1b8ab34Slling 3218*b1b8ab34Slling VERIFY(nvlist_add_uint64(propval, 3219*b1b8ab34Slling ZFS_PROP_SOURCE, src) == 0); 3220*b1b8ab34Slling VERIFY(nvlist_add_string(propval, 3221*b1b8ab34Slling ZFS_PROP_VALUE, strval) == 0); 3222*b1b8ab34Slling } else { 3223*b1b8ab34Slling VERIFY(nvlist_add_uint64(propval, 3224*b1b8ab34Slling ZFS_PROP_SOURCE, src) == 0); 3225*b1b8ab34Slling VERIFY(nvlist_add_uint64(propval, 3226*b1b8ab34Slling ZFS_PROP_VALUE, value) == 0); 3227*b1b8ab34Slling } 3228*b1b8ab34Slling VERIFY(nvlist_add_nvlist(*nvp, za.za_name, 3229*b1b8ab34Slling propval) == 0); 3230*b1b8ab34Slling break; 3231*b1b8ab34Slling } 3232*b1b8ab34Slling nvlist_free(propval); 3233*b1b8ab34Slling } 3234*b1b8ab34Slling zap_cursor_fini(&zc); 3235*b1b8ab34Slling mutex_exit(&spa->spa_props_lock); 3236*b1b8ab34Slling if (err && err != ENOENT) { 3237*b1b8ab34Slling nvlist_free(*nvp); 3238*b1b8ab34Slling return (err); 3239*b1b8ab34Slling } 3240*b1b8ab34Slling 3241*b1b8ab34Slling return (0); 3242*b1b8ab34Slling } 3243*b1b8ab34Slling 3244*b1b8ab34Slling /* 3245*b1b8ab34Slling * If the bootfs property value is dsobj, clear it. 
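 * This keeps the pool from retaining a stale bootfs reference when,
 * e.g., the dataset it names is destroyed.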
3246*b1b8ab34Slling */ 3247*b1b8ab34Slling void 3248*b1b8ab34Slling spa_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) 3249*b1b8ab34Slling { 3250*b1b8ab34Slling if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { 3251*b1b8ab34Slling VERIFY(zap_remove(spa->spa_meta_objset, 3252*b1b8ab34Slling spa->spa_pool_props_object, 3253*b1b8ab34Slling zpool_prop_to_name(ZFS_PROP_BOOTFS), tx) == 0); 3254*b1b8ab34Slling spa->spa_bootfs = 0; 3255*b1b8ab34Slling } 3256*b1b8ab34Slling } 3257
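
/*
 * Illustrative sketch of the property interfaces above; the caller
 * context, `error' variable, and `dsobj' value are assumptions, not
 * code taken from this file.  A hypothetical consumer could set and
 * read back the bootfs property roughly as follows:
 *
 *	nvlist_t *nvp, *props;
 *	int error;
 *
 *	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(nvp,
 *	    zpool_prop_to_name(ZFS_PROP_BOOTFS), dsobj) == 0);
 *	error = spa_set_props(spa, nvp);
 *	nvlist_free(nvp);
 *
 *	if (error == 0 && spa_get_props(spa, &props) == 0)
 *		nvlist_free(props);
 */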