/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>

int zio_taskq_threads = 8;

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
        spa_error_entry_t *sa = (spa_error_entry_t *)a;
        spa_error_entry_t *sb = (spa_error_entry_t *)b;
        int ret;

        ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
            sizeof (zbookmark_t));

        if (ret < 0)
                return (-1);
        else if (ret > 0)
                return (1);
        else
                return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
        ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

        bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
        bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa)
{
        int t;

        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

        spa->spa_state = POOL_STATE_ACTIVE;

        spa->spa_normal_class = metaslab_class_create();
        spa->spa_log_class = metaslab_class_create();

        for (t = 0; t < ZIO_TYPES; t++) {
                spa->spa_zio_issue_taskq[t] = taskq_create("spa_zio_issue",
                    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
                    TASKQ_PREPOPULATE);
                spa->spa_zio_intr_taskq[t] = taskq_create("spa_zio_intr",
                    zio_taskq_threads, maxclsyspri, 50, INT_MAX,
                    TASKQ_PREPOPULATE);
        }

        rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

        mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_config_lock.scl_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

        list_create(&spa->spa_dirty_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_dirty_node));

        txg_list_create(&spa->spa_vdev_txg_list,
            offsetof(struct vdev, vdev_txg_node));

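        /*
         * Create the in-core error lists ('last' and 'scrub'), keyed by
         * bookmark (see spa_error_entry_compare() above); spa_get_errlists()
         * retrieves and re-initializes them.
         */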
        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
        int t;

        ASSERT(spa->spa_sync_on == B_FALSE);
        ASSERT(spa->spa_dsl_pool == NULL);
        ASSERT(spa->spa_root_vdev == NULL);

        ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

        txg_list_destroy(&spa->spa_vdev_txg_list);

        list_destroy(&spa->spa_dirty_list);

        rw_destroy(&spa->spa_traverse_lock);

        for (t = 0; t < ZIO_TYPES; t++) {
                taskq_destroy(spa->spa_zio_issue_taskq[t]);
                taskq_destroy(spa->spa_zio_intr_taskq[t]);
                spa->spa_zio_issue_taskq[t] = NULL;
                spa->spa_zio_intr_taskq[t] = NULL;
        }

        metaslab_class_destroy(spa->spa_normal_class);
        spa->spa_normal_class = NULL;

        metaslab_class_destroy(spa->spa_log_class);
        spa->spa_log_class = NULL;

        /*
         * If this was part of an import or the open otherwise failed, we may
         * still have errors left in the queues.  Empty them just in case.
         */
        spa_errlog_drain(spa);

        avl_destroy(&spa->spa_errlist_scrub);
        avl_destroy(&spa->spa_errlist_last);

        spa->spa_state = POOL_STATE_UNINITIALIZED;
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
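 * The parse recurses over each vdev's ZPOOL_CONFIG_CHILDREN array, so interior
 * vdevs (mirrors, raidz, etc.) are constructed along with their leaves.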
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
        nvlist_t **child;
        uint_t c, children;
        int error;

        if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
                return (error);

        if ((*vdp)->vdev_ops->vdev_op_leaf)
                return (0);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0) {
                vdev_free(*vdp);
                *vdp = NULL;
                return (EINVAL);
        }

        for (c = 0; c < children; c++) {
                vdev_t *vd;
                if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
                    atype)) != 0) {
                        vdev_free(*vdp);
                        *vdp = NULL;
                        return (error);
                }
        }

        ASSERT(*vdp != NULL);

        return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
        int i;

        /*
         * Stop async tasks.
         */
        spa_async_suspend(spa);

        /*
         * Stop syncing.
         */
        if (spa->spa_sync_on) {
                txg_sync_stop(spa->spa_dsl_pool);
                spa->spa_sync_on = B_FALSE;
        }

        /*
         * Wait for any outstanding prefetch I/O to complete.
         */
        spa_config_enter(spa, RW_WRITER, FTAG);
        spa_config_exit(spa, FTAG);

        /*
         * Close the dsl pool.
         */
        if (spa->spa_dsl_pool) {
                dsl_pool_close(spa->spa_dsl_pool);
                spa->spa_dsl_pool = NULL;
        }

        /*
         * Close all vdevs.
         */
        if (spa->spa_root_vdev)
                vdev_free(spa->spa_root_vdev);
        ASSERT(spa->spa_root_vdev == NULL);

        for (i = 0; i < spa->spa_nspares; i++)
                vdev_free(spa->spa_spares[i]);
        if (spa->spa_spares) {
                kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));
                spa->spa_spares = NULL;
        }
        if (spa->spa_sparelist) {
                nvlist_free(spa->spa_sparelist);
                spa->spa_sparelist = NULL;
        }

        spa->spa_async_suspended = 0;
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_sparelist'.  We parse this into vdevs, try to open them, and then
 * re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
        nvlist_t **spares;
        uint_t nspares;
        int i;
        vdev_t *vd, *tvd;

        /*
         * First, close and free any existing spare vdevs.
         */
        for (i = 0; i < spa->spa_nspares; i++) {
                vd = spa->spa_spares[i];

                /* Undo the call to spa_activate() below */
                if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL &&
                    tvd->vdev_isspare)
                        spa_spare_remove(tvd);
                vdev_close(vd);
                vdev_free(vd);
        }

        if (spa->spa_spares)
                kmem_free(spa->spa_spares, spa->spa_nspares * sizeof (void *));

        if (spa->spa_sparelist == NULL)
                nspares = 0;
        else
                VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
                    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

        spa->spa_nspares = (int)nspares;
        spa->spa_spares = NULL;

        if (nspares == 0)
                return;

        /*
         * Construct the array of vdevs, opening them to get status in the
         * process.  For each spare, there are potentially two different vdev_t
         * structures associated with it: one in the list of spares (used only
         * for basic validation purposes) and one in the active vdev
         * configuration (if it's spared in).  During this phase we open and
         * validate each vdev on the spare list.  If the vdev also exists in the
         * active configuration, then we also mark this vdev as an active spare.
         */
        spa->spa_spares = kmem_alloc(nspares * sizeof (void *), KM_SLEEP);
        for (i = 0; i < spa->spa_nspares; i++) {
                VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
                    VDEV_ALLOC_SPARE) == 0);
                ASSERT(vd != NULL);

                spa->spa_spares[i] = vd;

                if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid)) != NULL) {
                        if (!tvd->vdev_isspare)
                                spa_spare_add(tvd);

                        /*
                         * We only mark the spare active if we were successfully
                         * able to load the vdev.  Otherwise, importing a pool
                         * with a bad active spare would result in strange
                         * behavior, because multiple pools would think the
                         * spare is actively in use.
                         *
                         * There is a vulnerability here to an equally bizarre
                         * circumstance, where a dead active spare is later
                         * brought back to life (onlined or otherwise).  Given
                         * the rarity of this scenario, and the extra complexity
                         * it adds, we ignore the possibility.
                         */
                        if (!vdev_is_dead(tvd))
                                spa_spare_activate(tvd);
                }

                if (vdev_open(vd) != 0)
                        continue;

                vd->vdev_top = vd;
                (void) vdev_validate_spare(vd);
        }

        /*
         * Recompute the stashed list of spares, with status information
         * this time.
         */
        VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
            DATA_TYPE_NVLIST_ARRAY) == 0);

        spares = kmem_alloc(spa->spa_nspares * sizeof (void *), KM_SLEEP);
        for (i = 0; i < spa->spa_nspares; i++)
                spares[i] = vdev_config_generate(spa, spa->spa_spares[i],
                    B_TRUE, B_TRUE);
        VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES,
            spares, spa->spa_nspares) == 0);
        for (i = 0; i < spa->spa_nspares; i++)
                nvlist_free(spares[i]);
        kmem_free(spares, spa->spa_nspares * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
        dmu_buf_t *db;
        char *packed = NULL;
        size_t nvsize = 0;
        int error;
        *value = NULL;

        VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
        nvsize = *(uint64_t *)db->db_data;
        dmu_buf_rele(db, FTAG);

        packed = kmem_alloc(nvsize, KM_SLEEP);
        error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
        if (error == 0)
                error = nvlist_unpack(packed, nvsize, value, 0);
        kmem_free(packed, nvsize);

        return (error);
}

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
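 * The check is applied recursively to all children, so a single call on the
 * root vdev covers the entire tree.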
 */
static void
spa_check_removed(vdev_t *vd)
{
        int c;

        for (c = 0; c < vd->vdev_children; c++)
                spa_check_removed(vd->vdev_child[c]);

        if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
                zfs_post_autoreplace(vd->vdev_spa, vd);
                spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
        }
}

/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
        int error = 0;
        nvlist_t *nvroot = NULL;
        vdev_t *rvd;
        uberblock_t *ub = &spa->spa_uberblock;
        uint64_t config_cache_txg = spa->spa_config_txg;
        uint64_t pool_guid;
        uint64_t version;
        zio_t *zio;
        uint64_t autoreplace = 0;

        spa->spa_load_state = state;

        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
                error = EINVAL;
                goto out;
        }

        /*
         * Versioning wasn't explicitly added to the label until later, so if
         * it's not present treat it as the initial version.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
                version = ZFS_VERSION_INITIAL;

        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &spa->spa_config_txg);

        if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
            spa_guid_exists(pool_guid, 0)) {
                error = EEXIST;
                goto out;
        }

        spa->spa_load_guid = pool_guid;

        /*
         * Parse the configuration into a vdev tree.  We explicitly set the
         * value that will be returned by spa_version() since parsing the
         * configuration requires knowing the version number.
         */
        spa_config_enter(spa, RW_WRITER, FTAG);
        spa->spa_ubsync.ub_version = version;
        error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
        spa_config_exit(spa, FTAG);

        if (error != 0)
                goto out;

        ASSERT(spa->spa_root_vdev == rvd);
        ASSERT(spa_guid(spa) == pool_guid);

        /*
         * Try to open all vdevs, loading each label in the process.
         */
        error = vdev_open(rvd);
        if (error != 0)
                goto out;

        /*
         * Validate the labels for all leaf vdevs.  We need to grab the config
         * lock because all label I/O is done with the ZIO_FLAG_CONFIG_HELD
         * flag.
         */
        spa_config_enter(spa, RW_READER, FTAG);
        error = vdev_validate(rvd);
        spa_config_exit(spa, FTAG);

        if (error != 0)
                goto out;

        if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
                error = ENXIO;
                goto out;
        }

        /*
         * Find the best uberblock.
         */
        bzero(ub, sizeof (uberblock_t));

        zio = zio_root(spa, NULL, NULL,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
        vdev_uberblock_load(zio, rvd, ub);
        error = zio_wait(zio);

        /*
         * If we weren't able to find a single valid uberblock, return failure.
         */
        if (ub->ub_txg == 0) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = ENXIO;
                goto out;
        }

        /*
         * If the pool is newer than the code, we can't open it.
         */
        if (ub->ub_version > ZFS_VERSION) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_VERSION_NEWER);
                error = ENOTSUP;
                goto out;
        }

        /*
         * If the vdev guid sum doesn't match the uberblock, we have an
         * incomplete configuration.
         */
        if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_BAD_GUID_SUM);
                error = ENXIO;
                goto out;
        }

        /*
         * Initialize internal SPA structures.
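         * spa_first_txg becomes one past the last synced txg recorded in the
         * uberblock chosen above, and the DSL pool (and with it the MOS) is
         * opened.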
         */
        spa->spa_state = POOL_STATE_ACTIVE;
        spa->spa_ubsync = spa->spa_uberblock;
        spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
        error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
        if (error) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                goto out;
        }
        spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

        if (zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
            sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        if (!mosconfig) {
                nvlist_t *newconfig;
                uint64_t hostid;

                if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
                        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        error = EIO;
                        goto out;
                }

                if (nvlist_lookup_uint64(newconfig, ZPOOL_CONFIG_HOSTID,
                    &hostid) == 0) {
                        char *hostname;
                        unsigned long myhostid = 0;

                        VERIFY(nvlist_lookup_string(newconfig,
                            ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

                        (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
                        if (hostid != 0 && myhostid != 0 &&
                            (unsigned long)hostid != myhostid) {
                                cmn_err(CE_WARN, "pool '%s' could not be "
                                    "loaded as it was last accessed by "
                                    "another system (host: %s hostid: 0x%lx). "
                                    "See: http://www.sun.com/msg/ZFS-8000-EY",
                                    spa->spa_name, hostname,
                                    (unsigned long)hostid);
                                error = EBADF;
                                goto out;
                        }
                }

                spa_config_set(spa, newconfig);
                spa_unload(spa);
                spa_deactivate(spa);
                spa_activate(spa);

                return (spa_load(spa, newconfig, state, B_TRUE));
        }

        if (zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
            sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        /*
         * Load the bit that tells us to use the new accounting function
         * (raid-z deflation).  If we have an older pool, this will not
         * be present.
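         * (The ENOENT from the zap_lookup() below is therefore tolerated.)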
         */
        error = zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
            sizeof (uint64_t), 1, &spa->spa_deflate);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        /*
         * Load the persistent error log.  If we have an older pool, this will
         * not be present.
         */
        error = zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
            sizeof (uint64_t), 1, &spa->spa_errlog_last);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        error = zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
            sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        /*
         * Load the history object.  If we have an older pool, this
         * will not be present.
         */
        error = zap_lookup(spa->spa_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
            sizeof (uint64_t), 1, &spa->spa_history);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        /*
         * Load any hot spares for this pool.
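         * Older pools have no DMU_POOL_SPARES entry, so ENOENT is accepted
         * here as well.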
         */
        error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares_object);
        if (error != 0 && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }
        if (error == 0) {
                ASSERT(spa_version(spa) >= ZFS_VERSION_SPARES);
                if (load_nvlist(spa, spa->spa_spares_object,
                    &spa->spa_sparelist) != 0) {
                        vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        error = EIO;
                        goto out;
                }

                spa_config_enter(spa, RW_WRITER, FTAG);
                spa_load_spares(spa);
                spa_config_exit(spa, FTAG);
        }

        error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

        if (error && error != ENOENT) {
                vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                error = EIO;
                goto out;
        }

        if (error == 0) {
                (void) zap_lookup(spa->spa_meta_objset,
                    spa->spa_pool_props_object,
                    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
                    sizeof (uint64_t), 1, &spa->spa_bootfs);
                (void) zap_lookup(spa->spa_meta_objset,
                    spa->spa_pool_props_object,
                    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
                    sizeof (uint64_t), 1, &autoreplace);
        }

        /*
         * If the 'autoreplace' property is set, then post a resource notifying
         * the ZFS DE that it should not issue any faults for unopenable
         * devices.  We also iterate over the vdevs, and post a sysevent for any
         * unopenable vdevs so that the normal autoreplace handler can take
         * over.
         */
        if (autoreplace)
                spa_check_removed(spa->spa_root_vdev);

        /*
         * Load the vdev state for all toplevel vdevs.
         */
        vdev_load(rvd);

        /*
         * Propagate the leaf DTLs we just loaded all the way up the tree.
         */
        spa_config_enter(spa, RW_WRITER, FTAG);
        vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
        spa_config_exit(spa, FTAG);

        /*
         * Check the state of the root vdev.  If it can't be opened, it
         * indicates one or more toplevel vdevs are faulted.
         */
        if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
                error = ENXIO;
                goto out;
        }

        if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
                dmu_tx_t *tx;
                int need_update = B_FALSE;
                int c;

                /*
                 * Claim log blocks that haven't been committed yet.
                 * This must all happen in a single txg.
                 */
                tx = dmu_tx_create_assigned(spa_get_dsl(spa),
                    spa_first_txg(spa));
                (void) dmu_objset_find(spa->spa_name,
                    zil_claim, tx, DS_FIND_CHILDREN);
                dmu_tx_commit(tx);

                spa->spa_sync_on = B_TRUE;
                txg_sync_start(spa->spa_dsl_pool);

                /*
                 * Wait for all claims to sync.
                 */
                txg_wait_synced(spa->spa_dsl_pool, 0);

                /*
                 * If the config cache is stale, or we have uninitialized
                 * metaslabs (see spa_vdev_add()), then update the config.
                 */
                if (config_cache_txg != spa->spa_config_txg ||
                    state == SPA_LOAD_IMPORT)
                        need_update = B_TRUE;

                for (c = 0; c < rvd->vdev_children; c++)
                        if (rvd->vdev_child[c]->vdev_ms_array == 0)
                                need_update = B_TRUE;

                /*
                 * Update the config cache asynchronously in case we're the
                 * root pool, in which case the config cache isn't writable yet.
                 */
                if (need_update)
                        spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
        }

        error = 0;
out:
        if (error && error != EBADF)
                zfs_ereport_post(FM_EREPORT_ZFS_POOL, spa, NULL, NULL, 0, 0);
        spa->spa_load_state = SPA_LOAD_NONE;
        spa->spa_ena = 0;

        return (error);
}

/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is sent
 * down from userland, instead of grabbed from the configuration cache.  For the
 * case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics at
 * the same time we open the pool, without having to keep around the spa_t in
 * some ambiguous state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
        spa_t *spa;
        int error;
        int loaded = B_FALSE;
        int locked = B_FALSE;

        *spapp = NULL;

        /*
         * As disgusting as this is, we need to support recursive calls to this
         * function because dsl_dir_open() is called during spa_load(), and ends
         * up calling spa_open() again.  The real fix is to figure out how to
         * avoid dsl_dir_open() calling this in the first place.
         */
        if (mutex_owner(&spa_namespace_lock) != curthread) {
                mutex_enter(&spa_namespace_lock);
                locked = B_TRUE;
        }

        if ((spa = spa_lookup(pool)) == NULL) {
                if (locked)
                        mutex_exit(&spa_namespace_lock);
                return (ENOENT);
        }
        if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

                spa_activate(spa);

                error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

                if (error == EBADF) {
                        /*
                         * If vdev_validate() returns failure (indicated by
                         * EBADF), it means that one of the vdevs reports that
                         * the pool has been exported or destroyed.  If this is
                         * the case, the config cache is out of sync and we
                         * should remove the pool from the namespace.
                         */
                        zfs_post_ok(spa, NULL);
                        spa_unload(spa);
                        spa_deactivate(spa);
                        spa_remove(spa);
                        spa_config_sync();
                        if (locked)
                                mutex_exit(&spa_namespace_lock);
                        return (ENOENT);
                }

                if (error) {
                        /*
                         * We can't open the pool, but we still have useful
                         * information: the state of each vdev after the
                         * attempted vdev_open().  Return this to the user.
                         */
                        if (config != NULL && spa->spa_root_vdev != NULL) {
                                spa_config_enter(spa, RW_READER, FTAG);
                                *config = spa_config_generate(spa, NULL, -1ULL,
                                    B_TRUE);
                                spa_config_exit(spa, FTAG);
                        }
                        spa_unload(spa);
                        spa_deactivate(spa);
                        spa->spa_last_open_failed = B_TRUE;
                        if (locked)
                                mutex_exit(&spa_namespace_lock);
                        *spapp = NULL;
                        return (error);
                } else {
                        zfs_post_ok(spa, NULL);
                        spa->spa_last_open_failed = B_FALSE;
                }

                loaded = B_TRUE;
        }

        spa_open_ref(spa, tag);

        /*
         * If we just loaded the pool, resilver anything that's out of date.
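         * (Only if the pool is open for writing; read-only opens skip this.)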
         */
        if (loaded && (spa_mode & FWRITE))
                VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

        if (locked)
                mutex_exit(&spa_namespace_lock);

        *spapp = spa;

        if (config != NULL) {
                spa_config_enter(spa, RW_READER, FTAG);
                *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
                spa_config_exit(spa, FTAG);
        }

        return (0);
}

int
spa_open(const char *name, spa_t **spapp, void *tag)
{
        return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
        spa_t *spa;

        mutex_enter(&spa_namespace_lock);
        if ((spa = spa_lookup(name)) == NULL) {
                mutex_exit(&spa_namespace_lock);
                return (NULL);
        }
        spa->spa_inject_ref++;
        mutex_exit(&spa_namespace_lock);

        return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
        mutex_enter(&spa_namespace_lock);
        spa->spa_inject_ref--;
        mutex_exit(&spa_namespace_lock);
}

static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
        nvlist_t **spares;
        uint_t i, nspares;
        nvlist_t *nvroot;
        uint64_t guid;
        vdev_stat_t *vs;
        uint_t vsc;
        uint64_t pool;

        if (spa->spa_nspares == 0)
                return;

        VERIFY(nvlist_lookup_nvlist(config,
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
        VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
        if (nspares != 0) {
                VERIFY(nvlist_add_nvlist_array(nvroot,
                    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
                VERIFY(nvlist_lookup_nvlist_array(nvroot,
                    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

                /*
                 * Go through and find any spares which have since been
                 * repurposed as an active spare.  If this is the case, update
                 * their status appropriately.
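                 * A spare that is currently in use by another pool is reported
                 * below as CANT_OPEN with the aux state VDEV_AUX_SPARED.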
                 */
                for (i = 0; i < nspares; i++) {
                        VERIFY(nvlist_lookup_uint64(spares[i],
                            ZPOOL_CONFIG_GUID, &guid) == 0);
                        if (spa_spare_exists(guid, &pool) && pool != 0ULL) {
                                VERIFY(nvlist_lookup_uint64_array(
                                    spares[i], ZPOOL_CONFIG_STATS,
                                    (uint64_t **)&vs, &vsc) == 0);
                                vs->vs_state = VDEV_STATE_CANT_OPEN;
                                vs->vs_aux = VDEV_AUX_SPARED;
                        }
                }
        }
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
        int error;
        spa_t *spa;

        *config = NULL;
        error = spa_open_common(name, &spa, FTAG, config);

        if (spa && *config != NULL) {
                VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
                    spa_get_errlog_size(spa)) == 0);

                spa_add_spares(spa, *config);
        }

        /*
         * We want to get the alternate root even for faulted pools, so we cheat
         * and call spa_lookup() directly.
         */
        if (altroot) {
                if (spa == NULL) {
                        mutex_enter(&spa_namespace_lock);
                        spa = spa_lookup(name);
                        if (spa)
                                spa_altroot(spa, altroot, buflen);
                        else
                                altroot[0] = '\0';
                        spa = NULL;
                        mutex_exit(&spa_namespace_lock);
                } else {
                        spa_altroot(spa, altroot, buflen);
                }
        }

        if (spa != NULL)
                spa_close(spa, FTAG);

        return (error);
}

/*
 * Validate that the 'spares' array is well formed.  We must have an array of
 * nvlists, each of which describes a valid leaf vdev.  If this is an import
 * (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be specified,
 * as long as they are well-formed.
 */
static int
spa_validate_spares(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
        nvlist_t **spares;
        uint_t i, nspares;
        vdev_t *vd;
        int error;

        /*
         * It's acceptable to have no spares specified.
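         * An explicit but empty ZPOOL_CONFIG_SPARES array, however, is
         * rejected (see the nspares == 0 check below).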
107499653d4eSeschrock */ 107599653d4eSeschrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 107699653d4eSeschrock &spares, &nspares) != 0) 107799653d4eSeschrock return (0); 107899653d4eSeschrock 107999653d4eSeschrock if (nspares == 0) 108099653d4eSeschrock return (EINVAL); 108199653d4eSeschrock 108299653d4eSeschrock /* 108399653d4eSeschrock * Make sure the pool is formatted with a version that supports hot 108499653d4eSeschrock * spares. 108599653d4eSeschrock */ 108699653d4eSeschrock if (spa_version(spa) < ZFS_VERSION_SPARES) 108799653d4eSeschrock return (ENOTSUP); 108899653d4eSeschrock 108939c23413Seschrock /* 109039c23413Seschrock * Set the pending spare list so we correctly handle device in-use 109139c23413Seschrock * checking. 109239c23413Seschrock */ 109339c23413Seschrock spa->spa_pending_spares = spares; 109439c23413Seschrock spa->spa_pending_nspares = nspares; 109539c23413Seschrock 109699653d4eSeschrock for (i = 0; i < nspares; i++) { 109799653d4eSeschrock if ((error = spa_config_parse(spa, &vd, spares[i], NULL, 0, 109899653d4eSeschrock mode)) != 0) 109939c23413Seschrock goto out; 110099653d4eSeschrock 110199653d4eSeschrock if (!vd->vdev_ops->vdev_op_leaf) { 110299653d4eSeschrock vdev_free(vd); 110339c23413Seschrock error = EINVAL; 110439c23413Seschrock goto out; 110599653d4eSeschrock } 110699653d4eSeschrock 110799653d4eSeschrock vd->vdev_top = vd; 110899653d4eSeschrock 110939c23413Seschrock if ((error = vdev_open(vd)) == 0 && 111039c23413Seschrock (error = vdev_label_init(vd, crtxg, 111139c23413Seschrock VDEV_LABEL_SPARE)) == 0) { 111239c23413Seschrock VERIFY(nvlist_add_uint64(spares[i], ZPOOL_CONFIG_GUID, 111339c23413Seschrock vd->vdev_guid) == 0); 111439c23413Seschrock } 111599653d4eSeschrock 111699653d4eSeschrock vdev_free(vd); 111739c23413Seschrock 111839c23413Seschrock if (error && mode != VDEV_ALLOC_SPARE) 111939c23413Seschrock goto out; 112039c23413Seschrock else 112139c23413Seschrock error = 0; 112299653d4eSeschrock } 112399653d4eSeschrock 112439c23413Seschrock out: 112539c23413Seschrock spa->spa_pending_spares = NULL; 112639c23413Seschrock spa->spa_pending_nspares = 0; 112739c23413Seschrock return (error); 112899653d4eSeschrock } 112999653d4eSeschrock 1130fa9e4066Sahrens /* 1131fa9e4066Sahrens * Pool Creation 1132fa9e4066Sahrens */ 1133fa9e4066Sahrens int 11340373e76bSbonwick spa_create(const char *pool, nvlist_t *nvroot, const char *altroot) 1135fa9e4066Sahrens { 1136fa9e4066Sahrens spa_t *spa; 11370373e76bSbonwick vdev_t *rvd; 1138fa9e4066Sahrens dsl_pool_t *dp; 1139fa9e4066Sahrens dmu_tx_t *tx; 114099653d4eSeschrock int c, error = 0; 1141fa9e4066Sahrens uint64_t txg = TXG_INITIAL; 114299653d4eSeschrock nvlist_t **spares; 114399653d4eSeschrock uint_t nspares; 1144fa9e4066Sahrens 1145fa9e4066Sahrens /* 1146fa9e4066Sahrens * If this pool already exists, return failure. 1147fa9e4066Sahrens */ 1148fa9e4066Sahrens mutex_enter(&spa_namespace_lock); 1149fa9e4066Sahrens if (spa_lookup(pool) != NULL) { 1150fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1151fa9e4066Sahrens return (EEXIST); 1152fa9e4066Sahrens } 1153fa9e4066Sahrens 1154fa9e4066Sahrens /* 1155fa9e4066Sahrens * Allocate a new spa_t structure. 
1156fa9e4066Sahrens */ 11570373e76bSbonwick spa = spa_add(pool, altroot); 1158fa9e4066Sahrens spa_activate(spa); 1159fa9e4066Sahrens 1160fa9e4066Sahrens spa->spa_uberblock.ub_txg = txg - 1; 1161eaca9bbdSeschrock spa->spa_uberblock.ub_version = ZFS_VERSION; 1162fa9e4066Sahrens spa->spa_ubsync = spa->spa_uberblock; 1163fa9e4066Sahrens 11640373e76bSbonwick /* 11650373e76bSbonwick * Create the root vdev. 11660373e76bSbonwick */ 11670373e76bSbonwick spa_config_enter(spa, RW_WRITER, FTAG); 11680373e76bSbonwick 116999653d4eSeschrock error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 11700373e76bSbonwick 117199653d4eSeschrock ASSERT(error != 0 || rvd != NULL); 117299653d4eSeschrock ASSERT(error != 0 || spa->spa_root_vdev == rvd); 11730373e76bSbonwick 117499653d4eSeschrock if (error == 0 && rvd->vdev_children == 0) 11750373e76bSbonwick error = EINVAL; 117699653d4eSeschrock 117799653d4eSeschrock if (error == 0 && 117899653d4eSeschrock (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 117999653d4eSeschrock (error = spa_validate_spares(spa, nvroot, txg, 118099653d4eSeschrock VDEV_ALLOC_ADD)) == 0) { 118199653d4eSeschrock for (c = 0; c < rvd->vdev_children; c++) 118299653d4eSeschrock vdev_init(rvd->vdev_child[c], txg); 118399653d4eSeschrock vdev_config_dirty(rvd); 11840373e76bSbonwick } 11850373e76bSbonwick 11860373e76bSbonwick spa_config_exit(spa, FTAG); 1187fa9e4066Sahrens 118899653d4eSeschrock if (error != 0) { 1189fa9e4066Sahrens spa_unload(spa); 1190fa9e4066Sahrens spa_deactivate(spa); 1191fa9e4066Sahrens spa_remove(spa); 1192fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1193fa9e4066Sahrens return (error); 1194fa9e4066Sahrens } 1195fa9e4066Sahrens 119699653d4eSeschrock /* 119799653d4eSeschrock * Get the list of spares, if specified. 119899653d4eSeschrock */ 119999653d4eSeschrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 120099653d4eSeschrock &spares, &nspares) == 0) { 120199653d4eSeschrock VERIFY(nvlist_alloc(&spa->spa_sparelist, NV_UNIQUE_NAME, 120299653d4eSeschrock KM_SLEEP) == 0); 120399653d4eSeschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, 120499653d4eSeschrock ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 120599653d4eSeschrock spa_config_enter(spa, RW_WRITER, FTAG); 120699653d4eSeschrock spa_load_spares(spa); 120799653d4eSeschrock spa_config_exit(spa, FTAG); 120899653d4eSeschrock spa->spa_sync_spares = B_TRUE; 120999653d4eSeschrock } 121099653d4eSeschrock 1211fa9e4066Sahrens spa->spa_dsl_pool = dp = dsl_pool_create(spa, txg); 1212fa9e4066Sahrens spa->spa_meta_objset = dp->dp_meta_objset; 1213fa9e4066Sahrens 1214fa9e4066Sahrens tx = dmu_tx_create_assigned(dp, txg); 1215fa9e4066Sahrens 1216fa9e4066Sahrens /* 1217fa9e4066Sahrens * Create the pool config object. 1218fa9e4066Sahrens */ 1219fa9e4066Sahrens spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 1220fa9e4066Sahrens DMU_OT_PACKED_NVLIST, 1 << 14, 1221fa9e4066Sahrens DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 1222fa9e4066Sahrens 1223ea8dc4b6Seschrock if (zap_add(spa->spa_meta_objset, 1224fa9e4066Sahrens DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 1225ea8dc4b6Seschrock sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 1226ea8dc4b6Seschrock cmn_err(CE_PANIC, "failed to add pool config"); 1227ea8dc4b6Seschrock } 1228fa9e4066Sahrens 122999653d4eSeschrock /* Newly created pools are always deflated. 
*/ 123099653d4eSeschrock spa->spa_deflate = TRUE; 123199653d4eSeschrock if (zap_add(spa->spa_meta_objset, 123299653d4eSeschrock DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 123399653d4eSeschrock sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 123499653d4eSeschrock cmn_err(CE_PANIC, "failed to add deflate"); 123599653d4eSeschrock } 123699653d4eSeschrock 1237fa9e4066Sahrens /* 1238fa9e4066Sahrens * Create the deferred-free bplist object. Turn off compression 1239fa9e4066Sahrens * because sync-to-convergence takes longer if the blocksize 1240fa9e4066Sahrens * keeps changing. 1241fa9e4066Sahrens */ 1242fa9e4066Sahrens spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset, 1243fa9e4066Sahrens 1 << 14, tx); 1244fa9e4066Sahrens dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 1245fa9e4066Sahrens ZIO_COMPRESS_OFF, tx); 1246fa9e4066Sahrens 1247ea8dc4b6Seschrock if (zap_add(spa->spa_meta_objset, 1248fa9e4066Sahrens DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, 1249ea8dc4b6Seschrock sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) { 1250ea8dc4b6Seschrock cmn_err(CE_PANIC, "failed to add bplist"); 1251ea8dc4b6Seschrock } 1252fa9e4066Sahrens 125306eeb2adSek /* 125406eeb2adSek * Create the pool's history object. 125506eeb2adSek */ 125606eeb2adSek spa_history_create_obj(spa, tx); 125706eeb2adSek 1258fa9e4066Sahrens dmu_tx_commit(tx); 1259fa9e4066Sahrens 12603d7072f8Seschrock spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 1261fa9e4066Sahrens spa->spa_sync_on = B_TRUE; 1262fa9e4066Sahrens txg_sync_start(spa->spa_dsl_pool); 1263fa9e4066Sahrens 1264fa9e4066Sahrens /* 1265fa9e4066Sahrens * We explicitly wait for the first transaction to complete so that our 1266fa9e4066Sahrens * bean counters are appropriately updated. 1267fa9e4066Sahrens */ 1268fa9e4066Sahrens txg_wait_synced(spa->spa_dsl_pool, txg); 1269fa9e4066Sahrens 1270fa9e4066Sahrens spa_config_sync(); 1271fa9e4066Sahrens 1272fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1273fa9e4066Sahrens 1274fa9e4066Sahrens return (0); 1275fa9e4066Sahrens } 1276fa9e4066Sahrens 1277fa9e4066Sahrens /* 1278fa9e4066Sahrens * Import the given pool into the system. We set up the necessary spa_t and 1279fa9e4066Sahrens * then call spa_load() to do the dirty work. 1280fa9e4066Sahrens */ 1281fa9e4066Sahrens int 12820373e76bSbonwick spa_import(const char *pool, nvlist_t *config, const char *altroot) 1283fa9e4066Sahrens { 1284fa9e4066Sahrens spa_t *spa; 1285fa9e4066Sahrens int error; 128699653d4eSeschrock nvlist_t *nvroot; 128799653d4eSeschrock nvlist_t **spares; 128899653d4eSeschrock uint_t nspares; 1289fa9e4066Sahrens 1290fa9e4066Sahrens if (!(spa_mode & FWRITE)) 1291fa9e4066Sahrens return (EROFS); 1292fa9e4066Sahrens 1293fa9e4066Sahrens /* 1294fa9e4066Sahrens * If a pool with this name exists, return failure. 1295fa9e4066Sahrens */ 1296fa9e4066Sahrens mutex_enter(&spa_namespace_lock); 1297fa9e4066Sahrens if (spa_lookup(pool) != NULL) { 1298fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1299fa9e4066Sahrens return (EEXIST); 1300fa9e4066Sahrens } 1301fa9e4066Sahrens 1302fa9e4066Sahrens /* 13030373e76bSbonwick * Create and initialize the spa structure. 1304fa9e4066Sahrens */ 13050373e76bSbonwick spa = spa_add(pool, altroot); 1306fa9e4066Sahrens spa_activate(spa); 1307fa9e4066Sahrens 13085dabedeeSbonwick /* 13090373e76bSbonwick * Pass off the heavy lifting to spa_load(). 
1310ecc2d604Sbonwick * Pass TRUE for mosconfig because the user-supplied config 1311ecc2d604Sbonwick * is actually the one to trust when doing an import. 13125dabedeeSbonwick */ 1313ecc2d604Sbonwick error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE); 1314fa9e4066Sahrens 131599653d4eSeschrock spa_config_enter(spa, RW_WRITER, FTAG); 131699653d4eSeschrock /* 131799653d4eSeschrock * Toss any existing sparelist, as it doesn't have any validity anymore, 131899653d4eSeschrock * and conflicts with spa_has_spare(). 131999653d4eSeschrock */ 132099653d4eSeschrock if (spa->spa_sparelist) { 132199653d4eSeschrock nvlist_free(spa->spa_sparelist); 132299653d4eSeschrock spa->spa_sparelist = NULL; 132399653d4eSeschrock spa_load_spares(spa); 132499653d4eSeschrock } 132599653d4eSeschrock 132699653d4eSeschrock VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 132799653d4eSeschrock &nvroot) == 0); 132899653d4eSeschrock if (error == 0) 132999653d4eSeschrock error = spa_validate_spares(spa, nvroot, -1ULL, 133099653d4eSeschrock VDEV_ALLOC_SPARE); 133199653d4eSeschrock spa_config_exit(spa, FTAG); 133299653d4eSeschrock 133399653d4eSeschrock if (error != 0) { 1334fa9e4066Sahrens spa_unload(spa); 1335fa9e4066Sahrens spa_deactivate(spa); 1336fa9e4066Sahrens spa_remove(spa); 1337fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1338fa9e4066Sahrens return (error); 1339fa9e4066Sahrens } 1340fa9e4066Sahrens 134199653d4eSeschrock /* 134299653d4eSeschrock * Override any spares as specified by the user, as these may have 134399653d4eSeschrock * correct device names/devids, etc. 134499653d4eSeschrock */ 134599653d4eSeschrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 134699653d4eSeschrock &spares, &nspares) == 0) { 134799653d4eSeschrock if (spa->spa_sparelist) 134899653d4eSeschrock VERIFY(nvlist_remove(spa->spa_sparelist, 134999653d4eSeschrock ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 135099653d4eSeschrock else 135199653d4eSeschrock VERIFY(nvlist_alloc(&spa->spa_sparelist, 135299653d4eSeschrock NV_UNIQUE_NAME, KM_SLEEP) == 0); 135399653d4eSeschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, 135499653d4eSeschrock ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 135599653d4eSeschrock spa_config_enter(spa, RW_WRITER, FTAG); 135699653d4eSeschrock spa_load_spares(spa); 135799653d4eSeschrock spa_config_exit(spa, FTAG); 135899653d4eSeschrock spa->spa_sync_spares = B_TRUE; 135999653d4eSeschrock } 136099653d4eSeschrock 13610373e76bSbonwick /* 13620373e76bSbonwick * Update the config cache to include the newly-imported pool. 13630373e76bSbonwick */ 13640373e76bSbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 13650373e76bSbonwick 1366fa9e4066Sahrens /* 1367fa9e4066Sahrens * Resilver anything that's out of date. 1368fa9e4066Sahrens */ 1369fa9e4066Sahrens if (spa_mode & FWRITE) 1370fa9e4066Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 1371fa9e4066Sahrens 13723d7072f8Seschrock mutex_exit(&spa_namespace_lock); 13733d7072f8Seschrock 1374fa9e4066Sahrens return (0); 1375fa9e4066Sahrens } 1376fa9e4066Sahrens 1377fa9e4066Sahrens /* 1378fa9e4066Sahrens * This (illegal) pool name is used when temporarily importing a spa_t in order 1379fa9e4066Sahrens * to get the vdev stats associated with the imported devices. 
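 *
 * A rough sketch of the expected flow (the caller names are illustrative,
 * not defined here): "zpool import" assembles a candidate config from the
 * device labels it finds, hands it to spa_tryimport() purely to obtain a
 * displayable config and vdev stats, and only then performs the real
 * import through spa_import() under the name the user asked for.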
1380fa9e4066Sahrens */ 1381fa9e4066Sahrens #define TRYIMPORT_NAME "$import" 1382fa9e4066Sahrens 1383fa9e4066Sahrens nvlist_t * 1384fa9e4066Sahrens spa_tryimport(nvlist_t *tryconfig) 1385fa9e4066Sahrens { 1386fa9e4066Sahrens nvlist_t *config = NULL; 1387fa9e4066Sahrens char *poolname; 1388fa9e4066Sahrens spa_t *spa; 1389fa9e4066Sahrens uint64_t state; 1390fa9e4066Sahrens 1391fa9e4066Sahrens if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 1392fa9e4066Sahrens return (NULL); 1393fa9e4066Sahrens 1394fa9e4066Sahrens if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 1395fa9e4066Sahrens return (NULL); 1396fa9e4066Sahrens 1397fa9e4066Sahrens /* 13980373e76bSbonwick * Create and initialize the spa structure. 1399fa9e4066Sahrens */ 14000373e76bSbonwick mutex_enter(&spa_namespace_lock); 14010373e76bSbonwick spa = spa_add(TRYIMPORT_NAME, NULL); 1402fa9e4066Sahrens spa_activate(spa); 1403fa9e4066Sahrens 1404fa9e4066Sahrens /* 14050373e76bSbonwick * Pass off the heavy lifting to spa_load(). 1406ecc2d604Sbonwick * Pass TRUE for mosconfig because the user-supplied config 1407ecc2d604Sbonwick * is actually the one to trust when doing an import. 1408fa9e4066Sahrens */ 1409ecc2d604Sbonwick (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); 1410fa9e4066Sahrens 1411fa9e4066Sahrens /* 1412fa9e4066Sahrens * If 'tryconfig' was at least parsable, return the current config. 1413fa9e4066Sahrens */ 1414fa9e4066Sahrens if (spa->spa_root_vdev != NULL) { 14150373e76bSbonwick spa_config_enter(spa, RW_READER, FTAG); 1416fa9e4066Sahrens config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 14170373e76bSbonwick spa_config_exit(spa, FTAG); 1418fa9e4066Sahrens VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 1419fa9e4066Sahrens poolname) == 0); 1420fa9e4066Sahrens VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 1421fa9e4066Sahrens state) == 0); 142295173954Sek VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 142395173954Sek spa->spa_uberblock.ub_timestamp) == 0); 142499653d4eSeschrock 142599653d4eSeschrock /* 142699653d4eSeschrock * Add the list of hot spares. 142799653d4eSeschrock */ 142899653d4eSeschrock spa_add_spares(spa, config); 1429fa9e4066Sahrens } 1430fa9e4066Sahrens 1431fa9e4066Sahrens spa_unload(spa); 1432fa9e4066Sahrens spa_deactivate(spa); 1433fa9e4066Sahrens spa_remove(spa); 1434fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1435fa9e4066Sahrens 1436fa9e4066Sahrens return (config); 1437fa9e4066Sahrens } 1438fa9e4066Sahrens 1439fa9e4066Sahrens /* 1440fa9e4066Sahrens * Pool export/destroy 1441fa9e4066Sahrens * 1442fa9e4066Sahrens * The act of destroying or exporting a pool is very simple. We make sure there 1443fa9e4066Sahrens * is no more pending I/O and any references to the pool are gone. Then, we 1444fa9e4066Sahrens * update the pool state and sync all the labels to disk, removing the 1445fa9e4066Sahrens * configuration from the cache afterwards. 
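 *
 * The public entry points further below all funnel into this function and
 * differ only in 'new_state':
 *
 *	spa_destroy(pool)           -> POOL_STATE_DESTROYED
 *	spa_export(pool, oldconfig) -> POOL_STATE_EXPORTED
 *	spa_reset(pool)             -> POOL_STATE_UNINITIALIZED, which
 *	                               unloads the pool but leaves the
 *	                               spa_t in the namespace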
1446fa9e4066Sahrens */ 1447fa9e4066Sahrens static int 144844cd46caSbillm spa_export_common(char *pool, int new_state, nvlist_t **oldconfig) 1449fa9e4066Sahrens { 1450fa9e4066Sahrens spa_t *spa; 1451fa9e4066Sahrens 145244cd46caSbillm if (oldconfig) 145344cd46caSbillm *oldconfig = NULL; 145444cd46caSbillm 1455fa9e4066Sahrens if (!(spa_mode & FWRITE)) 1456fa9e4066Sahrens return (EROFS); 1457fa9e4066Sahrens 1458fa9e4066Sahrens mutex_enter(&spa_namespace_lock); 1459fa9e4066Sahrens if ((spa = spa_lookup(pool)) == NULL) { 1460fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1461fa9e4066Sahrens return (ENOENT); 1462fa9e4066Sahrens } 1463fa9e4066Sahrens 1464ea8dc4b6Seschrock /* 1465ea8dc4b6Seschrock * Put a hold on the pool, drop the namespace lock, stop async tasks, 1466ea8dc4b6Seschrock * reacquire the namespace lock, and see if we can export. 1467ea8dc4b6Seschrock */ 1468ea8dc4b6Seschrock spa_open_ref(spa, FTAG); 1469ea8dc4b6Seschrock mutex_exit(&spa_namespace_lock); 1470ea8dc4b6Seschrock spa_async_suspend(spa); 1471ea8dc4b6Seschrock mutex_enter(&spa_namespace_lock); 1472ea8dc4b6Seschrock spa_close(spa, FTAG); 1473ea8dc4b6Seschrock 1474fa9e4066Sahrens /* 1475fa9e4066Sahrens * The pool will be in core if it's openable, 1476fa9e4066Sahrens * in which case we can modify its state. 1477fa9e4066Sahrens */ 1478fa9e4066Sahrens if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 1479fa9e4066Sahrens /* 1480fa9e4066Sahrens * Objsets may be open only because they're dirty, so we 1481fa9e4066Sahrens * have to force it to sync before checking spa_refcnt. 1482fa9e4066Sahrens */ 1483fa9e4066Sahrens spa_scrub_suspend(spa); 1484fa9e4066Sahrens txg_wait_synced(spa->spa_dsl_pool, 0); 1485fa9e4066Sahrens 1486ea8dc4b6Seschrock /* 1487ea8dc4b6Seschrock * A pool cannot be exported or destroyed if there are active 1488ea8dc4b6Seschrock * references. If we are resetting a pool, allow references by 1489ea8dc4b6Seschrock * fault injection handlers. 1490ea8dc4b6Seschrock */ 1491ea8dc4b6Seschrock if (!spa_refcount_zero(spa) || 1492ea8dc4b6Seschrock (spa->spa_inject_ref != 0 && 1493ea8dc4b6Seschrock new_state != POOL_STATE_UNINITIALIZED)) { 1494fa9e4066Sahrens spa_scrub_resume(spa); 1495ea8dc4b6Seschrock spa_async_resume(spa); 1496fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1497fa9e4066Sahrens return (EBUSY); 1498fa9e4066Sahrens } 1499fa9e4066Sahrens 1500fa9e4066Sahrens spa_scrub_resume(spa); 1501fa9e4066Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0); 1502fa9e4066Sahrens 1503fa9e4066Sahrens /* 1504fa9e4066Sahrens * We want this to be reflected on every label, 1505fa9e4066Sahrens * so mark them all dirty. spa_unload() will do the 1506fa9e4066Sahrens * final sync that pushes these changes out. 
1507fa9e4066Sahrens */ 1508ea8dc4b6Seschrock if (new_state != POOL_STATE_UNINITIALIZED) { 15095dabedeeSbonwick spa_config_enter(spa, RW_WRITER, FTAG); 1510ea8dc4b6Seschrock spa->spa_state = new_state; 15110373e76bSbonwick spa->spa_final_txg = spa_last_synced_txg(spa) + 1; 1512ea8dc4b6Seschrock vdev_config_dirty(spa->spa_root_vdev); 15135dabedeeSbonwick spa_config_exit(spa, FTAG); 1514ea8dc4b6Seschrock } 1515fa9e4066Sahrens } 1516fa9e4066Sahrens 15173d7072f8Seschrock spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 15183d7072f8Seschrock 1519fa9e4066Sahrens if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 1520fa9e4066Sahrens spa_unload(spa); 1521fa9e4066Sahrens spa_deactivate(spa); 1522fa9e4066Sahrens } 1523fa9e4066Sahrens 152444cd46caSbillm if (oldconfig && spa->spa_config) 152544cd46caSbillm VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 152644cd46caSbillm 1527ea8dc4b6Seschrock if (new_state != POOL_STATE_UNINITIALIZED) { 1528ea8dc4b6Seschrock spa_remove(spa); 1529ea8dc4b6Seschrock spa_config_sync(); 1530ea8dc4b6Seschrock } 1531fa9e4066Sahrens mutex_exit(&spa_namespace_lock); 1532fa9e4066Sahrens 1533fa9e4066Sahrens return (0); 1534fa9e4066Sahrens } 1535fa9e4066Sahrens 1536fa9e4066Sahrens /* 1537fa9e4066Sahrens * Destroy a storage pool. 1538fa9e4066Sahrens */ 1539fa9e4066Sahrens int 1540fa9e4066Sahrens spa_destroy(char *pool) 1541fa9e4066Sahrens { 154244cd46caSbillm return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL)); 1543fa9e4066Sahrens } 1544fa9e4066Sahrens 1545fa9e4066Sahrens /* 1546fa9e4066Sahrens * Export a storage pool. 1547fa9e4066Sahrens */ 1548fa9e4066Sahrens int 154944cd46caSbillm spa_export(char *pool, nvlist_t **oldconfig) 1550fa9e4066Sahrens { 155144cd46caSbillm return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig)); 1552fa9e4066Sahrens } 1553fa9e4066Sahrens 1554ea8dc4b6Seschrock /* 1555ea8dc4b6Seschrock * Similar to spa_export(), this unloads the spa_t without actually removing it 1556ea8dc4b6Seschrock * from the namespace in any way. 1557ea8dc4b6Seschrock */ 1558ea8dc4b6Seschrock int 1559ea8dc4b6Seschrock spa_reset(char *pool) 1560ea8dc4b6Seschrock { 156144cd46caSbillm return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL)); 1562ea8dc4b6Seschrock } 1563ea8dc4b6Seschrock 1564ea8dc4b6Seschrock 1565fa9e4066Sahrens /* 1566fa9e4066Sahrens * ========================================================================== 1567fa9e4066Sahrens * Device manipulation 1568fa9e4066Sahrens * ========================================================================== 1569fa9e4066Sahrens */ 1570fa9e4066Sahrens 1571fa9e4066Sahrens /* 1572*8654d025Sperrin * Add a device to a storage pool. 
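 * The caller passes 'nvroot' in the same shape that "zpool add" builds:
 * a root nvlist whose children are the new top-level vdevs, optionally
 * accompanied by a ZPOOL_CONFIG_SPARES array.  A loose, illustrative
 * example (device names are made up):
 *
 *	root
 *	 +-- mirror { c1t0d0, c1t1d0 }	(new top-level vdev)
 *	 +-- spares: { c2t0d0 }		(merged into spa_sparelist below)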
1573fa9e4066Sahrens */ 1574fa9e4066Sahrens int 1575fa9e4066Sahrens spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 1576fa9e4066Sahrens { 1577fa9e4066Sahrens uint64_t txg; 15780373e76bSbonwick int c, error; 1579fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 15800e34b6a7Sbonwick vdev_t *vd, *tvd; 158199653d4eSeschrock nvlist_t **spares; 158299653d4eSeschrock uint_t i, nspares; 1583fa9e4066Sahrens 1584fa9e4066Sahrens txg = spa_vdev_enter(spa); 1585fa9e4066Sahrens 158699653d4eSeschrock if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 158799653d4eSeschrock VDEV_ALLOC_ADD)) != 0) 158899653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, error)); 1589fa9e4066Sahrens 159039c23413Seschrock spa->spa_pending_vdev = vd; 159199653d4eSeschrock 159299653d4eSeschrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 159399653d4eSeschrock &spares, &nspares) != 0) 159499653d4eSeschrock nspares = 0; 159599653d4eSeschrock 159639c23413Seschrock if (vd->vdev_children == 0 && nspares == 0) { 159739c23413Seschrock spa->spa_pending_vdev = NULL; 1598fa9e4066Sahrens return (spa_vdev_exit(spa, vd, txg, EINVAL)); 159939c23413Seschrock } 1600fa9e4066Sahrens 160199653d4eSeschrock if (vd->vdev_children != 0) { 160239c23413Seschrock if ((error = vdev_create(vd, txg, B_FALSE)) != 0) { 160339c23413Seschrock spa->spa_pending_vdev = NULL; 160499653d4eSeschrock return (spa_vdev_exit(spa, vd, txg, error)); 160599653d4eSeschrock } 160699653d4eSeschrock } 160799653d4eSeschrock 160839c23413Seschrock /* 160939c23413Seschrock * We must validate the spares after checking the children. Otherwise, 161039c23413Seschrock * vdev_inuse() will blindly overwrite the spare. 161139c23413Seschrock */ 161239c23413Seschrock if ((error = spa_validate_spares(spa, nvroot, txg, 161339c23413Seschrock VDEV_ALLOC_ADD)) != 0) { 161439c23413Seschrock spa->spa_pending_vdev = NULL; 161539c23413Seschrock return (spa_vdev_exit(spa, vd, txg, error)); 161639c23413Seschrock } 161739c23413Seschrock 161839c23413Seschrock spa->spa_pending_vdev = NULL; 161939c23413Seschrock 162039c23413Seschrock /* 162139c23413Seschrock * Transfer each new top-level vdev from vd to rvd. 
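 * ('vd' is the temporary root that spa_config_parse() built for the
 * caller's nvroot above; reparenting its children under rvd is what
 * actually grows the pool's array of top-level vdevs.)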
162239c23413Seschrock */ 162339c23413Seschrock for (c = 0; c < vd->vdev_children; c++) { 162439c23413Seschrock tvd = vd->vdev_child[c]; 162539c23413Seschrock vdev_remove_child(vd, tvd); 162639c23413Seschrock tvd->vdev_id = rvd->vdev_children; 162739c23413Seschrock vdev_add_child(rvd, tvd); 162839c23413Seschrock vdev_config_dirty(tvd); 162939c23413Seschrock } 163039c23413Seschrock 163199653d4eSeschrock if (nspares != 0) { 163299653d4eSeschrock if (spa->spa_sparelist != NULL) { 163399653d4eSeschrock nvlist_t **oldspares; 163499653d4eSeschrock uint_t oldnspares; 163599653d4eSeschrock nvlist_t **newspares; 163699653d4eSeschrock 163799653d4eSeschrock VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist, 163899653d4eSeschrock ZPOOL_CONFIG_SPARES, &oldspares, &oldnspares) == 0); 163999653d4eSeschrock 164099653d4eSeschrock newspares = kmem_alloc(sizeof (void *) * 164199653d4eSeschrock (nspares + oldnspares), KM_SLEEP); 164299653d4eSeschrock for (i = 0; i < oldnspares; i++) 164399653d4eSeschrock VERIFY(nvlist_dup(oldspares[i], 164499653d4eSeschrock &newspares[i], KM_SLEEP) == 0); 164599653d4eSeschrock for (i = 0; i < nspares; i++) 164699653d4eSeschrock VERIFY(nvlist_dup(spares[i], 164799653d4eSeschrock &newspares[i + oldnspares], 164899653d4eSeschrock KM_SLEEP) == 0); 164999653d4eSeschrock 165099653d4eSeschrock VERIFY(nvlist_remove(spa->spa_sparelist, 165199653d4eSeschrock ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 165299653d4eSeschrock 165399653d4eSeschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, 165499653d4eSeschrock ZPOOL_CONFIG_SPARES, newspares, 165599653d4eSeschrock nspares + oldnspares) == 0); 165699653d4eSeschrock for (i = 0; i < oldnspares + nspares; i++) 165799653d4eSeschrock nvlist_free(newspares[i]); 165899653d4eSeschrock kmem_free(newspares, (oldnspares + nspares) * 165999653d4eSeschrock sizeof (void *)); 166099653d4eSeschrock } else { 166199653d4eSeschrock VERIFY(nvlist_alloc(&spa->spa_sparelist, 166299653d4eSeschrock NV_UNIQUE_NAME, KM_SLEEP) == 0); 166399653d4eSeschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, 166499653d4eSeschrock ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 166599653d4eSeschrock } 166699653d4eSeschrock 166799653d4eSeschrock spa_load_spares(spa); 166899653d4eSeschrock spa->spa_sync_spares = B_TRUE; 1669fa9e4066Sahrens } 1670fa9e4066Sahrens 1671fa9e4066Sahrens /* 16720e34b6a7Sbonwick * We have to be careful when adding new vdevs to an existing pool. 16730e34b6a7Sbonwick * If other threads start allocating from these vdevs before we 16740e34b6a7Sbonwick * sync the config cache, and we lose power, then upon reboot we may 16750e34b6a7Sbonwick * fail to open the pool because there are DVAs that the config cache 16760e34b6a7Sbonwick * can't translate. Therefore, we first add the vdevs without 16770e34b6a7Sbonwick * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 16780373e76bSbonwick * and then let spa_config_update() initialize the new metaslabs. 16790e34b6a7Sbonwick * 16800e34b6a7Sbonwick * spa_load() checks for added-but-not-initialized vdevs, so that 16810e34b6a7Sbonwick * if we lose power at any point in this sequence, the remaining 16820e34b6a7Sbonwick * steps will be completed the next time we load the pool. 
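 *
 * In short, the order below is: spa_vdev_exit() to commit the new vdevs
 * and sync the config cache, then spa_config_update() under the namespace
 * lock to create their metaslabs; a crash in between is repaired by
 * spa_load() as described above.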
16830e34b6a7Sbonwick */ 16840373e76bSbonwick (void) spa_vdev_exit(spa, vd, txg, 0); 16850e34b6a7Sbonwick 16860373e76bSbonwick mutex_enter(&spa_namespace_lock); 16870373e76bSbonwick spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 16880373e76bSbonwick mutex_exit(&spa_namespace_lock); 1689fa9e4066Sahrens 16900373e76bSbonwick return (0); 1691fa9e4066Sahrens } 1692fa9e4066Sahrens 1693fa9e4066Sahrens /* 1694fa9e4066Sahrens * Attach a device to a mirror. The arguments are the path to any device 1695fa9e4066Sahrens * in the mirror, and the nvroot for the new device. If the path specifies 1696fa9e4066Sahrens * a device that is not mirrored, we automatically insert the mirror vdev. 1697fa9e4066Sahrens * 1698fa9e4066Sahrens * If 'replacing' is specified, the new device is intended to replace the 1699fa9e4066Sahrens * existing device; in this case the two devices are made into their own 17003d7072f8Seschrock * mirror using the 'replacing' vdev, which is functionally identical to 1701fa9e4066Sahrens * the mirror vdev (it actually reuses all the same ops) but has a few 1702fa9e4066Sahrens * extra rules: you can't attach to it after it's been created, and upon 1703fa9e4066Sahrens * completion of resilvering, the first disk (the one being replaced) 1704fa9e4066Sahrens * is automatically detached. 1705fa9e4066Sahrens */ 1706fa9e4066Sahrens int 1707ea8dc4b6Seschrock spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 1708fa9e4066Sahrens { 1709fa9e4066Sahrens uint64_t txg, open_txg; 1710fa9e4066Sahrens int error; 1711fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 1712fa9e4066Sahrens vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 171399653d4eSeschrock vdev_ops_t *pvops; 1714*8654d025Sperrin int is_log; 1715fa9e4066Sahrens 1716fa9e4066Sahrens txg = spa_vdev_enter(spa); 1717fa9e4066Sahrens 1718ea8dc4b6Seschrock oldvd = vdev_lookup_by_guid(rvd, guid); 1719fa9e4066Sahrens 1720fa9e4066Sahrens if (oldvd == NULL) 1721fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 1722fa9e4066Sahrens 17230e34b6a7Sbonwick if (!oldvd->vdev_ops->vdev_op_leaf) 17240e34b6a7Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 17250e34b6a7Sbonwick 1726fa9e4066Sahrens pvd = oldvd->vdev_parent; 1727fa9e4066Sahrens 172899653d4eSeschrock if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 17293d7072f8Seschrock VDEV_ALLOC_ADD)) != 0) 17303d7072f8Seschrock return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 17313d7072f8Seschrock 17323d7072f8Seschrock if (newrootvd->vdev_children != 1) 1733fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 1734fa9e4066Sahrens 1735fa9e4066Sahrens newvd = newrootvd->vdev_child[0]; 1736fa9e4066Sahrens 1737fa9e4066Sahrens if (!newvd->vdev_ops->vdev_op_leaf) 1738fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 1739fa9e4066Sahrens 174099653d4eSeschrock if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 1741fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, error)); 1742fa9e4066Sahrens 1743*8654d025Sperrin /* 1744*8654d025Sperrin * Spares can't replace logs 1745*8654d025Sperrin */ 1746*8654d025Sperrin is_log = oldvd->vdev_islog; 1747*8654d025Sperrin if (is_log && newvd->vdev_isspare) 1748*8654d025Sperrin return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 1749*8654d025Sperrin 175099653d4eSeschrock if (!replacing) { 175199653d4eSeschrock /* 175299653d4eSeschrock * For attach, the only allowable parent is a mirror or the root 175399653d4eSeschrock * vdev. 
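 * (In particular, attaching to a plain top-level disk is allowed: the
 * root vdev is an acceptable parent here, and the mirror vdev itself is
 * interposed further down by vdev_add_parent().)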
175499653d4eSeschrock */ 175599653d4eSeschrock if (pvd->vdev_ops != &vdev_mirror_ops && 175699653d4eSeschrock pvd->vdev_ops != &vdev_root_ops) 175799653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 175899653d4eSeschrock 175999653d4eSeschrock pvops = &vdev_mirror_ops; 176099653d4eSeschrock } else { 176199653d4eSeschrock /* 176299653d4eSeschrock * Active hot spares can only be replaced by inactive hot 176399653d4eSeschrock * spares. 176499653d4eSeschrock */ 176599653d4eSeschrock if (pvd->vdev_ops == &vdev_spare_ops && 176699653d4eSeschrock pvd->vdev_child[1] == oldvd && 176799653d4eSeschrock !spa_has_spare(spa, newvd->vdev_guid)) 176899653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 176999653d4eSeschrock 177099653d4eSeschrock /* 177199653d4eSeschrock * If the source is a hot spare, and the parent isn't already a 177299653d4eSeschrock * spare, then we want to create a new hot spare. Otherwise, we 177339c23413Seschrock * want to create a replacing vdev. The user is not allowed to 177439c23413Seschrock * attach to a spared vdev child unless the 'isspare' state is 177539c23413Seschrock * the same (spare replaces spare, non-spare replaces 177639c23413Seschrock * non-spare). 177799653d4eSeschrock */ 177899653d4eSeschrock if (pvd->vdev_ops == &vdev_replacing_ops) 177999653d4eSeschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 178039c23413Seschrock else if (pvd->vdev_ops == &vdev_spare_ops && 178139c23413Seschrock newvd->vdev_isspare != oldvd->vdev_isspare) 178239c23413Seschrock return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 178399653d4eSeschrock else if (pvd->vdev_ops != &vdev_spare_ops && 178499653d4eSeschrock newvd->vdev_isspare) 178599653d4eSeschrock pvops = &vdev_spare_ops; 178699653d4eSeschrock else 178799653d4eSeschrock pvops = &vdev_replacing_ops; 178899653d4eSeschrock } 178999653d4eSeschrock 17902a79c5feSlling /* 17912a79c5feSlling * Compare the new device size with the replaceable/attachable 17922a79c5feSlling * device size. 17932a79c5feSlling */ 17942a79c5feSlling if (newvd->vdev_psize < vdev_get_rsize(oldvd)) 1795fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 1796fa9e4066Sahrens 1797ecc2d604Sbonwick /* 1798ecc2d604Sbonwick * The new device cannot have a higher alignment requirement 1799ecc2d604Sbonwick * than the top-level vdev. 1800ecc2d604Sbonwick */ 1801ecc2d604Sbonwick if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 1802fa9e4066Sahrens return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 1803fa9e4066Sahrens 1804fa9e4066Sahrens /* 1805fa9e4066Sahrens * If this is an in-place replacement, update oldvd's path and devid 1806fa9e4066Sahrens * to make it distinguishable from newvd, and unopenable from now on. 1807fa9e4066Sahrens */ 1808fa9e4066Sahrens if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 1809fa9e4066Sahrens spa_strfree(oldvd->vdev_path); 1810fa9e4066Sahrens oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 1811fa9e4066Sahrens KM_SLEEP); 1812fa9e4066Sahrens (void) sprintf(oldvd->vdev_path, "%s/%s", 1813fa9e4066Sahrens newvd->vdev_path, "old"); 1814fa9e4066Sahrens if (oldvd->vdev_devid != NULL) { 1815fa9e4066Sahrens spa_strfree(oldvd->vdev_devid); 1816fa9e4066Sahrens oldvd->vdev_devid = NULL; 1817fa9e4066Sahrens } 1818fa9e4066Sahrens } 1819fa9e4066Sahrens 1820fa9e4066Sahrens /* 182199653d4eSeschrock * If the parent is not a mirror, or if we're replacing, insert the new 182299653d4eSeschrock * mirror/replacing/spare vdev above oldvd. 
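 * Illustratively (made-up device names), replacing c1t0d0 with c1t1d0
 * takes the tree from
 *
 *	root -> c1t0d0
 * to
 *	root -> replacing { c1t0d0, c1t1d0 }
 *
 * and the replacing vdev goes away again when the resilver completes and
 * the old disk is detached.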
1823fa9e4066Sahrens */ 1824fa9e4066Sahrens if (pvd->vdev_ops != pvops) 1825fa9e4066Sahrens pvd = vdev_add_parent(oldvd, pvops); 1826fa9e4066Sahrens 1827fa9e4066Sahrens ASSERT(pvd->vdev_top->vdev_parent == rvd); 1828fa9e4066Sahrens ASSERT(pvd->vdev_ops == pvops); 1829fa9e4066Sahrens ASSERT(oldvd->vdev_parent == pvd); 1830fa9e4066Sahrens 1831fa9e4066Sahrens /* 1832fa9e4066Sahrens * Extract the new device from its root and add it to pvd. 1833fa9e4066Sahrens */ 1834fa9e4066Sahrens vdev_remove_child(newrootvd, newvd); 1835fa9e4066Sahrens newvd->vdev_id = pvd->vdev_children; 1836fa9e4066Sahrens vdev_add_child(pvd, newvd); 1837fa9e4066Sahrens 1838ea8dc4b6Seschrock /* 1839ea8dc4b6Seschrock * If newvd is smaller than oldvd, but larger than its rsize, 1840ea8dc4b6Seschrock * the addition of newvd may have decreased our parent's asize. 1841ea8dc4b6Seschrock */ 1842ea8dc4b6Seschrock pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); 1843ea8dc4b6Seschrock 1844fa9e4066Sahrens tvd = newvd->vdev_top; 1845fa9e4066Sahrens ASSERT(pvd->vdev_top == tvd); 1846fa9e4066Sahrens ASSERT(tvd->vdev_parent == rvd); 1847fa9e4066Sahrens 1848fa9e4066Sahrens vdev_config_dirty(tvd); 1849fa9e4066Sahrens 1850fa9e4066Sahrens /* 1851fa9e4066Sahrens * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate 1852fa9e4066Sahrens * upward when spa_vdev_exit() calls vdev_dtl_reassess(). 1853fa9e4066Sahrens */ 1854fa9e4066Sahrens open_txg = txg + TXG_CONCURRENT_STATES - 1; 1855fa9e4066Sahrens 1856fa9e4066Sahrens mutex_enter(&newvd->vdev_dtl_lock); 1857fa9e4066Sahrens space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, 1858fa9e4066Sahrens open_txg - TXG_INITIAL + 1); 1859fa9e4066Sahrens mutex_exit(&newvd->vdev_dtl_lock); 1860fa9e4066Sahrens 186139c23413Seschrock if (newvd->vdev_isspare) 186239c23413Seschrock spa_spare_activate(newvd); 1863ea8dc4b6Seschrock 1864fa9e4066Sahrens /* 1865fa9e4066Sahrens * Mark newvd's DTL dirty in this txg. 1866fa9e4066Sahrens */ 1867ecc2d604Sbonwick vdev_dirty(tvd, VDD_DTL, newvd, txg); 1868fa9e4066Sahrens 1869fa9e4066Sahrens (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); 1870fa9e4066Sahrens 1871fa9e4066Sahrens /* 18723d7072f8Seschrock * Kick off a resilver to update newvd. We need to grab the namespace 18733d7072f8Seschrock * lock because spa_scrub() needs to post a sysevent with the pool name. 1874fa9e4066Sahrens */ 18753d7072f8Seschrock mutex_enter(&spa_namespace_lock); 1876fa9e4066Sahrens VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0); 18773d7072f8Seschrock mutex_exit(&spa_namespace_lock); 1878fa9e4066Sahrens 1879fa9e4066Sahrens return (0); 1880fa9e4066Sahrens } 1881fa9e4066Sahrens 1882fa9e4066Sahrens /* 1883fa9e4066Sahrens * Detach a device from a mirror or replacing vdev. 1884fa9e4066Sahrens * If 'replace_done' is specified, only detach if the parent 1885fa9e4066Sahrens * is a replacing vdev. 
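 * (replace_done is how spa_vdev_resilver_done() marks its own automatic
 * detach of a completed replacement; an explicit detach request would
 * pass B_FALSE and may therefore also detach a mirror child.)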
1886fa9e4066Sahrens */ 1887fa9e4066Sahrens int 1888ea8dc4b6Seschrock spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) 1889fa9e4066Sahrens { 1890fa9e4066Sahrens uint64_t txg; 1891fa9e4066Sahrens int c, t, error; 1892fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 1893fa9e4066Sahrens vdev_t *vd, *pvd, *cvd, *tvd; 189499653d4eSeschrock boolean_t unspare = B_FALSE; 189599653d4eSeschrock uint64_t unspare_guid; 1896fa9e4066Sahrens 1897fa9e4066Sahrens txg = spa_vdev_enter(spa); 1898fa9e4066Sahrens 1899ea8dc4b6Seschrock vd = vdev_lookup_by_guid(rvd, guid); 1900fa9e4066Sahrens 1901fa9e4066Sahrens if (vd == NULL) 1902fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 1903fa9e4066Sahrens 19040e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 19050e34b6a7Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 19060e34b6a7Sbonwick 1907fa9e4066Sahrens pvd = vd->vdev_parent; 1908fa9e4066Sahrens 1909fa9e4066Sahrens /* 1910fa9e4066Sahrens * If replace_done is specified, only remove this device if it's 191199653d4eSeschrock * the first child of a replacing vdev. For the 'spare' vdev, either 191299653d4eSeschrock * disk can be removed. 191399653d4eSeschrock */ 191499653d4eSeschrock if (replace_done) { 191599653d4eSeschrock if (pvd->vdev_ops == &vdev_replacing_ops) { 191699653d4eSeschrock if (vd->vdev_id != 0) 191799653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 191899653d4eSeschrock } else if (pvd->vdev_ops != &vdev_spare_ops) { 191999653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 192099653d4eSeschrock } 192199653d4eSeschrock } 192299653d4eSeschrock 192399653d4eSeschrock ASSERT(pvd->vdev_ops != &vdev_spare_ops || 192499653d4eSeschrock spa_version(spa) >= ZFS_VERSION_SPARES); 1925fa9e4066Sahrens 1926fa9e4066Sahrens /* 192799653d4eSeschrock * Only mirror, replacing, and spare vdevs support detach. 1928fa9e4066Sahrens */ 1929fa9e4066Sahrens if (pvd->vdev_ops != &vdev_replacing_ops && 193099653d4eSeschrock pvd->vdev_ops != &vdev_mirror_ops && 193199653d4eSeschrock pvd->vdev_ops != &vdev_spare_ops) 1932fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 1933fa9e4066Sahrens 1934fa9e4066Sahrens /* 1935fa9e4066Sahrens * If there's only one replica, you can't detach it. 1936fa9e4066Sahrens */ 1937fa9e4066Sahrens if (pvd->vdev_children <= 1) 1938fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 1939fa9e4066Sahrens 1940fa9e4066Sahrens /* 1941fa9e4066Sahrens * If all siblings have non-empty DTLs, this device may have the only 1942fa9e4066Sahrens * valid copy of the data, which means we cannot safely detach it. 1943fa9e4066Sahrens * 1944fa9e4066Sahrens * XXX -- as in the vdev_offline() case, we really want a more 1945fa9e4066Sahrens * precise DTL check. 
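 * (The loop below stops early as soon as one healthy sibling with an
 * empty DTL is found; if it runs to completion, every remaining copy is
 * suspect and the detach is refused with EBUSY -- except for the latter
 * child of a replacing or spare vdev, which may always be detached, as
 * the check after the loop spells out.)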
1946fa9e4066Sahrens */ 1947fa9e4066Sahrens for (c = 0; c < pvd->vdev_children; c++) { 1948fa9e4066Sahrens uint64_t dirty; 1949fa9e4066Sahrens 1950fa9e4066Sahrens cvd = pvd->vdev_child[c]; 1951fa9e4066Sahrens if (cvd == vd) 1952fa9e4066Sahrens continue; 1953fa9e4066Sahrens if (vdev_is_dead(cvd)) 1954fa9e4066Sahrens continue; 1955fa9e4066Sahrens mutex_enter(&cvd->vdev_dtl_lock); 1956fa9e4066Sahrens dirty = cvd->vdev_dtl_map.sm_space | 1957fa9e4066Sahrens cvd->vdev_dtl_scrub.sm_space; 1958fa9e4066Sahrens mutex_exit(&cvd->vdev_dtl_lock); 1959fa9e4066Sahrens if (!dirty) 1960fa9e4066Sahrens break; 1961fa9e4066Sahrens } 196299653d4eSeschrock 196399653d4eSeschrock /* 196499653d4eSeschrock * If we are a replacing or spare vdev, then we can always detach the 196599653d4eSeschrock * latter child, as that is how one cancels the operation. 196699653d4eSeschrock */ 196799653d4eSeschrock if ((pvd->vdev_ops == &vdev_mirror_ops || vd->vdev_id != 1) && 196899653d4eSeschrock c == pvd->vdev_children) 1969fa9e4066Sahrens return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 1970fa9e4066Sahrens 197199653d4eSeschrock /* 197299653d4eSeschrock * If we are detaching the original disk from a spare, then it implies 197399653d4eSeschrock * that the spare should become a real disk, and be removed from the 197499653d4eSeschrock * active spare list for the pool. 197599653d4eSeschrock */ 197699653d4eSeschrock if (pvd->vdev_ops == &vdev_spare_ops && 197799653d4eSeschrock vd->vdev_id == 0) 197899653d4eSeschrock unspare = B_TRUE; 197999653d4eSeschrock 1980fa9e4066Sahrens /* 1981fa9e4066Sahrens * Erase the disk labels so the disk can be used for other things. 1982fa9e4066Sahrens * This must be done after all other error cases are handled, 1983fa9e4066Sahrens * but before we disembowel vd (so we can still do I/O to it). 1984fa9e4066Sahrens * But if we can't do it, don't treat the error as fatal -- 1985fa9e4066Sahrens * it may be that the unwritability of the disk is the reason 1986fa9e4066Sahrens * it's being detached! 1987fa9e4066Sahrens */ 198839c23413Seschrock error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 1989fa9e4066Sahrens 1990fa9e4066Sahrens /* 1991fa9e4066Sahrens * Remove vd from its parent and compact the parent's children. 1992fa9e4066Sahrens */ 1993fa9e4066Sahrens vdev_remove_child(pvd, vd); 1994fa9e4066Sahrens vdev_compact_children(pvd); 1995fa9e4066Sahrens 1996fa9e4066Sahrens /* 1997fa9e4066Sahrens * Remember one of the remaining children so we can get tvd below. 1998fa9e4066Sahrens */ 1999fa9e4066Sahrens cvd = pvd->vdev_child[0]; 2000fa9e4066Sahrens 200199653d4eSeschrock /* 200299653d4eSeschrock * If we need to remove the remaining child from the list of hot spares, 200399653d4eSeschrock * do it now, marking the vdev as no longer a spare in the process. We 200499653d4eSeschrock * must do this before vdev_remove_parent(), because that can change the 200599653d4eSeschrock * GUID if it creates a new toplevel GUID. 200699653d4eSeschrock */ 200799653d4eSeschrock if (unspare) { 200899653d4eSeschrock ASSERT(cvd->vdev_isspare); 200939c23413Seschrock spa_spare_remove(cvd); 201099653d4eSeschrock unspare_guid = cvd->vdev_guid; 201199653d4eSeschrock } 201299653d4eSeschrock 2013fa9e4066Sahrens /* 2014fa9e4066Sahrens * If the parent mirror/replacing vdev only has one child, 2015fa9e4066Sahrens * the parent is no longer needed. Remove it from the tree. 
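 * (Illustratively: detaching one side of a two-way mirror leaves a
 * mirror with a single child, which is then collapsed so the surviving
 * disk becomes the top-level vdev again.)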
2016fa9e4066Sahrens */ 2017fa9e4066Sahrens if (pvd->vdev_children == 1) 2018fa9e4066Sahrens vdev_remove_parent(cvd); 2019fa9e4066Sahrens 2020fa9e4066Sahrens /* 2021fa9e4066Sahrens * We don't set tvd until now because the parent we just removed 2022fa9e4066Sahrens * may have been the previous top-level vdev. 2023fa9e4066Sahrens */ 2024fa9e4066Sahrens tvd = cvd->vdev_top; 2025fa9e4066Sahrens ASSERT(tvd->vdev_parent == rvd); 2026fa9e4066Sahrens 2027fa9e4066Sahrens /* 202839c23413Seschrock * Reevaluate the parent vdev state. 2029fa9e4066Sahrens */ 20303d7072f8Seschrock vdev_propagate_state(cvd); 2031fa9e4066Sahrens 2032fa9e4066Sahrens /* 203339c23413Seschrock * If the device we just detached was smaller than the others, it may be 203439c23413Seschrock * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() 203539c23413Seschrock * can't fail because the existing metaslabs are already in core, so 203639c23413Seschrock * there's nothing to read from disk. 2037fa9e4066Sahrens */ 2038ecc2d604Sbonwick VERIFY(vdev_metaslab_init(tvd, txg) == 0); 2039fa9e4066Sahrens 2040fa9e4066Sahrens vdev_config_dirty(tvd); 2041fa9e4066Sahrens 2042fa9e4066Sahrens /* 204339c23413Seschrock * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 204439c23413Seschrock * vd->vdev_detached is set and free vd's DTL object in syncing context. 204539c23413Seschrock * But first make sure we're not on any *other* txg's DTL list, to 204639c23413Seschrock * prevent vd from being accessed after it's freed. 2047fa9e4066Sahrens */ 2048fa9e4066Sahrens for (t = 0; t < TXG_SIZE; t++) 2049fa9e4066Sahrens (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 2050ecc2d604Sbonwick vd->vdev_detached = B_TRUE; 2051ecc2d604Sbonwick vdev_dirty(tvd, VDD_DTL, vd, txg); 2052fa9e4066Sahrens 20533d7072f8Seschrock spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 20543d7072f8Seschrock 205599653d4eSeschrock error = spa_vdev_exit(spa, vd, txg, 0); 205699653d4eSeschrock 205799653d4eSeschrock /* 205839c23413Seschrock * If this was the removal of the original device in a hot spare vdev, 205939c23413Seschrock * then we want to go through and remove the device from the hot spare 206039c23413Seschrock * list of every other pool. 206199653d4eSeschrock */ 206299653d4eSeschrock if (unspare) { 206399653d4eSeschrock spa = NULL; 206499653d4eSeschrock mutex_enter(&spa_namespace_lock); 206599653d4eSeschrock while ((spa = spa_next(spa)) != NULL) { 206699653d4eSeschrock if (spa->spa_state != POOL_STATE_ACTIVE) 206799653d4eSeschrock continue; 206899653d4eSeschrock 206999653d4eSeschrock (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 207099653d4eSeschrock } 207199653d4eSeschrock mutex_exit(&spa_namespace_lock); 207299653d4eSeschrock } 207399653d4eSeschrock 207499653d4eSeschrock return (error); 207599653d4eSeschrock } 207699653d4eSeschrock 207799653d4eSeschrock /* 207899653d4eSeschrock * Remove a device from the pool. Currently, this supports removing only hot 207999653d4eSeschrock * spares. 
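 *
 * The combinations of "present in the spare list" (nv) and "present in
 * the vdev tree" (vd) below determine the outcome:
 *
 *	no nv, no vd            -> ENOENT  (no such device)
 *	no nv, vd               -> ENOTSUP (not a hot spare)
 *	nv, vd, !unspare        -> EBUSY   (spare is currently in use)
 *	otherwise               -> the entry is removed from spa_sparelist
 *	                           and the change is synced out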
208099653d4eSeschrock */ 208199653d4eSeschrock int 208299653d4eSeschrock spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 208399653d4eSeschrock { 208499653d4eSeschrock vdev_t *vd; 208599653d4eSeschrock nvlist_t **spares, *nv, **newspares; 208699653d4eSeschrock uint_t i, j, nspares; 208799653d4eSeschrock int ret = 0; 208899653d4eSeschrock 208999653d4eSeschrock spa_config_enter(spa, RW_WRITER, FTAG); 209099653d4eSeschrock 209199653d4eSeschrock vd = spa_lookup_by_guid(spa, guid); 209299653d4eSeschrock 209399653d4eSeschrock nv = NULL; 209499653d4eSeschrock if (spa->spa_spares != NULL && 209599653d4eSeschrock nvlist_lookup_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 209699653d4eSeschrock &spares, &nspares) == 0) { 209799653d4eSeschrock for (i = 0; i < nspares; i++) { 209899653d4eSeschrock uint64_t theguid; 209999653d4eSeschrock 210099653d4eSeschrock VERIFY(nvlist_lookup_uint64(spares[i], 210199653d4eSeschrock ZPOOL_CONFIG_GUID, &theguid) == 0); 210299653d4eSeschrock if (theguid == guid) { 210399653d4eSeschrock nv = spares[i]; 210499653d4eSeschrock break; 210599653d4eSeschrock } 210699653d4eSeschrock } 210799653d4eSeschrock } 210899653d4eSeschrock 210999653d4eSeschrock /* 211099653d4eSeschrock * We only support removing a hot spare, and only if it's not currently 211199653d4eSeschrock * in use in this pool. 211299653d4eSeschrock */ 211399653d4eSeschrock if (nv == NULL && vd == NULL) { 211499653d4eSeschrock ret = ENOENT; 211599653d4eSeschrock goto out; 211699653d4eSeschrock } 211799653d4eSeschrock 211899653d4eSeschrock if (nv == NULL && vd != NULL) { 211999653d4eSeschrock ret = ENOTSUP; 212099653d4eSeschrock goto out; 212199653d4eSeschrock } 212299653d4eSeschrock 212399653d4eSeschrock if (!unspare && nv != NULL && vd != NULL) { 212499653d4eSeschrock ret = EBUSY; 212599653d4eSeschrock goto out; 212699653d4eSeschrock } 212799653d4eSeschrock 212899653d4eSeschrock if (nspares == 1) { 212999653d4eSeschrock newspares = NULL; 213099653d4eSeschrock } else { 213199653d4eSeschrock newspares = kmem_alloc((nspares - 1) * sizeof (void *), 213299653d4eSeschrock KM_SLEEP); 213399653d4eSeschrock for (i = 0, j = 0; i < nspares; i++) { 213499653d4eSeschrock if (spares[i] != nv) 213599653d4eSeschrock VERIFY(nvlist_dup(spares[i], 213699653d4eSeschrock &newspares[j++], KM_SLEEP) == 0); 213799653d4eSeschrock } 213899653d4eSeschrock } 213999653d4eSeschrock 214099653d4eSeschrock VERIFY(nvlist_remove(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 214199653d4eSeschrock DATA_TYPE_NVLIST_ARRAY) == 0); 214299653d4eSeschrock VERIFY(nvlist_add_nvlist_array(spa->spa_sparelist, ZPOOL_CONFIG_SPARES, 214399653d4eSeschrock newspares, nspares - 1) == 0); 214499653d4eSeschrock for (i = 0; i < nspares - 1; i++) 214599653d4eSeschrock nvlist_free(newspares[i]); 214699653d4eSeschrock kmem_free(newspares, (nspares - 1) * sizeof (void *)); 214799653d4eSeschrock spa_load_spares(spa); 214899653d4eSeschrock spa->spa_sync_spares = B_TRUE; 214999653d4eSeschrock 215099653d4eSeschrock out: 215199653d4eSeschrock spa_config_exit(spa, FTAG); 215299653d4eSeschrock 215399653d4eSeschrock return (ret); 2154fa9e4066Sahrens } 2155fa9e4066Sahrens 2156fa9e4066Sahrens /* 21573d7072f8Seschrock * Find any device that's done replacing, or a vdev marked 'unspare' that's 21583d7072f8Seschrock * current spared, so we can detach it. 
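 * Two shapes are recognized below: a 'replacing' vdev whose new child has
 * finished resilvering (both DTLs empty), and a 'spare' vdev whose
 * resilvered child carries the 'unspare' flag; in both cases the vdev
 * returned is the one that should now be detached.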
2159fa9e4066Sahrens */ 2160ea8dc4b6Seschrock static vdev_t * 21613d7072f8Seschrock spa_vdev_resilver_done_hunt(vdev_t *vd) 2162fa9e4066Sahrens { 2163ea8dc4b6Seschrock vdev_t *newvd, *oldvd; 2164fa9e4066Sahrens int c; 2165fa9e4066Sahrens 2166ea8dc4b6Seschrock for (c = 0; c < vd->vdev_children; c++) { 21673d7072f8Seschrock oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 2168ea8dc4b6Seschrock if (oldvd != NULL) 2169ea8dc4b6Seschrock return (oldvd); 2170ea8dc4b6Seschrock } 2171fa9e4066Sahrens 21723d7072f8Seschrock /* 21733d7072f8Seschrock * Check for a completed replacement. 21743d7072f8Seschrock */ 2175fa9e4066Sahrens if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { 2176ea8dc4b6Seschrock oldvd = vd->vdev_child[0]; 2177ea8dc4b6Seschrock newvd = vd->vdev_child[1]; 2178ea8dc4b6Seschrock 2179ea8dc4b6Seschrock mutex_enter(&newvd->vdev_dtl_lock); 2180ea8dc4b6Seschrock if (newvd->vdev_dtl_map.sm_space == 0 && 2181ea8dc4b6Seschrock newvd->vdev_dtl_scrub.sm_space == 0) { 2182ea8dc4b6Seschrock mutex_exit(&newvd->vdev_dtl_lock); 2183ea8dc4b6Seschrock return (oldvd); 2184fa9e4066Sahrens } 2185ea8dc4b6Seschrock mutex_exit(&newvd->vdev_dtl_lock); 2186fa9e4066Sahrens } 2187ea8dc4b6Seschrock 21883d7072f8Seschrock /* 21893d7072f8Seschrock * Check for a completed resilver with the 'unspare' flag set. 21903d7072f8Seschrock */ 21913d7072f8Seschrock if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { 21923d7072f8Seschrock newvd = vd->vdev_child[0]; 21933d7072f8Seschrock oldvd = vd->vdev_child[1]; 21943d7072f8Seschrock 21953d7072f8Seschrock mutex_enter(&newvd->vdev_dtl_lock); 21963d7072f8Seschrock if (newvd->vdev_unspare && 21973d7072f8Seschrock newvd->vdev_dtl_map.sm_space == 0 && 21983d7072f8Seschrock newvd->vdev_dtl_scrub.sm_space == 0) { 21993d7072f8Seschrock newvd->vdev_unspare = 0; 22003d7072f8Seschrock mutex_exit(&newvd->vdev_dtl_lock); 22013d7072f8Seschrock return (oldvd); 22023d7072f8Seschrock } 22033d7072f8Seschrock mutex_exit(&newvd->vdev_dtl_lock); 22043d7072f8Seschrock } 22053d7072f8Seschrock 2206ea8dc4b6Seschrock return (NULL); 2207fa9e4066Sahrens } 2208fa9e4066Sahrens 2209ea8dc4b6Seschrock static void 22103d7072f8Seschrock spa_vdev_resilver_done(spa_t *spa) 2211fa9e4066Sahrens { 2212ea8dc4b6Seschrock vdev_t *vd; 221399653d4eSeschrock vdev_t *pvd; 2214ea8dc4b6Seschrock uint64_t guid; 221599653d4eSeschrock uint64_t pguid = 0; 2216ea8dc4b6Seschrock 2217ea8dc4b6Seschrock spa_config_enter(spa, RW_READER, FTAG); 2218ea8dc4b6Seschrock 22193d7072f8Seschrock while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 2220ea8dc4b6Seschrock guid = vd->vdev_guid; 222199653d4eSeschrock /* 222299653d4eSeschrock * If we have just finished replacing a hot spared device, then 222399653d4eSeschrock * we need to detach the parent's first child (the original hot 222499653d4eSeschrock * spare) as well. 
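 * The shape being handled is, illustratively:
 *
 *	spare
 *	 +-- replacing { old disk, new disk }	<- pvd, vdev_id 0
 *	 +-- original hot spare			<- child[1], detached
 *						   afterwards via 'pguid'
 *
 * where 'vd' is the old disk that the hunt above told us to detach.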
222599653d4eSeschrock */ 222699653d4eSeschrock pvd = vd->vdev_parent; 222799653d4eSeschrock if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && 222899653d4eSeschrock pvd->vdev_id == 0) { 222999653d4eSeschrock ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 223099653d4eSeschrock ASSERT(pvd->vdev_parent->vdev_children == 2); 223199653d4eSeschrock pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; 223299653d4eSeschrock } 2233ea8dc4b6Seschrock spa_config_exit(spa, FTAG); 2234ea8dc4b6Seschrock if (spa_vdev_detach(spa, guid, B_TRUE) != 0) 2235ea8dc4b6Seschrock return; 223699653d4eSeschrock if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) 223799653d4eSeschrock return; 2238ea8dc4b6Seschrock spa_config_enter(spa, RW_READER, FTAG); 2239fa9e4066Sahrens } 2240fa9e4066Sahrens 2241ea8dc4b6Seschrock spa_config_exit(spa, FTAG); 2242fa9e4066Sahrens } 2243fa9e4066Sahrens 2244c67d9675Seschrock /* 2245c67d9675Seschrock * Update the stored path for this vdev. Dirty the vdev configuration, relying 2246c67d9675Seschrock * on spa_vdev_enter/exit() to synchronize the labels and cache. 2247c67d9675Seschrock */ 2248c67d9675Seschrock int 2249c67d9675Seschrock spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 2250c67d9675Seschrock { 2251c67d9675Seschrock vdev_t *rvd, *vd; 2252c67d9675Seschrock uint64_t txg; 2253c67d9675Seschrock 2254c67d9675Seschrock rvd = spa->spa_root_vdev; 2255c67d9675Seschrock 2256c67d9675Seschrock txg = spa_vdev_enter(spa); 2257c67d9675Seschrock 225899653d4eSeschrock if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 225999653d4eSeschrock /* 226099653d4eSeschrock * Determine if this is a reference to a hot spare. In that 226199653d4eSeschrock * case, update the path as stored in the spare list. 226299653d4eSeschrock */ 226399653d4eSeschrock nvlist_t **spares; 226499653d4eSeschrock uint_t i, nspares; 226599653d4eSeschrock if (spa->spa_sparelist != NULL) { 226699653d4eSeschrock VERIFY(nvlist_lookup_nvlist_array(spa->spa_sparelist, 226799653d4eSeschrock ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 226899653d4eSeschrock for (i = 0; i < nspares; i++) { 226999653d4eSeschrock uint64_t theguid; 227099653d4eSeschrock VERIFY(nvlist_lookup_uint64(spares[i], 227199653d4eSeschrock ZPOOL_CONFIG_GUID, &theguid) == 0); 227299653d4eSeschrock if (theguid == guid) 227399653d4eSeschrock break; 227499653d4eSeschrock } 227599653d4eSeschrock 227699653d4eSeschrock if (i == nspares) 227799653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 227899653d4eSeschrock 227999653d4eSeschrock VERIFY(nvlist_add_string(spares[i], 228099653d4eSeschrock ZPOOL_CONFIG_PATH, newpath) == 0); 228199653d4eSeschrock spa_load_spares(spa); 228299653d4eSeschrock spa->spa_sync_spares = B_TRUE; 228399653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, 0)); 228499653d4eSeschrock } else { 228599653d4eSeschrock return (spa_vdev_exit(spa, NULL, txg, ENOENT)); 228699653d4eSeschrock } 228799653d4eSeschrock } 2288c67d9675Seschrock 22890e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 22900e34b6a7Sbonwick return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 22910e34b6a7Sbonwick 2292c67d9675Seschrock spa_strfree(vd->vdev_path); 2293c67d9675Seschrock vd->vdev_path = spa_strdup(newpath); 2294c67d9675Seschrock 2295c67d9675Seschrock vdev_config_dirty(vd->vdev_top); 2296c67d9675Seschrock 2297c67d9675Seschrock return (spa_vdev_exit(spa, NULL, txg, 0)); 2298c67d9675Seschrock } 2299c67d9675Seschrock 2300fa9e4066Sahrens /* 2301fa9e4066Sahrens * ========================================================================== 
2302fa9e4066Sahrens * SPA Scrubbing 2303fa9e4066Sahrens * ========================================================================== 2304fa9e4066Sahrens */ 2305fa9e4066Sahrens 2306fa9e4066Sahrens static void 2307fa9e4066Sahrens spa_scrub_io_done(zio_t *zio) 2308fa9e4066Sahrens { 2309fa9e4066Sahrens spa_t *spa = zio->io_spa; 2310fa9e4066Sahrens 23110e8c6158Smaybee arc_data_buf_free(zio->io_data, zio->io_size); 2312fa9e4066Sahrens 2313fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2314ea8dc4b6Seschrock if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 231544cd46caSbillm vdev_t *vd = zio->io_vd ? zio->io_vd : spa->spa_root_vdev; 2316ea8dc4b6Seschrock spa->spa_scrub_errors++; 2317fa9e4066Sahrens mutex_enter(&vd->vdev_stat_lock); 2318fa9e4066Sahrens vd->vdev_stat.vs_scrub_errors++; 2319fa9e4066Sahrens mutex_exit(&vd->vdev_stat_lock); 2320fa9e4066Sahrens } 232105b2b3b8Smishra 232205b2b3b8Smishra if (--spa->spa_scrub_inflight < spa->spa_scrub_maxinflight) 2323ea8dc4b6Seschrock cv_broadcast(&spa->spa_scrub_io_cv); 232405b2b3b8Smishra 232505b2b3b8Smishra ASSERT(spa->spa_scrub_inflight >= 0); 232605b2b3b8Smishra 2327ea8dc4b6Seschrock mutex_exit(&spa->spa_scrub_lock); 2328fa9e4066Sahrens } 2329fa9e4066Sahrens 2330fa9e4066Sahrens static void 2331ea8dc4b6Seschrock spa_scrub_io_start(spa_t *spa, blkptr_t *bp, int priority, int flags, 2332ea8dc4b6Seschrock zbookmark_t *zb) 2333fa9e4066Sahrens { 2334fa9e4066Sahrens size_t size = BP_GET_LSIZE(bp); 233505b2b3b8Smishra void *data; 2336fa9e4066Sahrens 2337fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 233805b2b3b8Smishra /* 233905b2b3b8Smishra * Do not give too much work to vdev(s). 234005b2b3b8Smishra */ 234105b2b3b8Smishra while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) { 234205b2b3b8Smishra cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 234305b2b3b8Smishra } 2344fa9e4066Sahrens spa->spa_scrub_inflight++; 2345fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2346fa9e4066Sahrens 23470e8c6158Smaybee data = arc_data_buf_alloc(size); 234805b2b3b8Smishra 2349ea8dc4b6Seschrock if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET) 2350ea8dc4b6Seschrock flags |= ZIO_FLAG_SPECULATIVE; /* intent log block */ 2351ea8dc4b6Seschrock 2352d80c45e0Sbonwick flags |= ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL; 2353ea8dc4b6Seschrock 2354fa9e4066Sahrens zio_nowait(zio_read(NULL, spa, bp, data, size, 2355ea8dc4b6Seschrock spa_scrub_io_done, NULL, priority, flags, zb)); 2356fa9e4066Sahrens } 2357fa9e4066Sahrens 2358fa9e4066Sahrens /* ARGSUSED */ 2359fa9e4066Sahrens static int 2360fa9e4066Sahrens spa_scrub_cb(traverse_blk_cache_t *bc, spa_t *spa, void *a) 2361fa9e4066Sahrens { 2362fa9e4066Sahrens blkptr_t *bp = &bc->bc_blkptr; 236344cd46caSbillm vdev_t *vd = spa->spa_root_vdev; 236444cd46caSbillm dva_t *dva = bp->blk_dva; 236544cd46caSbillm int needs_resilver = B_FALSE; 236644cd46caSbillm int d; 2367fa9e4066Sahrens 236844cd46caSbillm if (bc->bc_errno) { 2369fa9e4066Sahrens /* 2370fa9e4066Sahrens * We can't scrub this block, but we can continue to scrub 2371fa9e4066Sahrens * the rest of the pool. Note the error and move along. 
2372fa9e4066Sahrens */ 2373fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2374fa9e4066Sahrens spa->spa_scrub_errors++; 2375fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2376fa9e4066Sahrens 237744cd46caSbillm mutex_enter(&vd->vdev_stat_lock); 237844cd46caSbillm vd->vdev_stat.vs_scrub_errors++; 237944cd46caSbillm mutex_exit(&vd->vdev_stat_lock); 2380fa9e4066Sahrens 2381fa9e4066Sahrens return (ERESTART); 2382fa9e4066Sahrens } 2383fa9e4066Sahrens 2384fa9e4066Sahrens ASSERT(bp->blk_birth < spa->spa_scrub_maxtxg); 2385fa9e4066Sahrens 238644cd46caSbillm for (d = 0; d < BP_GET_NDVAS(bp); d++) { 238744cd46caSbillm vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d])); 2388fa9e4066Sahrens 238944cd46caSbillm ASSERT(vd != NULL); 239044cd46caSbillm 239144cd46caSbillm /* 239244cd46caSbillm * Keep track of how much data we've examined so that 239344cd46caSbillm * zpool(1M) status can make useful progress reports. 239444cd46caSbillm */ 239544cd46caSbillm mutex_enter(&vd->vdev_stat_lock); 239644cd46caSbillm vd->vdev_stat.vs_scrub_examined += DVA_GET_ASIZE(&dva[d]); 239744cd46caSbillm mutex_exit(&vd->vdev_stat_lock); 239844cd46caSbillm 239944cd46caSbillm if (spa->spa_scrub_type == POOL_SCRUB_RESILVER) { 240044cd46caSbillm if (DVA_GET_GANG(&dva[d])) { 240144cd46caSbillm /* 240244cd46caSbillm * Gang members may be spread across multiple 240344cd46caSbillm * vdevs, so the best we can do is look at the 240444cd46caSbillm * pool-wide DTL. 240544cd46caSbillm * XXX -- it would be better to change our 240644cd46caSbillm * allocation policy to ensure that this can't 240744cd46caSbillm * happen. 240844cd46caSbillm */ 240944cd46caSbillm vd = spa->spa_root_vdev; 241044cd46caSbillm } 241144cd46caSbillm if (vdev_dtl_contains(&vd->vdev_dtl_map, 241244cd46caSbillm bp->blk_birth, 1)) 241344cd46caSbillm needs_resilver = B_TRUE; 2414fa9e4066Sahrens } 241544cd46caSbillm } 241644cd46caSbillm 241744cd46caSbillm if (spa->spa_scrub_type == POOL_SCRUB_EVERYTHING) 2418fa9e4066Sahrens spa_scrub_io_start(spa, bp, ZIO_PRIORITY_SCRUB, 2419ea8dc4b6Seschrock ZIO_FLAG_SCRUB, &bc->bc_bookmark); 242044cd46caSbillm else if (needs_resilver) 242144cd46caSbillm spa_scrub_io_start(spa, bp, ZIO_PRIORITY_RESILVER, 242244cd46caSbillm ZIO_FLAG_RESILVER, &bc->bc_bookmark); 2423fa9e4066Sahrens 2424fa9e4066Sahrens return (0); 2425fa9e4066Sahrens } 2426fa9e4066Sahrens 2427fa9e4066Sahrens static void 2428fa9e4066Sahrens spa_scrub_thread(spa_t *spa) 2429fa9e4066Sahrens { 2430fa9e4066Sahrens callb_cpr_t cprinfo; 2431fa9e4066Sahrens traverse_handle_t *th = spa->spa_scrub_th; 2432fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 2433fa9e4066Sahrens pool_scrub_type_t scrub_type = spa->spa_scrub_type; 2434fa9e4066Sahrens int error = 0; 2435fa9e4066Sahrens boolean_t complete; 2436fa9e4066Sahrens 2437fa9e4066Sahrens CALLB_CPR_INIT(&cprinfo, &spa->spa_scrub_lock, callb_generic_cpr, FTAG); 2438fa9e4066Sahrens 2439f0aa80d4Sbonwick /* 2440f0aa80d4Sbonwick * If we're restarting due to a snapshot create/delete, 2441f0aa80d4Sbonwick * wait for that to complete. 2442f0aa80d4Sbonwick */ 2443f0aa80d4Sbonwick txg_wait_synced(spa_get_dsl(spa), 0); 2444f0aa80d4Sbonwick 2445ea8dc4b6Seschrock dprintf("start %s mintxg=%llu maxtxg=%llu\n", 2446ea8dc4b6Seschrock scrub_type == POOL_SCRUB_RESILVER ? 
"resilver" : "scrub", 2447ea8dc4b6Seschrock spa->spa_scrub_mintxg, spa->spa_scrub_maxtxg); 2448ea8dc4b6Seschrock 2449ea8dc4b6Seschrock spa_config_enter(spa, RW_WRITER, FTAG); 2450ea8dc4b6Seschrock vdev_reopen(rvd); /* purge all vdev caches */ 2451fa9e4066Sahrens vdev_config_dirty(rvd); /* rewrite all disk labels */ 2452fa9e4066Sahrens vdev_scrub_stat_update(rvd, scrub_type, B_FALSE); 2453ea8dc4b6Seschrock spa_config_exit(spa, FTAG); 2454fa9e4066Sahrens 2455fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2456fa9e4066Sahrens spa->spa_scrub_errors = 0; 2457fa9e4066Sahrens spa->spa_scrub_active = 1; 2458ea8dc4b6Seschrock ASSERT(spa->spa_scrub_inflight == 0); 2459fa9e4066Sahrens 2460fa9e4066Sahrens while (!spa->spa_scrub_stop) { 2461fa9e4066Sahrens CALLB_CPR_SAFE_BEGIN(&cprinfo); 2462ea8dc4b6Seschrock while (spa->spa_scrub_suspended) { 2463fa9e4066Sahrens spa->spa_scrub_active = 0; 2464fa9e4066Sahrens cv_broadcast(&spa->spa_scrub_cv); 2465fa9e4066Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2466fa9e4066Sahrens spa->spa_scrub_active = 1; 2467fa9e4066Sahrens } 2468fa9e4066Sahrens CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_scrub_lock); 2469fa9e4066Sahrens 2470fa9e4066Sahrens if (spa->spa_scrub_restart_txg != 0) 2471fa9e4066Sahrens break; 2472fa9e4066Sahrens 2473fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2474fa9e4066Sahrens error = traverse_more(th); 2475fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2476fa9e4066Sahrens if (error != EAGAIN) 2477fa9e4066Sahrens break; 2478fa9e4066Sahrens } 2479fa9e4066Sahrens 2480fa9e4066Sahrens while (spa->spa_scrub_inflight) 2481fa9e4066Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2482fa9e4066Sahrens 24835dabedeeSbonwick spa->spa_scrub_active = 0; 24845dabedeeSbonwick cv_broadcast(&spa->spa_scrub_cv); 24855dabedeeSbonwick 24865dabedeeSbonwick mutex_exit(&spa->spa_scrub_lock); 24875dabedeeSbonwick 24885dabedeeSbonwick spa_config_enter(spa, RW_WRITER, FTAG); 24895dabedeeSbonwick 24905dabedeeSbonwick mutex_enter(&spa->spa_scrub_lock); 24915dabedeeSbonwick 24925dabedeeSbonwick /* 24935dabedeeSbonwick * Note: we check spa_scrub_restart_txg under both spa_scrub_lock 24945dabedeeSbonwick * AND the spa config lock to synchronize with any config changes 24955dabedeeSbonwick * that revise the DTLs under spa_vdev_enter() / spa_vdev_exit(). 24965dabedeeSbonwick */ 2497fa9e4066Sahrens if (spa->spa_scrub_restart_txg != 0) 2498fa9e4066Sahrens error = ERESTART; 2499fa9e4066Sahrens 2500ea8dc4b6Seschrock if (spa->spa_scrub_stop) 2501ea8dc4b6Seschrock error = EINTR; 2502ea8dc4b6Seschrock 2503fa9e4066Sahrens /* 2504ea8dc4b6Seschrock * Even if there were uncorrectable errors, we consider the scrub 2505ea8dc4b6Seschrock * completed. The downside is that if there is a transient error during 2506ea8dc4b6Seschrock * a resilver, we won't resilver the data properly to the target. But 2507ea8dc4b6Seschrock * if the damage is permanent (more likely) we will resilver forever, 2508ea8dc4b6Seschrock * which isn't really acceptable. Since there is enough information for 2509ea8dc4b6Seschrock * the user to know what has failed and why, this seems like a more 2510ea8dc4b6Seschrock * tractable approach. 2511fa9e4066Sahrens */ 2512ea8dc4b6Seschrock complete = (error == 0); 2513fa9e4066Sahrens 2514ea8dc4b6Seschrock dprintf("end %s to maxtxg=%llu %s, traverse=%d, %llu errors, stop=%u\n", 2515ea8dc4b6Seschrock scrub_type == POOL_SCRUB_RESILVER ? "resilver" : "scrub", 2516fa9e4066Sahrens spa->spa_scrub_maxtxg, complete ? 
"done" : "FAILED", 2517fa9e4066Sahrens error, spa->spa_scrub_errors, spa->spa_scrub_stop); 2518fa9e4066Sahrens 2519fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2520fa9e4066Sahrens 2521fa9e4066Sahrens /* 2522fa9e4066Sahrens * If the scrub/resilver completed, update all DTLs to reflect this. 2523fa9e4066Sahrens * Whether it succeeded or not, vacate all temporary scrub DTLs. 2524fa9e4066Sahrens */ 2525fa9e4066Sahrens vdev_dtl_reassess(rvd, spa_last_synced_txg(spa) + 1, 2526fa9e4066Sahrens complete ? spa->spa_scrub_maxtxg : 0, B_TRUE); 2527fa9e4066Sahrens vdev_scrub_stat_update(rvd, POOL_SCRUB_NONE, complete); 2528ea8dc4b6Seschrock spa_errlog_rotate(spa); 25295dabedeeSbonwick 25303d7072f8Seschrock if (scrub_type == POOL_SCRUB_RESILVER && complete) 25313d7072f8Seschrock spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH); 25323d7072f8Seschrock 2533ea8dc4b6Seschrock spa_config_exit(spa, FTAG); 2534fa9e4066Sahrens 2535fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2536fa9e4066Sahrens 2537ea8dc4b6Seschrock /* 2538ea8dc4b6Seschrock * We may have finished replacing a device. 2539ea8dc4b6Seschrock * Let the async thread assess this and handle the detach. 2540ea8dc4b6Seschrock */ 25413d7072f8Seschrock spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 2542fa9e4066Sahrens 2543fa9e4066Sahrens /* 2544fa9e4066Sahrens * If we were told to restart, our final act is to start a new scrub. 2545fa9e4066Sahrens */ 2546fa9e4066Sahrens if (error == ERESTART) 2547ea8dc4b6Seschrock spa_async_request(spa, scrub_type == POOL_SCRUB_RESILVER ? 2548ea8dc4b6Seschrock SPA_ASYNC_RESILVER : SPA_ASYNC_SCRUB); 2549fa9e4066Sahrens 2550ea8dc4b6Seschrock spa->spa_scrub_type = POOL_SCRUB_NONE; 2551ea8dc4b6Seschrock spa->spa_scrub_active = 0; 2552ea8dc4b6Seschrock spa->spa_scrub_thread = NULL; 2553ea8dc4b6Seschrock cv_broadcast(&spa->spa_scrub_cv); 2554fa9e4066Sahrens CALLB_CPR_EXIT(&cprinfo); /* drops &spa->spa_scrub_lock */ 2555fa9e4066Sahrens thread_exit(); 2556fa9e4066Sahrens } 2557fa9e4066Sahrens 2558fa9e4066Sahrens void 2559fa9e4066Sahrens spa_scrub_suspend(spa_t *spa) 2560fa9e4066Sahrens { 2561fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2562ea8dc4b6Seschrock spa->spa_scrub_suspended++; 2563fa9e4066Sahrens while (spa->spa_scrub_active) { 2564fa9e4066Sahrens cv_broadcast(&spa->spa_scrub_cv); 2565fa9e4066Sahrens cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock); 2566fa9e4066Sahrens } 2567fa9e4066Sahrens while (spa->spa_scrub_inflight) 2568fa9e4066Sahrens cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2569fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2570fa9e4066Sahrens } 2571fa9e4066Sahrens 2572fa9e4066Sahrens void 2573fa9e4066Sahrens spa_scrub_resume(spa_t *spa) 2574fa9e4066Sahrens { 2575fa9e4066Sahrens mutex_enter(&spa->spa_scrub_lock); 2576ea8dc4b6Seschrock ASSERT(spa->spa_scrub_suspended != 0); 2577ea8dc4b6Seschrock if (--spa->spa_scrub_suspended == 0) 2578fa9e4066Sahrens cv_broadcast(&spa->spa_scrub_cv); 2579fa9e4066Sahrens mutex_exit(&spa->spa_scrub_lock); 2580fa9e4066Sahrens } 2581fa9e4066Sahrens 2582fa9e4066Sahrens void 2583fa9e4066Sahrens spa_scrub_restart(spa_t *spa, uint64_t txg) 2584fa9e4066Sahrens { 2585fa9e4066Sahrens /* 2586fa9e4066Sahrens * Something happened (e.g. snapshot create/delete) that means 2587fa9e4066Sahrens * we must restart any in-progress scrubs. The itinerary will 2588fa9e4066Sahrens * fix this properly. 
2589fa9e4066Sahrens      */
2590fa9e4066Sahrens     mutex_enter(&spa->spa_scrub_lock);
2591fa9e4066Sahrens     spa->spa_scrub_restart_txg = txg;
2592fa9e4066Sahrens     mutex_exit(&spa->spa_scrub_lock);
2593fa9e4066Sahrens }
2594fa9e4066Sahrens
2595ea8dc4b6Seschrock int
2596ea8dc4b6Seschrock spa_scrub(spa_t *spa, pool_scrub_type_t type, boolean_t force)
2597fa9e4066Sahrens {
2598fa9e4066Sahrens     space_seg_t *ss;
2599fa9e4066Sahrens     uint64_t mintxg, maxtxg;
2600fa9e4066Sahrens     vdev_t *rvd = spa->spa_root_vdev;
2601fa9e4066Sahrens
2602fa9e4066Sahrens     if ((uint_t)type >= POOL_SCRUB_TYPES)
2603fa9e4066Sahrens         return (ENOTSUP);
2604fa9e4066Sahrens
2605ea8dc4b6Seschrock     mutex_enter(&spa->spa_scrub_lock);
2606ea8dc4b6Seschrock
2607fa9e4066Sahrens     /*
2608fa9e4066Sahrens      * If there's a scrub or resilver already in progress, stop it.
2609fa9e4066Sahrens      */
2610fa9e4066Sahrens     while (spa->spa_scrub_thread != NULL) {
2611fa9e4066Sahrens         /*
2612fa9e4066Sahrens          * Don't stop a resilver unless forced.
2613fa9e4066Sahrens          */
2614ea8dc4b6Seschrock         if (spa->spa_scrub_type == POOL_SCRUB_RESILVER && !force) {
2615ea8dc4b6Seschrock             mutex_exit(&spa->spa_scrub_lock);
2616fa9e4066Sahrens             return (EBUSY);
2617ea8dc4b6Seschrock         }
2618fa9e4066Sahrens         spa->spa_scrub_stop = 1;
2619fa9e4066Sahrens         cv_broadcast(&spa->spa_scrub_cv);
2620fa9e4066Sahrens         cv_wait(&spa->spa_scrub_cv, &spa->spa_scrub_lock);
2621fa9e4066Sahrens     }
2622fa9e4066Sahrens
2623fa9e4066Sahrens     /*
2624fa9e4066Sahrens      * Terminate the previous traverse.
2625fa9e4066Sahrens      */
2626fa9e4066Sahrens     if (spa->spa_scrub_th != NULL) {
2627fa9e4066Sahrens         traverse_fini(spa->spa_scrub_th);
2628fa9e4066Sahrens         spa->spa_scrub_th = NULL;
2629fa9e4066Sahrens     }
2630fa9e4066Sahrens
2631ea8dc4b6Seschrock     if (rvd == NULL) {
2632ea8dc4b6Seschrock         ASSERT(spa->spa_scrub_stop == 0);
2633ea8dc4b6Seschrock         ASSERT(spa->spa_scrub_type == type);
2634ea8dc4b6Seschrock         ASSERT(spa->spa_scrub_restart_txg == 0);
2635ea8dc4b6Seschrock         mutex_exit(&spa->spa_scrub_lock);
2636ea8dc4b6Seschrock         return (0);
2637ea8dc4b6Seschrock     }
2638fa9e4066Sahrens
2639fa9e4066Sahrens     mintxg = TXG_INITIAL - 1;
2640fa9e4066Sahrens     maxtxg = spa_last_synced_txg(spa) + 1;
2641fa9e4066Sahrens
2642ea8dc4b6Seschrock     mutex_enter(&rvd->vdev_dtl_lock);
2643fa9e4066Sahrens
2644ea8dc4b6Seschrock     if (rvd->vdev_dtl_map.sm_space == 0) {
2645ea8dc4b6Seschrock         /*
2646ea8dc4b6Seschrock          * The pool-wide DTL is empty.
2647ecc2d604Sbonwick          * If this is a resilver, there's nothing to do except
2648ecc2d604Sbonwick          * check whether any in-progress replacements have completed.
2649ea8dc4b6Seschrock          */
2650ecc2d604Sbonwick         if (type == POOL_SCRUB_RESILVER) {
2651ea8dc4b6Seschrock             type = POOL_SCRUB_NONE;
26523d7072f8Seschrock             spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
2653ecc2d604Sbonwick         }
2654ea8dc4b6Seschrock     } else {
2655ea8dc4b6Seschrock         /*
2656ea8dc4b6Seschrock          * The pool-wide DTL is non-empty.
2657ea8dc4b6Seschrock          * If this is a normal scrub, upgrade to a resilver instead.
2658ea8dc4b6Seschrock          */
2659ea8dc4b6Seschrock         if (type == POOL_SCRUB_EVERYTHING)
2660ea8dc4b6Seschrock             type = POOL_SCRUB_RESILVER;
2661ea8dc4b6Seschrock     }
2662fa9e4066Sahrens
2663ea8dc4b6Seschrock     if (type == POOL_SCRUB_RESILVER) {
2664fa9e4066Sahrens         /*
2665fa9e4066Sahrens          * Determine the resilvering boundaries.
2666fa9e4066Sahrens          *
2667fa9e4066Sahrens          * Note: (mintxg, maxtxg) is an open interval,
2668fa9e4066Sahrens          * i.e. mintxg and maxtxg themselves are not included.
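         * For example (illustrative numbers, not taken from the code):
         * if the pool-wide DTL's first segment starts at txg 100, its
         * last segment ends at txg 150, and spa_last_synced_txg() is
         * 120, then mintxg = 99 and maxtxg = MIN(150, 121) = 121, so
         * txgs 100 through 120 are resilvered.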
2669fa9e4066Sahrens          *
2670fa9e4066Sahrens          * Note: for maxtxg, we MIN with spa_last_synced_txg(spa) + 1
2671fa9e4066Sahrens          * so we don't claim to resilver a txg that's still changing.
2672fa9e4066Sahrens          */
2673fa9e4066Sahrens         ss = avl_first(&rvd->vdev_dtl_map.sm_root);
2674ea8dc4b6Seschrock         mintxg = ss->ss_start - 1;
2675fa9e4066Sahrens         ss = avl_last(&rvd->vdev_dtl_map.sm_root);
2676ea8dc4b6Seschrock         maxtxg = MIN(ss->ss_end, maxtxg);
26773d7072f8Seschrock
26783d7072f8Seschrock         spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
2679fa9e4066Sahrens     }
2680fa9e4066Sahrens
2681ea8dc4b6Seschrock     mutex_exit(&rvd->vdev_dtl_lock);
2682ea8dc4b6Seschrock
2683ea8dc4b6Seschrock     spa->spa_scrub_stop = 0;
2684ea8dc4b6Seschrock     spa->spa_scrub_type = type;
2685ea8dc4b6Seschrock     spa->spa_scrub_restart_txg = 0;
2686ea8dc4b6Seschrock
2687ea8dc4b6Seschrock     if (type != POOL_SCRUB_NONE) {
2688ea8dc4b6Seschrock         spa->spa_scrub_mintxg = mintxg;
2689fa9e4066Sahrens         spa->spa_scrub_maxtxg = maxtxg;
2690fa9e4066Sahrens         spa->spa_scrub_th = traverse_init(spa, spa_scrub_cb, NULL,
26910373e76bSbonwick             ADVANCE_PRE | ADVANCE_PRUNE | ADVANCE_ZIL,
26920373e76bSbonwick             ZIO_FLAG_CANFAIL);
2693fa9e4066Sahrens         traverse_add_pool(spa->spa_scrub_th, mintxg, maxtxg);
2694fa9e4066Sahrens         spa->spa_scrub_thread = thread_create(NULL, 0,
2695fa9e4066Sahrens             spa_scrub_thread, spa, 0, &p0, TS_RUN, minclsyspri);
2696fa9e4066Sahrens     }
2697fa9e4066Sahrens
2698ea8dc4b6Seschrock     mutex_exit(&spa->spa_scrub_lock);
2699ea8dc4b6Seschrock
2700fa9e4066Sahrens     return (0);
2701fa9e4066Sahrens }
2702fa9e4066Sahrens
2703ea8dc4b6Seschrock /*
2704ea8dc4b6Seschrock  * ==========================================================================
2705ea8dc4b6Seschrock  * SPA async task processing
2706ea8dc4b6Seschrock  * ==========================================================================
2707ea8dc4b6Seschrock  */
2708ea8dc4b6Seschrock
2709ea8dc4b6Seschrock static void
27103d7072f8Seschrock spa_async_remove(spa_t *spa, vdev_t *vd)
2711fa9e4066Sahrens {
2712ea8dc4b6Seschrock     vdev_t *tvd;
2713ea8dc4b6Seschrock     int c;
2714fa9e4066Sahrens
27153d7072f8Seschrock     for (c = 0; c < vd->vdev_children; c++) {
27163d7072f8Seschrock         tvd = vd->vdev_child[c];
27173d7072f8Seschrock         if (tvd->vdev_remove_wanted) {
27183d7072f8Seschrock             tvd->vdev_remove_wanted = 0;
27193d7072f8Seschrock             vdev_set_state(tvd, B_FALSE, VDEV_STATE_REMOVED,
27203d7072f8Seschrock                 VDEV_AUX_NONE);
27213d7072f8Seschrock             vdev_clear(spa, tvd);
27223d7072f8Seschrock             vdev_config_dirty(tvd->vdev_top);
2723ea8dc4b6Seschrock         }
27243d7072f8Seschrock         spa_async_remove(spa, tvd);
2725ea8dc4b6Seschrock     }
2726ea8dc4b6Seschrock }
2727fa9e4066Sahrens
2728ea8dc4b6Seschrock static void
2729ea8dc4b6Seschrock spa_async_thread(spa_t *spa)
2730ea8dc4b6Seschrock {
2731ea8dc4b6Seschrock     int tasks;
27323d7072f8Seschrock     uint64_t txg;
2733ea8dc4b6Seschrock
2734ea8dc4b6Seschrock     ASSERT(spa->spa_sync_on);
2735ea8dc4b6Seschrock
2736ea8dc4b6Seschrock     mutex_enter(&spa->spa_async_lock);
2737ea8dc4b6Seschrock     tasks = spa->spa_async_tasks;
2738ea8dc4b6Seschrock     spa->spa_async_tasks = 0;
2739ea8dc4b6Seschrock     mutex_exit(&spa->spa_async_lock);
2740ea8dc4b6Seschrock
27410373e76bSbonwick     /*
27420373e76bSbonwick      * See if the config needs to be updated.
27430373e76bSbonwick      */
27440373e76bSbonwick     if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
27450373e76bSbonwick         mutex_enter(&spa_namespace_lock);
27460373e76bSbonwick         spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
27470373e76bSbonwick         mutex_exit(&spa_namespace_lock);
27480373e76bSbonwick     }
27490373e76bSbonwick
2750ea8dc4b6Seschrock     /*
27513d7072f8Seschrock      * See if any devices need to be marked REMOVED.
2752ea8dc4b6Seschrock      */
27533d7072f8Seschrock     if (tasks & SPA_ASYNC_REMOVE) {
27543d7072f8Seschrock         txg = spa_vdev_enter(spa);
27553d7072f8Seschrock         spa_async_remove(spa, spa->spa_root_vdev);
27563d7072f8Seschrock         (void) spa_vdev_exit(spa, NULL, txg, 0);
27573d7072f8Seschrock     }
2758ea8dc4b6Seschrock
2759ea8dc4b6Seschrock     /*
2760ea8dc4b6Seschrock      * If any devices are done replacing, detach them.
2761ea8dc4b6Seschrock      */
27623d7072f8Seschrock     if (tasks & SPA_ASYNC_RESILVER_DONE)
27633d7072f8Seschrock         spa_vdev_resilver_done(spa);
2764fa9e4066Sahrens
2765ea8dc4b6Seschrock     /*
27663d7072f8Seschrock      * Kick off a scrub. When starting a RESILVER scrub (or an EVERYTHING
27673d7072f8Seschrock      * scrub which can become a resilver), we need to hold
27683d7072f8Seschrock      * spa_namespace_lock() because the sysevent we post via
27693d7072f8Seschrock      * spa_event_notify() needs to get the name of the pool.
2770ea8dc4b6Seschrock      */
27713d7072f8Seschrock     if (tasks & SPA_ASYNC_SCRUB) {
27723d7072f8Seschrock         mutex_enter(&spa_namespace_lock);
2773ea8dc4b6Seschrock         VERIFY(spa_scrub(spa, POOL_SCRUB_EVERYTHING, B_TRUE) == 0);
27743d7072f8Seschrock         mutex_exit(&spa_namespace_lock);
27753d7072f8Seschrock     }
2776ea8dc4b6Seschrock
2777ea8dc4b6Seschrock     /*
2778ea8dc4b6Seschrock      * Kick off a resilver.
2779ea8dc4b6Seschrock      */
27803d7072f8Seschrock     if (tasks & SPA_ASYNC_RESILVER) {
27813d7072f8Seschrock         mutex_enter(&spa_namespace_lock);
2782ea8dc4b6Seschrock         VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
27833d7072f8Seschrock         mutex_exit(&spa_namespace_lock);
27843d7072f8Seschrock     }
2785ea8dc4b6Seschrock
2786ea8dc4b6Seschrock     /*
2787ea8dc4b6Seschrock      * Let the world know that we're done.
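     * (spa_async_suspend() below waits on spa_async_cv for exactly this
     * transition.)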
2788ea8dc4b6Seschrock      */
2789ea8dc4b6Seschrock     mutex_enter(&spa->spa_async_lock);
2790ea8dc4b6Seschrock     spa->spa_async_thread = NULL;
2791ea8dc4b6Seschrock     cv_broadcast(&spa->spa_async_cv);
2792ea8dc4b6Seschrock     mutex_exit(&spa->spa_async_lock);
2793ea8dc4b6Seschrock     thread_exit();
2794ea8dc4b6Seschrock }
2795ea8dc4b6Seschrock
2796ea8dc4b6Seschrock void
2797ea8dc4b6Seschrock spa_async_suspend(spa_t *spa)
2798ea8dc4b6Seschrock {
2799ea8dc4b6Seschrock     mutex_enter(&spa->spa_async_lock);
2800ea8dc4b6Seschrock     spa->spa_async_suspended++;
2801ea8dc4b6Seschrock     while (spa->spa_async_thread != NULL)
2802ea8dc4b6Seschrock         cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
2803ea8dc4b6Seschrock     mutex_exit(&spa->spa_async_lock);
2804ea8dc4b6Seschrock }
2805ea8dc4b6Seschrock
2806ea8dc4b6Seschrock void
2807ea8dc4b6Seschrock spa_async_resume(spa_t *spa)
2808ea8dc4b6Seschrock {
2809ea8dc4b6Seschrock     mutex_enter(&spa->spa_async_lock);
2810ea8dc4b6Seschrock     ASSERT(spa->spa_async_suspended != 0);
2811ea8dc4b6Seschrock     spa->spa_async_suspended--;
2812ea8dc4b6Seschrock     mutex_exit(&spa->spa_async_lock);
2813ea8dc4b6Seschrock }
2814ea8dc4b6Seschrock
2815ea8dc4b6Seschrock static void
2816ea8dc4b6Seschrock spa_async_dispatch(spa_t *spa)
2817ea8dc4b6Seschrock {
2818ea8dc4b6Seschrock     mutex_enter(&spa->spa_async_lock);
2819ea8dc4b6Seschrock     if (spa->spa_async_tasks && !spa->spa_async_suspended &&
28200373e76bSbonwick         spa->spa_async_thread == NULL &&
28210373e76bSbonwick         rootdir != NULL && !vn_is_readonly(rootdir))
2822ea8dc4b6Seschrock         spa->spa_async_thread = thread_create(NULL, 0,
2823ea8dc4b6Seschrock             spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
2824ea8dc4b6Seschrock     mutex_exit(&spa->spa_async_lock);
2825ea8dc4b6Seschrock }
2826ea8dc4b6Seschrock
2827ea8dc4b6Seschrock void
2828ea8dc4b6Seschrock spa_async_request(spa_t *spa, int task)
2829ea8dc4b6Seschrock {
2830ea8dc4b6Seschrock     mutex_enter(&spa->spa_async_lock);
2831ea8dc4b6Seschrock     spa->spa_async_tasks |= task;
2832ea8dc4b6Seschrock     mutex_exit(&spa->spa_async_lock);
2833fa9e4066Sahrens }
2834fa9e4066Sahrens
2835fa9e4066Sahrens /*
2836fa9e4066Sahrens  * ==========================================================================
2837fa9e4066Sahrens  * SPA syncing routines
2838fa9e4066Sahrens  * ==========================================================================
2839fa9e4066Sahrens  */
2840fa9e4066Sahrens
2841fa9e4066Sahrens static void
2842fa9e4066Sahrens spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
2843fa9e4066Sahrens {
2844fa9e4066Sahrens     bplist_t *bpl = &spa->spa_sync_bplist;
2845fa9e4066Sahrens     dmu_tx_t *tx;
2846fa9e4066Sahrens     blkptr_t blk;
2847fa9e4066Sahrens     uint64_t itor = 0;
2848fa9e4066Sahrens     zio_t *zio;
2849fa9e4066Sahrens     int error;
2850fa9e4066Sahrens     uint8_t c = 1;
2851fa9e4066Sahrens
2852fa9e4066Sahrens     zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CONFIG_HELD);
2853fa9e4066Sahrens
2854fa9e4066Sahrens     while (bplist_iterate(bpl, &itor, &blk) == 0)
2855fa9e4066Sahrens         zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL));
2856fa9e4066Sahrens
2857fa9e4066Sahrens     error = zio_wait(zio);
2858fa9e4066Sahrens     ASSERT3U(error, ==, 0);
2859fa9e4066Sahrens
2860fa9e4066Sahrens     tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
2861fa9e4066Sahrens     bplist_vacate(bpl, tx);
2862fa9e4066Sahrens
2863fa9e4066Sahrens     /*
2864fa9e4066Sahrens      * Pre-dirty the first block so we sync to convergence faster.
2865fa9e4066Sahrens      * (Usually only the first block is needed.)
2866fa9e4066Sahrens      */
2867fa9e4066Sahrens     dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
2868fa9e4066Sahrens     dmu_tx_commit(tx);
2869fa9e4066Sahrens }
2870fa9e4066Sahrens
2871fa9e4066Sahrens static void
287299653d4eSeschrock spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
2873fa9e4066Sahrens {
2874fa9e4066Sahrens     char *packed = NULL;
2875fa9e4066Sahrens     size_t nvsize = 0;
2876fa9e4066Sahrens     dmu_buf_t *db;
2877fa9e4066Sahrens
287899653d4eSeschrock     VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
2879fa9e4066Sahrens
2880fa9e4066Sahrens     packed = kmem_alloc(nvsize, KM_SLEEP);
2881fa9e4066Sahrens
288299653d4eSeschrock     VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
2883ea8dc4b6Seschrock         KM_SLEEP) == 0);
2884fa9e4066Sahrens
288599653d4eSeschrock     dmu_write(spa->spa_meta_objset, obj, 0, nvsize, packed, tx);
2886fa9e4066Sahrens
2887fa9e4066Sahrens     kmem_free(packed, nvsize);
2888fa9e4066Sahrens
288999653d4eSeschrock     VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
2890fa9e4066Sahrens     dmu_buf_will_dirty(db, tx);
2891fa9e4066Sahrens     *(uint64_t *)db->db_data = nvsize;
2892ea8dc4b6Seschrock     dmu_buf_rele(db, FTAG);
2893fa9e4066Sahrens }
2894fa9e4066Sahrens
289599653d4eSeschrock static void
289699653d4eSeschrock spa_sync_spares(spa_t *spa, dmu_tx_t *tx)
289799653d4eSeschrock {
289899653d4eSeschrock     nvlist_t *nvroot;
289999653d4eSeschrock     nvlist_t **spares;
290099653d4eSeschrock     int i;
290199653d4eSeschrock
290299653d4eSeschrock     if (!spa->spa_sync_spares)
290399653d4eSeschrock         return;
290499653d4eSeschrock
290599653d4eSeschrock     /*
290699653d4eSeschrock      * Update the MOS nvlist describing the list of available spares.
290799653d4eSeschrock      * spa_validate_spares() will have already made sure this nvlist is
29083d7072f8Seschrock      * valid and the vdevs are labeled appropriately.
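     * (The list itself is stored as a packed XDR nvlist -- see
     * spa_sync_nvlist() above -- referenced from the pool directory
     * object under DMU_POOL_SPARES.)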
290999653d4eSeschrock      */
291099653d4eSeschrock     if (spa->spa_spares_object == 0) {
291199653d4eSeschrock         spa->spa_spares_object = dmu_object_alloc(spa->spa_meta_objset,
291299653d4eSeschrock             DMU_OT_PACKED_NVLIST, 1 << 14,
291399653d4eSeschrock             DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
291499653d4eSeschrock         VERIFY(zap_update(spa->spa_meta_objset,
291599653d4eSeschrock             DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SPARES,
291699653d4eSeschrock             sizeof (uint64_t), 1, &spa->spa_spares_object, tx) == 0);
291799653d4eSeschrock     }
291899653d4eSeschrock
291999653d4eSeschrock     VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
292099653d4eSeschrock     if (spa->spa_nspares == 0) {
292199653d4eSeschrock         VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
292299653d4eSeschrock             NULL, 0) == 0);
292399653d4eSeschrock     } else {
292499653d4eSeschrock         spares = kmem_alloc(spa->spa_nspares * sizeof (void *),
292599653d4eSeschrock             KM_SLEEP);
292699653d4eSeschrock         for (i = 0; i < spa->spa_nspares; i++)
292799653d4eSeschrock             spares[i] = vdev_config_generate(spa,
292899653d4eSeschrock                 spa->spa_spares[i], B_FALSE, B_TRUE);
292999653d4eSeschrock         VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
293099653d4eSeschrock             spares, spa->spa_nspares) == 0);
293199653d4eSeschrock         for (i = 0; i < spa->spa_nspares; i++)
293299653d4eSeschrock             nvlist_free(spares[i]);
293399653d4eSeschrock         kmem_free(spares, spa->spa_nspares * sizeof (void *));
293499653d4eSeschrock     }
293599653d4eSeschrock
293699653d4eSeschrock     spa_sync_nvlist(spa, spa->spa_spares_object, nvroot, tx);
293706eeb2adSek     nvlist_free(nvroot);
293899653d4eSeschrock
293999653d4eSeschrock     spa->spa_sync_spares = B_FALSE;
294099653d4eSeschrock }
294199653d4eSeschrock
294299653d4eSeschrock static void
294399653d4eSeschrock spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
294499653d4eSeschrock {
294599653d4eSeschrock     nvlist_t *config;
294699653d4eSeschrock
294799653d4eSeschrock     if (list_is_empty(&spa->spa_dirty_list))
294899653d4eSeschrock         return;
294999653d4eSeschrock
295099653d4eSeschrock     config = spa_config_generate(spa, NULL, dmu_tx_get_txg(tx), B_FALSE);
295199653d4eSeschrock
295299653d4eSeschrock     if (spa->spa_config_syncing)
295399653d4eSeschrock         nvlist_free(spa->spa_config_syncing);
295499653d4eSeschrock     spa->spa_config_syncing = config;
295599653d4eSeschrock
295699653d4eSeschrock     spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
295799653d4eSeschrock }
295899653d4eSeschrock
2959b1b8ab34Slling static void
2960b1b8ab34Slling spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
2961b1b8ab34Slling {
2962b1b8ab34Slling     spa_t *spa = arg1;
2963b1b8ab34Slling     nvlist_t *nvp = arg2;
2964b1b8ab34Slling     nvpair_t *nvpair;
2965b1b8ab34Slling     objset_t *mos = spa->spa_meta_objset;
2966b1b8ab34Slling     uint64_t zapobj;
29673d7072f8Seschrock     uint64_t intval;
2968b1b8ab34Slling
2969b1b8ab34Slling     mutex_enter(&spa->spa_props_lock);
2970b1b8ab34Slling     if (spa->spa_pool_props_object == 0) {
2971b1b8ab34Slling         zapobj = zap_create(mos, DMU_OT_POOL_PROPS, DMU_OT_NONE, 0, tx);
2972b1b8ab34Slling         VERIFY(zapobj > 0);
2973b1b8ab34Slling
2974b1b8ab34Slling         spa->spa_pool_props_object = zapobj;
2975b1b8ab34Slling
2976b1b8ab34Slling         VERIFY(zap_update(mos, DMU_POOL_DIRECTORY_OBJECT,
2977b1b8ab34Slling             DMU_POOL_PROPS, 8, 1,
2978b1b8ab34Slling             &spa->spa_pool_props_object, tx) == 0);
2979b1b8ab34Slling     }
2980b1b8ab34Slling     mutex_exit(&spa->spa_props_lock);
2981b1b8ab34Slling
2982b1b8ab34Slling     nvpair = NULL;
2983b1b8ab34Slling     while ((nvpair = nvlist_next_nvpair(nvp, nvpair))) {
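        /*
         * Persist each property we understand into the pool props ZAP;
         * names that don't match a case below are silently ignored.
         */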
2984b1b8ab34Slling         switch (zpool_name_to_prop(nvpair_name(nvpair))) {
29853d7072f8Seschrock         case ZPOOL_PROP_BOOTFS:
2986b1b8ab34Slling             VERIFY(nvlist_lookup_uint64(nvp,
2987b1b8ab34Slling                 nvpair_name(nvpair), &spa->spa_bootfs) == 0);
2988b1b8ab34Slling             VERIFY(zap_update(mos,
2989b1b8ab34Slling                 spa->spa_pool_props_object,
29903d7072f8Seschrock                 zpool_prop_to_name(ZPOOL_PROP_BOOTFS), 8, 1,
2991b1b8ab34Slling                 &spa->spa_bootfs, tx) == 0);
2992b1b8ab34Slling             break;
29933d7072f8Seschrock
29943d7072f8Seschrock         case ZPOOL_PROP_AUTOREPLACE:
29953d7072f8Seschrock             VERIFY(nvlist_lookup_uint64(nvp,
29963d7072f8Seschrock                 nvpair_name(nvpair), &intval) == 0);
29973d7072f8Seschrock             VERIFY(zap_update(mos,
29983d7072f8Seschrock                 spa->spa_pool_props_object,
29993d7072f8Seschrock                 zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 8, 1,
30003d7072f8Seschrock                 &intval, tx) == 0);
30013d7072f8Seschrock             break;
3002b1b8ab34Slling         }
3003b1b8ab34Slling     }
3004b1b8ab34Slling }
3005b1b8ab34Slling
3006fa9e4066Sahrens /*
3007fa9e4066Sahrens  * Sync the specified transaction group. New blocks may be dirtied as
3008fa9e4066Sahrens  * part of the process, so we iterate until it converges.
3009fa9e4066Sahrens  */
3010fa9e4066Sahrens void
3011fa9e4066Sahrens spa_sync(spa_t *spa, uint64_t txg)
3012fa9e4066Sahrens {
3013fa9e4066Sahrens     dsl_pool_t *dp = spa->spa_dsl_pool;
3014fa9e4066Sahrens     objset_t *mos = spa->spa_meta_objset;
3015fa9e4066Sahrens     bplist_t *bpl = &spa->spa_sync_bplist;
30160373e76bSbonwick     vdev_t *rvd = spa->spa_root_vdev;
3017fa9e4066Sahrens     vdev_t *vd;
3018fa9e4066Sahrens     dmu_tx_t *tx;
3019fa9e4066Sahrens     int dirty_vdevs;
3020fa9e4066Sahrens
3021fa9e4066Sahrens     /*
3022fa9e4066Sahrens      * Lock out configuration changes.
3023fa9e4066Sahrens      */
3024ea8dc4b6Seschrock     spa_config_enter(spa, RW_READER, FTAG);
3025fa9e4066Sahrens
3026fa9e4066Sahrens     spa->spa_syncing_txg = txg;
3027fa9e4066Sahrens     spa->spa_sync_pass = 0;
3028fa9e4066Sahrens
3029ea8dc4b6Seschrock     VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
3030fa9e4066Sahrens
303199653d4eSeschrock     tx = dmu_tx_create_assigned(dp, txg);
303299653d4eSeschrock
303399653d4eSeschrock     /*
303499653d4eSeschrock      * If we are upgrading to ZFS_VERSION_RAIDZ_DEFLATE this txg,
303599653d4eSeschrock      * set spa_deflate if we have no raid-z vdevs.
303699653d4eSeschrock      */
303799653d4eSeschrock     if (spa->spa_ubsync.ub_version < ZFS_VERSION_RAIDZ_DEFLATE &&
303899653d4eSeschrock         spa->spa_uberblock.ub_version >= ZFS_VERSION_RAIDZ_DEFLATE) {
303999653d4eSeschrock         int i;
304099653d4eSeschrock
304199653d4eSeschrock         for (i = 0; i < rvd->vdev_children; i++) {
304299653d4eSeschrock             vd = rvd->vdev_child[i];
304399653d4eSeschrock             if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
304499653d4eSeschrock                 break;
304599653d4eSeschrock         }
304699653d4eSeschrock         if (i == rvd->vdev_children) {
304799653d4eSeschrock             spa->spa_deflate = TRUE;
304899653d4eSeschrock             VERIFY(0 == zap_add(spa->spa_meta_objset,
304999653d4eSeschrock                 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
305099653d4eSeschrock                 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
305199653d4eSeschrock         }
305299653d4eSeschrock     }
305399653d4eSeschrock
3054fa9e4066Sahrens     /*
3055fa9e4066Sahrens      * If anything has changed in this txg, push the deferred frees
3056fa9e4066Sahrens      * from the previous txg. If not, leave them alone so that we
3057fa9e4066Sahrens      * don't generate work on an otherwise idle system.
3058fa9e4066Sahrens      */
3059fa9e4066Sahrens     if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
30601615a317Sek         !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
30611615a317Sek         !txg_list_empty(&dp->dp_sync_tasks, txg))
3062fa9e4066Sahrens         spa_sync_deferred_frees(spa, txg);
3063fa9e4066Sahrens
3064fa9e4066Sahrens     /*
3065fa9e4066Sahrens      * Iterate to convergence.
3066fa9e4066Sahrens      */
3067fa9e4066Sahrens     do {
3068fa9e4066Sahrens         spa->spa_sync_pass++;
3069fa9e4066Sahrens
3070fa9e4066Sahrens         spa_sync_config_object(spa, tx);
307199653d4eSeschrock         spa_sync_spares(spa, tx);
3072ea8dc4b6Seschrock         spa_errlog_sync(spa, txg);
3073fa9e4066Sahrens         dsl_pool_sync(dp, txg);
3074fa9e4066Sahrens
3075fa9e4066Sahrens         dirty_vdevs = 0;
3076fa9e4066Sahrens         while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
3077fa9e4066Sahrens             vdev_sync(vd, txg);
3078fa9e4066Sahrens             dirty_vdevs++;
3079fa9e4066Sahrens         }
3080fa9e4066Sahrens
3081fa9e4066Sahrens         bplist_sync(bpl, tx);
3082fa9e4066Sahrens     } while (dirty_vdevs);
3083fa9e4066Sahrens
3084fa9e4066Sahrens     bplist_close(bpl);
3085fa9e4066Sahrens
3086fa9e4066Sahrens     dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
3087fa9e4066Sahrens
3088fa9e4066Sahrens     /*
3089fa9e4066Sahrens      * Rewrite the vdev configuration (which includes the uberblock)
3090fa9e4066Sahrens      * to commit the transaction group.
30910373e76bSbonwick      *
30920373e76bSbonwick      * If there are any dirty vdevs, sync the uberblock to all vdevs.
30930373e76bSbonwick      * Otherwise, pick a random top-level vdev that's known to be
30940373e76bSbonwick      * visible in the config cache (see spa_vdev_add() for details).
30950373e76bSbonwick      * If the write fails, try the next vdev until we've tried them all.
30960373e76bSbonwick      */
30970373e76bSbonwick     if (!list_is_empty(&spa->spa_dirty_list)) {
30980373e76bSbonwick         VERIFY(vdev_config_sync(rvd, txg) == 0);
30990373e76bSbonwick     } else {
31000373e76bSbonwick         int children = rvd->vdev_children;
31010373e76bSbonwick         int c0 = spa_get_random(children);
31020373e76bSbonwick         int c;
31030373e76bSbonwick
31040373e76bSbonwick         for (c = 0; c < children; c++) {
31050373e76bSbonwick             vd = rvd->vdev_child[(c0 + c) % children];
31060373e76bSbonwick             if (vd->vdev_ms_array == 0)
31070373e76bSbonwick                 continue;
31080373e76bSbonwick             if (vdev_config_sync(vd, txg) == 0)
31090373e76bSbonwick                 break;
31100373e76bSbonwick         }
31110373e76bSbonwick         if (c == children)
31120373e76bSbonwick             VERIFY(vdev_config_sync(rvd, txg) == 0);
31130373e76bSbonwick     }
31140373e76bSbonwick
311599653d4eSeschrock     dmu_tx_commit(tx);
311699653d4eSeschrock
31170373e76bSbonwick     /*
31180373e76bSbonwick      * Clear the dirty config list.
3119fa9e4066Sahrens      */
31200373e76bSbonwick     while ((vd = list_head(&spa->spa_dirty_list)) != NULL)
31210373e76bSbonwick         vdev_config_clean(vd);
31220373e76bSbonwick
31230373e76bSbonwick     /*
31240373e76bSbonwick      * Now that the new config has synced transactionally,
31250373e76bSbonwick      * let it become visible to the config cache.
31260373e76bSbonwick      */
31270373e76bSbonwick     if (spa->spa_config_syncing != NULL) {
31280373e76bSbonwick         spa_config_set(spa, spa->spa_config_syncing);
31290373e76bSbonwick         spa->spa_config_txg = txg;
31300373e76bSbonwick         spa->spa_config_syncing = NULL;
31310373e76bSbonwick     }
3132fa9e4066Sahrens
3133fa9e4066Sahrens     /*
3134fa9e4066Sahrens      * Make a stable copy of the fully synced uberblock.
3135fa9e4066Sahrens      * We use this as the root for pool traversals.
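     * (Setting spa_traverse_wanted below asks traverse_more() to back
     * off so the traverse lock can be taken as writer promptly; holding
     * it as writer keeps traversals from seeing a half-updated
     * spa_ubsync.)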
3136fa9e4066Sahrens      */
3137fa9e4066Sahrens     spa->spa_traverse_wanted = 1;	/* tells traverse_more() to stop */
3138fa9e4066Sahrens
3139fa9e4066Sahrens     spa_scrub_suspend(spa);		/* stop scrubbing and finish I/Os */
3140fa9e4066Sahrens
3141fa9e4066Sahrens     rw_enter(&spa->spa_traverse_lock, RW_WRITER);
3142fa9e4066Sahrens     spa->spa_traverse_wanted = 0;
3143fa9e4066Sahrens     spa->spa_ubsync = spa->spa_uberblock;
3144fa9e4066Sahrens     rw_exit(&spa->spa_traverse_lock);
3145fa9e4066Sahrens
3146fa9e4066Sahrens     spa_scrub_resume(spa);		/* resume scrub with new ubsync */
3147fa9e4066Sahrens
3148fa9e4066Sahrens     /*
3149fa9e4066Sahrens      * Clean up the ZIL records for the synced txg.
3150fa9e4066Sahrens      */
3151fa9e4066Sahrens     dsl_pool_zil_clean(dp);
3152fa9e4066Sahrens
3153fa9e4066Sahrens     /*
3154fa9e4066Sahrens      * Update usable space statistics.
3155fa9e4066Sahrens      */
3156fa9e4066Sahrens     while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
3157fa9e4066Sahrens         vdev_sync_done(vd, txg);
3158fa9e4066Sahrens
3159fa9e4066Sahrens     /*
3160fa9e4066Sahrens      * It had better be the case that we didn't dirty anything
316199653d4eSeschrock      * since vdev_config_sync().
3162fa9e4066Sahrens      */
3163fa9e4066Sahrens     ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
3164fa9e4066Sahrens     ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
3165fa9e4066Sahrens     ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
3166fa9e4066Sahrens     ASSERT(bpl->bpl_queue == NULL);
3167fa9e4066Sahrens
3168ea8dc4b6Seschrock     spa_config_exit(spa, FTAG);
3169ea8dc4b6Seschrock
3170ea8dc4b6Seschrock     /*
3171ea8dc4b6Seschrock      * If any async tasks have been requested, kick them off.
3172ea8dc4b6Seschrock      */
3173ea8dc4b6Seschrock     spa_async_dispatch(spa);
3174fa9e4066Sahrens }
3175fa9e4066Sahrens
3176fa9e4066Sahrens /*
3177fa9e4066Sahrens  * Sync all pools. We don't want to hold the namespace lock across these
3178fa9e4066Sahrens  * operations, so we take a reference on the spa_t and drop the lock during the
3179fa9e4066Sahrens  * sync.
3180fa9e4066Sahrens  */
3181fa9e4066Sahrens void
3182fa9e4066Sahrens spa_sync_allpools(void)
3183fa9e4066Sahrens {
3184fa9e4066Sahrens     spa_t *spa = NULL;
3185fa9e4066Sahrens     mutex_enter(&spa_namespace_lock);
3186fa9e4066Sahrens     while ((spa = spa_next(spa)) != NULL) {
3187fa9e4066Sahrens         if (spa_state(spa) != POOL_STATE_ACTIVE)
3188fa9e4066Sahrens             continue;
3189fa9e4066Sahrens         spa_open_ref(spa, FTAG);
3190fa9e4066Sahrens         mutex_exit(&spa_namespace_lock);
3191fa9e4066Sahrens         txg_wait_synced(spa_get_dsl(spa), 0);
3192fa9e4066Sahrens         mutex_enter(&spa_namespace_lock);
3193fa9e4066Sahrens         spa_close(spa, FTAG);
3194fa9e4066Sahrens     }
3195fa9e4066Sahrens     mutex_exit(&spa_namespace_lock);
3196fa9e4066Sahrens }
3197fa9e4066Sahrens
3198fa9e4066Sahrens /*
3199fa9e4066Sahrens  * ==========================================================================
3200fa9e4066Sahrens  * Miscellaneous routines
3201fa9e4066Sahrens  * ==========================================================================
3202fa9e4066Sahrens  */
3203fa9e4066Sahrens
3204fa9e4066Sahrens /*
3205fa9e4066Sahrens  * Remove all pools in the system.
3206fa9e4066Sahrens  */
3207fa9e4066Sahrens void
3208fa9e4066Sahrens spa_evict_all(void)
3209fa9e4066Sahrens {
3210fa9e4066Sahrens     spa_t *spa;
3211fa9e4066Sahrens
3212fa9e4066Sahrens     /*
3213fa9e4066Sahrens      * Remove all cached state. All pools should be closed now,
3214fa9e4066Sahrens      * so every spa in the AVL tree should be unreferenced.
3215fa9e4066Sahrens      */
3216fa9e4066Sahrens     mutex_enter(&spa_namespace_lock);
3217fa9e4066Sahrens     while ((spa = spa_next(NULL)) != NULL) {
3218fa9e4066Sahrens         /*
3219ea8dc4b6Seschrock          * Stop async tasks. The async thread may need to detach
3220ea8dc4b6Seschrock          * a device that's been replaced, which requires grabbing
3221ea8dc4b6Seschrock          * spa_namespace_lock, so we must drop it here.
3222fa9e4066Sahrens          */
3223fa9e4066Sahrens         spa_open_ref(spa, FTAG);
3224fa9e4066Sahrens         mutex_exit(&spa_namespace_lock);
3225ea8dc4b6Seschrock         spa_async_suspend(spa);
3226fa9e4066Sahrens         VERIFY(spa_scrub(spa, POOL_SCRUB_NONE, B_TRUE) == 0);
3227fa9e4066Sahrens         mutex_enter(&spa_namespace_lock);
3228fa9e4066Sahrens         spa_close(spa, FTAG);
3229fa9e4066Sahrens
3230fa9e4066Sahrens         if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
3231fa9e4066Sahrens             spa_unload(spa);
3232fa9e4066Sahrens             spa_deactivate(spa);
3233fa9e4066Sahrens         }
3234fa9e4066Sahrens         spa_remove(spa);
3235fa9e4066Sahrens     }
3236fa9e4066Sahrens     mutex_exit(&spa_namespace_lock);
3237fa9e4066Sahrens }
3238ea8dc4b6Seschrock
3239ea8dc4b6Seschrock vdev_t *
3240ea8dc4b6Seschrock spa_lookup_by_guid(spa_t *spa, uint64_t guid)
3241ea8dc4b6Seschrock {
3242ea8dc4b6Seschrock     return (vdev_lookup_by_guid(spa->spa_root_vdev, guid));
3243ea8dc4b6Seschrock }
3244eaca9bbdSeschrock
3245eaca9bbdSeschrock void
3246eaca9bbdSeschrock spa_upgrade(spa_t *spa)
3247eaca9bbdSeschrock {
3248eaca9bbdSeschrock     spa_config_enter(spa, RW_WRITER, FTAG);
3249eaca9bbdSeschrock
3250eaca9bbdSeschrock     /*
3251eaca9bbdSeschrock      * This should only be called for a non-faulted pool, and since a
3252eaca9bbdSeschrock      * future version would result in an unopenable pool, this shouldn't be
3253eaca9bbdSeschrock      * possible.
3254eaca9bbdSeschrock      */
3255eaca9bbdSeschrock     ASSERT(spa->spa_uberblock.ub_version <= ZFS_VERSION);
3256eaca9bbdSeschrock
3257eaca9bbdSeschrock     spa->spa_uberblock.ub_version = ZFS_VERSION;
3258eaca9bbdSeschrock     vdev_config_dirty(spa->spa_root_vdev);
3259eaca9bbdSeschrock
3260eaca9bbdSeschrock     spa_config_exit(spa, FTAG);
326199653d4eSeschrock
326299653d4eSeschrock     txg_wait_synced(spa_get_dsl(spa), 0);
326399653d4eSeschrock }
326499653d4eSeschrock
326599653d4eSeschrock boolean_t
326699653d4eSeschrock spa_has_spare(spa_t *spa, uint64_t guid)
326799653d4eSeschrock {
326899653d4eSeschrock     int i;
326939c23413Seschrock     uint64_t spareguid;
327099653d4eSeschrock
327199653d4eSeschrock     for (i = 0; i < spa->spa_nspares; i++)
327299653d4eSeschrock         if (spa->spa_spares[i]->vdev_guid == guid)
327399653d4eSeschrock             return (B_TRUE);
327499653d4eSeschrock
327539c23413Seschrock     for (i = 0; i < spa->spa_pending_nspares; i++) {
327639c23413Seschrock         if (nvlist_lookup_uint64(spa->spa_pending_spares[i],
327739c23413Seschrock             ZPOOL_CONFIG_GUID, &spareguid) == 0 &&
327839c23413Seschrock             spareguid == guid)
327939c23413Seschrock             return (B_TRUE);
328039c23413Seschrock     }
328139c23413Seschrock
328299653d4eSeschrock     return (B_FALSE);
3283eaca9bbdSeschrock }
3284b1b8ab34Slling
3285b1b8ab34Slling int
3286b1b8ab34Slling spa_set_props(spa_t *spa, nvlist_t *nvp)
3287b1b8ab34Slling {
3288b1b8ab34Slling     return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
3289b1b8ab34Slling         spa, nvp, 3));
3290b1b8ab34Slling }
3291b1b8ab34Slling
3292b1b8ab34Slling int
3293b1b8ab34Slling spa_get_props(spa_t *spa, nvlist_t **nvp)
3294b1b8ab34Slling {
3295b1b8ab34Slling     zap_cursor_t zc;
3296b1b8ab34Slling     zap_attribute_t za;
3297b1b8ab34Slling     objset_t *mos = spa->spa_meta_objset;
3298b1b8ab34Slling     zfs_source_t src;
32993d7072f8Seschrock     zpool_prop_t prop;
3300b1b8ab34Slling     nvlist_t *propval;
3301b1b8ab34Slling     uint64_t value;
3302b1b8ab34Slling     int err;
3303b1b8ab34Slling
3304b1b8ab34Slling     VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3305b1b8ab34Slling
3306b1b8ab34Slling     mutex_enter(&spa->spa_props_lock);
3307b1b8ab34Slling     /* If no props object, then just return empty nvlist */
3308b1b8ab34Slling     if (spa->spa_pool_props_object == 0) {
3309b1b8ab34Slling         mutex_exit(&spa->spa_props_lock);
3310b1b8ab34Slling         return (0);
3311b1b8ab34Slling     }
3312b1b8ab34Slling
3313b1b8ab34Slling     for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
3314b1b8ab34Slling         (err = zap_cursor_retrieve(&zc, &za)) == 0;
3315b1b8ab34Slling         zap_cursor_advance(&zc)) {
3316b1b8ab34Slling
3317b1b8ab34Slling         if ((prop = zpool_name_to_prop(za.za_name)) == ZFS_PROP_INVAL)
3318b1b8ab34Slling             continue;
3319b1b8ab34Slling
3320b1b8ab34Slling         VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3321b1b8ab34Slling         switch (za.za_integer_length) {
3322b1b8ab34Slling         case 8:
33233d7072f8Seschrock             if (zpool_prop_default_numeric(prop) ==
3324b1b8ab34Slling                 za.za_first_integer)
3325b1b8ab34Slling                 src = ZFS_SRC_DEFAULT;
3326b1b8ab34Slling             else
3327b1b8ab34Slling                 src = ZFS_SRC_LOCAL;
3328b1b8ab34Slling             value = za.za_first_integer;
3329b1b8ab34Slling
33303d7072f8Seschrock             if (prop == ZPOOL_PROP_BOOTFS) {
3331b1b8ab34Slling                 dsl_pool_t *dp;
3332b1b8ab34Slling                 dsl_dataset_t *ds = NULL;
3333b1b8ab34Slling                 char strval[MAXPATHLEN];
3334b1b8ab34Slling
3335b1b8ab34Slling                 dp = spa_get_dsl(spa);
3336b1b8ab34Slling                 rw_enter(&dp->dp_config_rwlock, RW_READER);
3337b1b8ab34Slling                 if ((err = dsl_dataset_open_obj(dp,
3338b1b8ab34Slling                     za.za_first_integer, NULL, DS_MODE_NONE,
3339b1b8ab34Slling                     FTAG, &ds)) != 0) {
3340b1b8ab34Slling                     rw_exit(&dp->dp_config_rwlock);
3341b1b8ab34Slling                     break;
3342b1b8ab34Slling                 }
3343b1b8ab34Slling                 dsl_dataset_name(ds, strval);
3344b1b8ab34Slling                 dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
3345b1b8ab34Slling                 rw_exit(&dp->dp_config_rwlock);
3346b1b8ab34Slling
3347b1b8ab34Slling                 VERIFY(nvlist_add_uint64(propval,
3348b1b8ab34Slling                     ZFS_PROP_SOURCE, src) == 0);
3349b1b8ab34Slling                 VERIFY(nvlist_add_string(propval,
3350b1b8ab34Slling                     ZFS_PROP_VALUE, strval) == 0);
3351b1b8ab34Slling             } else {
3352b1b8ab34Slling                 VERIFY(nvlist_add_uint64(propval,
3353b1b8ab34Slling                     ZFS_PROP_SOURCE, src) == 0);
3354b1b8ab34Slling                 VERIFY(nvlist_add_uint64(propval,
3355b1b8ab34Slling                     ZFS_PROP_VALUE, value) == 0);
3356b1b8ab34Slling             }
3357b1b8ab34Slling             VERIFY(nvlist_add_nvlist(*nvp, za.za_name,
3358b1b8ab34Slling                 propval) == 0);
3359b1b8ab34Slling             break;
3360b1b8ab34Slling         }
3361b1b8ab34Slling         nvlist_free(propval);
3362b1b8ab34Slling     }
3363b1b8ab34Slling     zap_cursor_fini(&zc);
3364b1b8ab34Slling     mutex_exit(&spa->spa_props_lock);
3365b1b8ab34Slling     if (err && err != ENOENT) {
3366b1b8ab34Slling         nvlist_free(*nvp);
3367b1b8ab34Slling         return (err);
3368b1b8ab34Slling     }
3369b1b8ab34Slling
3370b1b8ab34Slling     return (0);
3371b1b8ab34Slling }
3372b1b8ab34Slling
3373b1b8ab34Slling /*
3374b1b8ab34Slling  * If the bootfs property value is dsobj, clear it.
3375b1b8ab34Slling  */
3376b1b8ab34Slling void
3377b1b8ab34Slling spa_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
3378b1b8ab34Slling {
3379b1b8ab34Slling     if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
3380b1b8ab34Slling         VERIFY(zap_remove(spa->spa_meta_objset,
3381b1b8ab34Slling             spa->spa_pool_props_object,
33823d7072f8Seschrock             zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
3383b1b8ab34Slling         spa->spa_bootfs = 0;
3384b1b8ab34Slling     }
3385b1b8ab34Slling }
33863d7072f8Seschrock
33873d7072f8Seschrock /*
33883d7072f8Seschrock  * Post a sysevent corresponding to the given event. The 'name' must be one of
33893d7072f8Seschrock  * the event definitions in sys/sysevent/eventdefs.h. The payload will be
33903d7072f8Seschrock  * filled in from the spa and (optionally) the vdev. This doesn't do anything
33913d7072f8Seschrock  * in the userland libzpool, as we don't want consumers to misinterpret ztest
33923d7072f8Seschrock  * or zdb as real changes.
33933d7072f8Seschrock  */
33943d7072f8Seschrock void
33953d7072f8Seschrock spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
33963d7072f8Seschrock {
33973d7072f8Seschrock #ifdef _KERNEL
33983d7072f8Seschrock     sysevent_t *ev;
33993d7072f8Seschrock     sysevent_attr_list_t *attr = NULL;
34003d7072f8Seschrock     sysevent_value_t value;
34013d7072f8Seschrock     sysevent_id_t eid;
34023d7072f8Seschrock
34033d7072f8Seschrock     ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
34043d7072f8Seschrock         SE_SLEEP);
34053d7072f8Seschrock
34063d7072f8Seschrock     value.value_type = SE_DATA_TYPE_STRING;
34073d7072f8Seschrock     value.value.sv_string = spa_name(spa);
34083d7072f8Seschrock     if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
34093d7072f8Seschrock         goto done;
34103d7072f8Seschrock
34113d7072f8Seschrock     value.value_type = SE_DATA_TYPE_UINT64;
34123d7072f8Seschrock     value.value.sv_uint64 = spa_guid(spa);
34133d7072f8Seschrock     if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
34143d7072f8Seschrock         goto done;
34153d7072f8Seschrock
34163d7072f8Seschrock     if (vd) {
34173d7072f8Seschrock         value.value_type = SE_DATA_TYPE_UINT64;
34183d7072f8Seschrock         value.value.sv_uint64 = vd->vdev_guid;
34193d7072f8Seschrock         if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
34203d7072f8Seschrock             SE_SLEEP) != 0)
34213d7072f8Seschrock             goto done;
34223d7072f8Seschrock
34233d7072f8Seschrock         if (vd->vdev_path) {
34243d7072f8Seschrock             value.value_type = SE_DATA_TYPE_STRING;
34253d7072f8Seschrock             value.value.sv_string = vd->vdev_path;
34263d7072f8Seschrock             if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
34273d7072f8Seschrock                 &value, SE_SLEEP) != 0)
34283d7072f8Seschrock                 goto done;
34293d7072f8Seschrock         }
34303d7072f8Seschrock     }
34313d7072f8Seschrock
34323d7072f8Seschrock     (void) log_sysevent(ev, SE_SLEEP, &eid);
34333d7072f8Seschrock
34343d7072f8Seschrock done:
34353d7072f8Seschrock     if (attr)
34363d7072f8Seschrock         sysevent_free_attr(attr);
34373d7072f8Seschrock     sysevent_free(ev);
34383d7072f8Seschrock #endif
34393d7072f8Seschrock }
3440