/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
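 *
 * For example, with a top-level ashift of 12, a psize of 10,000 bytes is
 * first rounded up to 12,288 bytes (the next 4K multiple), and the larger
 * of that value and each child's reported asize is returned.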
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
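	 *
	 * For example, each child of a 6-wide raidz top-level vdev must be
	 * able to provide at least vdev_min_asize / 6 of that space.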
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return (pvd->vdev_min_asize / pvd->vdev_children);

	return (pvd->vdev_min_asize);
}

void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
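	 * Each vdev's guid sum covers itself and everything beneath it;
	 * the root's sum is what gets recorded in the uberblock and is
	 * checked at import time to detect missing devices.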
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
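 * If guid is zero (and this is not a hole vdev), a fresh guid is generated
 * here: the root vdev's guid doubles as the pool guid and must be unique
 * across pools, while any other vdev's guid need only be unique within
 * the pool.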
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
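 *
 * VDEV_ALLOC_LOAD requires the guid and id recorded in the config nvlist;
 * VDEV_ALLOC_SPARE, VDEV_ALLOC_L2CACHE and VDEV_ALLOC_ROOTPOOL require only
 * the guid; any other alloctype lets vdev_alloc_common() generate a guid.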
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we support at most 3 parity devices.
			 */
			if (nparity == 0 || nparity > 3)
				return (EINVAL);
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (ENOTSUP);
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
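	 * These are the metaslab array object, the metaslab shift and the
	 * asize stored in the config nvlist.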
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
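 *
 * This is used by vdev_add_parent() and vdev_remove_parent() below when the
 * top-level vdev changes identity, so that the metaslab state, per-txg dirty
 * lists, space accounting and config/state dirty markers all follow the vdev
 * that is now at the top.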
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
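 *
 * The new interior vdev takes over the child's slot in the tree and inherits
 * its asize, min_asize, ashift, state and creation txg; the existing vdev
 * becomes its only child.  This is how an attach or spare activation
 * interposes a mirror, replacing or spare vdev above a device.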
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);

	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
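	 * When that shared probe completes, vdev_probe_done() records the
	 * results and fails every waiting parent zio with ENXIO if the
	 * device turns out to be inaccessible.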
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * In order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock.
	 */
	if (vdev_uses_zvols(vd)) {
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
		return;
	}
	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != NULL);

	taskq_destroy(tq);
}

/*
 * Prepare a virtual device for access.
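 *
 * On success the device's sizes and alignment are (re)computed, its state is
 * set to healthy or degraded, and a resilver is requested if this is a leaf
 * that appears to need one.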
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	/*
	 * For hole or missing vdevs we just return success.
	 */
	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
		return (0);

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (EINVAL);
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}
	}

	/*
	 * If all children are healthy and the asize has increased,
	 * then we've experienced dynamic LUN growth.  If automatic
	 * expansion is enabled then use the additional space.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
1270e14bb325SJeff Bonwick * Fortunately, either version of the label will have the 1271e14bb325SJeff Bonwick * same top guid, so if we're a top-level vdev, we can 1272e14bb325SJeff Bonwick * safely compare to that instead. 1273e14bb325SJeff Bonwick */ 1274560e6e96Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, 1275e14bb325SJeff Bonwick &guid) != 0 || 1276e14bb325SJeff Bonwick nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, 1277e14bb325SJeff Bonwick &top_guid) != 0 || 1278e14bb325SJeff Bonwick (vd->vdev_guid != guid && 1279e14bb325SJeff Bonwick (vd->vdev_guid != top_guid || vd != vd->vdev_top))) { 1280560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1281560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1282560e6e96Seschrock nvlist_free(label); 1283560e6e96Seschrock return (0); 1284560e6e96Seschrock } 1285560e6e96Seschrock 1286560e6e96Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, 1287560e6e96Seschrock &state) != 0) { 1288560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1289560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1290560e6e96Seschrock nvlist_free(label); 1291560e6e96Seschrock return (0); 1292560e6e96Seschrock } 1293560e6e96Seschrock 1294560e6e96Seschrock nvlist_free(label); 1295560e6e96Seschrock 1296bc758434SLin Ling /* 1297bc758434SLin Ling * If spa->spa_load_verbatim is true, no need to check the 1298bc758434SLin Ling * state of the pool. 1299bc758434SLin Ling */ 1300bc758434SLin Ling if (!spa->spa_load_verbatim && 1301bc758434SLin Ling spa->spa_load_state == SPA_LOAD_OPEN && 1302bc758434SLin Ling state != POOL_STATE_ACTIVE) 13030bf246f5Smc return (EBADF); 1304560e6e96Seschrock 130551ece835Seschrock /* 130651ece835Seschrock * If we were able to open and validate a vdev that was 130751ece835Seschrock * previously marked permanently unavailable, clear that state 130851ece835Seschrock * now. 130951ece835Seschrock */ 131051ece835Seschrock if (vd->vdev_not_present) 131151ece835Seschrock vd->vdev_not_present = 0; 131251ece835Seschrock } 1313560e6e96Seschrock 1314560e6e96Seschrock return (0); 1315560e6e96Seschrock } 1316560e6e96Seschrock 1317fa9e4066Sahrens /* 1318fa9e4066Sahrens * Close a virtual device. 1319fa9e4066Sahrens */ 1320fa9e4066Sahrens void 1321fa9e4066Sahrens vdev_close(vdev_t *vd) 1322fa9e4066Sahrens { 13238ad4d6ddSJeff Bonwick spa_t *spa = vd->vdev_spa; 13248ad4d6ddSJeff Bonwick 13258ad4d6ddSJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 13268ad4d6ddSJeff Bonwick 1327fa9e4066Sahrens vd->vdev_ops->vdev_op_close(vd); 1328fa9e4066Sahrens 13293d7072f8Seschrock vdev_cache_purge(vd); 1330fa9e4066Sahrens 1331560e6e96Seschrock /* 1332573ca77eSGeorge Wilson * We record the previous state before we close it, so that if we are 1333560e6e96Seschrock * doing a reopen(), we don't generate FMA ereports if we notice that 1334560e6e96Seschrock * it's still faulted. 
1335560e6e96Seschrock */ 1336560e6e96Seschrock vd->vdev_prevstate = vd->vdev_state; 1337560e6e96Seschrock 1338fa9e4066Sahrens if (vd->vdev_offline) 1339fa9e4066Sahrens vd->vdev_state = VDEV_STATE_OFFLINE; 1340fa9e4066Sahrens else 1341fa9e4066Sahrens vd->vdev_state = VDEV_STATE_CLOSED; 1342ea8dc4b6Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1343fa9e4066Sahrens } 1344fa9e4066Sahrens 1345fa9e4066Sahrens void 1346ea8dc4b6Seschrock vdev_reopen(vdev_t *vd) 1347fa9e4066Sahrens { 1348ea8dc4b6Seschrock spa_t *spa = vd->vdev_spa; 1349fa9e4066Sahrens 1350e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1351ea8dc4b6Seschrock 1352fa9e4066Sahrens vdev_close(vd); 1353fa9e4066Sahrens (void) vdev_open(vd); 1354fa9e4066Sahrens 135539c23413Seschrock /* 135639c23413Seschrock * Call vdev_validate() here to make sure we have the same device. 135739c23413Seschrock * Otherwise, a device with an invalid label could be successfully 135839c23413Seschrock * opened in response to vdev_reopen(). 135939c23413Seschrock */ 1360c5904d13Seschrock if (vd->vdev_aux) { 1361c5904d13Seschrock (void) vdev_validate_aux(vd); 1362e14bb325SJeff Bonwick if (vdev_readable(vd) && vdev_writeable(vd) && 13636809eb4eSEric Schrock vd->vdev_aux == &spa->spa_l2cache && 1364573ca77eSGeorge Wilson !l2arc_vdev_present(vd)) 1365573ca77eSGeorge Wilson l2arc_add_vdev(spa, vd); 1366c5904d13Seschrock } else { 1367c5904d13Seschrock (void) vdev_validate(vd); 1368c5904d13Seschrock } 136939c23413Seschrock 1370fa9e4066Sahrens /* 13713d7072f8Seschrock * Reassess parent vdev's health. 1372fa9e4066Sahrens */ 13733d7072f8Seschrock vdev_propagate_state(vd); 1374fa9e4066Sahrens } 1375fa9e4066Sahrens 1376fa9e4066Sahrens int 137799653d4eSeschrock vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) 1378fa9e4066Sahrens { 1379fa9e4066Sahrens int error; 1380fa9e4066Sahrens 1381fa9e4066Sahrens /* 1382fa9e4066Sahrens * Normally, partial opens (e.g. of a mirror) are allowed. 1383fa9e4066Sahrens * For a create, however, we want to fail the request if 1384fa9e4066Sahrens * there are any components we can't open. 1385fa9e4066Sahrens */ 1386fa9e4066Sahrens error = vdev_open(vd); 1387fa9e4066Sahrens 1388fa9e4066Sahrens if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { 1389fa9e4066Sahrens vdev_close(vd); 1390fa9e4066Sahrens return (error ? error : ENXIO); 1391fa9e4066Sahrens } 1392fa9e4066Sahrens 1393fa9e4066Sahrens /* 1394fa9e4066Sahrens * Recursively initialize all labels. 1395fa9e4066Sahrens */ 139639c23413Seschrock if ((error = vdev_label_init(vd, txg, isreplacing ? 139739c23413Seschrock VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { 1398fa9e4066Sahrens vdev_close(vd); 1399fa9e4066Sahrens return (error); 1400fa9e4066Sahrens } 1401fa9e4066Sahrens 1402fa9e4066Sahrens return (0); 1403fa9e4066Sahrens } 1404fa9e4066Sahrens 14050e34b6a7Sbonwick void 1406573ca77eSGeorge Wilson vdev_metaslab_set_size(vdev_t *vd) 1407fa9e4066Sahrens { 1408fa9e4066Sahrens /* 1409fa9e4066Sahrens * Aim for roughly 200 metaslabs per vdev. 
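* (Illustrative numbers, assuming highbit() returns the 1-based index of the highest set bit: a 1TB vdev gives highbit(1TB / 200) == 33, i.e. 8GB metaslabs, ~128 of them; a 64GB vdev gives shift 29, i.e. 512MB metaslabs. The MAX() with SPA_MAXBLOCKSHIFT below keeps each metaslab at least as large as one maximum-sized block.)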
1410fa9e4066Sahrens */ 1411fa9e4066Sahrens vd->vdev_ms_shift = highbit(vd->vdev_asize / 200); 1412fa9e4066Sahrens vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT); 1413fa9e4066Sahrens } 1414fa9e4066Sahrens 1415fa9e4066Sahrens void 1416ecc2d604Sbonwick vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) 1417fa9e4066Sahrens { 1418ecc2d604Sbonwick ASSERT(vd == vd->vdev_top); 1419*88ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 1420ecc2d604Sbonwick ASSERT(ISP2(flags)); 1421fa9e4066Sahrens 1422ecc2d604Sbonwick if (flags & VDD_METASLAB) 1423ecc2d604Sbonwick (void) txg_list_add(&vd->vdev_ms_list, arg, txg); 1424ecc2d604Sbonwick 1425ecc2d604Sbonwick if (flags & VDD_DTL) 1426ecc2d604Sbonwick (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); 1427ecc2d604Sbonwick 1428ecc2d604Sbonwick (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); 1429fa9e4066Sahrens } 1430fa9e4066Sahrens 14318ad4d6ddSJeff Bonwick /* 14328ad4d6ddSJeff Bonwick * DTLs. 14338ad4d6ddSJeff Bonwick * 14348ad4d6ddSJeff Bonwick * A vdev's DTL (dirty time log) is the set of transaction groups for which 14358ad4d6ddSJeff Bonwick * the vdev has less than perfect replication. There are four kinds of DTL: 14368ad4d6ddSJeff Bonwick * 14378ad4d6ddSJeff Bonwick * DTL_MISSING: txgs for which the vdev has no valid copies of the data 14388ad4d6ddSJeff Bonwick * 14398ad4d6ddSJeff Bonwick * DTL_PARTIAL: txgs for which data is available, but not fully replicated 14408ad4d6ddSJeff Bonwick * 14418ad4d6ddSJeff Bonwick * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon 14428ad4d6ddSJeff Bonwick * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of 14438ad4d6ddSJeff Bonwick * txgs that was scrubbed. 14448ad4d6ddSJeff Bonwick * 14458ad4d6ddSJeff Bonwick * DTL_OUTAGE: txgs which cannot currently be read, whether due to 14468ad4d6ddSJeff Bonwick * persistent errors or just some device being offline. 14478ad4d6ddSJeff Bonwick * Unlike the other three, the DTL_OUTAGE map is not generally 14488ad4d6ddSJeff Bonwick * maintained; it's only computed when needed, typically to 14498ad4d6ddSJeff Bonwick * determine whether a device can be detached. 14508ad4d6ddSJeff Bonwick * 14518ad4d6ddSJeff Bonwick * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device 14528ad4d6ddSJeff Bonwick * either has the data or it doesn't. 14538ad4d6ddSJeff Bonwick * 14548ad4d6ddSJeff Bonwick * For interior vdevs such as mirror and RAID-Z the picture is more complex. 14558ad4d6ddSJeff Bonwick * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because 14568ad4d6ddSJeff Bonwick * if any child is less than fully replicated, then so is its parent. 14578ad4d6ddSJeff Bonwick * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, 14588ad4d6ddSJeff Bonwick * comprising only those txgs which appear in more than 'maxfaults' children; 14598ad4d6ddSJeff Bonwick * those are the txgs we don't have enough replication to read. For example, 14608ad4d6ddSJeff Bonwick * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); 14618ad4d6ddSJeff Bonwick * thus, its DTL_MISSING consists of the set of txgs that appear in more than 14628ad4d6ddSJeff Bonwick * two child DTL_MISSING maps. 14638ad4d6ddSJeff Bonwick * 14648ad4d6ddSJeff Bonwick * It should be clear from the above that to compute the DTLs and outage maps 14658ad4d6ddSJeff Bonwick * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. 
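* (Concretely: a 2-way mirror whose children have DTL_MISSING maps of [10,20] and [15,30] has DTL_MISSING [15,20] -- the txgs missing from both children -- and DTL_PARTIAL [10,30], the txgs missing from either.)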
14668ad4d6ddSJeff Bonwick * Therefore, that is all we keep on disk. When loading the pool, or after 14678ad4d6ddSJeff Bonwick * a configuration change, we generate all other DTLs from first principles. 14688ad4d6ddSJeff Bonwick */ 1469fa9e4066Sahrens void 14708ad4d6ddSJeff Bonwick vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1471fa9e4066Sahrens { 14728ad4d6ddSJeff Bonwick space_map_t *sm = &vd->vdev_dtl[t]; 14738ad4d6ddSJeff Bonwick 14748ad4d6ddSJeff Bonwick ASSERT(t < DTL_TYPES); 14758ad4d6ddSJeff Bonwick ASSERT(vd != vd->vdev_spa->spa_root_vdev); 14768ad4d6ddSJeff Bonwick 1477fa9e4066Sahrens mutex_enter(sm->sm_lock); 1478fa9e4066Sahrens if (!space_map_contains(sm, txg, size)) 1479fa9e4066Sahrens space_map_add(sm, txg, size); 1480fa9e4066Sahrens mutex_exit(sm->sm_lock); 1481fa9e4066Sahrens } 1482fa9e4066Sahrens 14838ad4d6ddSJeff Bonwick boolean_t 14848ad4d6ddSJeff Bonwick vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1485fa9e4066Sahrens { 14868ad4d6ddSJeff Bonwick space_map_t *sm = &vd->vdev_dtl[t]; 14878ad4d6ddSJeff Bonwick boolean_t dirty = B_FALSE; 1488fa9e4066Sahrens 14898ad4d6ddSJeff Bonwick ASSERT(t < DTL_TYPES); 14908ad4d6ddSJeff Bonwick ASSERT(vd != vd->vdev_spa->spa_root_vdev); 1491fa9e4066Sahrens 1492fa9e4066Sahrens mutex_enter(sm->sm_lock); 14938ad4d6ddSJeff Bonwick if (sm->sm_space != 0) 14948ad4d6ddSJeff Bonwick dirty = space_map_contains(sm, txg, size); 1495fa9e4066Sahrens mutex_exit(sm->sm_lock); 1496fa9e4066Sahrens 1497fa9e4066Sahrens return (dirty); 1498fa9e4066Sahrens } 1499fa9e4066Sahrens 15008ad4d6ddSJeff Bonwick boolean_t 15018ad4d6ddSJeff Bonwick vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) 15028ad4d6ddSJeff Bonwick { 15038ad4d6ddSJeff Bonwick space_map_t *sm = &vd->vdev_dtl[t]; 15048ad4d6ddSJeff Bonwick boolean_t empty; 15058ad4d6ddSJeff Bonwick 15068ad4d6ddSJeff Bonwick mutex_enter(sm->sm_lock); 15078ad4d6ddSJeff Bonwick empty = (sm->sm_space == 0); 15088ad4d6ddSJeff Bonwick mutex_exit(sm->sm_lock); 15098ad4d6ddSJeff Bonwick 15108ad4d6ddSJeff Bonwick return (empty); 15118ad4d6ddSJeff Bonwick } 15128ad4d6ddSJeff Bonwick 1513fa9e4066Sahrens /* 1514fa9e4066Sahrens * Reassess DTLs after a config change or scrub completion. 1515fa9e4066Sahrens */ 1516fa9e4066Sahrens void 1517fa9e4066Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) 1518fa9e4066Sahrens { 1519ea8dc4b6Seschrock spa_t *spa = vd->vdev_spa; 15208ad4d6ddSJeff Bonwick avl_tree_t reftree; 15218ad4d6ddSJeff Bonwick int minref; 1522fa9e4066Sahrens 15238ad4d6ddSJeff Bonwick ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1524fa9e4066Sahrens 15258ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) 15268ad4d6ddSJeff Bonwick vdev_dtl_reassess(vd->vdev_child[c], txg, 15278ad4d6ddSJeff Bonwick scrub_txg, scrub_done); 15288ad4d6ddSJeff Bonwick 1529*88ecc943SGeorge Wilson if (vd == spa->spa_root_vdev || vd->vdev_ishole) 15308ad4d6ddSJeff Bonwick return; 15318ad4d6ddSJeff Bonwick 15328ad4d6ddSJeff Bonwick if (vd->vdev_ops->vdev_op_leaf) { 1533fa9e4066Sahrens mutex_enter(&vd->vdev_dtl_lock); 1534088f3894Sahrens if (scrub_txg != 0 && 1535088f3894Sahrens (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) { 1536088f3894Sahrens /* XXX should check scrub_done? */ 1537088f3894Sahrens /* 1538088f3894Sahrens * We completed a scrub up to scrub_txg. 
If we 1539088f3894Sahrens * did it without rebooting, then the scrub dtl 1540088f3894Sahrens * will be valid, so excise the old region and 1541088f3894Sahrens * fold in the scrub dtl. Otherwise, leave the 1542088f3894Sahrens * dtl as-is if there was an error. 15438ad4d6ddSJeff Bonwick * 15448ad4d6ddSJeff Bonwick * There's a little trick here: to excise the beginning 15458ad4d6ddSJeff Bonwick * of the DTL_MISSING map, we put it into a reference 15468ad4d6ddSJeff Bonwick * tree and then add a segment with refcnt -1 that 15478ad4d6ddSJeff Bonwick * covers the range [0, scrub_txg). This means 15488ad4d6ddSJeff Bonwick * that each txg in that range has refcnt -1 or 0. 15498ad4d6ddSJeff Bonwick * We then add DTL_SCRUB with a refcnt of 2, so that 15508ad4d6ddSJeff Bonwick * entries in the range [0, scrub_txg) will have a 15518ad4d6ddSJeff Bonwick * positive refcnt -- either 1 or 2. We then convert 15528ad4d6ddSJeff Bonwick * the reference tree into the new DTL_MISSING map. 1553088f3894Sahrens */ 15548ad4d6ddSJeff Bonwick space_map_ref_create(&reftree); 15558ad4d6ddSJeff Bonwick space_map_ref_add_map(&reftree, 15568ad4d6ddSJeff Bonwick &vd->vdev_dtl[DTL_MISSING], 1); 15578ad4d6ddSJeff Bonwick space_map_ref_add_seg(&reftree, 0, scrub_txg, -1); 15588ad4d6ddSJeff Bonwick space_map_ref_add_map(&reftree, 15598ad4d6ddSJeff Bonwick &vd->vdev_dtl[DTL_SCRUB], 2); 15608ad4d6ddSJeff Bonwick space_map_ref_generate_map(&reftree, 15618ad4d6ddSJeff Bonwick &vd->vdev_dtl[DTL_MISSING], 1); 15628ad4d6ddSJeff Bonwick space_map_ref_destroy(&reftree); 1563fa9e4066Sahrens } 15648ad4d6ddSJeff Bonwick space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 15658ad4d6ddSJeff Bonwick space_map_walk(&vd->vdev_dtl[DTL_MISSING], 15668ad4d6ddSJeff Bonwick space_map_add, &vd->vdev_dtl[DTL_PARTIAL]); 1567fa9e4066Sahrens if (scrub_done) 15688ad4d6ddSJeff Bonwick space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 15698ad4d6ddSJeff Bonwick space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 15708ad4d6ddSJeff Bonwick if (!vdev_readable(vd)) 15718ad4d6ddSJeff Bonwick space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 15728ad4d6ddSJeff Bonwick else 15738ad4d6ddSJeff Bonwick space_map_walk(&vd->vdev_dtl[DTL_MISSING], 15748ad4d6ddSJeff Bonwick space_map_add, &vd->vdev_dtl[DTL_OUTAGE]); 1575fa9e4066Sahrens mutex_exit(&vd->vdev_dtl_lock); 1576088f3894Sahrens 1577ecc2d604Sbonwick if (txg != 0) 1578ecc2d604Sbonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 1579fa9e4066Sahrens return; 1580fa9e4066Sahrens } 1581fa9e4066Sahrens 1582fa9e4066Sahrens mutex_enter(&vd->vdev_dtl_lock); 15838ad4d6ddSJeff Bonwick for (int t = 0; t < DTL_TYPES; t++) { 15848ad4d6ddSJeff Bonwick if (t == DTL_SCRUB) 15858ad4d6ddSJeff Bonwick continue; /* leaf vdevs only */ 15868ad4d6ddSJeff Bonwick if (t == DTL_PARTIAL) 15878ad4d6ddSJeff Bonwick minref = 1; /* i.e. 
non-zero */ 15888ad4d6ddSJeff Bonwick else if (vd->vdev_nparity != 0) 15898ad4d6ddSJeff Bonwick minref = vd->vdev_nparity + 1; /* RAID-Z */ 15908ad4d6ddSJeff Bonwick else 15918ad4d6ddSJeff Bonwick minref = vd->vdev_children; /* any kind of mirror */ 15928ad4d6ddSJeff Bonwick space_map_ref_create(&reftree); 15938ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) { 15948ad4d6ddSJeff Bonwick vdev_t *cvd = vd->vdev_child[c]; 15958ad4d6ddSJeff Bonwick mutex_enter(&cvd->vdev_dtl_lock); 15968ad4d6ddSJeff Bonwick space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1); 15978ad4d6ddSJeff Bonwick mutex_exit(&cvd->vdev_dtl_lock); 15988ad4d6ddSJeff Bonwick } 15998ad4d6ddSJeff Bonwick space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 16008ad4d6ddSJeff Bonwick space_map_ref_destroy(&reftree); 1601fa9e4066Sahrens } 16028ad4d6ddSJeff Bonwick mutex_exit(&vd->vdev_dtl_lock); 1603fa9e4066Sahrens } 1604fa9e4066Sahrens 1605fa9e4066Sahrens static int 1606fa9e4066Sahrens vdev_dtl_load(vdev_t *vd) 1607fa9e4066Sahrens { 1608fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 16098ad4d6ddSJeff Bonwick space_map_obj_t *smo = &vd->vdev_dtl_smo; 1610ecc2d604Sbonwick objset_t *mos = spa->spa_meta_objset; 1611fa9e4066Sahrens dmu_buf_t *db; 1612fa9e4066Sahrens int error; 1613fa9e4066Sahrens 1614fa9e4066Sahrens ASSERT(vd->vdev_children == 0); 1615fa9e4066Sahrens 1616fa9e4066Sahrens if (smo->smo_object == 0) 1617fa9e4066Sahrens return (0); 1618fa9e4066Sahrens 1619*88ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 1620*88ecc943SGeorge Wilson 1621ecc2d604Sbonwick if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 1622ea8dc4b6Seschrock return (error); 1623ecc2d604Sbonwick 16241934e92fSmaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 16251934e92fSmaybee bcopy(db->db_data, smo, sizeof (*smo)); 1626ea8dc4b6Seschrock dmu_buf_rele(db, FTAG); 1627fa9e4066Sahrens 1628fa9e4066Sahrens mutex_enter(&vd->vdev_dtl_lock); 16298ad4d6ddSJeff Bonwick error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 16308ad4d6ddSJeff Bonwick NULL, SM_ALLOC, smo, mos); 1631fa9e4066Sahrens mutex_exit(&vd->vdev_dtl_lock); 1632fa9e4066Sahrens 1633fa9e4066Sahrens return (error); 1634fa9e4066Sahrens } 1635fa9e4066Sahrens 1636fa9e4066Sahrens void 1637fa9e4066Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1638fa9e4066Sahrens { 1639fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 16408ad4d6ddSJeff Bonwick space_map_obj_t *smo = &vd->vdev_dtl_smo; 16418ad4d6ddSJeff Bonwick space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 1642ecc2d604Sbonwick objset_t *mos = spa->spa_meta_objset; 1643fa9e4066Sahrens space_map_t smsync; 1644fa9e4066Sahrens kmutex_t smlock; 1645fa9e4066Sahrens dmu_buf_t *db; 1646fa9e4066Sahrens dmu_tx_t *tx; 1647fa9e4066Sahrens 1648*88ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 1649*88ecc943SGeorge Wilson 1650fa9e4066Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1651fa9e4066Sahrens 1652fa9e4066Sahrens if (vd->vdev_detached) { 1653fa9e4066Sahrens if (smo->smo_object != 0) { 1654ecc2d604Sbonwick int err = dmu_object_free(mos, smo->smo_object, tx); 1655fa9e4066Sahrens ASSERT3U(err, ==, 0); 1656fa9e4066Sahrens smo->smo_object = 0; 1657fa9e4066Sahrens } 1658fa9e4066Sahrens dmu_tx_commit(tx); 1659fa9e4066Sahrens return; 1660fa9e4066Sahrens } 1661fa9e4066Sahrens 1662fa9e4066Sahrens if (smo->smo_object == 0) { 1663fa9e4066Sahrens ASSERT(smo->smo_objsize == 0); 1664fa9e4066Sahrens ASSERT(smo->smo_alloc == 0); 1665ecc2d604Sbonwick smo->smo_object = dmu_object_alloc(mos, 1666fa9e4066Sahrens DMU_OT_SPACE_MAP, 1 << 
SPACE_MAP_BLOCKSHIFT, 1667fa9e4066Sahrens DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1668fa9e4066Sahrens ASSERT(smo->smo_object != 0); 1669fa9e4066Sahrens vdev_config_dirty(vd->vdev_top); 1670fa9e4066Sahrens } 1671fa9e4066Sahrens 1672fa9e4066Sahrens mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1673fa9e4066Sahrens 1674fa9e4066Sahrens space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, 1675fa9e4066Sahrens &smlock); 1676fa9e4066Sahrens 1677fa9e4066Sahrens mutex_enter(&smlock); 1678fa9e4066Sahrens 1679fa9e4066Sahrens mutex_enter(&vd->vdev_dtl_lock); 1680ecc2d604Sbonwick space_map_walk(sm, space_map_add, &smsync); 1681fa9e4066Sahrens mutex_exit(&vd->vdev_dtl_lock); 1682fa9e4066Sahrens 1683ecc2d604Sbonwick space_map_truncate(smo, mos, tx); 1684ecc2d604Sbonwick space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1685fa9e4066Sahrens 1686fa9e4066Sahrens space_map_destroy(&smsync); 1687fa9e4066Sahrens 1688fa9e4066Sahrens mutex_exit(&smlock); 1689fa9e4066Sahrens mutex_destroy(&smlock); 1690fa9e4066Sahrens 1691ecc2d604Sbonwick VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1692fa9e4066Sahrens dmu_buf_will_dirty(db, tx); 16931934e92fSmaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 16941934e92fSmaybee bcopy(smo, db->db_data, sizeof (*smo)); 1695ea8dc4b6Seschrock dmu_buf_rele(db, FTAG); 1696fa9e4066Sahrens 1697fa9e4066Sahrens dmu_tx_commit(tx); 1698fa9e4066Sahrens } 1699fa9e4066Sahrens 17008ad4d6ddSJeff Bonwick /* 17018ad4d6ddSJeff Bonwick * Determine whether the specified vdev can be offlined/detached/removed 17028ad4d6ddSJeff Bonwick * without losing data. 17038ad4d6ddSJeff Bonwick */ 17048ad4d6ddSJeff Bonwick boolean_t 17058ad4d6ddSJeff Bonwick vdev_dtl_required(vdev_t *vd) 17068ad4d6ddSJeff Bonwick { 17078ad4d6ddSJeff Bonwick spa_t *spa = vd->vdev_spa; 17088ad4d6ddSJeff Bonwick vdev_t *tvd = vd->vdev_top; 17098ad4d6ddSJeff Bonwick uint8_t cant_read = vd->vdev_cant_read; 17108ad4d6ddSJeff Bonwick boolean_t required; 17118ad4d6ddSJeff Bonwick 17128ad4d6ddSJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 17138ad4d6ddSJeff Bonwick 17148ad4d6ddSJeff Bonwick if (vd == spa->spa_root_vdev || vd == tvd) 17158ad4d6ddSJeff Bonwick return (B_TRUE); 17168ad4d6ddSJeff Bonwick 17178ad4d6ddSJeff Bonwick /* 17188ad4d6ddSJeff Bonwick * Temporarily mark the device as unreadable, and then determine 17198ad4d6ddSJeff Bonwick * whether this results in any DTL outages in the top-level vdev. 17208ad4d6ddSJeff Bonwick * If not, we can safely offline/detach/remove the device. 17218ad4d6ddSJeff Bonwick */ 17228ad4d6ddSJeff Bonwick vd->vdev_cant_read = B_TRUE; 17238ad4d6ddSJeff Bonwick vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 17248ad4d6ddSJeff Bonwick required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 17258ad4d6ddSJeff Bonwick vd->vdev_cant_read = cant_read; 17268ad4d6ddSJeff Bonwick vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 17278ad4d6ddSJeff Bonwick 17288ad4d6ddSJeff Bonwick return (required); 17298ad4d6ddSJeff Bonwick } 17308ad4d6ddSJeff Bonwick 1731088f3894Sahrens /* 1732088f3894Sahrens * Determine if resilver is needed, and if so the txg range. 
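* For a leaf vdev this is the span of its DTL_MISSING map; for an interior vdev it is the union of its children's spans.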
1733088f3894Sahrens */ 1734088f3894Sahrens boolean_t 1735088f3894Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) 1736088f3894Sahrens { 1737088f3894Sahrens boolean_t needed = B_FALSE; 1738088f3894Sahrens uint64_t thismin = UINT64_MAX; 1739088f3894Sahrens uint64_t thismax = 0; 1740088f3894Sahrens 1741088f3894Sahrens if (vd->vdev_children == 0) { 1742088f3894Sahrens mutex_enter(&vd->vdev_dtl_lock); 17438ad4d6ddSJeff Bonwick if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 && 17448ad4d6ddSJeff Bonwick vdev_writeable(vd)) { 1745088f3894Sahrens space_seg_t *ss; 1746088f3894Sahrens 17478ad4d6ddSJeff Bonwick ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root); 1748088f3894Sahrens thismin = ss->ss_start - 1; 17498ad4d6ddSJeff Bonwick ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root); 1750088f3894Sahrens thismax = ss->ss_end; 1751088f3894Sahrens needed = B_TRUE; 1752088f3894Sahrens } 1753088f3894Sahrens mutex_exit(&vd->vdev_dtl_lock); 1754088f3894Sahrens } else { 17558ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) { 1756088f3894Sahrens vdev_t *cvd = vd->vdev_child[c]; 1757088f3894Sahrens uint64_t cmin, cmax; 1758088f3894Sahrens 1759088f3894Sahrens if (vdev_resilver_needed(cvd, &cmin, &cmax)) { 1760088f3894Sahrens thismin = MIN(thismin, cmin); 1761088f3894Sahrens thismax = MAX(thismax, cmax); 1762088f3894Sahrens needed = B_TRUE; 1763088f3894Sahrens } 1764088f3894Sahrens } 1765088f3894Sahrens } 1766088f3894Sahrens 1767088f3894Sahrens if (needed && minp) { 1768088f3894Sahrens *minp = thismin; 1769088f3894Sahrens *maxp = thismax; 1770088f3894Sahrens } 1771088f3894Sahrens return (needed); 1772088f3894Sahrens } 1773088f3894Sahrens 1774560e6e96Seschrock void 1775ea8dc4b6Seschrock vdev_load(vdev_t *vd) 1776fa9e4066Sahrens { 1777fa9e4066Sahrens /* 1778fa9e4066Sahrens * Recursively load all children. 1779fa9e4066Sahrens */ 17808ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) 1781560e6e96Seschrock vdev_load(vd->vdev_child[c]); 1782fa9e4066Sahrens 1783fa9e4066Sahrens /* 17840e34b6a7Sbonwick * If this is a top-level vdev, initialize its metaslabs. 1785fa9e4066Sahrens */ 1786*88ecc943SGeorge Wilson if (vd == vd->vdev_top && !vd->vdev_ishole && 1787560e6e96Seschrock (vd->vdev_ashift == 0 || vd->vdev_asize == 0 || 1788560e6e96Seschrock vdev_metaslab_init(vd, 0) != 0)) 1789560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1790560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1791fa9e4066Sahrens 1792fa9e4066Sahrens /* 1793fa9e4066Sahrens * If this is a leaf vdev, load its DTL. 1794fa9e4066Sahrens */ 1795560e6e96Seschrock if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0) 1796560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1797560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1798fa9e4066Sahrens } 1799fa9e4066Sahrens 180099653d4eSeschrock /* 1801fa94a07fSbrendan * The special vdev case is used for hot spares and l2cache devices. Its 1802fa94a07fSbrendan * sole purpose is to set the vdev state for the associated vdev. To do this, 1803fa94a07fSbrendan * we make sure that we can open the underlying device, then try to read the 1804fa94a07fSbrendan * label, and make sure that the label is sane and that it hasn't been 1805fa94a07fSbrendan * repurposed to another pool. 
180699653d4eSeschrock */ 180799653d4eSeschrock int 1808fa94a07fSbrendan vdev_validate_aux(vdev_t *vd) 180999653d4eSeschrock { 181099653d4eSeschrock nvlist_t *label; 181199653d4eSeschrock uint64_t guid, version; 181299653d4eSeschrock uint64_t state; 181399653d4eSeschrock 1814e14bb325SJeff Bonwick if (!vdev_readable(vd)) 1815c5904d13Seschrock return (0); 1816c5904d13Seschrock 181799653d4eSeschrock if ((label = vdev_label_read_config(vd)) == NULL) { 181899653d4eSeschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 181999653d4eSeschrock VDEV_AUX_CORRUPT_DATA); 182099653d4eSeschrock return (-1); 182199653d4eSeschrock } 182299653d4eSeschrock 182399653d4eSeschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 1824e7437265Sahrens version > SPA_VERSION || 182599653d4eSeschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 182699653d4eSeschrock guid != vd->vdev_guid || 182799653d4eSeschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 182899653d4eSeschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 182999653d4eSeschrock VDEV_AUX_CORRUPT_DATA); 183099653d4eSeschrock nvlist_free(label); 183199653d4eSeschrock return (-1); 183299653d4eSeschrock } 183399653d4eSeschrock 183499653d4eSeschrock /* 183599653d4eSeschrock * We don't actually check the pool state here. If it's in fact in 183699653d4eSeschrock * use by another pool, we update this fact on the fly when requested. 183799653d4eSeschrock */ 183899653d4eSeschrock nvlist_free(label); 183999653d4eSeschrock return (0); 184099653d4eSeschrock } 184199653d4eSeschrock 1842*88ecc943SGeorge Wilson void 1843*88ecc943SGeorge Wilson vdev_remove(vdev_t *vd, uint64_t txg) 1844*88ecc943SGeorge Wilson { 1845*88ecc943SGeorge Wilson spa_t *spa = vd->vdev_spa; 1846*88ecc943SGeorge Wilson objset_t *mos = spa->spa_meta_objset; 1847*88ecc943SGeorge Wilson dmu_tx_t *tx; 1848*88ecc943SGeorge Wilson 1849*88ecc943SGeorge Wilson tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 1850*88ecc943SGeorge Wilson 1851*88ecc943SGeorge Wilson if (vd->vdev_dtl_smo.smo_object) { 1852*88ecc943SGeorge Wilson ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0); 1853*88ecc943SGeorge Wilson (void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx); 1854*88ecc943SGeorge Wilson vd->vdev_dtl_smo.smo_object = 0; 1855*88ecc943SGeorge Wilson } 1856*88ecc943SGeorge Wilson 1857*88ecc943SGeorge Wilson if (vd->vdev_ms != NULL) { 1858*88ecc943SGeorge Wilson for (int m = 0; m < vd->vdev_ms_count; m++) { 1859*88ecc943SGeorge Wilson metaslab_t *msp = vd->vdev_ms[m]; 1860*88ecc943SGeorge Wilson 1861*88ecc943SGeorge Wilson if (msp == NULL || msp->ms_smo.smo_object == 0) 1862*88ecc943SGeorge Wilson continue; 1863*88ecc943SGeorge Wilson 1864*88ecc943SGeorge Wilson ASSERT3U(msp->ms_smo.smo_alloc, ==, 0); 1865*88ecc943SGeorge Wilson (void) dmu_object_free(mos, msp->ms_smo.smo_object, tx); 1866*88ecc943SGeorge Wilson msp->ms_smo.smo_object = 0; 1867*88ecc943SGeorge Wilson } 1868*88ecc943SGeorge Wilson } 1869*88ecc943SGeorge Wilson 1870*88ecc943SGeorge Wilson if (vd->vdev_ms_array) { 1871*88ecc943SGeorge Wilson (void) dmu_object_free(mos, vd->vdev_ms_array, tx); 1872*88ecc943SGeorge Wilson vd->vdev_ms_array = 0; 1873*88ecc943SGeorge Wilson vd->vdev_ms_shift = 0; 1874*88ecc943SGeorge Wilson } 1875*88ecc943SGeorge Wilson dmu_tx_commit(tx); 1876*88ecc943SGeorge Wilson } 1877*88ecc943SGeorge Wilson 1878fa9e4066Sahrens void 1879fa9e4066Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg) 1880fa9e4066Sahrens { 1881fa9e4066Sahrens metaslab_t *msp; 
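/* Walk the metaslabs that vdev_sync() placed on this txg's clean list; metaslab_sync_done() makes the space freed during the txg available for allocation again. */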
1882fa9e4066Sahrens 1883*88ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 1884*88ecc943SGeorge Wilson 1885fa9e4066Sahrens while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 1886fa9e4066Sahrens metaslab_sync_done(msp, txg); 1887fa9e4066Sahrens } 1888fa9e4066Sahrens 1889fa9e4066Sahrens void 1890fa9e4066Sahrens vdev_sync(vdev_t *vd, uint64_t txg) 1891fa9e4066Sahrens { 1892fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 1893fa9e4066Sahrens vdev_t *lvd; 1894fa9e4066Sahrens metaslab_t *msp; 1895ecc2d604Sbonwick dmu_tx_t *tx; 1896fa9e4066Sahrens 1897*88ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 1898*88ecc943SGeorge Wilson 1899ecc2d604Sbonwick if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) { 1900ecc2d604Sbonwick ASSERT(vd == vd->vdev_top); 1901ecc2d604Sbonwick tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1902ecc2d604Sbonwick vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 1903ecc2d604Sbonwick DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 1904ecc2d604Sbonwick ASSERT(vd->vdev_ms_array != 0); 1905ecc2d604Sbonwick vdev_config_dirty(vd); 1906ecc2d604Sbonwick dmu_tx_commit(tx); 1907ecc2d604Sbonwick } 1908fa9e4066Sahrens 1909*88ecc943SGeorge Wilson if (vd->vdev_removing) 1910*88ecc943SGeorge Wilson vdev_remove(vd, txg); 1911*88ecc943SGeorge Wilson 1912ecc2d604Sbonwick while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 1913fa9e4066Sahrens metaslab_sync(msp, txg); 1914ecc2d604Sbonwick (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 1915ecc2d604Sbonwick } 1916fa9e4066Sahrens 1917fa9e4066Sahrens while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 1918fa9e4066Sahrens vdev_dtl_sync(lvd, txg); 1919fa9e4066Sahrens 1920fa9e4066Sahrens (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 1921fa9e4066Sahrens } 1922fa9e4066Sahrens 1923fa9e4066Sahrens uint64_t 1924fa9e4066Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 1925fa9e4066Sahrens { 1926fa9e4066Sahrens return (vd->vdev_ops->vdev_op_asize(vd, psize)); 1927fa9e4066Sahrens } 1928fa9e4066Sahrens 19293d7072f8Seschrock /* 19303d7072f8Seschrock * Mark the given vdev faulted. A faulted vdev behaves as if the device could 19313d7072f8Seschrock * not be opened, and no I/O is attempted. 19323d7072f8Seschrock */ 1933fa9e4066Sahrens int 19343d7072f8Seschrock vdev_fault(spa_t *spa, uint64_t guid) 1935fa9e4066Sahrens { 1936c5904d13Seschrock vdev_t *vd; 1937fa9e4066Sahrens 1938e14bb325SJeff Bonwick spa_vdev_state_enter(spa); 1939fa9e4066Sahrens 1940c5904d13Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 1941e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1942e14bb325SJeff Bonwick 19433d7072f8Seschrock if (!vd->vdev_ops->vdev_op_leaf) 1944e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 1945fa9e4066Sahrens 19463d7072f8Seschrock /* 19473d7072f8Seschrock * Faulted state takes precedence over degraded. 19483d7072f8Seschrock */ 19493d7072f8Seschrock vd->vdev_faulted = 1ULL; 19503d7072f8Seschrock vd->vdev_degraded = 0ULL; 1951e14bb325SJeff Bonwick vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED); 19523d7072f8Seschrock 19533d7072f8Seschrock /* 19546988b9faSDavid Marker * If marking the vdev as faulted causes the top-level vdev to become 19553d7072f8Seschrock * unavailable, then back off and simply mark the vdev as degraded 19563d7072f8Seschrock * instead. 
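* (For example, faulting one side of a two-way mirror whose other half is already unavailable would otherwise take the entire top-level vdev down.)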
19573d7072f8Seschrock */ 1958c5904d13Seschrock if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) { 19593d7072f8Seschrock vd->vdev_degraded = 1ULL; 19603d7072f8Seschrock vd->vdev_faulted = 0ULL; 19613d7072f8Seschrock 19623d7072f8Seschrock /* 19633d7072f8Seschrock * If we reopen the device and it's not dead, only then do we 19643d7072f8Seschrock * mark it degraded. 19653d7072f8Seschrock */ 19663d7072f8Seschrock vdev_reopen(vd); 19673d7072f8Seschrock 19680a4e9518Sgw if (vdev_readable(vd)) { 19693d7072f8Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 19703d7072f8Seschrock VDEV_AUX_ERR_EXCEEDED); 19713d7072f8Seschrock } 19723d7072f8Seschrock } 19733d7072f8Seschrock 1974e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, vd, 0)); 19753d7072f8Seschrock } 19763d7072f8Seschrock 19773d7072f8Seschrock /* 19783d7072f8Seschrock * Mark the given vdev degraded. A degraded vdev is purely an indication to the 19793d7072f8Seschrock * user that something is wrong. The vdev continues to operate as normal as far 19803d7072f8Seschrock * as I/O is concerned. 19813d7072f8Seschrock */ 19823d7072f8Seschrock int 19833d7072f8Seschrock vdev_degrade(spa_t *spa, uint64_t guid) 19843d7072f8Seschrock { 1985c5904d13Seschrock vdev_t *vd; 19860a4e9518Sgw 1987e14bb325SJeff Bonwick spa_vdev_state_enter(spa); 19883d7072f8Seschrock 1989c5904d13Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 1990e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENODEV)); 1991e14bb325SJeff Bonwick 19920e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 1993e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 19940e34b6a7Sbonwick 19953d7072f8Seschrock /* 19963d7072f8Seschrock * If the vdev is already faulted or degraded, then don't do anything. 19973d7072f8Seschrock */ 1998e14bb325SJeff Bonwick if (vd->vdev_faulted || vd->vdev_degraded) 1999e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, 0)); 20003d7072f8Seschrock 20013d7072f8Seschrock vd->vdev_degraded = 1ULL; 20023d7072f8Seschrock if (!vdev_is_dead(vd)) 20033d7072f8Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 20043d7072f8Seschrock VDEV_AUX_ERR_EXCEEDED); 20053d7072f8Seschrock 2006e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, vd, 0)); 20073d7072f8Seschrock } 20083d7072f8Seschrock 20093d7072f8Seschrock /* 20103d7072f8Seschrock * Online the given vdev. If 'unspare' is set, it implies two things. First, 20113d7072f8Seschrock * any attached spare device should be detached when the device finishes 20123d7072f8Seschrock * resilvering. Second, the online should be treated like a 'test' online case, 20133d7072f8Seschrock * so no FMA events are generated if the device fails to open. 
20143d7072f8Seschrock */ 20153d7072f8Seschrock int 2016e14bb325SJeff Bonwick vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 20173d7072f8Seschrock { 2018573ca77eSGeorge Wilson vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 20193d7072f8Seschrock 2020e14bb325SJeff Bonwick spa_vdev_state_enter(spa); 20213d7072f8Seschrock 2022c5904d13Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2023e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENODEV)); 20243d7072f8Seschrock 20253d7072f8Seschrock if (!vd->vdev_ops->vdev_op_leaf) 2026e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2027fa9e4066Sahrens 2028573ca77eSGeorge Wilson tvd = vd->vdev_top; 2029fa9e4066Sahrens vd->vdev_offline = B_FALSE; 2030441d80aaSlling vd->vdev_tmpoffline = B_FALSE; 2031e14bb325SJeff Bonwick vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 2032e14bb325SJeff Bonwick vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 2033573ca77eSGeorge Wilson 2034573ca77eSGeorge Wilson /* XXX - L2ARC 1.0 does not support expansion */ 2035573ca77eSGeorge Wilson if (!vd->vdev_aux) { 2036573ca77eSGeorge Wilson for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2037573ca77eSGeorge Wilson pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 2038573ca77eSGeorge Wilson } 2039573ca77eSGeorge Wilson 2040573ca77eSGeorge Wilson vdev_reopen(tvd); 20413d7072f8Seschrock vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 20423d7072f8Seschrock 2043573ca77eSGeorge Wilson if (!vd->vdev_aux) { 2044573ca77eSGeorge Wilson for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2045573ca77eSGeorge Wilson pvd->vdev_expanding = B_FALSE; 2046573ca77eSGeorge Wilson } 2047573ca77eSGeorge Wilson 20483d7072f8Seschrock if (newstate) 20493d7072f8Seschrock *newstate = vd->vdev_state; 20503d7072f8Seschrock if ((flags & ZFS_ONLINE_UNSPARE) && 20513d7072f8Seschrock !vdev_is_dead(vd) && vd->vdev_parent && 20523d7072f8Seschrock vd->vdev_parent->vdev_ops == &vdev_spare_ops && 20533d7072f8Seschrock vd->vdev_parent->vdev_child[0] == vd) 20543d7072f8Seschrock vd->vdev_unspare = B_TRUE; 2055fa9e4066Sahrens 2056573ca77eSGeorge Wilson if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 2057573ca77eSGeorge Wilson 2058573ca77eSGeorge Wilson /* XXX - L2ARC 1.0 does not support expansion */ 2059573ca77eSGeorge Wilson if (vd->vdev_aux) 2060573ca77eSGeorge Wilson return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 2061573ca77eSGeorge Wilson spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2062573ca77eSGeorge Wilson } 20638ad4d6ddSJeff Bonwick return (spa_vdev_state_exit(spa, vd, 0)); 2064fa9e4066Sahrens } 2065fa9e4066Sahrens 2066fa9e4066Sahrens int 20673d7072f8Seschrock vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 2068fa9e4066Sahrens { 2069e6ca193dSGeorge Wilson vdev_t *vd, *tvd; 2070e6ca193dSGeorge Wilson int error; 20710a4e9518Sgw 2072e14bb325SJeff Bonwick spa_vdev_state_enter(spa); 2073fa9e4066Sahrens 2074c5904d13Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2075e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2076fa9e4066Sahrens 20770e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 2078e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 20790e34b6a7Sbonwick 2080e6ca193dSGeorge Wilson tvd = vd->vdev_top; 2081e6ca193dSGeorge Wilson 2082fa9e4066Sahrens /* 2083ecc2d604Sbonwick * If the device isn't already offline, try to offline it. 
2084fa9e4066Sahrens */ 2085ecc2d604Sbonwick if (!vd->vdev_offline) { 2086ecc2d604Sbonwick /* 20878ad4d6ddSJeff Bonwick * If this device has the only valid copy of some data, 2088e6ca193dSGeorge Wilson * don't allow it to be offlined. Log devices are always 2089e6ca193dSGeorge Wilson * expendable. 2090ecc2d604Sbonwick */ 2091e6ca193dSGeorge Wilson if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2092e6ca193dSGeorge Wilson vdev_dtl_required(vd)) 2093e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2094fa9e4066Sahrens 2095ecc2d604Sbonwick /* 2096ecc2d604Sbonwick * Offline this device and reopen its top-level vdev. 2097e6ca193dSGeorge Wilson * If the top-level vdev is a log device then just offline 2098e6ca193dSGeorge Wilson * it. Otherwise, if this action results in the top-level 2099e6ca193dSGeorge Wilson * vdev becoming unusable, undo it and fail the request. 2100ecc2d604Sbonwick */ 2101ecc2d604Sbonwick vd->vdev_offline = B_TRUE; 2102e6ca193dSGeorge Wilson vdev_reopen(tvd); 2103e6ca193dSGeorge Wilson 2104e6ca193dSGeorge Wilson if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2105e6ca193dSGeorge Wilson vdev_is_dead(tvd)) { 2106ecc2d604Sbonwick vd->vdev_offline = B_FALSE; 2107e6ca193dSGeorge Wilson vdev_reopen(tvd); 2108e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2109ecc2d604Sbonwick } 2110fa9e4066Sahrens } 2111fa9e4066Sahrens 2112e14bb325SJeff Bonwick vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 2113ecc2d604Sbonwick 2114e6ca193dSGeorge Wilson if (!tvd->vdev_islog || !vdev_is_dead(tvd)) 2115e6ca193dSGeorge Wilson return (spa_vdev_state_exit(spa, vd, 0)); 2116e6ca193dSGeorge Wilson 2117e6ca193dSGeorge Wilson (void) spa_vdev_state_exit(spa, vd, 0); 2118e6ca193dSGeorge Wilson 2119e6ca193dSGeorge Wilson error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 2120e6ca193dSGeorge Wilson NULL, DS_FIND_CHILDREN); 2121e6ca193dSGeorge Wilson if (error) { 2122e6ca193dSGeorge Wilson (void) vdev_online(spa, guid, 0, NULL); 2123e6ca193dSGeorge Wilson return (error); 2124e6ca193dSGeorge Wilson } 2125e6ca193dSGeorge Wilson /* 2126e6ca193dSGeorge Wilson * If we successfully offlined the log device then we need to 2127e6ca193dSGeorge Wilson * sync out the current txg so that the "stubby" block can be 2128e6ca193dSGeorge Wilson * removed by zil_sync(). 2129e6ca193dSGeorge Wilson */ 2130e6ca193dSGeorge Wilson txg_wait_synced(spa->spa_dsl_pool, 0); 2131e6ca193dSGeorge Wilson return (0); 2132fa9e4066Sahrens } 2133fa9e4066Sahrens 2134ea8dc4b6Seschrock /* 2135ea8dc4b6Seschrock * Clear the error counts associated with this vdev. Unlike vdev_online() and 2136ea8dc4b6Seschrock * vdev_offline(), we assume the spa config is locked. We also clear all 2137ea8dc4b6Seschrock * children. If 'vd' is NULL, then the user wants to clear all vdevs. 
2138ea8dc4b6Seschrock */ 2139ea8dc4b6Seschrock void 2140e14bb325SJeff Bonwick vdev_clear(spa_t *spa, vdev_t *vd) 2141fa9e4066Sahrens { 2142e14bb325SJeff Bonwick vdev_t *rvd = spa->spa_root_vdev; 2143e14bb325SJeff Bonwick 2144e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2145fa9e4066Sahrens 2146ea8dc4b6Seschrock if (vd == NULL) 2147e14bb325SJeff Bonwick vd = rvd; 2148fa9e4066Sahrens 2149ea8dc4b6Seschrock vd->vdev_stat.vs_read_errors = 0; 2150ea8dc4b6Seschrock vd->vdev_stat.vs_write_errors = 0; 2151ea8dc4b6Seschrock vd->vdev_stat.vs_checksum_errors = 0; 2152fa9e4066Sahrens 2153e14bb325SJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) 2154e14bb325SJeff Bonwick vdev_clear(spa, vd->vdev_child[c]); 21553d7072f8Seschrock 21563d7072f8Seschrock /* 21578a79c1b5Sek * If we're in the FAULTED state or have experienced failed I/O, then 21588a79c1b5Sek * clear the persistent state and attempt to reopen the device. We 21598a79c1b5Sek * also mark the vdev config dirty, so that the new faulted state is 21608a79c1b5Sek * written out to disk. 21613d7072f8Seschrock */ 2162e14bb325SJeff Bonwick if (vd->vdev_faulted || vd->vdev_degraded || 2163e14bb325SJeff Bonwick !vdev_readable(vd) || !vdev_writeable(vd)) { 21648a79c1b5Sek 21653d7072f8Seschrock vd->vdev_faulted = vd->vdev_degraded = 0; 2166e14bb325SJeff Bonwick vd->vdev_cant_read = B_FALSE; 2167e14bb325SJeff Bonwick vd->vdev_cant_write = B_FALSE; 2168e14bb325SJeff Bonwick 21693d7072f8Seschrock vdev_reopen(vd); 21703d7072f8Seschrock 2171e14bb325SJeff Bonwick if (vd != rvd) 2172e14bb325SJeff Bonwick vdev_state_dirty(vd->vdev_top); 2173e14bb325SJeff Bonwick 2174e14bb325SJeff Bonwick if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 2175bb8b5132Sek spa_async_request(spa, SPA_ASYNC_RESILVER); 21763d7072f8Seschrock 21773d7072f8Seschrock spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 21783d7072f8Seschrock } 2179fa9e4066Sahrens } 2180fa9e4066Sahrens 2181e14bb325SJeff Bonwick boolean_t 2182e14bb325SJeff Bonwick vdev_is_dead(vdev_t *vd) 21830a4e9518Sgw { 2184*88ecc943SGeorge Wilson /* 2185*88ecc943SGeorge Wilson * Holes and missing devices are always considered "dead". 2186*88ecc943SGeorge Wilson * This simplifies the code since we don't have to check for 2187*88ecc943SGeorge Wilson * these types of devices in the various code paths. 2188*88ecc943SGeorge Wilson * Instead we rely on the fact that we skip over dead devices 2189*88ecc943SGeorge Wilson * before issuing I/O to them. 2190*88ecc943SGeorge Wilson */ 2191*88ecc943SGeorge Wilson return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole || 2192*88ecc943SGeorge Wilson vd->vdev_ops == &vdev_missing_ops); 21930a4e9518Sgw } 21940a4e9518Sgw 2195e14bb325SJeff Bonwick boolean_t 2196e14bb325SJeff Bonwick vdev_readable(vdev_t *vd) 21970a4e9518Sgw { 2198e14bb325SJeff Bonwick return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 21990a4e9518Sgw } 22000a4e9518Sgw 2201e14bb325SJeff Bonwick boolean_t 2202e14bb325SJeff Bonwick vdev_writeable(vdev_t *vd) 2203fa9e4066Sahrens { 2204e14bb325SJeff Bonwick return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 2205fa9e4066Sahrens } 2206fa9e4066Sahrens 2207a31e6787SGeorge Wilson boolean_t 2208a31e6787SGeorge Wilson vdev_allocatable(vdev_t *vd) 2209a31e6787SGeorge Wilson { 22108ad4d6ddSJeff Bonwick uint64_t state = vd->vdev_state; 22118ad4d6ddSJeff Bonwick 2212a31e6787SGeorge Wilson /* 22138ad4d6ddSJeff Bonwick * We currently allow allocations from vdevs which may be in the 2214a31e6787SGeorge Wilson * process of reopening (i.e. 
VDEV_STATE_CLOSED). If the device 2215a31e6787SGeorge Wilson * fails to reopen then we'll catch it later when we're holding 22168ad4d6ddSJeff Bonwick * the proper locks. Note that we have to get the vdev state 22178ad4d6ddSJeff Bonwick * in a local variable because although it changes atomically, 22188ad4d6ddSJeff Bonwick * we're asking two separate questions about it. 2219a31e6787SGeorge Wilson */ 22208ad4d6ddSJeff Bonwick return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 2221*88ecc943SGeorge Wilson !vd->vdev_cant_write && !vd->vdev_ishole && !vd->vdev_removing); 2222a31e6787SGeorge Wilson } 2223a31e6787SGeorge Wilson 2224e14bb325SJeff Bonwick boolean_t 2225e14bb325SJeff Bonwick vdev_accessible(vdev_t *vd, zio_t *zio) 2226fa9e4066Sahrens { 2227e14bb325SJeff Bonwick ASSERT(zio->io_vd == vd); 2228fa9e4066Sahrens 2229e14bb325SJeff Bonwick if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 2230e14bb325SJeff Bonwick return (B_FALSE); 2231fa9e4066Sahrens 2232e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_READ) 2233e14bb325SJeff Bonwick return (!vd->vdev_cant_read); 2234fa9e4066Sahrens 2235e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_WRITE) 2236e14bb325SJeff Bonwick return (!vd->vdev_cant_write); 2237fa9e4066Sahrens 2238e14bb325SJeff Bonwick return (B_TRUE); 2239fa9e4066Sahrens } 2240fa9e4066Sahrens 2241fa9e4066Sahrens /* 2242fa9e4066Sahrens * Get statistics for the given vdev. 2243fa9e4066Sahrens */ 2244fa9e4066Sahrens void 2245fa9e4066Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2246fa9e4066Sahrens { 2247fa9e4066Sahrens vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2248fa9e4066Sahrens 2249fa9e4066Sahrens mutex_enter(&vd->vdev_stat_lock); 2250fa9e4066Sahrens bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 2251088f3894Sahrens vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; 2252fa9e4066Sahrens vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2253fa9e4066Sahrens vs->vs_state = vd->vdev_state; 2254573ca77eSGeorge Wilson vs->vs_rsize = vdev_get_min_asize(vd); 2255573ca77eSGeorge Wilson if (vd->vdev_ops->vdev_op_leaf) 2256573ca77eSGeorge Wilson vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 2257fa9e4066Sahrens mutex_exit(&vd->vdev_stat_lock); 2258fa9e4066Sahrens 2259fa9e4066Sahrens /* 2260fa9e4066Sahrens * If we're getting stats on the root vdev, aggregate the I/O counts 2261fa9e4066Sahrens * over all top-level vdevs (i.e. the direct children of the root). 
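* The root's own op/byte counters are never bumped directly for successful I/O -- vdev_stat_update() returns early for the root -- so this aggregation is the only source of root-level counts.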
2262fa9e4066Sahrens */ 2263fa9e4066Sahrens if (vd == rvd) { 2264e14bb325SJeff Bonwick for (int c = 0; c < rvd->vdev_children; c++) { 2265fa9e4066Sahrens vdev_t *cvd = rvd->vdev_child[c]; 2266fa9e4066Sahrens vdev_stat_t *cvs = &cvd->vdev_stat; 2267fa9e4066Sahrens 2268fa9e4066Sahrens mutex_enter(&vd->vdev_stat_lock); 2269e14bb325SJeff Bonwick for (int t = 0; t < ZIO_TYPES; t++) { 2270fa9e4066Sahrens vs->vs_ops[t] += cvs->vs_ops[t]; 2271fa9e4066Sahrens vs->vs_bytes[t] += cvs->vs_bytes[t]; 2272fa9e4066Sahrens } 2273fa9e4066Sahrens vs->vs_scrub_examined += cvs->vs_scrub_examined; 2274fa9e4066Sahrens mutex_exit(&vd->vdev_stat_lock); 2275fa9e4066Sahrens } 2276fa9e4066Sahrens } 2277fa9e4066Sahrens } 2278fa9e4066Sahrens 2279fa94a07fSbrendan void 2280fa94a07fSbrendan vdev_clear_stats(vdev_t *vd) 2281fa94a07fSbrendan { 2282fa94a07fSbrendan mutex_enter(&vd->vdev_stat_lock); 2283fa94a07fSbrendan vd->vdev_stat.vs_space = 0; 2284fa94a07fSbrendan vd->vdev_stat.vs_dspace = 0; 2285fa94a07fSbrendan vd->vdev_stat.vs_alloc = 0; 2286fa94a07fSbrendan mutex_exit(&vd->vdev_stat_lock); 2287fa94a07fSbrendan } 2288fa94a07fSbrendan 2289fa9e4066Sahrens void 2290e14bb325SJeff Bonwick vdev_stat_update(zio_t *zio, uint64_t psize) 2291fa9e4066Sahrens { 22928ad4d6ddSJeff Bonwick spa_t *spa = zio->io_spa; 22938ad4d6ddSJeff Bonwick vdev_t *rvd = spa->spa_root_vdev; 2294e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 2295fa9e4066Sahrens vdev_t *pvd; 2296fa9e4066Sahrens uint64_t txg = zio->io_txg; 2297fa9e4066Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2298fa9e4066Sahrens zio_type_t type = zio->io_type; 2299fa9e4066Sahrens int flags = zio->io_flags; 2300fa9e4066Sahrens 2301e14bb325SJeff Bonwick /* 2302e14bb325SJeff Bonwick * If this i/o is a gang leader, it didn't do any actual work. 2303e14bb325SJeff Bonwick */ 2304e14bb325SJeff Bonwick if (zio->io_gang_tree) 2305e14bb325SJeff Bonwick return; 2306e14bb325SJeff Bonwick 2307fa9e4066Sahrens if (zio->io_error == 0) { 2308e14bb325SJeff Bonwick /* 2309e14bb325SJeff Bonwick * If this is a root i/o, don't count it -- we've already 2310e14bb325SJeff Bonwick * counted the top-level vdevs, and vdev_get_stats() will 2311e14bb325SJeff Bonwick * aggregate them when asked. This reduces contention on 2312e14bb325SJeff Bonwick * the root vdev_stat_lock and implicitly handles blocks 2313e14bb325SJeff Bonwick * that compress away to holes, for which there is no i/o. 2314e14bb325SJeff Bonwick * (Holes never create vdev children, so all the counters 2315e14bb325SJeff Bonwick * remain zero, which is what we want.) 2316e14bb325SJeff Bonwick * 2317e14bb325SJeff Bonwick * Note: this only applies to successful i/o (io_error == 0) 2318e14bb325SJeff Bonwick * because unlike i/o counts, errors are not additive. 2319e14bb325SJeff Bonwick * When reading a ditto block, for example, failure of 2320e14bb325SJeff Bonwick * one top-level vdev does not imply a root-level error. 
2321e14bb325SJeff Bonwick */ 2322e14bb325SJeff Bonwick if (vd == rvd) 2323e14bb325SJeff Bonwick return; 2324e14bb325SJeff Bonwick 2325e14bb325SJeff Bonwick ASSERT(vd == zio->io_vd); 23268ad4d6ddSJeff Bonwick 23278ad4d6ddSJeff Bonwick if (flags & ZIO_FLAG_IO_BYPASS) 23288ad4d6ddSJeff Bonwick return; 23298ad4d6ddSJeff Bonwick 23308ad4d6ddSJeff Bonwick mutex_enter(&vd->vdev_stat_lock); 23318ad4d6ddSJeff Bonwick 2332e14bb325SJeff Bonwick if (flags & ZIO_FLAG_IO_REPAIR) { 2333d80c45e0Sbonwick if (flags & ZIO_FLAG_SCRUB_THREAD) 2334e14bb325SJeff Bonwick vs->vs_scrub_repaired += psize; 23358ad4d6ddSJeff Bonwick if (flags & ZIO_FLAG_SELF_HEAL) 2336e14bb325SJeff Bonwick vs->vs_self_healed += psize; 2337fa9e4066Sahrens } 23388ad4d6ddSJeff Bonwick 23398ad4d6ddSJeff Bonwick vs->vs_ops[type]++; 23408ad4d6ddSJeff Bonwick vs->vs_bytes[type] += psize; 23418ad4d6ddSJeff Bonwick 23428ad4d6ddSJeff Bonwick mutex_exit(&vd->vdev_stat_lock); 2343fa9e4066Sahrens return; 2344fa9e4066Sahrens } 2345fa9e4066Sahrens 2346fa9e4066Sahrens if (flags & ZIO_FLAG_SPECULATIVE) 2347fa9e4066Sahrens return; 2348fa9e4066Sahrens 23498956713aSEric Schrock /* 23508956713aSEric Schrock * If this is an I/O error that is going to be retried, then ignore the 23518956713aSEric Schrock * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 23528956713aSEric Schrock * hard errors, when in reality they can happen for any number of 23538956713aSEric Schrock * innocuous reasons (bus resets, MPxIO link failure, etc). 23548956713aSEric Schrock */ 23558956713aSEric Schrock if (zio->io_error == EIO && 23568956713aSEric Schrock !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 23578956713aSEric Schrock return; 23588956713aSEric Schrock 2359e14bb325SJeff Bonwick mutex_enter(&vd->vdev_stat_lock); 2360b47119fdSGeorge Wilson if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 2361e14bb325SJeff Bonwick if (zio->io_error == ECKSUM) 2362e14bb325SJeff Bonwick vs->vs_checksum_errors++; 2363e14bb325SJeff Bonwick else 2364e14bb325SJeff Bonwick vs->vs_read_errors++; 2365fa9e4066Sahrens } 2366b47119fdSGeorge Wilson if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 2367e14bb325SJeff Bonwick vs->vs_write_errors++; 2368e14bb325SJeff Bonwick mutex_exit(&vd->vdev_stat_lock); 2369fa9e4066Sahrens 23708ad4d6ddSJeff Bonwick if (type == ZIO_TYPE_WRITE && txg != 0 && 23718ad4d6ddSJeff Bonwick (!(flags & ZIO_FLAG_IO_REPAIR) || 23728ad4d6ddSJeff Bonwick (flags & ZIO_FLAG_SCRUB_THREAD))) { 23738ad4d6ddSJeff Bonwick /* 23748ad4d6ddSJeff Bonwick * This is either a normal write (not a repair), or it's a 23758ad4d6ddSJeff Bonwick * repair induced by the scrub thread. In the normal case, 23768ad4d6ddSJeff Bonwick * we commit the DTL change in the same txg as the block 23778ad4d6ddSJeff Bonwick * was born. In the scrub-induced repair case, we know that 23788ad4d6ddSJeff Bonwick * scrubs run in first-pass syncing context, so we commit 23798ad4d6ddSJeff Bonwick * the DTL change in spa->spa_syncing_txg. 23808ad4d6ddSJeff Bonwick * 23818ad4d6ddSJeff Bonwick * We currently do not make DTL entries for failed spontaneous 23828ad4d6ddSJeff Bonwick * self-healing writes triggered by normal (non-scrubbing) 23838ad4d6ddSJeff Bonwick * reads, because we have no transactional context in which to 23848ad4d6ddSJeff Bonwick * do so -- and it's not clear that it'd be desirable anyway. 
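* (For example, a scrub-induced repair of a block born in txg 100, issued while txg 5000 is syncing, dirties DTL_SCRUB at txg 100 but commits that change in txg 5000.)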
23858ad4d6ddSJeff Bonwick */ 23868ad4d6ddSJeff Bonwick if (vd->vdev_ops->vdev_op_leaf) { 23878ad4d6ddSJeff Bonwick uint64_t commit_txg = txg; 23888ad4d6ddSJeff Bonwick if (flags & ZIO_FLAG_SCRUB_THREAD) { 23898ad4d6ddSJeff Bonwick ASSERT(flags & ZIO_FLAG_IO_REPAIR); 23908ad4d6ddSJeff Bonwick ASSERT(spa_sync_pass(spa) == 1); 23918ad4d6ddSJeff Bonwick vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 23928ad4d6ddSJeff Bonwick commit_txg = spa->spa_syncing_txg; 23938ad4d6ddSJeff Bonwick } 23948ad4d6ddSJeff Bonwick ASSERT(commit_txg >= spa->spa_syncing_txg); 23958ad4d6ddSJeff Bonwick if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 2396fa9e4066Sahrens return; 23978ad4d6ddSJeff Bonwick for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 23988ad4d6ddSJeff Bonwick vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 23998ad4d6ddSJeff Bonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 2400fa9e4066Sahrens } 24018ad4d6ddSJeff Bonwick if (vd != rvd) 24028ad4d6ddSJeff Bonwick vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 2403fa9e4066Sahrens } 2404fa9e4066Sahrens } 2405fa9e4066Sahrens 2406fa9e4066Sahrens void 2407fa9e4066Sahrens vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete) 2408fa9e4066Sahrens { 2409fa9e4066Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2410fa9e4066Sahrens 2411573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 2412fa9e4066Sahrens vdev_scrub_stat_update(vd->vdev_child[c], type, complete); 2413fa9e4066Sahrens 2414fa9e4066Sahrens mutex_enter(&vd->vdev_stat_lock); 2415fa9e4066Sahrens 2416fa9e4066Sahrens if (type == POOL_SCRUB_NONE) { 2417fa9e4066Sahrens /* 2418fa9e4066Sahrens * Update completion and end time. Leave everything else alone 2419fa9e4066Sahrens * so we can report what happened during the previous scrub. 2420fa9e4066Sahrens */ 2421fa9e4066Sahrens vs->vs_scrub_complete = complete; 2422fa9e4066Sahrens vs->vs_scrub_end = gethrestime_sec(); 2423fa9e4066Sahrens } else { 2424fa9e4066Sahrens vs->vs_scrub_type = type; 2425fa9e4066Sahrens vs->vs_scrub_complete = 0; 2426fa9e4066Sahrens vs->vs_scrub_examined = 0; 2427fa9e4066Sahrens vs->vs_scrub_repaired = 0; 2428fa9e4066Sahrens vs->vs_scrub_start = gethrestime_sec(); 2429fa9e4066Sahrens vs->vs_scrub_end = 0; 2430fa9e4066Sahrens } 2431fa9e4066Sahrens 2432fa9e4066Sahrens mutex_exit(&vd->vdev_stat_lock); 2433fa9e4066Sahrens } 2434fa9e4066Sahrens 2435fa9e4066Sahrens /* 2436fa9e4066Sahrens * Update the in-core space usage stats for this vdev and the root vdev. 2437fa9e4066Sahrens */ 2438fa9e4066Sahrens void 2439fa94a07fSbrendan vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta, 2440fa94a07fSbrendan boolean_t update_root) 2441fa9e4066Sahrens { 244299653d4eSeschrock int64_t dspace_delta = space_delta; 24438654d025Sperrin spa_t *spa = vd->vdev_spa; 24448654d025Sperrin vdev_t *rvd = spa->spa_root_vdev; 2445fa9e4066Sahrens 24468654d025Sperrin ASSERT(vd == vd->vdev_top); 244799653d4eSeschrock 24488654d025Sperrin /* 24498654d025Sperrin * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion 24508654d025Sperrin * factor. We must calculate this here and not at the root vdev 24518654d025Sperrin * because the root vdev's psize-to-asize is simply the max of its 24528654d025Sperrin * children's, thus not accurate enough for us. 
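* (For example, a 5-wide single-parity RAID-Z stores 4 sectors of data in every 5 sectors of raw space, so roughly dspace_delta = space_delta * 4/5 for such a vdev.)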
24538654d025Sperrin */ 24548654d025Sperrin ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); 2455e6ca193dSGeorge Wilson ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 24568654d025Sperrin dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * 24578654d025Sperrin vd->vdev_deflate_ratio; 24588654d025Sperrin 24598654d025Sperrin mutex_enter(&vd->vdev_stat_lock); 24608654d025Sperrin vd->vdev_stat.vs_space += space_delta; 24618654d025Sperrin vd->vdev_stat.vs_alloc += alloc_delta; 24628654d025Sperrin vd->vdev_stat.vs_dspace += dspace_delta; 24638654d025Sperrin mutex_exit(&vd->vdev_stat_lock); 24648654d025Sperrin 2465fa94a07fSbrendan if (update_root) { 2466fa94a07fSbrendan ASSERT(rvd == vd->vdev_parent); 2467fa94a07fSbrendan ASSERT(vd->vdev_ms_count != 0); 2468fa94a07fSbrendan 2469fa94a07fSbrendan /* 2470fa94a07fSbrendan * Don't count non-normal (e.g. intent log) space as part of 2471fa94a07fSbrendan * the pool's capacity. 2472fa94a07fSbrendan */ 2473*88ecc943SGeorge Wilson if (vd->vdev_islog) 2474fa94a07fSbrendan return; 24758654d025Sperrin 2476fa94a07fSbrendan mutex_enter(&rvd->vdev_stat_lock); 2477fa94a07fSbrendan rvd->vdev_stat.vs_space += space_delta; 2478fa94a07fSbrendan rvd->vdev_stat.vs_alloc += alloc_delta; 2479fa94a07fSbrendan rvd->vdev_stat.vs_dspace += dspace_delta; 2480fa94a07fSbrendan mutex_exit(&rvd->vdev_stat_lock); 2481fa94a07fSbrendan } 2482fa9e4066Sahrens } 2483fa9e4066Sahrens 2484fa9e4066Sahrens /* 2485fa9e4066Sahrens * Mark a top-level vdev's config as dirty, placing it on the dirty list 2486fa9e4066Sahrens * so that it will be written out next time the vdev configuration is synced. 2487fa9e4066Sahrens * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. 2488fa9e4066Sahrens */ 2489fa9e4066Sahrens void 2490fa9e4066Sahrens vdev_config_dirty(vdev_t *vd) 2491fa9e4066Sahrens { 2492fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 2493fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 2494fa9e4066Sahrens int c; 2495fa9e4066Sahrens 2496c5904d13Seschrock /* 24976809eb4eSEric Schrock * If this is an aux vdev (as with l2cache and spare devices), then we 24986809eb4eSEric Schrock * update the vdev config manually and set the sync flag. 2499c5904d13Seschrock */ 2500c5904d13Seschrock if (vd->vdev_aux != NULL) { 2501c5904d13Seschrock spa_aux_vdev_t *sav = vd->vdev_aux; 2502c5904d13Seschrock nvlist_t **aux; 2503c5904d13Seschrock uint_t naux; 2504c5904d13Seschrock 2505c5904d13Seschrock for (c = 0; c < sav->sav_count; c++) { 2506c5904d13Seschrock if (sav->sav_vdevs[c] == vd) 2507c5904d13Seschrock break; 2508c5904d13Seschrock } 2509c5904d13Seschrock 2510e14bb325SJeff Bonwick if (c == sav->sav_count) { 2511e14bb325SJeff Bonwick /* 2512e14bb325SJeff Bonwick * We're being removed. There's nothing more to do. 
2513e14bb325SJeff Bonwick 			 */
2514e14bb325SJeff Bonwick 			ASSERT(sav->sav_sync == B_TRUE);
2515e14bb325SJeff Bonwick 			return;
2516e14bb325SJeff Bonwick 		}
2517e14bb325SJeff Bonwick 
2518c5904d13Seschrock 		sav->sav_sync = B_TRUE;
2519c5904d13Seschrock 
25206809eb4eSEric Schrock 		if (nvlist_lookup_nvlist_array(sav->sav_config,
25216809eb4eSEric Schrock 		    ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
25226809eb4eSEric Schrock 			VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
25236809eb4eSEric Schrock 			    ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
25246809eb4eSEric Schrock 		}
2525c5904d13Seschrock 
2526c5904d13Seschrock 		ASSERT(c < naux);
2527c5904d13Seschrock 
2528c5904d13Seschrock 		/*
2529c5904d13Seschrock 		 * Setting the nvlist in the middle of the array is a little
2530c5904d13Seschrock 		 * sketchy, but it will work.
2531c5904d13Seschrock 		 */
2532c5904d13Seschrock 		nvlist_free(aux[c]);
2533c5904d13Seschrock 		aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);
2534c5904d13Seschrock 
2535c5904d13Seschrock 		return;
2536c5904d13Seschrock 	}
2537c5904d13Seschrock 
25385dabedeeSbonwick 	/*
2539e14bb325SJeff Bonwick 	 * The dirty list is protected by the SCL_CONFIG lock. The caller
2540e14bb325SJeff Bonwick 	 * must either hold SCL_CONFIG as writer, or must be the sync thread
2541e14bb325SJeff Bonwick 	 * (which holds SCL_CONFIG as reader). There's only one sync thread,
25425dabedeeSbonwick 	 * so this is sufficient to ensure mutual exclusion.
25435dabedeeSbonwick 	 */
2544e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
2545e14bb325SJeff Bonwick 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2546e14bb325SJeff Bonwick 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
25475dabedeeSbonwick 
2548fa9e4066Sahrens 	if (vd == rvd) {
2549fa9e4066Sahrens 		for (c = 0; c < rvd->vdev_children; c++)
2550fa9e4066Sahrens 			vdev_config_dirty(rvd->vdev_child[c]);
2551fa9e4066Sahrens 	} else {
2552fa9e4066Sahrens 		ASSERT(vd == vd->vdev_top);
2553fa9e4066Sahrens 
2554*88ecc943SGeorge Wilson 		if (!list_link_active(&vd->vdev_config_dirty_node) &&
2555*88ecc943SGeorge Wilson 		    !vd->vdev_ishole)
2556e14bb325SJeff Bonwick 			list_insert_head(&spa->spa_config_dirty_list, vd);
2557fa9e4066Sahrens 	}
2558fa9e4066Sahrens }
2559fa9e4066Sahrens 
2560fa9e4066Sahrens void
2561fa9e4066Sahrens vdev_config_clean(vdev_t *vd)
2562fa9e4066Sahrens {
25635dabedeeSbonwick 	spa_t *spa = vd->vdev_spa;
25645dabedeeSbonwick 
2565e14bb325SJeff Bonwick 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
2566e14bb325SJeff Bonwick 	    (dsl_pool_sync_context(spa_get_dsl(spa)) &&
2567e14bb325SJeff Bonwick 	    spa_config_held(spa, SCL_CONFIG, RW_READER)));
25685dabedeeSbonwick 
2569e14bb325SJeff Bonwick 	ASSERT(list_link_active(&vd->vdev_config_dirty_node));
2570e14bb325SJeff Bonwick 	list_remove(&spa->spa_config_dirty_list, vd);
2571e14bb325SJeff Bonwick }
2572e14bb325SJeff Bonwick 
2573e14bb325SJeff Bonwick /*
2574e14bb325SJeff Bonwick  * Mark a top-level vdev's state as dirty, so that the next pass of
2575e14bb325SJeff Bonwick  * spa_sync() can convert this into vdev_config_dirty(). We distinguish
2576e14bb325SJeff Bonwick  * the state changes from larger config changes because they require
2577e14bb325SJeff Bonwick  * much less locking, and are often needed for administrative actions.
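 * For example, fault, degraded, and online transitions are state changes,
 * while attaching, detaching, adding, or removing a device changes the
 * config itself and goes through vdev_config_dirty().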
2578e14bb325SJeff Bonwick */ 2579e14bb325SJeff Bonwick void 2580e14bb325SJeff Bonwick vdev_state_dirty(vdev_t *vd) 2581e14bb325SJeff Bonwick { 2582e14bb325SJeff Bonwick spa_t *spa = vd->vdev_spa; 2583e14bb325SJeff Bonwick 2584e14bb325SJeff Bonwick ASSERT(vd == vd->vdev_top); 2585e14bb325SJeff Bonwick 2586e14bb325SJeff Bonwick /* 2587e14bb325SJeff Bonwick * The state list is protected by the SCL_STATE lock. The caller 2588e14bb325SJeff Bonwick * must either hold SCL_STATE as writer, or must be the sync thread 2589e14bb325SJeff Bonwick * (which holds SCL_STATE as reader). There's only one sync thread, 2590e14bb325SJeff Bonwick * so this is sufficient to ensure mutual exclusion. 2591e14bb325SJeff Bonwick */ 2592e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2593e14bb325SJeff Bonwick (dsl_pool_sync_context(spa_get_dsl(spa)) && 2594e14bb325SJeff Bonwick spa_config_held(spa, SCL_STATE, RW_READER))); 2595e14bb325SJeff Bonwick 2596e14bb325SJeff Bonwick if (!list_link_active(&vd->vdev_state_dirty_node)) 2597e14bb325SJeff Bonwick list_insert_head(&spa->spa_state_dirty_list, vd); 2598e14bb325SJeff Bonwick } 2599e14bb325SJeff Bonwick 2600e14bb325SJeff Bonwick void 2601e14bb325SJeff Bonwick vdev_state_clean(vdev_t *vd) 2602e14bb325SJeff Bonwick { 2603e14bb325SJeff Bonwick spa_t *spa = vd->vdev_spa; 2604e14bb325SJeff Bonwick 2605e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2606e14bb325SJeff Bonwick (dsl_pool_sync_context(spa_get_dsl(spa)) && 2607e14bb325SJeff Bonwick spa_config_held(spa, SCL_STATE, RW_READER))); 2608e14bb325SJeff Bonwick 2609e14bb325SJeff Bonwick ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 2610e14bb325SJeff Bonwick list_remove(&spa->spa_state_dirty_list, vd); 2611fa9e4066Sahrens } 2612fa9e4066Sahrens 261332b87932Sek /* 261432b87932Sek * Propagate vdev state up from children to parent. 261532b87932Sek */ 261644cd46caSbillm void 261744cd46caSbillm vdev_propagate_state(vdev_t *vd) 261844cd46caSbillm { 26198ad4d6ddSJeff Bonwick spa_t *spa = vd->vdev_spa; 26208ad4d6ddSJeff Bonwick vdev_t *rvd = spa->spa_root_vdev; 262144cd46caSbillm int degraded = 0, faulted = 0; 262244cd46caSbillm int corrupted = 0; 262344cd46caSbillm vdev_t *child; 262444cd46caSbillm 26253d7072f8Seschrock if (vd->vdev_children > 0) { 2626573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) { 26273d7072f8Seschrock child = vd->vdev_child[c]; 262851ece835Seschrock 2629*88ecc943SGeorge Wilson /* 2630*88ecc943SGeorge Wilson * Don't factor holes into the decision. 2631*88ecc943SGeorge Wilson */ 2632*88ecc943SGeorge Wilson if (child->vdev_ishole) 2633*88ecc943SGeorge Wilson continue; 2634*88ecc943SGeorge Wilson 2635e14bb325SJeff Bonwick if (!vdev_readable(child) || 26368ad4d6ddSJeff Bonwick (!vdev_writeable(child) && spa_writeable(spa))) { 263751ece835Seschrock /* 263851ece835Seschrock * Root special: if there is a top-level log 263951ece835Seschrock * device, treat the root vdev as if it were 264051ece835Seschrock * degraded. 
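				 * (A pool can generally continue to
				 * operate without its separate log
				 * devices, so losing a log leaves the
				 * pool degraded rather than faulted.)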
264151ece835Seschrock */ 264251ece835Seschrock if (child->vdev_islog && vd == rvd) 264351ece835Seschrock degraded++; 264451ece835Seschrock else 264551ece835Seschrock faulted++; 264651ece835Seschrock } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 26473d7072f8Seschrock degraded++; 264851ece835Seschrock } 264944cd46caSbillm 26503d7072f8Seschrock if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 26513d7072f8Seschrock corrupted++; 26523d7072f8Seschrock } 265344cd46caSbillm 26543d7072f8Seschrock vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 26553d7072f8Seschrock 26563d7072f8Seschrock /* 2657e14bb325SJeff Bonwick * Root special: if there is a top-level vdev that cannot be 26583d7072f8Seschrock * opened due to corrupted metadata, then propagate the root 26593d7072f8Seschrock * vdev's aux state as 'corrupt' rather than 'insufficient 26603d7072f8Seschrock * replicas'. 26613d7072f8Seschrock */ 26623d7072f8Seschrock if (corrupted && vd == rvd && 26633d7072f8Seschrock rvd->vdev_state == VDEV_STATE_CANT_OPEN) 26643d7072f8Seschrock vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 26653d7072f8Seschrock VDEV_AUX_CORRUPT_DATA); 26663d7072f8Seschrock } 26673d7072f8Seschrock 266851ece835Seschrock if (vd->vdev_parent) 26693d7072f8Seschrock vdev_propagate_state(vd->vdev_parent); 267044cd46caSbillm } 267144cd46caSbillm 2672fa9e4066Sahrens /* 2673ea8dc4b6Seschrock * Set a vdev's state. If this is during an open, we don't update the parent 2674ea8dc4b6Seschrock * state, because we're in the process of opening children depth-first. 2675ea8dc4b6Seschrock * Otherwise, we propagate the change to the parent. 2676ea8dc4b6Seschrock * 2677ea8dc4b6Seschrock * If this routine places a device in a faulted state, an appropriate ereport is 2678ea8dc4b6Seschrock * generated. 2679fa9e4066Sahrens */ 2680fa9e4066Sahrens void 2681ea8dc4b6Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2682fa9e4066Sahrens { 2683560e6e96Seschrock uint64_t save_state; 2684c5904d13Seschrock spa_t *spa = vd->vdev_spa; 2685ea8dc4b6Seschrock 2686ea8dc4b6Seschrock if (state == vd->vdev_state) { 2687ea8dc4b6Seschrock vd->vdev_stat.vs_aux = aux; 2688fa9e4066Sahrens return; 2689ea8dc4b6Seschrock } 2690ea8dc4b6Seschrock 2691560e6e96Seschrock save_state = vd->vdev_state; 2692fa9e4066Sahrens 2693fa9e4066Sahrens vd->vdev_state = state; 2694fa9e4066Sahrens vd->vdev_stat.vs_aux = aux; 2695fa9e4066Sahrens 26963d7072f8Seschrock /* 26973d7072f8Seschrock * If we are setting the vdev state to anything but an open state, then 26983d7072f8Seschrock * always close the underlying device. Otherwise, we keep accessible 26993d7072f8Seschrock * but invalid devices open forever. We don't call vdev_close() itself, 27003d7072f8Seschrock * because that implies some extra checks (offline, etc) that we don't 27013d7072f8Seschrock * want here. This is limited to leaf devices, because otherwise 27023d7072f8Seschrock * closing the device will affect other children. 
27033d7072f8Seschrock 	 */
2704cbd2b15eSJeff Bonwick 	if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf)
27053d7072f8Seschrock 		vd->vdev_ops->vdev_op_close(vd);
27063d7072f8Seschrock 
27073d7072f8Seschrock 	if (vd->vdev_removed &&
27083d7072f8Seschrock 	    state == VDEV_STATE_CANT_OPEN &&
27093d7072f8Seschrock 	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
27103d7072f8Seschrock 		/*
27113d7072f8Seschrock 		 * If the previous state is set to VDEV_STATE_REMOVED, then this
27123d7072f8Seschrock 		 * device was previously marked removed and someone attempted to
27133d7072f8Seschrock 		 * reopen it. If this failed due to a nonexistent device, then
27143d7072f8Seschrock 		 * keep the device in the REMOVED state. We also keep it in the
27153d7072f8Seschrock 		 * REMOVED state if this is one of our special test online
27163d7072f8Seschrock 		 * cases, which only attempt to online the device and shouldn't
27173d7072f8Seschrock 		 * generate an FMA fault.
27183d7072f8Seschrock 		 */
27193d7072f8Seschrock 		vd->vdev_state = VDEV_STATE_REMOVED;
27203d7072f8Seschrock 		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
27213d7072f8Seschrock 	} else if (state == VDEV_STATE_REMOVED) {
27223d7072f8Seschrock 		/*
27233d7072f8Seschrock 		 * Indicate to the ZFS DE that this device has been removed, and
27243d7072f8Seschrock 		 * any recent errors should be ignored.
27253d7072f8Seschrock 		 */
2726c5904d13Seschrock 		zfs_post_remove(spa, vd);
27273d7072f8Seschrock 		vd->vdev_removed = B_TRUE;
27283d7072f8Seschrock 	} else if (state == VDEV_STATE_CANT_OPEN) {
2729ea8dc4b6Seschrock 		/*
2730ea8dc4b6Seschrock 		 * If we fail to open a vdev during an import, we mark it as
2731ea8dc4b6Seschrock 		 * "not available", which signifies that it was never there to
2732ea8dc4b6Seschrock 		 * begin with. Failure to open such a device is not considered
2733ea8dc4b6Seschrock 		 * an error.
2734ea8dc4b6Seschrock 		 */
2735c5904d13Seschrock 		if (spa->spa_load_state == SPA_LOAD_IMPORT &&
2736560e6e96Seschrock 		    vd->vdev_ops->vdev_op_leaf)
2737560e6e96Seschrock 			vd->vdev_not_present = 1;
2738560e6e96Seschrock 
2739560e6e96Seschrock 		/*
2740560e6e96Seschrock 		 * Post the appropriate ereport. If the 'prevstate' field is
2741560e6e96Seschrock 		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
2742560e6e96Seschrock 		 * that this is part of a vdev_reopen(). In this case, we don't
2743560e6e96Seschrock 		 * want to post the ereport if the device was already in the
2744560e6e96Seschrock 		 * CANT_OPEN state beforehand.
27453d7072f8Seschrock 		 *
27463d7072f8Seschrock 		 * If the 'checkremove' flag is set, then this is an attempt to
27473d7072f8Seschrock 		 * online the device in response to an insertion event. If we
27483d7072f8Seschrock 		 * hit this case, then we have detected an insertion event for a
27493d7072f8Seschrock 		 * faulted or offline device that wasn't in the removed state.
27503d7072f8Seschrock 		 * In this scenario, we don't post an ereport because we are
27513d7072f8Seschrock 		 * about to replace the device, or attempt an online with
27523d7072f8Seschrock 		 * vdev_forcefault, which will generate the fault for us.
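		 *
		 * When we do post an ereport below, it carries save_state,
		 * the state the device was in before this transition,
		 * rather than the new state.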
2753560e6e96Seschrock */ 27543d7072f8Seschrock if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 27553d7072f8Seschrock !vd->vdev_not_present && !vd->vdev_checkremove && 2756c5904d13Seschrock vd != spa->spa_root_vdev) { 2757ea8dc4b6Seschrock const char *class; 2758ea8dc4b6Seschrock 2759ea8dc4b6Seschrock switch (aux) { 2760ea8dc4b6Seschrock case VDEV_AUX_OPEN_FAILED: 2761ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 2762ea8dc4b6Seschrock break; 2763ea8dc4b6Seschrock case VDEV_AUX_CORRUPT_DATA: 2764ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 2765ea8dc4b6Seschrock break; 2766ea8dc4b6Seschrock case VDEV_AUX_NO_REPLICAS: 2767ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 2768ea8dc4b6Seschrock break; 2769ea8dc4b6Seschrock case VDEV_AUX_BAD_GUID_SUM: 2770ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 2771ea8dc4b6Seschrock break; 2772ea8dc4b6Seschrock case VDEV_AUX_TOO_SMALL: 2773ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 2774ea8dc4b6Seschrock break; 2775ea8dc4b6Seschrock case VDEV_AUX_BAD_LABEL: 2776ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 2777ea8dc4b6Seschrock break; 2778e14bb325SJeff Bonwick case VDEV_AUX_IO_FAILURE: 2779e14bb325SJeff Bonwick class = FM_EREPORT_ZFS_IO_FAILURE; 2780e14bb325SJeff Bonwick break; 2781ea8dc4b6Seschrock default: 2782ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 2783ea8dc4b6Seschrock } 2784ea8dc4b6Seschrock 2785c5904d13Seschrock zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 2786ea8dc4b6Seschrock } 2787ea8dc4b6Seschrock 27883d7072f8Seschrock /* Erase any notion of persistent removed state */ 27893d7072f8Seschrock vd->vdev_removed = B_FALSE; 27903d7072f8Seschrock } else { 27913d7072f8Seschrock vd->vdev_removed = B_FALSE; 27923d7072f8Seschrock } 2793ea8dc4b6Seschrock 27948b33d774STim Haley if (!isopen && vd->vdev_parent) 27958b33d774STim Haley vdev_propagate_state(vd->vdev_parent); 2796fa9e4066Sahrens } 279715e6edf1Sgw 279815e6edf1Sgw /* 279915e6edf1Sgw * Check the vdev configuration to ensure that it's capable of supporting 280015e6edf1Sgw * a root pool. Currently, we do not support RAID-Z or partial configuration. 280115e6edf1Sgw * In addition, only a single top-level vdev is allowed and none of the leaves 280215e6edf1Sgw * can be wholedisks. 280315e6edf1Sgw */ 280415e6edf1Sgw boolean_t 280515e6edf1Sgw vdev_is_bootable(vdev_t *vd) 280615e6edf1Sgw { 280715e6edf1Sgw if (!vd->vdev_ops->vdev_op_leaf) { 280815e6edf1Sgw char *vdev_type = vd->vdev_ops->vdev_op_type; 280915e6edf1Sgw 281015e6edf1Sgw if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 281115e6edf1Sgw vd->vdev_children > 1) { 281215e6edf1Sgw return (B_FALSE); 281315e6edf1Sgw } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || 281415e6edf1Sgw strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { 281515e6edf1Sgw return (B_FALSE); 281615e6edf1Sgw } 281715e6edf1Sgw } else if (vd->vdev_wholedisk == 1) { 281815e6edf1Sgw return (B_FALSE); 281915e6edf1Sgw } 282015e6edf1Sgw 2821573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) { 282215e6edf1Sgw if (!vdev_is_bootable(vd->vdev_child[c])) 282315e6edf1Sgw return (B_FALSE); 282415e6edf1Sgw } 282515e6edf1Sgw return (B_TRUE); 282615e6edf1Sgw } 2827e6ca193dSGeorge Wilson 2828*88ecc943SGeorge Wilson /* 2829*88ecc943SGeorge Wilson * Load the state from the original vdev tree (ovd) which 2830*88ecc943SGeorge Wilson * we've retrieved from the MOS config object. 
If the original 2831*88ecc943SGeorge Wilson * vdev was offline then we transfer that state to the device 2832*88ecc943SGeorge Wilson * in the current vdev tree (nvd). 2833*88ecc943SGeorge Wilson */ 2834e6ca193dSGeorge Wilson void 2835*88ecc943SGeorge Wilson vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) 2836e6ca193dSGeorge Wilson { 2837*88ecc943SGeorge Wilson spa_t *spa = nvd->vdev_spa; 2838e6ca193dSGeorge Wilson 2839*88ecc943SGeorge Wilson ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2840*88ecc943SGeorge Wilson ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid); 2841e6ca193dSGeorge Wilson 2842*88ecc943SGeorge Wilson for (int c = 0; c < nvd->vdev_children; c++) 2843*88ecc943SGeorge Wilson vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]); 2844e6ca193dSGeorge Wilson 2845*88ecc943SGeorge Wilson if (nvd->vdev_ops->vdev_op_leaf && ovd->vdev_offline) { 2846e6ca193dSGeorge Wilson /* 2847e6ca193dSGeorge Wilson * It would be nice to call vdev_offline() 2848e6ca193dSGeorge Wilson * directly but the pool isn't fully loaded and 2849e6ca193dSGeorge Wilson * the txg threads have not been started yet. 2850e6ca193dSGeorge Wilson */ 2851*88ecc943SGeorge Wilson nvd->vdev_offline = ovd->vdev_offline; 2852*88ecc943SGeorge Wilson vdev_reopen(nvd->vdev_top); 2853e6ca193dSGeorge Wilson } 2854e6ca193dSGeorge Wilson } 2855573ca77eSGeorge Wilson 2856573ca77eSGeorge Wilson /* 2857573ca77eSGeorge Wilson * Expand a vdev if possible. 2858573ca77eSGeorge Wilson */ 2859573ca77eSGeorge Wilson void 2860573ca77eSGeorge Wilson vdev_expand(vdev_t *vd, uint64_t txg) 2861573ca77eSGeorge Wilson { 2862573ca77eSGeorge Wilson ASSERT(vd->vdev_top == vd); 2863573ca77eSGeorge Wilson ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 2864573ca77eSGeorge Wilson 2865573ca77eSGeorge Wilson if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) { 2866573ca77eSGeorge Wilson VERIFY(vdev_metaslab_init(vd, txg) == 0); 2867573ca77eSGeorge Wilson vdev_config_dirty(vd); 2868573ca77eSGeorge Wilson } 2869573ca77eSGeorge Wilson } 2870
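
/*
 * Illustrative sketch, not an actual caller: spa, tvd, and txg below are
 * placeholders for the pool, a top-level vdev, and the current txg.
 * vdev_expand() expects all spa config locks held as writer:
 *
 *	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
 *	vdev_expand(tvd, txg);
 *	spa_config_exit(spa, SCL_ALL, FTAG);
 *
 * If the vdev's asize now spans more metaslabs than vdev_ms_count,
 * vdev_metaslab_init() fills in the new ones and the config is dirtied
 * so the change is synced out with txg.
 */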