1fa9e4066Sahrens /* 2fa9e4066Sahrens * CDDL HEADER START 3fa9e4066Sahrens * 4fa9e4066Sahrens * The contents of this file are subject to the terms of the 5441d80aaSlling * Common Development and Distribution License (the "License"). 6441d80aaSlling * You may not use this file except in compliance with the License. 7fa9e4066Sahrens * 8fa9e4066Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9fa9e4066Sahrens * or http://www.opensolaris.org/os/licensing. 10fa9e4066Sahrens * See the License for the specific language governing permissions 11fa9e4066Sahrens * and limitations under the License. 12fa9e4066Sahrens * 13fa9e4066Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14fa9e4066Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15fa9e4066Sahrens * If applicable, add the following below this CDDL HEADER, with the 16fa9e4066Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17fa9e4066Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18fa9e4066Sahrens * 19fa9e4066Sahrens * CDDL HEADER END 20fa9e4066Sahrens */ 2199653d4eSeschrock 22fa9e4066Sahrens /* 2398d1cbfeSGeorge Wilson * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24e9103aaeSGarrett D'Amore * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25*4263d13fSGeorge Wilson * Copyright (c) 2012 by Delphix. All rights reserved. 26fa9e4066Sahrens */ 27fa9e4066Sahrens 28fa9e4066Sahrens #include <sys/zfs_context.h> 29ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h> 30fa9e4066Sahrens #include <sys/spa.h> 31fa9e4066Sahrens #include <sys/spa_impl.h> 32fa9e4066Sahrens #include <sys/dmu.h> 33fa9e4066Sahrens #include <sys/dmu_tx.h> 34fa9e4066Sahrens #include <sys/vdev_impl.h> 35fa9e4066Sahrens #include <sys/uberblock_impl.h> 36fa9e4066Sahrens #include <sys/metaslab.h> 37fa9e4066Sahrens #include <sys/metaslab_impl.h> 38fa9e4066Sahrens #include <sys/space_map.h> 39fa9e4066Sahrens #include <sys/zio.h> 40fa9e4066Sahrens #include <sys/zap.h> 41fa9e4066Sahrens #include <sys/fs/zfs.h> 42c5904d13Seschrock #include <sys/arc.h> 43e6ca193dSGeorge Wilson #include <sys/zil.h> 443f9d6ad7SLin Ling #include <sys/dsl_scan.h> 45fa9e4066Sahrens 46fa9e4066Sahrens /* 47fa9e4066Sahrens * Virtual device management. 48fa9e4066Sahrens */ 49fa9e4066Sahrens 50fa9e4066Sahrens static vdev_ops_t *vdev_ops_table[] = { 51fa9e4066Sahrens &vdev_root_ops, 52fa9e4066Sahrens &vdev_raidz_ops, 53fa9e4066Sahrens &vdev_mirror_ops, 54fa9e4066Sahrens &vdev_replacing_ops, 5599653d4eSeschrock &vdev_spare_ops, 56fa9e4066Sahrens &vdev_disk_ops, 57fa9e4066Sahrens &vdev_file_ops, 58fa9e4066Sahrens &vdev_missing_ops, 5988ecc943SGeorge Wilson &vdev_hole_ops, 60fa9e4066Sahrens NULL 61fa9e4066Sahrens }; 62fa9e4066Sahrens 63088f3894Sahrens /* maximum scrub/resilver I/O queue per leaf vdev */ 64088f3894Sahrens int zfs_scrub_limit = 10; 6505b2b3b8Smishra 66fa9e4066Sahrens /* 67fa9e4066Sahrens * Given a vdev type, return the appropriate ops vector. 
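 * Returns NULL if the named type is not found in vdev_ops_table.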
68fa9e4066Sahrens */ 69fa9e4066Sahrens static vdev_ops_t * 70fa9e4066Sahrens vdev_getops(const char *type) 71fa9e4066Sahrens { 72fa9e4066Sahrens vdev_ops_t *ops, **opspp; 73fa9e4066Sahrens 74fa9e4066Sahrens for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++) 75fa9e4066Sahrens if (strcmp(ops->vdev_op_type, type) == 0) 76fa9e4066Sahrens break; 77fa9e4066Sahrens 78fa9e4066Sahrens return (ops); 79fa9e4066Sahrens } 80fa9e4066Sahrens 81fa9e4066Sahrens /* 82fa9e4066Sahrens * Default asize function: return the MAX of psize with the asize of 83fa9e4066Sahrens * all children. This is what's used by anything other than RAID-Z. 84fa9e4066Sahrens */ 85fa9e4066Sahrens uint64_t 86fa9e4066Sahrens vdev_default_asize(vdev_t *vd, uint64_t psize) 87fa9e4066Sahrens { 88ecc2d604Sbonwick uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift); 89fa9e4066Sahrens uint64_t csize; 90fa9e4066Sahrens 91573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) { 92fa9e4066Sahrens csize = vdev_psize_to_asize(vd->vdev_child[c], psize); 93fa9e4066Sahrens asize = MAX(asize, csize); 94fa9e4066Sahrens } 95fa9e4066Sahrens 96fa9e4066Sahrens return (asize); 97fa9e4066Sahrens } 98fa9e4066Sahrens 992a79c5feSlling /* 100573ca77eSGeorge Wilson * Get the minimum allocatable size. We define the allocatable size as 101573ca77eSGeorge Wilson * the vdev's asize rounded to the nearest metaslab. This allows us to 102573ca77eSGeorge Wilson * replace or attach devices which don't have the same physical size but 103573ca77eSGeorge Wilson * can still satisfy the same number of allocations. 1042a79c5feSlling */ 1052a79c5feSlling uint64_t 106573ca77eSGeorge Wilson vdev_get_min_asize(vdev_t *vd) 1072a79c5feSlling { 108573ca77eSGeorge Wilson vdev_t *pvd = vd->vdev_parent; 1092a79c5feSlling 110573ca77eSGeorge Wilson /* 111*4263d13fSGeorge Wilson * If our parent is NULL (inactive spare or cache) or is the root, 112573ca77eSGeorge Wilson * just return our own asize. 113573ca77eSGeorge Wilson */ 114573ca77eSGeorge Wilson if (pvd == NULL) 115573ca77eSGeorge Wilson return (vd->vdev_asize); 1162a79c5feSlling 1172a79c5feSlling /* 118573ca77eSGeorge Wilson * The top-level vdev just returns the allocatable size rounded 119573ca77eSGeorge Wilson * to the nearest metaslab. 1202a79c5feSlling */ 121573ca77eSGeorge Wilson if (vd == vd->vdev_top) 122573ca77eSGeorge Wilson return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift)); 1232a79c5feSlling 124573ca77eSGeorge Wilson /* 125573ca77eSGeorge Wilson * The allocatable space for a raidz vdev is N * sizeof(smallest child), 126573ca77eSGeorge Wilson * so each child must provide at least 1/Nth of its asize. 
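 * For example, if a 4-child raidz top-level vdev has a min_asize of 4G,
 * each replacement child must supply at least 1G.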
127573ca77eSGeorge Wilson */ 128573ca77eSGeorge Wilson if (pvd->vdev_ops == &vdev_raidz_ops) 129573ca77eSGeorge Wilson return (pvd->vdev_min_asize / pvd->vdev_children); 1302a79c5feSlling 131573ca77eSGeorge Wilson return (pvd->vdev_min_asize); 132573ca77eSGeorge Wilson } 1332a79c5feSlling 134573ca77eSGeorge Wilson void 135573ca77eSGeorge Wilson vdev_set_min_asize(vdev_t *vd) 136573ca77eSGeorge Wilson { 137573ca77eSGeorge Wilson vd->vdev_min_asize = vdev_get_min_asize(vd); 138573ca77eSGeorge Wilson 139573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 140573ca77eSGeorge Wilson vdev_set_min_asize(vd->vdev_child[c]); 1412a79c5feSlling } 1422a79c5feSlling 143fa9e4066Sahrens vdev_t * 144fa9e4066Sahrens vdev_lookup_top(spa_t *spa, uint64_t vdev) 145fa9e4066Sahrens { 146fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 147fa9e4066Sahrens 148e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 149e05725b1Sbonwick 150088f3894Sahrens if (vdev < rvd->vdev_children) { 151088f3894Sahrens ASSERT(rvd->vdev_child[vdev] != NULL); 152fa9e4066Sahrens return (rvd->vdev_child[vdev]); 153088f3894Sahrens } 154fa9e4066Sahrens 155fa9e4066Sahrens return (NULL); 156fa9e4066Sahrens } 157fa9e4066Sahrens 158fa9e4066Sahrens vdev_t * 159fa9e4066Sahrens vdev_lookup_by_guid(vdev_t *vd, uint64_t guid) 160fa9e4066Sahrens { 161fa9e4066Sahrens vdev_t *mvd; 162fa9e4066Sahrens 1630e34b6a7Sbonwick if (vd->vdev_guid == guid) 164fa9e4066Sahrens return (vd); 165fa9e4066Sahrens 166573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 167fa9e4066Sahrens if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) != 168fa9e4066Sahrens NULL) 169fa9e4066Sahrens return (mvd); 170fa9e4066Sahrens 171fa9e4066Sahrens return (NULL); 172fa9e4066Sahrens } 173fa9e4066Sahrens 174fa9e4066Sahrens void 175fa9e4066Sahrens vdev_add_child(vdev_t *pvd, vdev_t *cvd) 176fa9e4066Sahrens { 177fa9e4066Sahrens size_t oldsize, newsize; 178fa9e4066Sahrens uint64_t id = cvd->vdev_id; 179fa9e4066Sahrens vdev_t **newchild; 180fa9e4066Sahrens 181e14bb325SJeff Bonwick ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 182fa9e4066Sahrens ASSERT(cvd->vdev_parent == NULL); 183fa9e4066Sahrens 184fa9e4066Sahrens cvd->vdev_parent = pvd; 185fa9e4066Sahrens 186fa9e4066Sahrens if (pvd == NULL) 187fa9e4066Sahrens return; 188fa9e4066Sahrens 189fa9e4066Sahrens ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL); 190fa9e4066Sahrens 191fa9e4066Sahrens oldsize = pvd->vdev_children * sizeof (vdev_t *); 192fa9e4066Sahrens pvd->vdev_children = MAX(pvd->vdev_children, id + 1); 193fa9e4066Sahrens newsize = pvd->vdev_children * sizeof (vdev_t *); 194fa9e4066Sahrens 195fa9e4066Sahrens newchild = kmem_zalloc(newsize, KM_SLEEP); 196fa9e4066Sahrens if (pvd->vdev_child != NULL) { 197fa9e4066Sahrens bcopy(pvd->vdev_child, newchild, oldsize); 198fa9e4066Sahrens kmem_free(pvd->vdev_child, oldsize); 199fa9e4066Sahrens } 200fa9e4066Sahrens 201fa9e4066Sahrens pvd->vdev_child = newchild; 202fa9e4066Sahrens pvd->vdev_child[id] = cvd; 203fa9e4066Sahrens 204fa9e4066Sahrens cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd); 205fa9e4066Sahrens ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL); 206fa9e4066Sahrens 207fa9e4066Sahrens /* 208fa9e4066Sahrens * Walk up all ancestors to update guid sum. 
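 * A vdev's guid_sum covers itself and everything below it, so adding
 * cvd's guid_sum at each level keeps the sums consistent.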
209fa9e4066Sahrens */ 210fa9e4066Sahrens for (; pvd != NULL; pvd = pvd->vdev_parent) 211fa9e4066Sahrens pvd->vdev_guid_sum += cvd->vdev_guid_sum; 212fa9e4066Sahrens } 213fa9e4066Sahrens 214fa9e4066Sahrens void 215fa9e4066Sahrens vdev_remove_child(vdev_t *pvd, vdev_t *cvd) 216fa9e4066Sahrens { 217fa9e4066Sahrens int c; 218fa9e4066Sahrens uint_t id = cvd->vdev_id; 219fa9e4066Sahrens 220fa9e4066Sahrens ASSERT(cvd->vdev_parent == pvd); 221fa9e4066Sahrens 222fa9e4066Sahrens if (pvd == NULL) 223fa9e4066Sahrens return; 224fa9e4066Sahrens 225fa9e4066Sahrens ASSERT(id < pvd->vdev_children); 226fa9e4066Sahrens ASSERT(pvd->vdev_child[id] == cvd); 227fa9e4066Sahrens 228fa9e4066Sahrens pvd->vdev_child[id] = NULL; 229fa9e4066Sahrens cvd->vdev_parent = NULL; 230fa9e4066Sahrens 231fa9e4066Sahrens for (c = 0; c < pvd->vdev_children; c++) 232fa9e4066Sahrens if (pvd->vdev_child[c]) 233fa9e4066Sahrens break; 234fa9e4066Sahrens 235fa9e4066Sahrens if (c == pvd->vdev_children) { 236fa9e4066Sahrens kmem_free(pvd->vdev_child, c * sizeof (vdev_t *)); 237fa9e4066Sahrens pvd->vdev_child = NULL; 238fa9e4066Sahrens pvd->vdev_children = 0; 239fa9e4066Sahrens } 240fa9e4066Sahrens 241fa9e4066Sahrens /* 242fa9e4066Sahrens * Walk up all ancestors to update guid sum. 243fa9e4066Sahrens */ 244fa9e4066Sahrens for (; pvd != NULL; pvd = pvd->vdev_parent) 245fa9e4066Sahrens pvd->vdev_guid_sum -= cvd->vdev_guid_sum; 246fa9e4066Sahrens } 247fa9e4066Sahrens 248fa9e4066Sahrens /* 249fa9e4066Sahrens * Remove any holes in the child array. 250fa9e4066Sahrens */ 251fa9e4066Sahrens void 252fa9e4066Sahrens vdev_compact_children(vdev_t *pvd) 253fa9e4066Sahrens { 254fa9e4066Sahrens vdev_t **newchild, *cvd; 255fa9e4066Sahrens int oldc = pvd->vdev_children; 256573ca77eSGeorge Wilson int newc; 257fa9e4066Sahrens 258e14bb325SJeff Bonwick ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 259fa9e4066Sahrens 260573ca77eSGeorge Wilson for (int c = newc = 0; c < oldc; c++) 261fa9e4066Sahrens if (pvd->vdev_child[c]) 262fa9e4066Sahrens newc++; 263fa9e4066Sahrens 264fa9e4066Sahrens newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP); 265fa9e4066Sahrens 266573ca77eSGeorge Wilson for (int c = newc = 0; c < oldc; c++) { 267fa9e4066Sahrens if ((cvd = pvd->vdev_child[c]) != NULL) { 268fa9e4066Sahrens newchild[newc] = cvd; 269fa9e4066Sahrens cvd->vdev_id = newc++; 270fa9e4066Sahrens } 271fa9e4066Sahrens } 272fa9e4066Sahrens 273fa9e4066Sahrens kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *)); 274fa9e4066Sahrens pvd->vdev_child = newchild; 275fa9e4066Sahrens pvd->vdev_children = newc; 276fa9e4066Sahrens } 277fa9e4066Sahrens 278fa9e4066Sahrens /* 279fa9e4066Sahrens * Allocate and minimally initialize a vdev_t. 
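 * This is shared by vdev_alloc() below and by vdev_add_parent(), which
 * interposes the implicit mirror/replacing vdevs.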
280fa9e4066Sahrens */ 28188ecc943SGeorge Wilson vdev_t * 282fa9e4066Sahrens vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops) 283fa9e4066Sahrens { 284fa9e4066Sahrens vdev_t *vd; 285fa9e4066Sahrens 286fa9e4066Sahrens vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP); 287fa9e4066Sahrens 2880e34b6a7Sbonwick if (spa->spa_root_vdev == NULL) { 2890e34b6a7Sbonwick ASSERT(ops == &vdev_root_ops); 2900e34b6a7Sbonwick spa->spa_root_vdev = vd; 291e9103aaeSGarrett D'Amore spa->spa_load_guid = spa_generate_guid(NULL); 2920e34b6a7Sbonwick } 2930e34b6a7Sbonwick 29488ecc943SGeorge Wilson if (guid == 0 && ops != &vdev_hole_ops) { 2950e34b6a7Sbonwick if (spa->spa_root_vdev == vd) { 2960e34b6a7Sbonwick /* 2970e34b6a7Sbonwick * The root vdev's guid will also be the pool guid, 2980e34b6a7Sbonwick * which must be unique among all pools. 2990e34b6a7Sbonwick */ 3001195e687SMark J Musante guid = spa_generate_guid(NULL); 3010e34b6a7Sbonwick } else { 3020e34b6a7Sbonwick /* 3030e34b6a7Sbonwick * Any other vdev's guid must be unique within the pool. 3040e34b6a7Sbonwick */ 3051195e687SMark J Musante guid = spa_generate_guid(spa); 3060e34b6a7Sbonwick } 3070e34b6a7Sbonwick ASSERT(!spa_guid_exists(spa_guid(spa), guid)); 3080e34b6a7Sbonwick } 3090e34b6a7Sbonwick 310fa9e4066Sahrens vd->vdev_spa = spa; 311fa9e4066Sahrens vd->vdev_id = id; 312fa9e4066Sahrens vd->vdev_guid = guid; 313fa9e4066Sahrens vd->vdev_guid_sum = guid; 314fa9e4066Sahrens vd->vdev_ops = ops; 315fa9e4066Sahrens vd->vdev_state = VDEV_STATE_CLOSED; 31688ecc943SGeorge Wilson vd->vdev_ishole = (ops == &vdev_hole_ops); 317fa9e4066Sahrens 318fa9e4066Sahrens mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL); 3195ad82045Snd mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL); 320e14bb325SJeff Bonwick mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL); 3218ad4d6ddSJeff Bonwick for (int t = 0; t < DTL_TYPES; t++) { 3228ad4d6ddSJeff Bonwick space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0, 3238ad4d6ddSJeff Bonwick &vd->vdev_dtl_lock); 3248ad4d6ddSJeff Bonwick } 325fa9e4066Sahrens txg_list_create(&vd->vdev_ms_list, 326fa9e4066Sahrens offsetof(struct metaslab, ms_txg_node)); 327fa9e4066Sahrens txg_list_create(&vd->vdev_dtl_list, 328fa9e4066Sahrens offsetof(struct vdev, vdev_dtl_node)); 329fa9e4066Sahrens vd->vdev_stat.vs_timestamp = gethrtime(); 3303d7072f8Seschrock vdev_queue_init(vd); 3313d7072f8Seschrock vdev_cache_init(vd); 332fa9e4066Sahrens 333fa9e4066Sahrens return (vd); 334fa9e4066Sahrens } 335fa9e4066Sahrens 336fa9e4066Sahrens /* 337fa9e4066Sahrens * Allocate a new vdev. The 'alloctype' is used to control whether we are 338fa9e4066Sahrens * creating a new vdev or loading an existing one - the behavior is slightly 339fa9e4066Sahrens * different for each case. 
340fa9e4066Sahrens */ 34199653d4eSeschrock int 34299653d4eSeschrock vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id, 34399653d4eSeschrock int alloctype) 344fa9e4066Sahrens { 345fa9e4066Sahrens vdev_ops_t *ops; 346fa9e4066Sahrens char *type; 3478654d025Sperrin uint64_t guid = 0, islog, nparity; 348fa9e4066Sahrens vdev_t *vd; 349fa9e4066Sahrens 350e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 351fa9e4066Sahrens 352fa9e4066Sahrens if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 35399653d4eSeschrock return (EINVAL); 354fa9e4066Sahrens 355fa9e4066Sahrens if ((ops = vdev_getops(type)) == NULL) 35699653d4eSeschrock return (EINVAL); 357fa9e4066Sahrens 358fa9e4066Sahrens /* 359fa9e4066Sahrens * If this is a load, get the vdev guid from the nvlist. 360fa9e4066Sahrens * Otherwise, vdev_alloc_common() will generate one for us. 361fa9e4066Sahrens */ 362fa9e4066Sahrens if (alloctype == VDEV_ALLOC_LOAD) { 363fa9e4066Sahrens uint64_t label_id; 364fa9e4066Sahrens 365fa9e4066Sahrens if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) || 366fa9e4066Sahrens label_id != id) 36799653d4eSeschrock return (EINVAL); 368fa9e4066Sahrens 369fa9e4066Sahrens if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 37099653d4eSeschrock return (EINVAL); 37199653d4eSeschrock } else if (alloctype == VDEV_ALLOC_SPARE) { 37299653d4eSeschrock if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 37399653d4eSeschrock return (EINVAL); 374fa94a07fSbrendan } else if (alloctype == VDEV_ALLOC_L2CACHE) { 375fa94a07fSbrendan if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 376fa94a07fSbrendan return (EINVAL); 37721ecdf64SLin Ling } else if (alloctype == VDEV_ALLOC_ROOTPOOL) { 37821ecdf64SLin Ling if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 37921ecdf64SLin Ling return (EINVAL); 380fa9e4066Sahrens } 381fa9e4066Sahrens 38299653d4eSeschrock /* 38399653d4eSeschrock * The first allocated vdev must be of type 'root'. 38499653d4eSeschrock */ 38599653d4eSeschrock if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL) 38699653d4eSeschrock return (EINVAL); 38799653d4eSeschrock 3888654d025Sperrin /* 3898654d025Sperrin * Determine whether we're a log vdev. 3908654d025Sperrin */ 3918654d025Sperrin islog = 0; 3928654d025Sperrin (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog); 393990b4856Slling if (islog && spa_version(spa) < SPA_VERSION_SLOGS) 3948654d025Sperrin return (ENOTSUP); 395fa9e4066Sahrens 39688ecc943SGeorge Wilson if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES) 39788ecc943SGeorge Wilson return (ENOTSUP); 39888ecc943SGeorge Wilson 39999653d4eSeschrock /* 4008654d025Sperrin * Set the nparity property for RAID-Z vdevs. 40199653d4eSeschrock */ 4028654d025Sperrin nparity = -1ULL; 40399653d4eSeschrock if (ops == &vdev_raidz_ops) { 40499653d4eSeschrock if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 4058654d025Sperrin &nparity) == 0) { 406b24ab676SJeff Bonwick if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY) 40799653d4eSeschrock return (EINVAL); 40899653d4eSeschrock /* 409f94275ceSAdam Leventhal * Previous versions could only support 1 or 2 parity 410f94275ceSAdam Leventhal * device. 
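 * (only raidz1 before SPA_VERSION_RAIDZ2, and at most raidz2 before
 * SPA_VERSION_RAIDZ3), so higher parity levels are rejected on those
 * older pool versions.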
41199653d4eSeschrock */ 412f94275ceSAdam Leventhal if (nparity > 1 && 413f94275ceSAdam Leventhal spa_version(spa) < SPA_VERSION_RAIDZ2) 414f94275ceSAdam Leventhal return (ENOTSUP); 415f94275ceSAdam Leventhal if (nparity > 2 && 416f94275ceSAdam Leventhal spa_version(spa) < SPA_VERSION_RAIDZ3) 41799653d4eSeschrock return (ENOTSUP); 41899653d4eSeschrock } else { 41999653d4eSeschrock /* 42099653d4eSeschrock * We require the parity to be specified for SPAs that 42199653d4eSeschrock * support multiple parity levels. 42299653d4eSeschrock */ 423f94275ceSAdam Leventhal if (spa_version(spa) >= SPA_VERSION_RAIDZ2) 42499653d4eSeschrock return (EINVAL); 42599653d4eSeschrock /* 42699653d4eSeschrock * Otherwise, we default to 1 parity device for RAID-Z. 42799653d4eSeschrock */ 4288654d025Sperrin nparity = 1; 42999653d4eSeschrock } 43099653d4eSeschrock } else { 4318654d025Sperrin nparity = 0; 43299653d4eSeschrock } 4338654d025Sperrin ASSERT(nparity != -1ULL); 4348654d025Sperrin 4358654d025Sperrin vd = vdev_alloc_common(spa, id, guid, ops); 4368654d025Sperrin 4378654d025Sperrin vd->vdev_islog = islog; 4388654d025Sperrin vd->vdev_nparity = nparity; 4398654d025Sperrin 4408654d025Sperrin if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0) 4418654d025Sperrin vd->vdev_path = spa_strdup(vd->vdev_path); 4428654d025Sperrin if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0) 4438654d025Sperrin vd->vdev_devid = spa_strdup(vd->vdev_devid); 4448654d025Sperrin if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, 4458654d025Sperrin &vd->vdev_physpath) == 0) 4468654d025Sperrin vd->vdev_physpath = spa_strdup(vd->vdev_physpath); 4476809eb4eSEric Schrock if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0) 4486809eb4eSEric Schrock vd->vdev_fru = spa_strdup(vd->vdev_fru); 44999653d4eSeschrock 450afefbcddSeschrock /* 451afefbcddSeschrock * Set the whole_disk property. If it's not specified, leave the value 452afefbcddSeschrock * as -1. 453afefbcddSeschrock */ 454afefbcddSeschrock if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 455afefbcddSeschrock &vd->vdev_wholedisk) != 0) 456afefbcddSeschrock vd->vdev_wholedisk = -1ULL; 457afefbcddSeschrock 458ea8dc4b6Seschrock /* 459ea8dc4b6Seschrock * Look for the 'not present' flag. This will only be set if the device 460ea8dc4b6Seschrock * was not present at the time of import. 461ea8dc4b6Seschrock */ 4626809eb4eSEric Schrock (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 4636809eb4eSEric Schrock &vd->vdev_not_present); 464ea8dc4b6Seschrock 465ecc2d604Sbonwick /* 466ecc2d604Sbonwick * Get the alignment requirement. 467ecc2d604Sbonwick */ 468ecc2d604Sbonwick (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift); 469ecc2d604Sbonwick 47088ecc943SGeorge Wilson /* 47188ecc943SGeorge Wilson * Retrieve the vdev creation time. 47288ecc943SGeorge Wilson */ 47388ecc943SGeorge Wilson (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, 47488ecc943SGeorge Wilson &vd->vdev_crtxg); 47588ecc943SGeorge Wilson 476fa9e4066Sahrens /* 477fa9e4066Sahrens * If we're a top-level vdev, try to load the allocation parameters. 
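 * (the metaslab array object, metaslab shift, asize and the removing flag).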
478fa9e4066Sahrens */ 4791195e687SMark J Musante if (parent && !parent->vdev_parent && 4801195e687SMark J Musante (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) { 481fa9e4066Sahrens (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY, 482fa9e4066Sahrens &vd->vdev_ms_array); 483fa9e4066Sahrens (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT, 484fa9e4066Sahrens &vd->vdev_ms_shift); 485fa9e4066Sahrens (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE, 486fa9e4066Sahrens &vd->vdev_asize); 4873f9d6ad7SLin Ling (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING, 4883f9d6ad7SLin Ling &vd->vdev_removing); 489fa9e4066Sahrens } 490fa9e4066Sahrens 491a1521560SJeff Bonwick if (parent && !parent->vdev_parent) { 492a1521560SJeff Bonwick ASSERT(alloctype == VDEV_ALLOC_LOAD || 4939f4ab4d8SGeorge Wilson alloctype == VDEV_ALLOC_ADD || 4941195e687SMark J Musante alloctype == VDEV_ALLOC_SPLIT || 4959f4ab4d8SGeorge Wilson alloctype == VDEV_ALLOC_ROOTPOOL); 496a1521560SJeff Bonwick vd->vdev_mg = metaslab_group_create(islog ? 497a1521560SJeff Bonwick spa_log_class(spa) : spa_normal_class(spa), vd); 498a1521560SJeff Bonwick } 499a1521560SJeff Bonwick 500fa9e4066Sahrens /* 5013d7072f8Seschrock * If we're a leaf vdev, try to load the DTL object and other state. 502fa9e4066Sahrens */ 503c5904d13Seschrock if (vd->vdev_ops->vdev_op_leaf && 50421ecdf64SLin Ling (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE || 50521ecdf64SLin Ling alloctype == VDEV_ALLOC_ROOTPOOL)) { 506c5904d13Seschrock if (alloctype == VDEV_ALLOC_LOAD) { 507c5904d13Seschrock (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL, 5088ad4d6ddSJeff Bonwick &vd->vdev_dtl_smo.smo_object); 509c5904d13Seschrock (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE, 510c5904d13Seschrock &vd->vdev_unspare); 511c5904d13Seschrock } 51221ecdf64SLin Ling 51321ecdf64SLin Ling if (alloctype == VDEV_ALLOC_ROOTPOOL) { 51421ecdf64SLin Ling uint64_t spare = 0; 51521ecdf64SLin Ling 51621ecdf64SLin Ling if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 51721ecdf64SLin Ling &spare) == 0 && spare) 51821ecdf64SLin Ling spa_spare_add(vd); 51921ecdf64SLin Ling } 52021ecdf64SLin Ling 521ecc2d604Sbonwick (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, 522ecc2d604Sbonwick &vd->vdev_offline); 523c5904d13Seschrock 524cb04b873SMark J Musante (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVERING, 525cb04b873SMark J Musante &vd->vdev_resilvering); 526cb04b873SMark J Musante 5273d7072f8Seschrock /* 5283d7072f8Seschrock * When importing a pool, we want to ignore the persistent fault 5293d7072f8Seschrock * state, as the diagnosis made on another system may not be 530069f55e2SEric Schrock * valid in the current context. Local vdevs will 531069f55e2SEric Schrock * remain in the faulted state. 
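 * (the flags below are only read back when the pool is opened locally,
 * i.e. spa_load_state() is SPA_LOAD_OPEN, and are ignored on import).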
5323d7072f8Seschrock */ 533b16da2e2SGeorge Wilson if (spa_load_state(spa) == SPA_LOAD_OPEN) { 5343d7072f8Seschrock (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, 5353d7072f8Seschrock &vd->vdev_faulted); 5363d7072f8Seschrock (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, 5373d7072f8Seschrock &vd->vdev_degraded); 5383d7072f8Seschrock (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, 5393d7072f8Seschrock &vd->vdev_removed); 540069f55e2SEric Schrock 541069f55e2SEric Schrock if (vd->vdev_faulted || vd->vdev_degraded) { 542069f55e2SEric Schrock char *aux; 543069f55e2SEric Schrock 544069f55e2SEric Schrock vd->vdev_label_aux = 545069f55e2SEric Schrock VDEV_AUX_ERR_EXCEEDED; 546069f55e2SEric Schrock if (nvlist_lookup_string(nv, 547069f55e2SEric Schrock ZPOOL_CONFIG_AUX_STATE, &aux) == 0 && 548069f55e2SEric Schrock strcmp(aux, "external") == 0) 549069f55e2SEric Schrock vd->vdev_label_aux = VDEV_AUX_EXTERNAL; 550069f55e2SEric Schrock } 5513d7072f8Seschrock } 552fa9e4066Sahrens } 553fa9e4066Sahrens 554fa9e4066Sahrens /* 555fa9e4066Sahrens * Add ourselves to the parent's list of children. 556fa9e4066Sahrens */ 557fa9e4066Sahrens vdev_add_child(parent, vd); 558fa9e4066Sahrens 55999653d4eSeschrock *vdp = vd; 56099653d4eSeschrock 56199653d4eSeschrock return (0); 562fa9e4066Sahrens } 563fa9e4066Sahrens 564fa9e4066Sahrens void 565fa9e4066Sahrens vdev_free(vdev_t *vd) 566fa9e4066Sahrens { 5673d7072f8Seschrock spa_t *spa = vd->vdev_spa; 568fa9e4066Sahrens 569fa9e4066Sahrens /* 570fa9e4066Sahrens * vdev_free() implies closing the vdev first. This is simpler than 571fa9e4066Sahrens * trying to ensure complicated semantics for all callers. 572fa9e4066Sahrens */ 573fa9e4066Sahrens vdev_close(vd); 574fa9e4066Sahrens 575e14bb325SJeff Bonwick ASSERT(!list_link_active(&vd->vdev_config_dirty_node)); 576b24ab676SJeff Bonwick ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); 577fa9e4066Sahrens 578fa9e4066Sahrens /* 579fa9e4066Sahrens * Free all children. 580fa9e4066Sahrens */ 581573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 582fa9e4066Sahrens vdev_free(vd->vdev_child[c]); 583fa9e4066Sahrens 584fa9e4066Sahrens ASSERT(vd->vdev_child == NULL); 585fa9e4066Sahrens ASSERT(vd->vdev_guid_sum == vd->vdev_guid); 586fa9e4066Sahrens 587fa9e4066Sahrens /* 588fa9e4066Sahrens * Discard allocation state. 589fa9e4066Sahrens */ 590a1521560SJeff Bonwick if (vd->vdev_mg != NULL) { 591fa9e4066Sahrens vdev_metaslab_fini(vd); 592a1521560SJeff Bonwick metaslab_group_destroy(vd->vdev_mg); 593a1521560SJeff Bonwick } 594fa9e4066Sahrens 595fa9e4066Sahrens ASSERT3U(vd->vdev_stat.vs_space, ==, 0); 59699653d4eSeschrock ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0); 597fa9e4066Sahrens ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0); 598fa9e4066Sahrens 599fa9e4066Sahrens /* 600fa9e4066Sahrens * Remove this vdev from its parent's child list. 601fa9e4066Sahrens */ 602fa9e4066Sahrens vdev_remove_child(vd->vdev_parent, vd); 603fa9e4066Sahrens 604fa9e4066Sahrens ASSERT(vd->vdev_parent == NULL); 605fa9e4066Sahrens 6063d7072f8Seschrock /* 6073d7072f8Seschrock * Clean up vdev structure. 
6083d7072f8Seschrock */ 6093d7072f8Seschrock vdev_queue_fini(vd); 6103d7072f8Seschrock vdev_cache_fini(vd); 6113d7072f8Seschrock 6123d7072f8Seschrock if (vd->vdev_path) 6133d7072f8Seschrock spa_strfree(vd->vdev_path); 6143d7072f8Seschrock if (vd->vdev_devid) 6153d7072f8Seschrock spa_strfree(vd->vdev_devid); 6163d7072f8Seschrock if (vd->vdev_physpath) 6173d7072f8Seschrock spa_strfree(vd->vdev_physpath); 6186809eb4eSEric Schrock if (vd->vdev_fru) 6196809eb4eSEric Schrock spa_strfree(vd->vdev_fru); 6203d7072f8Seschrock 6213d7072f8Seschrock if (vd->vdev_isspare) 6223d7072f8Seschrock spa_spare_remove(vd); 623fa94a07fSbrendan if (vd->vdev_isl2cache) 624fa94a07fSbrendan spa_l2cache_remove(vd); 6253d7072f8Seschrock 6263d7072f8Seschrock txg_list_destroy(&vd->vdev_ms_list); 6273d7072f8Seschrock txg_list_destroy(&vd->vdev_dtl_list); 6288ad4d6ddSJeff Bonwick 6293d7072f8Seschrock mutex_enter(&vd->vdev_dtl_lock); 6308ad4d6ddSJeff Bonwick for (int t = 0; t < DTL_TYPES; t++) { 6318ad4d6ddSJeff Bonwick space_map_unload(&vd->vdev_dtl[t]); 6328ad4d6ddSJeff Bonwick space_map_destroy(&vd->vdev_dtl[t]); 6338ad4d6ddSJeff Bonwick } 6343d7072f8Seschrock mutex_exit(&vd->vdev_dtl_lock); 6358ad4d6ddSJeff Bonwick 6363d7072f8Seschrock mutex_destroy(&vd->vdev_dtl_lock); 6373d7072f8Seschrock mutex_destroy(&vd->vdev_stat_lock); 638e14bb325SJeff Bonwick mutex_destroy(&vd->vdev_probe_lock); 6393d7072f8Seschrock 6403d7072f8Seschrock if (vd == spa->spa_root_vdev) 6413d7072f8Seschrock spa->spa_root_vdev = NULL; 6423d7072f8Seschrock 6433d7072f8Seschrock kmem_free(vd, sizeof (vdev_t)); 644fa9e4066Sahrens } 645fa9e4066Sahrens 646fa9e4066Sahrens /* 647fa9e4066Sahrens * Transfer top-level vdev state from svd to tvd. 648fa9e4066Sahrens */ 649fa9e4066Sahrens static void 650fa9e4066Sahrens vdev_top_transfer(vdev_t *svd, vdev_t *tvd) 651fa9e4066Sahrens { 652fa9e4066Sahrens spa_t *spa = svd->vdev_spa; 653fa9e4066Sahrens metaslab_t *msp; 654fa9e4066Sahrens vdev_t *vd; 655fa9e4066Sahrens int t; 656fa9e4066Sahrens 657fa9e4066Sahrens ASSERT(tvd == tvd->vdev_top); 658fa9e4066Sahrens 659fa9e4066Sahrens tvd->vdev_ms_array = svd->vdev_ms_array; 660fa9e4066Sahrens tvd->vdev_ms_shift = svd->vdev_ms_shift; 661fa9e4066Sahrens tvd->vdev_ms_count = svd->vdev_ms_count; 662fa9e4066Sahrens 663fa9e4066Sahrens svd->vdev_ms_array = 0; 664fa9e4066Sahrens svd->vdev_ms_shift = 0; 665fa9e4066Sahrens svd->vdev_ms_count = 0; 666fa9e4066Sahrens 667fa9e4066Sahrens tvd->vdev_mg = svd->vdev_mg; 668fa9e4066Sahrens tvd->vdev_ms = svd->vdev_ms; 669fa9e4066Sahrens 670fa9e4066Sahrens svd->vdev_mg = NULL; 671fa9e4066Sahrens svd->vdev_ms = NULL; 672ecc2d604Sbonwick 673ecc2d604Sbonwick if (tvd->vdev_mg != NULL) 674ecc2d604Sbonwick tvd->vdev_mg->mg_vd = tvd; 675fa9e4066Sahrens 676fa9e4066Sahrens tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc; 677fa9e4066Sahrens tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space; 67899653d4eSeschrock tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace; 679fa9e4066Sahrens 680fa9e4066Sahrens svd->vdev_stat.vs_alloc = 0; 681fa9e4066Sahrens svd->vdev_stat.vs_space = 0; 68299653d4eSeschrock svd->vdev_stat.vs_dspace = 0; 683fa9e4066Sahrens 684fa9e4066Sahrens for (t = 0; t < TXG_SIZE; t++) { 685fa9e4066Sahrens while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL) 686fa9e4066Sahrens (void) txg_list_add(&tvd->vdev_ms_list, msp, t); 687fa9e4066Sahrens while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL) 688fa9e4066Sahrens (void) txg_list_add(&tvd->vdev_dtl_list, vd, t); 689fa9e4066Sahrens if 
(txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t)) 690fa9e4066Sahrens (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t); 691fa9e4066Sahrens } 692fa9e4066Sahrens 693e14bb325SJeff Bonwick if (list_link_active(&svd->vdev_config_dirty_node)) { 694fa9e4066Sahrens vdev_config_clean(svd); 695fa9e4066Sahrens vdev_config_dirty(tvd); 696fa9e4066Sahrens } 697fa9e4066Sahrens 698e14bb325SJeff Bonwick if (list_link_active(&svd->vdev_state_dirty_node)) { 699e14bb325SJeff Bonwick vdev_state_clean(svd); 700e14bb325SJeff Bonwick vdev_state_dirty(tvd); 701e14bb325SJeff Bonwick } 702e14bb325SJeff Bonwick 70399653d4eSeschrock tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio; 70499653d4eSeschrock svd->vdev_deflate_ratio = 0; 7058654d025Sperrin 7068654d025Sperrin tvd->vdev_islog = svd->vdev_islog; 7078654d025Sperrin svd->vdev_islog = 0; 708fa9e4066Sahrens } 709fa9e4066Sahrens 710fa9e4066Sahrens static void 711fa9e4066Sahrens vdev_top_update(vdev_t *tvd, vdev_t *vd) 712fa9e4066Sahrens { 713fa9e4066Sahrens if (vd == NULL) 714fa9e4066Sahrens return; 715fa9e4066Sahrens 716fa9e4066Sahrens vd->vdev_top = tvd; 717fa9e4066Sahrens 718573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 719fa9e4066Sahrens vdev_top_update(tvd, vd->vdev_child[c]); 720fa9e4066Sahrens } 721fa9e4066Sahrens 722fa9e4066Sahrens /* 723fa9e4066Sahrens * Add a mirror/replacing vdev above an existing vdev. 724fa9e4066Sahrens */ 725fa9e4066Sahrens vdev_t * 726fa9e4066Sahrens vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops) 727fa9e4066Sahrens { 728fa9e4066Sahrens spa_t *spa = cvd->vdev_spa; 729fa9e4066Sahrens vdev_t *pvd = cvd->vdev_parent; 730fa9e4066Sahrens vdev_t *mvd; 731fa9e4066Sahrens 732e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 733fa9e4066Sahrens 734fa9e4066Sahrens mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops); 735ecc2d604Sbonwick 736ecc2d604Sbonwick mvd->vdev_asize = cvd->vdev_asize; 737573ca77eSGeorge Wilson mvd->vdev_min_asize = cvd->vdev_min_asize; 738*4263d13fSGeorge Wilson mvd->vdev_max_asize = cvd->vdev_max_asize; 739ecc2d604Sbonwick mvd->vdev_ashift = cvd->vdev_ashift; 740ecc2d604Sbonwick mvd->vdev_state = cvd->vdev_state; 74188ecc943SGeorge Wilson mvd->vdev_crtxg = cvd->vdev_crtxg; 742ecc2d604Sbonwick 743fa9e4066Sahrens vdev_remove_child(pvd, cvd); 744fa9e4066Sahrens vdev_add_child(pvd, mvd); 745fa9e4066Sahrens cvd->vdev_id = mvd->vdev_children; 746fa9e4066Sahrens vdev_add_child(mvd, cvd); 747fa9e4066Sahrens vdev_top_update(cvd->vdev_top, cvd->vdev_top); 748fa9e4066Sahrens 749fa9e4066Sahrens if (mvd == mvd->vdev_top) 750fa9e4066Sahrens vdev_top_transfer(cvd, mvd); 751fa9e4066Sahrens 752fa9e4066Sahrens return (mvd); 753fa9e4066Sahrens } 754fa9e4066Sahrens 755fa9e4066Sahrens /* 756fa9e4066Sahrens * Remove a 1-way mirror/replacing vdev from the tree. 
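 * (the inverse of vdev_add_parent(): cvd takes mvd's place under pvd
 * and mvd is freed).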
757fa9e4066Sahrens */ 758fa9e4066Sahrens void 759fa9e4066Sahrens vdev_remove_parent(vdev_t *cvd) 760fa9e4066Sahrens { 761fa9e4066Sahrens vdev_t *mvd = cvd->vdev_parent; 762fa9e4066Sahrens vdev_t *pvd = mvd->vdev_parent; 763fa9e4066Sahrens 764e14bb325SJeff Bonwick ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 765fa9e4066Sahrens 766fa9e4066Sahrens ASSERT(mvd->vdev_children == 1); 767fa9e4066Sahrens ASSERT(mvd->vdev_ops == &vdev_mirror_ops || 76899653d4eSeschrock mvd->vdev_ops == &vdev_replacing_ops || 76999653d4eSeschrock mvd->vdev_ops == &vdev_spare_ops); 770ecc2d604Sbonwick cvd->vdev_ashift = mvd->vdev_ashift; 771fa9e4066Sahrens 772fa9e4066Sahrens vdev_remove_child(mvd, cvd); 773fa9e4066Sahrens vdev_remove_child(pvd, mvd); 7748ad4d6ddSJeff Bonwick 77599653d4eSeschrock /* 776e14bb325SJeff Bonwick * If cvd will replace mvd as a top-level vdev, preserve mvd's guid. 777e14bb325SJeff Bonwick * Otherwise, we could have detached an offline device, and when we 778e14bb325SJeff Bonwick * go to import the pool we'll think we have two top-level vdevs, 779e14bb325SJeff Bonwick * instead of a different version of the same top-level vdev. 78099653d4eSeschrock */ 7818ad4d6ddSJeff Bonwick if (mvd->vdev_top == mvd) { 7828ad4d6ddSJeff Bonwick uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid; 7831195e687SMark J Musante cvd->vdev_orig_guid = cvd->vdev_guid; 7848ad4d6ddSJeff Bonwick cvd->vdev_guid += guid_delta; 7858ad4d6ddSJeff Bonwick cvd->vdev_guid_sum += guid_delta; 7868ad4d6ddSJeff Bonwick } 787e14bb325SJeff Bonwick cvd->vdev_id = mvd->vdev_id; 788e14bb325SJeff Bonwick vdev_add_child(pvd, cvd); 789fa9e4066Sahrens vdev_top_update(cvd->vdev_top, cvd->vdev_top); 790fa9e4066Sahrens 791fa9e4066Sahrens if (cvd == cvd->vdev_top) 792fa9e4066Sahrens vdev_top_transfer(mvd, cvd); 793fa9e4066Sahrens 794fa9e4066Sahrens ASSERT(mvd->vdev_children == 0); 795fa9e4066Sahrens vdev_free(mvd); 796fa9e4066Sahrens } 797fa9e4066Sahrens 798ea8dc4b6Seschrock int 799fa9e4066Sahrens vdev_metaslab_init(vdev_t *vd, uint64_t txg) 800fa9e4066Sahrens { 801fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 802ecc2d604Sbonwick objset_t *mos = spa->spa_meta_objset; 803ecc2d604Sbonwick uint64_t m; 804fa9e4066Sahrens uint64_t oldc = vd->vdev_ms_count; 805fa9e4066Sahrens uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift; 806ecc2d604Sbonwick metaslab_t **mspp; 807ecc2d604Sbonwick int error; 808fa9e4066Sahrens 809a1521560SJeff Bonwick ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER)); 810a1521560SJeff Bonwick 81188ecc943SGeorge Wilson /* 81288ecc943SGeorge Wilson * This vdev is not being allocated from yet or is a hole. 81388ecc943SGeorge Wilson */ 81488ecc943SGeorge Wilson if (vd->vdev_ms_shift == 0) 8150e34b6a7Sbonwick return (0); 8160e34b6a7Sbonwick 81788ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 81888ecc943SGeorge Wilson 819e6ca193dSGeorge Wilson /* 820e6ca193dSGeorge Wilson * Compute the raidz-deflation ratio. Note, we hard-code 821e6ca193dSGeorge Wilson * in 128k (1 << 17) because it is the current "typical" blocksize. 822e6ca193dSGeorge Wilson * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change, 823e6ca193dSGeorge Wilson * or we will inconsistently account for existing bp's. 
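 * For example, if a 128K block happens to allocate 160K on a given raidz
 * layout, the ratio records that only 4/5 of the raw space holds data.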
824e6ca193dSGeorge Wilson */ 825e6ca193dSGeorge Wilson vd->vdev_deflate_ratio = (1 << 17) / 826e6ca193dSGeorge Wilson (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT); 827e6ca193dSGeorge Wilson 828fa9e4066Sahrens ASSERT(oldc <= newc); 829fa9e4066Sahrens 830ecc2d604Sbonwick mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP); 831fa9e4066Sahrens 832ecc2d604Sbonwick if (oldc != 0) { 833ecc2d604Sbonwick bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp)); 834ecc2d604Sbonwick kmem_free(vd->vdev_ms, oldc * sizeof (*mspp)); 835ecc2d604Sbonwick } 836fa9e4066Sahrens 837ecc2d604Sbonwick vd->vdev_ms = mspp; 838ecc2d604Sbonwick vd->vdev_ms_count = newc; 839fa9e4066Sahrens 840ecc2d604Sbonwick for (m = oldc; m < newc; m++) { 841ecc2d604Sbonwick space_map_obj_t smo = { 0, 0, 0 }; 842ecc2d604Sbonwick if (txg == 0) { 843ecc2d604Sbonwick uint64_t object = 0; 844ecc2d604Sbonwick error = dmu_read(mos, vd->vdev_ms_array, 8457bfdf011SNeil Perrin m * sizeof (uint64_t), sizeof (uint64_t), &object, 8467bfdf011SNeil Perrin DMU_READ_PREFETCH); 847ecc2d604Sbonwick if (error) 848ecc2d604Sbonwick return (error); 849ecc2d604Sbonwick if (object != 0) { 850ecc2d604Sbonwick dmu_buf_t *db; 851ecc2d604Sbonwick error = dmu_bonus_hold(mos, object, FTAG, &db); 852ecc2d604Sbonwick if (error) 853ecc2d604Sbonwick return (error); 8541934e92fSmaybee ASSERT3U(db->db_size, >=, sizeof (smo)); 8551934e92fSmaybee bcopy(db->db_data, &smo, sizeof (smo)); 856ecc2d604Sbonwick ASSERT3U(smo.smo_object, ==, object); 857ea8dc4b6Seschrock dmu_buf_rele(db, FTAG); 858fa9e4066Sahrens } 859fa9e4066Sahrens } 860ecc2d604Sbonwick vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo, 861ecc2d604Sbonwick m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg); 862fa9e4066Sahrens } 863fa9e4066Sahrens 864a1521560SJeff Bonwick if (txg == 0) 865a1521560SJeff Bonwick spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER); 866a1521560SJeff Bonwick 8673f9d6ad7SLin Ling /* 8683f9d6ad7SLin Ling * If the vdev is being removed we don't activate 8693f9d6ad7SLin Ling * the metaslabs since we want to ensure that no new 8703f9d6ad7SLin Ling * allocations are performed on this device. 
8713f9d6ad7SLin Ling */ 8723f9d6ad7SLin Ling if (oldc == 0 && !vd->vdev_removing) 873a1521560SJeff Bonwick metaslab_group_activate(vd->vdev_mg); 874a1521560SJeff Bonwick 875a1521560SJeff Bonwick if (txg == 0) 876a1521560SJeff Bonwick spa_config_exit(spa, SCL_ALLOC, FTAG); 877a1521560SJeff Bonwick 878ea8dc4b6Seschrock return (0); 879fa9e4066Sahrens } 880fa9e4066Sahrens 881fa9e4066Sahrens void 882fa9e4066Sahrens vdev_metaslab_fini(vdev_t *vd) 883fa9e4066Sahrens { 884fa9e4066Sahrens uint64_t m; 885fa9e4066Sahrens uint64_t count = vd->vdev_ms_count; 886fa9e4066Sahrens 887fa9e4066Sahrens if (vd->vdev_ms != NULL) { 888a1521560SJeff Bonwick metaslab_group_passivate(vd->vdev_mg); 889fa9e4066Sahrens for (m = 0; m < count; m++) 890ecc2d604Sbonwick if (vd->vdev_ms[m] != NULL) 891ecc2d604Sbonwick metaslab_fini(vd->vdev_ms[m]); 892fa9e4066Sahrens kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *)); 893fa9e4066Sahrens vd->vdev_ms = NULL; 894fa9e4066Sahrens } 895fa9e4066Sahrens } 896fa9e4066Sahrens 897e14bb325SJeff Bonwick typedef struct vdev_probe_stats { 898e14bb325SJeff Bonwick boolean_t vps_readable; 899e14bb325SJeff Bonwick boolean_t vps_writeable; 900e14bb325SJeff Bonwick int vps_flags; 901e14bb325SJeff Bonwick } vdev_probe_stats_t; 902e14bb325SJeff Bonwick 903e14bb325SJeff Bonwick static void 904e14bb325SJeff Bonwick vdev_probe_done(zio_t *zio) 9050a4e9518Sgw { 9068ad4d6ddSJeff Bonwick spa_t *spa = zio->io_spa; 907a3f829aeSBill Moore vdev_t *vd = zio->io_vd; 908e14bb325SJeff Bonwick vdev_probe_stats_t *vps = zio->io_private; 909a3f829aeSBill Moore 910a3f829aeSBill Moore ASSERT(vd->vdev_probe_zio != NULL); 911e14bb325SJeff Bonwick 912e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_READ) { 913e14bb325SJeff Bonwick if (zio->io_error == 0) 914e14bb325SJeff Bonwick vps->vps_readable = 1; 9158ad4d6ddSJeff Bonwick if (zio->io_error == 0 && spa_writeable(spa)) { 916a3f829aeSBill Moore zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd, 917e14bb325SJeff Bonwick zio->io_offset, zio->io_size, zio->io_data, 918e14bb325SJeff Bonwick ZIO_CHECKSUM_OFF, vdev_probe_done, vps, 919e14bb325SJeff Bonwick ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE)); 920e14bb325SJeff Bonwick } else { 921e14bb325SJeff Bonwick zio_buf_free(zio->io_data, zio->io_size); 922e14bb325SJeff Bonwick } 923e14bb325SJeff Bonwick } else if (zio->io_type == ZIO_TYPE_WRITE) { 924e14bb325SJeff Bonwick if (zio->io_error == 0) 925e14bb325SJeff Bonwick vps->vps_writeable = 1; 926e14bb325SJeff Bonwick zio_buf_free(zio->io_data, zio->io_size); 927e14bb325SJeff Bonwick } else if (zio->io_type == ZIO_TYPE_NULL) { 928a3f829aeSBill Moore zio_t *pio; 929e14bb325SJeff Bonwick 930e14bb325SJeff Bonwick vd->vdev_cant_read |= !vps->vps_readable; 931e14bb325SJeff Bonwick vd->vdev_cant_write |= !vps->vps_writeable; 932e14bb325SJeff Bonwick 933e14bb325SJeff Bonwick if (vdev_readable(vd) && 9348ad4d6ddSJeff Bonwick (vdev_writeable(vd) || !spa_writeable(spa))) { 935e14bb325SJeff Bonwick zio->io_error = 0; 936e14bb325SJeff Bonwick } else { 937e14bb325SJeff Bonwick ASSERT(zio->io_error != 0); 938e14bb325SJeff Bonwick zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE, 9398ad4d6ddSJeff Bonwick spa, vd, NULL, 0, 0); 940e14bb325SJeff Bonwick zio->io_error = ENXIO; 941e14bb325SJeff Bonwick } 942a3f829aeSBill Moore 943a3f829aeSBill Moore mutex_enter(&vd->vdev_probe_lock); 944a3f829aeSBill Moore ASSERT(vd->vdev_probe_zio == zio); 945a3f829aeSBill Moore vd->vdev_probe_zio = NULL; 946a3f829aeSBill Moore mutex_exit(&vd->vdev_probe_lock); 947a3f829aeSBill Moore 948a3f829aeSBill 
Moore while ((pio = zio_walk_parents(zio)) != NULL) 949a3f829aeSBill Moore if (!vdev_accessible(vd, pio)) 950a3f829aeSBill Moore pio->io_error = ENXIO; 951a3f829aeSBill Moore 952e14bb325SJeff Bonwick kmem_free(vps, sizeof (*vps)); 953e14bb325SJeff Bonwick } 954e14bb325SJeff Bonwick } 9550a4e9518Sgw 956e14bb325SJeff Bonwick /* 957e14bb325SJeff Bonwick * Determine whether this device is accessible by reading and writing 958e14bb325SJeff Bonwick * to several known locations: the pad regions of each vdev label 959e14bb325SJeff Bonwick * but the first (which we leave alone in case it contains a VTOC). 960e14bb325SJeff Bonwick */ 961e14bb325SJeff Bonwick zio_t * 962a3f829aeSBill Moore vdev_probe(vdev_t *vd, zio_t *zio) 963e14bb325SJeff Bonwick { 964e14bb325SJeff Bonwick spa_t *spa = vd->vdev_spa; 965a3f829aeSBill Moore vdev_probe_stats_t *vps = NULL; 966a3f829aeSBill Moore zio_t *pio; 967a3f829aeSBill Moore 968a3f829aeSBill Moore ASSERT(vd->vdev_ops->vdev_op_leaf); 9690a4e9518Sgw 970a3f829aeSBill Moore /* 971a3f829aeSBill Moore * Don't probe the probe. 972a3f829aeSBill Moore */ 973a3f829aeSBill Moore if (zio && (zio->io_flags & ZIO_FLAG_PROBE)) 974a3f829aeSBill Moore return (NULL); 975e14bb325SJeff Bonwick 976a3f829aeSBill Moore /* 977a3f829aeSBill Moore * To prevent 'probe storms' when a device fails, we create 978a3f829aeSBill Moore * just one probe i/o at a time. All zios that want to probe 979a3f829aeSBill Moore * this vdev will become parents of the probe io. 980a3f829aeSBill Moore */ 981a3f829aeSBill Moore mutex_enter(&vd->vdev_probe_lock); 982e14bb325SJeff Bonwick 983a3f829aeSBill Moore if ((pio = vd->vdev_probe_zio) == NULL) { 984a3f829aeSBill Moore vps = kmem_zalloc(sizeof (*vps), KM_SLEEP); 985a3f829aeSBill Moore 986a3f829aeSBill Moore vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | 987a3f829aeSBill Moore ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | 9888956713aSEric Schrock ZIO_FLAG_TRYHARD; 989a3f829aeSBill Moore 990a3f829aeSBill Moore if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) { 991a3f829aeSBill Moore /* 992a3f829aeSBill Moore * vdev_cant_read and vdev_cant_write can only 993a3f829aeSBill Moore * transition from TRUE to FALSE when we have the 994a3f829aeSBill Moore * SCL_ZIO lock as writer; otherwise they can only 995a3f829aeSBill Moore * transition from FALSE to TRUE. This ensures that 996a3f829aeSBill Moore * any zio looking at these values can assume that 997a3f829aeSBill Moore * failures persist for the life of the I/O. That's 998a3f829aeSBill Moore * important because when a device has intermittent 999a3f829aeSBill Moore * connectivity problems, we want to ensure that 1000a3f829aeSBill Moore * they're ascribed to the device (ENXIO) and not 1001a3f829aeSBill Moore * the zio (EIO). 1002a3f829aeSBill Moore * 1003a3f829aeSBill Moore * Since we hold SCL_ZIO as writer here, clear both 1004a3f829aeSBill Moore * values so the probe can reevaluate from first 1005a3f829aeSBill Moore * principles. 
1006a3f829aeSBill Moore */ 1007a3f829aeSBill Moore vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; 1008a3f829aeSBill Moore vd->vdev_cant_read = B_FALSE; 1009a3f829aeSBill Moore vd->vdev_cant_write = B_FALSE; 1010a3f829aeSBill Moore } 1011a3f829aeSBill Moore 1012a3f829aeSBill Moore vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd, 1013a3f829aeSBill Moore vdev_probe_done, vps, 1014a3f829aeSBill Moore vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE); 1015a3f829aeSBill Moore 101698d1cbfeSGeorge Wilson /* 101798d1cbfeSGeorge Wilson * We can't change the vdev state in this context, so we 101898d1cbfeSGeorge Wilson * kick off an async task to do it on our behalf. 101998d1cbfeSGeorge Wilson */ 1020a3f829aeSBill Moore if (zio != NULL) { 1021a3f829aeSBill Moore vd->vdev_probe_wanted = B_TRUE; 1022a3f829aeSBill Moore spa_async_request(spa, SPA_ASYNC_PROBE); 1023a3f829aeSBill Moore } 1024e14bb325SJeff Bonwick } 1025e14bb325SJeff Bonwick 1026a3f829aeSBill Moore if (zio != NULL) 1027a3f829aeSBill Moore zio_add_child(zio, pio); 1028e14bb325SJeff Bonwick 1029a3f829aeSBill Moore mutex_exit(&vd->vdev_probe_lock); 1030e14bb325SJeff Bonwick 1031a3f829aeSBill Moore if (vps == NULL) { 1032a3f829aeSBill Moore ASSERT(zio != NULL); 1033a3f829aeSBill Moore return (NULL); 1034a3f829aeSBill Moore } 1035e14bb325SJeff Bonwick 1036e14bb325SJeff Bonwick for (int l = 1; l < VDEV_LABELS; l++) { 1037a3f829aeSBill Moore zio_nowait(zio_read_phys(pio, vd, 1038e14bb325SJeff Bonwick vdev_label_offset(vd->vdev_psize, l, 1039f83ffe1aSLin Ling offsetof(vdev_label_t, vl_pad2)), 1040f83ffe1aSLin Ling VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE), 1041e14bb325SJeff Bonwick ZIO_CHECKSUM_OFF, vdev_probe_done, vps, 1042e14bb325SJeff Bonwick ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE)); 1043e14bb325SJeff Bonwick } 1044e14bb325SJeff Bonwick 1045a3f829aeSBill Moore if (zio == NULL) 1046a3f829aeSBill Moore return (pio); 1047a3f829aeSBill Moore 1048a3f829aeSBill Moore zio_nowait(pio); 1049a3f829aeSBill Moore return (NULL); 10500a4e9518Sgw } 10510a4e9518Sgw 1052f64c0e34SEric Taylor static void 1053f64c0e34SEric Taylor vdev_open_child(void *arg) 1054f64c0e34SEric Taylor { 1055f64c0e34SEric Taylor vdev_t *vd = arg; 1056f64c0e34SEric Taylor 1057f64c0e34SEric Taylor vd->vdev_open_thread = curthread; 1058f64c0e34SEric Taylor vd->vdev_open_error = vdev_open(vd); 1059f64c0e34SEric Taylor vd->vdev_open_thread = NULL; 1060f64c0e34SEric Taylor } 1061f64c0e34SEric Taylor 1062681d9761SEric Taylor boolean_t 1063681d9761SEric Taylor vdev_uses_zvols(vdev_t *vd) 1064681d9761SEric Taylor { 1065681d9761SEric Taylor if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR, 1066681d9761SEric Taylor strlen(ZVOL_DIR)) == 0) 1067681d9761SEric Taylor return (B_TRUE); 1068681d9761SEric Taylor for (int c = 0; c < vd->vdev_children; c++) 1069681d9761SEric Taylor if (vdev_uses_zvols(vd->vdev_child[c])) 1070681d9761SEric Taylor return (B_TRUE); 1071681d9761SEric Taylor return (B_FALSE); 1072681d9761SEric Taylor } 1073681d9761SEric Taylor 1074f64c0e34SEric Taylor void 1075f64c0e34SEric Taylor vdev_open_children(vdev_t *vd) 1076f64c0e34SEric Taylor { 1077f64c0e34SEric Taylor taskq_t *tq; 1078f64c0e34SEric Taylor int children = vd->vdev_children; 1079f64c0e34SEric Taylor 1080681d9761SEric Taylor /* 1081681d9761SEric Taylor * in order to handle pools on top of zvols, do the opens 1082681d9761SEric Taylor * in a single thread so that the same thread holds the 1083681d9761SEric Taylor * spa_namespace_lock 1084681d9761SEric Taylor */ 1085681d9761SEric Taylor if (vdev_uses_zvols(vd)) { 
1086681d9761SEric Taylor for (int c = 0; c < children; c++) 1087681d9761SEric Taylor vd->vdev_child[c]->vdev_open_error = 1088681d9761SEric Taylor vdev_open(vd->vdev_child[c]); 1089681d9761SEric Taylor return; 1090681d9761SEric Taylor } 1091f64c0e34SEric Taylor tq = taskq_create("vdev_open", children, minclsyspri, 1092f64c0e34SEric Taylor children, children, TASKQ_PREPOPULATE); 1093f64c0e34SEric Taylor 1094f64c0e34SEric Taylor for (int c = 0; c < children; c++) 1095f64c0e34SEric Taylor VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c], 1096f64c0e34SEric Taylor TQ_SLEEP) != NULL); 1097f64c0e34SEric Taylor 1098f64c0e34SEric Taylor taskq_destroy(tq); 1099f64c0e34SEric Taylor } 1100f64c0e34SEric Taylor 1101fa9e4066Sahrens /* 1102fa9e4066Sahrens * Prepare a virtual device for access. 1103fa9e4066Sahrens */ 1104fa9e4066Sahrens int 1105fa9e4066Sahrens vdev_open(vdev_t *vd) 1106fa9e4066Sahrens { 11078ad4d6ddSJeff Bonwick spa_t *spa = vd->vdev_spa; 1108fa9e4066Sahrens int error; 1109fa9e4066Sahrens uint64_t osize = 0; 1110*4263d13fSGeorge Wilson uint64_t max_osize = 0; 1111*4263d13fSGeorge Wilson uint64_t asize, max_asize, psize; 1112ecc2d604Sbonwick uint64_t ashift = 0; 1113fa9e4066Sahrens 1114f64c0e34SEric Taylor ASSERT(vd->vdev_open_thread == curthread || 1115f64c0e34SEric Taylor spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1116fa9e4066Sahrens ASSERT(vd->vdev_state == VDEV_STATE_CLOSED || 1117fa9e4066Sahrens vd->vdev_state == VDEV_STATE_CANT_OPEN || 1118fa9e4066Sahrens vd->vdev_state == VDEV_STATE_OFFLINE); 1119fa9e4066Sahrens 1120fa9e4066Sahrens vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1121e6ca193dSGeorge Wilson vd->vdev_cant_read = B_FALSE; 1122e6ca193dSGeorge Wilson vd->vdev_cant_write = B_FALSE; 1123573ca77eSGeorge Wilson vd->vdev_min_asize = vdev_get_min_asize(vd); 1124fa9e4066Sahrens 1125069f55e2SEric Schrock /* 1126069f55e2SEric Schrock * If this vdev is not removed, check its fault status. If it's 1127069f55e2SEric Schrock * faulted, bail out of the open. 1128069f55e2SEric Schrock */ 11293d7072f8Seschrock if (!vd->vdev_removed && vd->vdev_faulted) { 11303d7072f8Seschrock ASSERT(vd->vdev_children == 0); 1131069f55e2SEric Schrock ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || 1132069f55e2SEric Schrock vd->vdev_label_aux == VDEV_AUX_EXTERNAL); 11333d7072f8Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1134069f55e2SEric Schrock vd->vdev_label_aux); 11353d7072f8Seschrock return (ENXIO); 11363d7072f8Seschrock } else if (vd->vdev_offline) { 1137fa9e4066Sahrens ASSERT(vd->vdev_children == 0); 1138ea8dc4b6Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE); 1139fa9e4066Sahrens return (ENXIO); 1140fa9e4066Sahrens } 1141fa9e4066Sahrens 1142*4263d13fSGeorge Wilson error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift); 1143fa9e4066Sahrens 1144095bcd66SGeorge Wilson /* 1145095bcd66SGeorge Wilson * Reset the vdev_reopening flag so that we actually close 1146095bcd66SGeorge Wilson * the vdev on error. 
1147095bcd66SGeorge Wilson */ 1148095bcd66SGeorge Wilson vd->vdev_reopening = B_FALSE; 1149ea8dc4b6Seschrock if (zio_injection_enabled && error == 0) 11508956713aSEric Schrock error = zio_handle_device_injection(vd, NULL, ENXIO); 1151ea8dc4b6Seschrock 1152fa9e4066Sahrens if (error) { 11533d7072f8Seschrock if (vd->vdev_removed && 11543d7072f8Seschrock vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED) 11553d7072f8Seschrock vd->vdev_removed = B_FALSE; 11563d7072f8Seschrock 1157ea8dc4b6Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1158fa9e4066Sahrens vd->vdev_stat.vs_aux); 1159fa9e4066Sahrens return (error); 1160fa9e4066Sahrens } 1161fa9e4066Sahrens 11623d7072f8Seschrock vd->vdev_removed = B_FALSE; 11633d7072f8Seschrock 1164096d22d4SEric Schrock /* 1165096d22d4SEric Schrock * Recheck the faulted flag now that we have confirmed that 1166096d22d4SEric Schrock * the vdev is accessible. If we're faulted, bail. 1167096d22d4SEric Schrock */ 1168096d22d4SEric Schrock if (vd->vdev_faulted) { 1169096d22d4SEric Schrock ASSERT(vd->vdev_children == 0); 1170096d22d4SEric Schrock ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || 1171096d22d4SEric Schrock vd->vdev_label_aux == VDEV_AUX_EXTERNAL); 1172096d22d4SEric Schrock vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 1173096d22d4SEric Schrock vd->vdev_label_aux); 1174096d22d4SEric Schrock return (ENXIO); 1175096d22d4SEric Schrock } 1176096d22d4SEric Schrock 11773d7072f8Seschrock if (vd->vdev_degraded) { 11783d7072f8Seschrock ASSERT(vd->vdev_children == 0); 11793d7072f8Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, 11803d7072f8Seschrock VDEV_AUX_ERR_EXCEEDED); 11813d7072f8Seschrock } else { 1182069f55e2SEric Schrock vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0); 11833d7072f8Seschrock } 1184fa9e4066Sahrens 118588ecc943SGeorge Wilson /* 118688ecc943SGeorge Wilson * For hole or missing vdevs we just return success. 
118788ecc943SGeorge Wilson */ 118888ecc943SGeorge Wilson if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) 118988ecc943SGeorge Wilson return (0); 119088ecc943SGeorge Wilson 1191573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) { 1192ea8dc4b6Seschrock if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { 1193ea8dc4b6Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, 1194ea8dc4b6Seschrock VDEV_AUX_NONE); 1195ea8dc4b6Seschrock break; 1196ea8dc4b6Seschrock } 1197573ca77eSGeorge Wilson } 1198fa9e4066Sahrens 1199fa9e4066Sahrens osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t)); 1200*4263d13fSGeorge Wilson max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t)); 1201fa9e4066Sahrens 1202fa9e4066Sahrens if (vd->vdev_children == 0) { 1203fa9e4066Sahrens if (osize < SPA_MINDEVSIZE) { 1204ea8dc4b6Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1205ea8dc4b6Seschrock VDEV_AUX_TOO_SMALL); 1206fa9e4066Sahrens return (EOVERFLOW); 1207fa9e4066Sahrens } 1208fa9e4066Sahrens psize = osize; 1209fa9e4066Sahrens asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE); 1210*4263d13fSGeorge Wilson max_asize = max_osize - (VDEV_LABEL_START_SIZE + 1211*4263d13fSGeorge Wilson VDEV_LABEL_END_SIZE); 1212fa9e4066Sahrens } else { 1213ecc2d604Sbonwick if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - 1214fa9e4066Sahrens (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) { 1215ea8dc4b6Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1216ea8dc4b6Seschrock VDEV_AUX_TOO_SMALL); 1217fa9e4066Sahrens return (EOVERFLOW); 1218fa9e4066Sahrens } 1219fa9e4066Sahrens psize = 0; 1220fa9e4066Sahrens asize = osize; 1221*4263d13fSGeorge Wilson max_asize = max_osize; 1222fa9e4066Sahrens } 1223fa9e4066Sahrens 1224fa9e4066Sahrens vd->vdev_psize = psize; 1225fa9e4066Sahrens 1226573ca77eSGeorge Wilson /* 1227573ca77eSGeorge Wilson * Make sure the allocatable size hasn't shrunk. 1228573ca77eSGeorge Wilson */ 1229573ca77eSGeorge Wilson if (asize < vd->vdev_min_asize) { 1230573ca77eSGeorge Wilson vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1231573ca77eSGeorge Wilson VDEV_AUX_BAD_LABEL); 1232573ca77eSGeorge Wilson return (EINVAL); 1233573ca77eSGeorge Wilson } 1234573ca77eSGeorge Wilson 1235fa9e4066Sahrens if (vd->vdev_asize == 0) { 1236fa9e4066Sahrens /* 1237fa9e4066Sahrens * This is the first-ever open, so use the computed values. 1238ecc2d604Sbonwick * For testing purposes, a higher ashift can be requested. 1239fa9e4066Sahrens */ 1240fa9e4066Sahrens vd->vdev_asize = asize; 1241*4263d13fSGeorge Wilson vd->vdev_max_asize = max_asize; 1242ecc2d604Sbonwick vd->vdev_ashift = MAX(ashift, vd->vdev_ashift); 1243fa9e4066Sahrens } else { 1244fa9e4066Sahrens /* 1245fa9e4066Sahrens * Make sure the alignment requirement hasn't increased. 1246fa9e4066Sahrens */ 1247ecc2d604Sbonwick if (ashift > vd->vdev_top->vdev_ashift) { 1248ea8dc4b6Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1249ea8dc4b6Seschrock VDEV_AUX_BAD_LABEL); 1250fa9e4066Sahrens return (EINVAL); 1251fa9e4066Sahrens } 1252*4263d13fSGeorge Wilson vd->vdev_max_asize = max_asize; 1253573ca77eSGeorge Wilson } 1254fa9e4066Sahrens 1255573ca77eSGeorge Wilson /* 1256573ca77eSGeorge Wilson * If all children are healthy and the asize has increased, 1257573ca77eSGeorge Wilson * then we've experienced dynamic LUN growth. If automatic 1258573ca77eSGeorge Wilson * expansion is enabled then use the additional space. 
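 * (that is, when the vdev is being explicitly expanded or the pool's
 * 'autoexpand' property is enabled).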
1259573ca77eSGeorge Wilson */ 1260573ca77eSGeorge Wilson if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize && 1261573ca77eSGeorge Wilson (vd->vdev_expanding || spa->spa_autoexpand)) 1262573ca77eSGeorge Wilson vd->vdev_asize = asize; 1263fa9e4066Sahrens 1264573ca77eSGeorge Wilson vdev_set_min_asize(vd); 1265fa9e4066Sahrens 12660a4e9518Sgw /* 12670a4e9518Sgw * Ensure we can issue some IO before declaring the 12680a4e9518Sgw * vdev open for business. 12690a4e9518Sgw */ 1270e14bb325SJeff Bonwick if (vd->vdev_ops->vdev_op_leaf && 1271e14bb325SJeff Bonwick (error = zio_wait(vdev_probe(vd, NULL))) != 0) { 127298d1cbfeSGeorge Wilson vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, 127398d1cbfeSGeorge Wilson VDEV_AUX_ERR_EXCEEDED); 12740a4e9518Sgw return (error); 12750a4e9518Sgw } 12760a4e9518Sgw 1277088f3894Sahrens /* 1278088f3894Sahrens * If a leaf vdev has a DTL, and seems healthy, then kick off a 12798ad4d6ddSJeff Bonwick * resilver. But don't do this if we are doing a reopen for a scrub, 12808ad4d6ddSJeff Bonwick * since this would just restart the scrub we are already doing. 1281088f3894Sahrens */ 12828ad4d6ddSJeff Bonwick if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen && 12838ad4d6ddSJeff Bonwick vdev_resilver_needed(vd, NULL, NULL)) 12848ad4d6ddSJeff Bonwick spa_async_request(spa, SPA_ASYNC_RESILVER); 1285088f3894Sahrens 1286fa9e4066Sahrens return (0); 1287fa9e4066Sahrens } 1288fa9e4066Sahrens 1289560e6e96Seschrock /* 1290560e6e96Seschrock * Called once the vdevs are all opened, this routine validates the label 1291560e6e96Seschrock * contents. This needs to be done before vdev_load() so that we don't 12923d7072f8Seschrock * inadvertently do repair I/Os to the wrong device. 1293560e6e96Seschrock * 1294560e6e96Seschrock * This function will only return failure if one of the vdevs indicates that it 1295560e6e96Seschrock * has since been destroyed or exported. This is only possible if 1296560e6e96Seschrock * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state 1297560e6e96Seschrock * will be updated but the function will return 0. 1298560e6e96Seschrock */ 1299560e6e96Seschrock int 1300560e6e96Seschrock vdev_validate(vdev_t *vd) 1301560e6e96Seschrock { 1302560e6e96Seschrock spa_t *spa = vd->vdev_spa; 1303560e6e96Seschrock nvlist_t *label; 13041195e687SMark J Musante uint64_t guid = 0, top_guid; 1305560e6e96Seschrock uint64_t state; 1306560e6e96Seschrock 1307573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 1308560e6e96Seschrock if (vdev_validate(vd->vdev_child[c]) != 0) 13090bf246f5Smc return (EBADF); 1310560e6e96Seschrock 1311b5989ec7Seschrock /* 1312b5989ec7Seschrock * If the device has already failed, or was marked offline, don't do 1313b5989ec7Seschrock * any further validation. Otherwise, label I/O will fail and we will 1314b5989ec7Seschrock * overwrite the previous state. 1315b5989ec7Seschrock */ 1316e14bb325SJeff Bonwick if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) { 13171195e687SMark J Musante uint64_t aux_guid = 0; 13181195e687SMark J Musante nvlist_t *nvl; 1319560e6e96Seschrock 1320560e6e96Seschrock if ((label = vdev_label_read_config(vd)) == NULL) { 1321560e6e96Seschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 1322560e6e96Seschrock VDEV_AUX_BAD_LABEL); 1323560e6e96Seschrock return (0); 1324560e6e96Seschrock } 1325560e6e96Seschrock 13261195e687SMark J Musante /* 13271195e687SMark J Musante * Determine if this vdev has been split off into another 13281195e687SMark J Musante * pool. 
If so, then refuse to open it. 13291195e687SMark J Musante */ 13301195e687SMark J Musante if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID, 13311195e687SMark J Musante &aux_guid) == 0 && aux_guid == spa_guid(spa)) { 13321195e687SMark J Musante vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 13331195e687SMark J Musante VDEV_AUX_SPLIT_POOL); 13341195e687SMark J Musante nvlist_free(label); 13351195e687SMark J Musante return (0); 13361195e687SMark J Musante } 13371195e687SMark J Musante 1338560e6e96Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, 1339560e6e96Seschrock &guid) != 0 || guid != spa_guid(spa)) { 1340560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1341560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1342560e6e96Seschrock nvlist_free(label); 1343560e6e96Seschrock return (0); 1344560e6e96Seschrock } 1345560e6e96Seschrock 13461195e687SMark J Musante if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl) 13471195e687SMark J Musante != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID, 13481195e687SMark J Musante &aux_guid) != 0) 13491195e687SMark J Musante aux_guid = 0; 13501195e687SMark J Musante 1351e14bb325SJeff Bonwick /* 1352e14bb325SJeff Bonwick * If this vdev just became a top-level vdev because its 1353e14bb325SJeff Bonwick * sibling was detached, it will have adopted the parent's 1354e14bb325SJeff Bonwick * vdev guid -- but the label may or may not be on disk yet. 1355e14bb325SJeff Bonwick * Fortunately, either version of the label will have the 1356e14bb325SJeff Bonwick * same top guid, so if we're a top-level vdev, we can 1357e14bb325SJeff Bonwick * safely compare to that instead. 13581195e687SMark J Musante * 13591195e687SMark J Musante * If we split this vdev off instead, then we also check the 13601195e687SMark J Musante * original pool's guid. We don't want to consider the vdev 13611195e687SMark J Musante * corrupt if it is partway through a split operation. 1362e14bb325SJeff Bonwick */ 1363560e6e96Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, 1364e14bb325SJeff Bonwick &guid) != 0 || 1365e14bb325SJeff Bonwick nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, 1366e14bb325SJeff Bonwick &top_guid) != 0 || 13671195e687SMark J Musante ((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) && 1368e14bb325SJeff Bonwick (vd->vdev_guid != top_guid || vd != vd->vdev_top))) { 1369560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1370560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1371560e6e96Seschrock nvlist_free(label); 1372560e6e96Seschrock return (0); 1373560e6e96Seschrock } 1374560e6e96Seschrock 1375560e6e96Seschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, 1376560e6e96Seschrock &state) != 0) { 1377560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1378560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1379560e6e96Seschrock nvlist_free(label); 1380560e6e96Seschrock return (0); 1381560e6e96Seschrock } 1382560e6e96Seschrock 1383560e6e96Seschrock nvlist_free(label); 1384560e6e96Seschrock 1385bc758434SLin Ling /* 13864b964adaSGeorge Wilson * If this is a verbatim import, no need to check the 1387bc758434SLin Ling * state of the pool. 
1388bc758434SLin Ling */ 13894b964adaSGeorge Wilson if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) && 1390b16da2e2SGeorge Wilson spa_load_state(spa) == SPA_LOAD_OPEN && 1391bc758434SLin Ling state != POOL_STATE_ACTIVE) 13920bf246f5Smc return (EBADF); 1393560e6e96Seschrock 139451ece835Seschrock /* 139551ece835Seschrock * If we were able to open and validate a vdev that was 139651ece835Seschrock * previously marked permanently unavailable, clear that state 139751ece835Seschrock * now. 139851ece835Seschrock */ 139951ece835Seschrock if (vd->vdev_not_present) 140051ece835Seschrock vd->vdev_not_present = 0; 140151ece835Seschrock } 1402560e6e96Seschrock 1403560e6e96Seschrock return (0); 1404560e6e96Seschrock } 1405560e6e96Seschrock 1406fa9e4066Sahrens /* 1407fa9e4066Sahrens * Close a virtual device. 1408fa9e4066Sahrens */ 1409fa9e4066Sahrens void 1410fa9e4066Sahrens vdev_close(vdev_t *vd) 1411fa9e4066Sahrens { 14128ad4d6ddSJeff Bonwick spa_t *spa = vd->vdev_spa; 1413095bcd66SGeorge Wilson vdev_t *pvd = vd->vdev_parent; 14148ad4d6ddSJeff Bonwick 14158ad4d6ddSJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 14168ad4d6ddSJeff Bonwick 14171195e687SMark J Musante /* 14181195e687SMark J Musante * If our parent is reopening, then we are as well, unless we are 14191195e687SMark J Musante * going offline. 14201195e687SMark J Musante */ 1421095bcd66SGeorge Wilson if (pvd != NULL && pvd->vdev_reopening) 14221195e687SMark J Musante vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); 1423095bcd66SGeorge Wilson 1424fa9e4066Sahrens vd->vdev_ops->vdev_op_close(vd); 1425fa9e4066Sahrens 14263d7072f8Seschrock vdev_cache_purge(vd); 1427fa9e4066Sahrens 1428560e6e96Seschrock /* 1429573ca77eSGeorge Wilson * We record the previous state before we close it, so that if we are 1430560e6e96Seschrock * doing a reopen(), we don't generate FMA ereports if we notice that 1431560e6e96Seschrock * it's still faulted. 
1432560e6e96Seschrock */ 1433560e6e96Seschrock vd->vdev_prevstate = vd->vdev_state; 1434560e6e96Seschrock 1435fa9e4066Sahrens if (vd->vdev_offline) 1436fa9e4066Sahrens vd->vdev_state = VDEV_STATE_OFFLINE; 1437fa9e4066Sahrens else 1438fa9e4066Sahrens vd->vdev_state = VDEV_STATE_CLOSED; 1439ea8dc4b6Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 1440fa9e4066Sahrens } 1441fa9e4066Sahrens 1442dcba9f3fSGeorge Wilson void 1443dcba9f3fSGeorge Wilson vdev_hold(vdev_t *vd) 1444dcba9f3fSGeorge Wilson { 1445dcba9f3fSGeorge Wilson spa_t *spa = vd->vdev_spa; 1446dcba9f3fSGeorge Wilson 1447dcba9f3fSGeorge Wilson ASSERT(spa_is_root(spa)); 1448dcba9f3fSGeorge Wilson if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1449dcba9f3fSGeorge Wilson return; 1450dcba9f3fSGeorge Wilson 1451dcba9f3fSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 1452dcba9f3fSGeorge Wilson vdev_hold(vd->vdev_child[c]); 1453dcba9f3fSGeorge Wilson 1454dcba9f3fSGeorge Wilson if (vd->vdev_ops->vdev_op_leaf) 1455dcba9f3fSGeorge Wilson vd->vdev_ops->vdev_op_hold(vd); 1456dcba9f3fSGeorge Wilson } 1457dcba9f3fSGeorge Wilson 1458dcba9f3fSGeorge Wilson void 1459dcba9f3fSGeorge Wilson vdev_rele(vdev_t *vd) 1460dcba9f3fSGeorge Wilson { 1461dcba9f3fSGeorge Wilson spa_t *spa = vd->vdev_spa; 1462dcba9f3fSGeorge Wilson 1463dcba9f3fSGeorge Wilson ASSERT(spa_is_root(spa)); 1464dcba9f3fSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 1465dcba9f3fSGeorge Wilson vdev_rele(vd->vdev_child[c]); 1466dcba9f3fSGeorge Wilson 1467dcba9f3fSGeorge Wilson if (vd->vdev_ops->vdev_op_leaf) 1468dcba9f3fSGeorge Wilson vd->vdev_ops->vdev_op_rele(vd); 1469dcba9f3fSGeorge Wilson } 1470dcba9f3fSGeorge Wilson 1471095bcd66SGeorge Wilson /* 1472095bcd66SGeorge Wilson * Reopen all interior vdevs and any unopened leaves. We don't actually 1473095bcd66SGeorge Wilson * reopen leaf vdevs which had previously been opened as they might deadlock 1474095bcd66SGeorge Wilson * on the spa_config_lock. Instead we only obtain the leaf's physical size. 1475095bcd66SGeorge Wilson * If the leaf has never been opened then open it, as usual. 1476095bcd66SGeorge Wilson */ 1477fa9e4066Sahrens void 1478ea8dc4b6Seschrock vdev_reopen(vdev_t *vd) 1479fa9e4066Sahrens { 1480ea8dc4b6Seschrock spa_t *spa = vd->vdev_spa; 1481fa9e4066Sahrens 1482e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 1483ea8dc4b6Seschrock 14841195e687SMark J Musante /* set the reopening flag unless we're taking the vdev offline */ 14851195e687SMark J Musante vd->vdev_reopening = !vd->vdev_offline; 1486fa9e4066Sahrens vdev_close(vd); 1487fa9e4066Sahrens (void) vdev_open(vd); 1488fa9e4066Sahrens 148939c23413Seschrock /* 149039c23413Seschrock * Call vdev_validate() here to make sure we have the same device. 149139c23413Seschrock * Otherwise, a device with an invalid label could be successfully 149239c23413Seschrock * opened in response to vdev_reopen(). 149339c23413Seschrock */ 1494c5904d13Seschrock if (vd->vdev_aux) { 1495c5904d13Seschrock (void) vdev_validate_aux(vd); 1496e14bb325SJeff Bonwick if (vdev_readable(vd) && vdev_writeable(vd) && 14976809eb4eSEric Schrock vd->vdev_aux == &spa->spa_l2cache && 1498573ca77eSGeorge Wilson !l2arc_vdev_present(vd)) 1499573ca77eSGeorge Wilson l2arc_add_vdev(spa, vd); 1500c5904d13Seschrock } else { 1501c5904d13Seschrock (void) vdev_validate(vd); 1502c5904d13Seschrock } 150339c23413Seschrock 1504fa9e4066Sahrens /* 15053d7072f8Seschrock * Reassess parent vdev's health. 
1506fa9e4066Sahrens */ 15073d7072f8Seschrock vdev_propagate_state(vd); 1508fa9e4066Sahrens } 1509fa9e4066Sahrens 1510fa9e4066Sahrens int 151199653d4eSeschrock vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) 1512fa9e4066Sahrens { 1513fa9e4066Sahrens int error; 1514fa9e4066Sahrens 1515fa9e4066Sahrens /* 1516fa9e4066Sahrens * Normally, partial opens (e.g. of a mirror) are allowed. 1517fa9e4066Sahrens * For a create, however, we want to fail the request if 1518fa9e4066Sahrens * there are any components we can't open. 1519fa9e4066Sahrens */ 1520fa9e4066Sahrens error = vdev_open(vd); 1521fa9e4066Sahrens 1522fa9e4066Sahrens if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { 1523fa9e4066Sahrens vdev_close(vd); 1524fa9e4066Sahrens return (error ? error : ENXIO); 1525fa9e4066Sahrens } 1526fa9e4066Sahrens 1527fa9e4066Sahrens /* 1528fa9e4066Sahrens * Recursively initialize all labels. 1529fa9e4066Sahrens */ 153039c23413Seschrock if ((error = vdev_label_init(vd, txg, isreplacing ? 153139c23413Seschrock VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { 1532fa9e4066Sahrens vdev_close(vd); 1533fa9e4066Sahrens return (error); 1534fa9e4066Sahrens } 1535fa9e4066Sahrens 1536fa9e4066Sahrens return (0); 1537fa9e4066Sahrens } 1538fa9e4066Sahrens 15390e34b6a7Sbonwick void 1540573ca77eSGeorge Wilson vdev_metaslab_set_size(vdev_t *vd) 1541fa9e4066Sahrens { 1542fa9e4066Sahrens /* 1543fa9e4066Sahrens * Aim for roughly 200 metaslabs per vdev. 1544fa9e4066Sahrens */ 1545fa9e4066Sahrens vd->vdev_ms_shift = highbit(vd->vdev_asize / 200); 1546fa9e4066Sahrens vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT); 1547fa9e4066Sahrens } 1548fa9e4066Sahrens 1549fa9e4066Sahrens void 1550ecc2d604Sbonwick vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) 1551fa9e4066Sahrens { 1552ecc2d604Sbonwick ASSERT(vd == vd->vdev_top); 155388ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 1554ecc2d604Sbonwick ASSERT(ISP2(flags)); 1555f9af39baSGeorge Wilson ASSERT(spa_writeable(vd->vdev_spa)); 1556fa9e4066Sahrens 1557ecc2d604Sbonwick if (flags & VDD_METASLAB) 1558ecc2d604Sbonwick (void) txg_list_add(&vd->vdev_ms_list, arg, txg); 1559ecc2d604Sbonwick 1560ecc2d604Sbonwick if (flags & VDD_DTL) 1561ecc2d604Sbonwick (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); 1562ecc2d604Sbonwick 1563ecc2d604Sbonwick (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); 1564fa9e4066Sahrens } 1565fa9e4066Sahrens 15668ad4d6ddSJeff Bonwick /* 15678ad4d6ddSJeff Bonwick * DTLs. 15688ad4d6ddSJeff Bonwick * 15698ad4d6ddSJeff Bonwick * A vdev's DTL (dirty time log) is the set of transaction groups for which 15709fb35debSEric Taylor * the vdev has less than perfect replication. There are four kinds of DTL: 15718ad4d6ddSJeff Bonwick * 15728ad4d6ddSJeff Bonwick * DTL_MISSING: txgs for which the vdev has no valid copies of the data 15738ad4d6ddSJeff Bonwick * 15748ad4d6ddSJeff Bonwick * DTL_PARTIAL: txgs for which data is available, but not fully replicated 15758ad4d6ddSJeff Bonwick * 15768ad4d6ddSJeff Bonwick * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon 15778ad4d6ddSJeff Bonwick * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of 15788ad4d6ddSJeff Bonwick * txgs that was scrubbed. 15798ad4d6ddSJeff Bonwick * 15808ad4d6ddSJeff Bonwick * DTL_OUTAGE: txgs which cannot currently be read, whether due to 15818ad4d6ddSJeff Bonwick * persistent errors or just some device being offline. 
15828ad4d6ddSJeff Bonwick * Unlike the other three, the DTL_OUTAGE map is not generally 15838ad4d6ddSJeff Bonwick * maintained; it's only computed when needed, typically to 15848ad4d6ddSJeff Bonwick * determine whether a device can be detached. 15858ad4d6ddSJeff Bonwick * 15868ad4d6ddSJeff Bonwick * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device 15878ad4d6ddSJeff Bonwick * either has the data or it doesn't. 15888ad4d6ddSJeff Bonwick * 15898ad4d6ddSJeff Bonwick * For interior vdevs such as mirror and RAID-Z the picture is more complex. 15908ad4d6ddSJeff Bonwick * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because 15918ad4d6ddSJeff Bonwick * if any child is less than fully replicated, then so is its parent. 15928ad4d6ddSJeff Bonwick * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, 15938ad4d6ddSJeff Bonwick * comprising only those txgs which appear in 'maxfaults' or more children; 15948ad4d6ddSJeff Bonwick * those are the txgs we don't have enough replication to read. For example, 15958ad4d6ddSJeff Bonwick * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); 15968ad4d6ddSJeff Bonwick * thus, its DTL_MISSING consists of the set of txgs that appear in more than 15978ad4d6ddSJeff Bonwick * two child DTL_MISSING maps. 15988ad4d6ddSJeff Bonwick * 15998ad4d6ddSJeff Bonwick * It should be clear from the above that to compute the DTLs and outage maps 16008ad4d6ddSJeff Bonwick * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. 16018ad4d6ddSJeff Bonwick * Therefore, that is all we keep on disk. When loading the pool, or after 16028ad4d6ddSJeff Bonwick * a configuration change, we generate all other DTLs from first principles. 16038ad4d6ddSJeff Bonwick */ 1604fa9e4066Sahrens void 16058ad4d6ddSJeff Bonwick vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1606fa9e4066Sahrens { 16078ad4d6ddSJeff Bonwick space_map_t *sm = &vd->vdev_dtl[t]; 16088ad4d6ddSJeff Bonwick 16098ad4d6ddSJeff Bonwick ASSERT(t < DTL_TYPES); 16108ad4d6ddSJeff Bonwick ASSERT(vd != vd->vdev_spa->spa_root_vdev); 1611f9af39baSGeorge Wilson ASSERT(spa_writeable(vd->vdev_spa)); 16128ad4d6ddSJeff Bonwick 1613fa9e4066Sahrens mutex_enter(sm->sm_lock); 1614fa9e4066Sahrens if (!space_map_contains(sm, txg, size)) 1615fa9e4066Sahrens space_map_add(sm, txg, size); 1616fa9e4066Sahrens mutex_exit(sm->sm_lock); 1617fa9e4066Sahrens } 1618fa9e4066Sahrens 16198ad4d6ddSJeff Bonwick boolean_t 16208ad4d6ddSJeff Bonwick vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) 1621fa9e4066Sahrens { 16228ad4d6ddSJeff Bonwick space_map_t *sm = &vd->vdev_dtl[t]; 16238ad4d6ddSJeff Bonwick boolean_t dirty = B_FALSE; 1624fa9e4066Sahrens 16258ad4d6ddSJeff Bonwick ASSERT(t < DTL_TYPES); 16268ad4d6ddSJeff Bonwick ASSERT(vd != vd->vdev_spa->spa_root_vdev); 1627fa9e4066Sahrens 1628fa9e4066Sahrens mutex_enter(sm->sm_lock); 16298ad4d6ddSJeff Bonwick if (sm->sm_space != 0) 16308ad4d6ddSJeff Bonwick dirty = space_map_contains(sm, txg, size); 1631fa9e4066Sahrens mutex_exit(sm->sm_lock); 1632fa9e4066Sahrens 1633fa9e4066Sahrens return (dirty); 1634fa9e4066Sahrens } 1635fa9e4066Sahrens 16368ad4d6ddSJeff Bonwick boolean_t 16378ad4d6ddSJeff Bonwick vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) 16388ad4d6ddSJeff Bonwick { 16398ad4d6ddSJeff Bonwick space_map_t *sm = &vd->vdev_dtl[t]; 16408ad4d6ddSJeff Bonwick boolean_t empty; 16418ad4d6ddSJeff Bonwick 16428ad4d6ddSJeff Bonwick mutex_enter(sm->sm_lock); 
16438ad4d6ddSJeff Bonwick empty = (sm->sm_space == 0); 16448ad4d6ddSJeff Bonwick mutex_exit(&sm->sm_lock); 16458ad4d6ddSJeff Bonwick 16468ad4d6ddSJeff Bonwick return (empty); 16478ad4d6ddSJeff Bonwick } 16488ad4d6ddSJeff Bonwick 1649fa9e4066Sahrens /* 1650fa9e4066Sahrens * Reassess DTLs after a config change or scrub completion. 1651fa9e4066Sahrens */ 1652fa9e4066Sahrens void 1653fa9e4066Sahrens vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) 1654fa9e4066Sahrens { 1655ea8dc4b6Seschrock spa_t *spa = vd->vdev_spa; 16568ad4d6ddSJeff Bonwick avl_tree_t reftree; 16578ad4d6ddSJeff Bonwick int minref; 1658fa9e4066Sahrens 16598ad4d6ddSJeff Bonwick ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1660fa9e4066Sahrens 16618ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) 16628ad4d6ddSJeff Bonwick vdev_dtl_reassess(vd->vdev_child[c], txg, 16638ad4d6ddSJeff Bonwick scrub_txg, scrub_done); 16648ad4d6ddSJeff Bonwick 1665b24ab676SJeff Bonwick if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux) 16668ad4d6ddSJeff Bonwick return; 16678ad4d6ddSJeff Bonwick 16688ad4d6ddSJeff Bonwick if (vd->vdev_ops->vdev_op_leaf) { 16693f9d6ad7SLin Ling dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; 16703f9d6ad7SLin Ling 1671fa9e4066Sahrens mutex_enter(&vd->vdev_dtl_lock); 1672088f3894Sahrens if (scrub_txg != 0 && 16733f9d6ad7SLin Ling (spa->spa_scrub_started || 16743f9d6ad7SLin Ling (scn && scn->scn_phys.scn_errors == 0))) { 1675088f3894Sahrens /* 1676088f3894Sahrens * We completed a scrub up to scrub_txg. If we 1677088f3894Sahrens * did it without rebooting, then the scrub dtl 1678088f3894Sahrens * will be valid, so excise the old region and 1679088f3894Sahrens * fold in the scrub dtl. Otherwise, leave the 1680088f3894Sahrens * dtl as-is if there was an error. 16818ad4d6ddSJeff Bonwick * 16828ad4d6ddSJeff Bonwick * There's a little trick here: to excise the beginning 16838ad4d6ddSJeff Bonwick * of the DTL_MISSING map, we put it into a reference 16848ad4d6ddSJeff Bonwick * tree and then add a segment with refcnt -1 that 16858ad4d6ddSJeff Bonwick * covers the range [0, scrub_txg). This means 16868ad4d6ddSJeff Bonwick * that each txg in that range has refcnt -1 or 0. 16878ad4d6ddSJeff Bonwick * We then add DTL_SCRUB with a refcnt of 2, so that 16888ad4d6ddSJeff Bonwick * entries in the range [0, scrub_txg) will have a 16898ad4d6ddSJeff Bonwick * positive refcnt -- either 1 or 2. We then convert 16908ad4d6ddSJeff Bonwick * the reference tree into the new DTL_MISSING map.
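 * For example, a txg below scrub_txg that was in the old DTL_MISSING but does not appear in DTL_SCRUB nets out to 1 - 1 = 0 references and is excised (the scrub repaired it), while a txg that also appears in DTL_SCRUB nets out to 1 - 1 + 2 = 2 and therefore survives into the new DTL_MISSING map.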
1691088f3894Sahrens */ 16928ad4d6ddSJeff Bonwick space_map_ref_create(&reftree); 16938ad4d6ddSJeff Bonwick space_map_ref_add_map(&reftree, 16948ad4d6ddSJeff Bonwick &vd->vdev_dtl[DTL_MISSING], 1); 16958ad4d6ddSJeff Bonwick space_map_ref_add_seg(&reftree, 0, scrub_txg, -1); 16968ad4d6ddSJeff Bonwick space_map_ref_add_map(&reftree, 16978ad4d6ddSJeff Bonwick &vd->vdev_dtl[DTL_SCRUB], 2); 16988ad4d6ddSJeff Bonwick space_map_ref_generate_map(&reftree, 16998ad4d6ddSJeff Bonwick &vd->vdev_dtl[DTL_MISSING], 1); 17008ad4d6ddSJeff Bonwick space_map_ref_destroy(&reftree); 1701fa9e4066Sahrens } 17028ad4d6ddSJeff Bonwick space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 17038ad4d6ddSJeff Bonwick space_map_walk(&vd->vdev_dtl[DTL_MISSING], 17048ad4d6ddSJeff Bonwick space_map_add, &vd->vdev_dtl[DTL_PARTIAL]); 1705fa9e4066Sahrens if (scrub_done) 17068ad4d6ddSJeff Bonwick space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 17078ad4d6ddSJeff Bonwick space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 17088ad4d6ddSJeff Bonwick if (!vdev_readable(vd)) 17098ad4d6ddSJeff Bonwick space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 17108ad4d6ddSJeff Bonwick else 17118ad4d6ddSJeff Bonwick space_map_walk(&vd->vdev_dtl[DTL_MISSING], 17128ad4d6ddSJeff Bonwick space_map_add, &vd->vdev_dtl[DTL_OUTAGE]); 1713fa9e4066Sahrens mutex_exit(&vd->vdev_dtl_lock); 1714088f3894Sahrens 1715ecc2d604Sbonwick if (txg != 0) 1716ecc2d604Sbonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 1717fa9e4066Sahrens return; 1718fa9e4066Sahrens } 1719fa9e4066Sahrens 1720fa9e4066Sahrens mutex_enter(&vd->vdev_dtl_lock); 17218ad4d6ddSJeff Bonwick for (int t = 0; t < DTL_TYPES; t++) { 172299bb17e2SEric Taylor /* account for child's outage in parent's missing map */ 172399bb17e2SEric Taylor int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; 17248ad4d6ddSJeff Bonwick if (t == DTL_SCRUB) 17258ad4d6ddSJeff Bonwick continue; /* leaf vdevs only */ 17268ad4d6ddSJeff Bonwick if (t == DTL_PARTIAL) 17278ad4d6ddSJeff Bonwick minref = 1; /* i.e. 
non-zero */ 17288ad4d6ddSJeff Bonwick else if (vd->vdev_nparity != 0) 17298ad4d6ddSJeff Bonwick minref = vd->vdev_nparity + 1; /* RAID-Z */ 17308ad4d6ddSJeff Bonwick else 17318ad4d6ddSJeff Bonwick minref = vd->vdev_children; /* any kind of mirror */ 17328ad4d6ddSJeff Bonwick space_map_ref_create(&reftree); 17338ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) { 17348ad4d6ddSJeff Bonwick vdev_t *cvd = vd->vdev_child[c]; 17358ad4d6ddSJeff Bonwick mutex_enter(&cvd->vdev_dtl_lock); 173699bb17e2SEric Taylor space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1); 17378ad4d6ddSJeff Bonwick mutex_exit(&cvd->vdev_dtl_lock); 17388ad4d6ddSJeff Bonwick } 17398ad4d6ddSJeff Bonwick space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref); 17408ad4d6ddSJeff Bonwick space_map_ref_destroy(&reftree); 1741fa9e4066Sahrens } 17428ad4d6ddSJeff Bonwick mutex_exit(&vd->vdev_dtl_lock); 1743fa9e4066Sahrens } 1744fa9e4066Sahrens 1745fa9e4066Sahrens static int 1746fa9e4066Sahrens vdev_dtl_load(vdev_t *vd) 1747fa9e4066Sahrens { 1748fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 17498ad4d6ddSJeff Bonwick space_map_obj_t *smo = &vd->vdev_dtl_smo; 1750ecc2d604Sbonwick objset_t *mos = spa->spa_meta_objset; 1751fa9e4066Sahrens dmu_buf_t *db; 1752fa9e4066Sahrens int error; 1753fa9e4066Sahrens 1754fa9e4066Sahrens ASSERT(vd->vdev_children == 0); 1755fa9e4066Sahrens 1756fa9e4066Sahrens if (smo->smo_object == 0) 1757fa9e4066Sahrens return (0); 1758fa9e4066Sahrens 175988ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 176088ecc943SGeorge Wilson 1761ecc2d604Sbonwick if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) 1762ea8dc4b6Seschrock return (error); 1763ecc2d604Sbonwick 17641934e92fSmaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 17651934e92fSmaybee bcopy(db->db_data, smo, sizeof (*smo)); 1766ea8dc4b6Seschrock dmu_buf_rele(db, FTAG); 1767fa9e4066Sahrens 1768fa9e4066Sahrens mutex_enter(&vd->vdev_dtl_lock); 17698ad4d6ddSJeff Bonwick error = space_map_load(&vd->vdev_dtl[DTL_MISSING], 17708ad4d6ddSJeff Bonwick NULL, SM_ALLOC, smo, mos); 1771fa9e4066Sahrens mutex_exit(&vd->vdev_dtl_lock); 1772fa9e4066Sahrens 1773fa9e4066Sahrens return (error); 1774fa9e4066Sahrens } 1775fa9e4066Sahrens 1776fa9e4066Sahrens void 1777fa9e4066Sahrens vdev_dtl_sync(vdev_t *vd, uint64_t txg) 1778fa9e4066Sahrens { 1779fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 17808ad4d6ddSJeff Bonwick space_map_obj_t *smo = &vd->vdev_dtl_smo; 17818ad4d6ddSJeff Bonwick space_map_t *sm = &vd->vdev_dtl[DTL_MISSING]; 1782ecc2d604Sbonwick objset_t *mos = spa->spa_meta_objset; 1783fa9e4066Sahrens space_map_t smsync; 1784fa9e4066Sahrens kmutex_t smlock; 1785fa9e4066Sahrens dmu_buf_t *db; 1786fa9e4066Sahrens dmu_tx_t *tx; 1787fa9e4066Sahrens 178888ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 178988ecc943SGeorge Wilson 1790fa9e4066Sahrens tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 1791fa9e4066Sahrens 1792fa9e4066Sahrens if (vd->vdev_detached) { 1793fa9e4066Sahrens if (smo->smo_object != 0) { 1794ecc2d604Sbonwick int err = dmu_object_free(mos, smo->smo_object, tx); 1795fa9e4066Sahrens ASSERT3U(err, ==, 0); 1796fa9e4066Sahrens smo->smo_object = 0; 1797fa9e4066Sahrens } 1798fa9e4066Sahrens dmu_tx_commit(tx); 1799fa9e4066Sahrens return; 1800fa9e4066Sahrens } 1801fa9e4066Sahrens 1802fa9e4066Sahrens if (smo->smo_object == 0) { 1803fa9e4066Sahrens ASSERT(smo->smo_objsize == 0); 1804fa9e4066Sahrens ASSERT(smo->smo_alloc == 0); 1805ecc2d604Sbonwick smo->smo_object = dmu_object_alloc(mos, 1806fa9e4066Sahrens DMU_OT_SPACE_MAP, 1 << 
SPACE_MAP_BLOCKSHIFT, 1807fa9e4066Sahrens DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); 1808fa9e4066Sahrens ASSERT(smo->smo_object != 0); 1809fa9e4066Sahrens vdev_config_dirty(vd->vdev_top); 1810fa9e4066Sahrens } 1811fa9e4066Sahrens 1812fa9e4066Sahrens mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); 1813fa9e4066Sahrens 1814fa9e4066Sahrens space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, 1815fa9e4066Sahrens &smlock); 1816fa9e4066Sahrens 1817fa9e4066Sahrens mutex_enter(&smlock); 1818fa9e4066Sahrens 1819fa9e4066Sahrens mutex_enter(&vd->vdev_dtl_lock); 1820ecc2d604Sbonwick space_map_walk(sm, space_map_add, &smsync); 1821fa9e4066Sahrens mutex_exit(&vd->vdev_dtl_lock); 1822fa9e4066Sahrens 1823ecc2d604Sbonwick space_map_truncate(smo, mos, tx); 1824ecc2d604Sbonwick space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); 1825fa9e4066Sahrens 1826fa9e4066Sahrens space_map_destroy(&smsync); 1827fa9e4066Sahrens 1828fa9e4066Sahrens mutex_exit(&smlock); 1829fa9e4066Sahrens mutex_destroy(&smlock); 1830fa9e4066Sahrens 1831ecc2d604Sbonwick VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); 1832fa9e4066Sahrens dmu_buf_will_dirty(db, tx); 18331934e92fSmaybee ASSERT3U(db->db_size, >=, sizeof (*smo)); 18341934e92fSmaybee bcopy(smo, db->db_data, sizeof (*smo)); 1835ea8dc4b6Seschrock dmu_buf_rele(db, FTAG); 1836fa9e4066Sahrens 1837fa9e4066Sahrens dmu_tx_commit(tx); 1838fa9e4066Sahrens } 1839fa9e4066Sahrens 18408ad4d6ddSJeff Bonwick /* 18418ad4d6ddSJeff Bonwick * Determine whether the specified vdev can be offlined/detached/removed 18428ad4d6ddSJeff Bonwick * without losing data. 18438ad4d6ddSJeff Bonwick */ 18448ad4d6ddSJeff Bonwick boolean_t 18458ad4d6ddSJeff Bonwick vdev_dtl_required(vdev_t *vd) 18468ad4d6ddSJeff Bonwick { 18478ad4d6ddSJeff Bonwick spa_t *spa = vd->vdev_spa; 18488ad4d6ddSJeff Bonwick vdev_t *tvd = vd->vdev_top; 18498ad4d6ddSJeff Bonwick uint8_t cant_read = vd->vdev_cant_read; 18508ad4d6ddSJeff Bonwick boolean_t required; 18518ad4d6ddSJeff Bonwick 18528ad4d6ddSJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 18538ad4d6ddSJeff Bonwick 18548ad4d6ddSJeff Bonwick if (vd == spa->spa_root_vdev || vd == tvd) 18558ad4d6ddSJeff Bonwick return (B_TRUE); 18568ad4d6ddSJeff Bonwick 18578ad4d6ddSJeff Bonwick /* 18588ad4d6ddSJeff Bonwick * Temporarily mark the device as unreadable, and then determine 18598ad4d6ddSJeff Bonwick * whether this results in any DTL outages in the top-level vdev. 18608ad4d6ddSJeff Bonwick * If not, we can safely offline/detach/remove the device. 18618ad4d6ddSJeff Bonwick */ 18628ad4d6ddSJeff Bonwick vd->vdev_cant_read = B_TRUE; 18638ad4d6ddSJeff Bonwick vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 18648ad4d6ddSJeff Bonwick required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 18658ad4d6ddSJeff Bonwick vd->vdev_cant_read = cant_read; 18668ad4d6ddSJeff Bonwick vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 18678ad4d6ddSJeff Bonwick 1868cb04b873SMark J Musante if (!required && zio_injection_enabled) 1869cb04b873SMark J Musante required = !!zio_handle_device_injection(vd, NULL, ECHILD); 1870cb04b873SMark J Musante 18718ad4d6ddSJeff Bonwick return (required); 18728ad4d6ddSJeff Bonwick } 18738ad4d6ddSJeff Bonwick 1874088f3894Sahrens /* 1875088f3894Sahrens * Determine if resilver is needed, and if so the txg range. 
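 * When resilvering is needed and minp/maxp are non-NULL, they are set to bound the txgs recorded in the leaf vdevs' DTL_MISSING maps.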
1876088f3894Sahrens */ 1877088f3894Sahrens boolean_t 1878088f3894Sahrens vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) 1879088f3894Sahrens { 1880088f3894Sahrens boolean_t needed = B_FALSE; 1881088f3894Sahrens uint64_t thismin = UINT64_MAX; 1882088f3894Sahrens uint64_t thismax = 0; 1883088f3894Sahrens 1884088f3894Sahrens if (vd->vdev_children == 0) { 1885088f3894Sahrens mutex_enter(&vd->vdev_dtl_lock); 18868ad4d6ddSJeff Bonwick if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 && 18878ad4d6ddSJeff Bonwick vdev_writeable(vd)) { 1888088f3894Sahrens space_seg_t *ss; 1889088f3894Sahrens 18908ad4d6ddSJeff Bonwick ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root); 1891088f3894Sahrens thismin = ss->ss_start - 1; 18928ad4d6ddSJeff Bonwick ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root); 1893088f3894Sahrens thismax = ss->ss_end; 1894088f3894Sahrens needed = B_TRUE; 1895088f3894Sahrens } 1896088f3894Sahrens mutex_exit(&vd->vdev_dtl_lock); 1897088f3894Sahrens } else { 18988ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) { 1899088f3894Sahrens vdev_t *cvd = vd->vdev_child[c]; 1900088f3894Sahrens uint64_t cmin, cmax; 1901088f3894Sahrens 1902088f3894Sahrens if (vdev_resilver_needed(cvd, &cmin, &cmax)) { 1903088f3894Sahrens thismin = MIN(thismin, cmin); 1904088f3894Sahrens thismax = MAX(thismax, cmax); 1905088f3894Sahrens needed = B_TRUE; 1906088f3894Sahrens } 1907088f3894Sahrens } 1908088f3894Sahrens } 1909088f3894Sahrens 1910088f3894Sahrens if (needed && minp) { 1911088f3894Sahrens *minp = thismin; 1912088f3894Sahrens *maxp = thismax; 1913088f3894Sahrens } 1914088f3894Sahrens return (needed); 1915088f3894Sahrens } 1916088f3894Sahrens 1917560e6e96Seschrock void 1918ea8dc4b6Seschrock vdev_load(vdev_t *vd) 1919fa9e4066Sahrens { 1920fa9e4066Sahrens /* 1921fa9e4066Sahrens * Recursively load all children. 1922fa9e4066Sahrens */ 19238ad4d6ddSJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) 1924560e6e96Seschrock vdev_load(vd->vdev_child[c]); 1925fa9e4066Sahrens 1926fa9e4066Sahrens /* 19270e34b6a7Sbonwick * If this is a top-level vdev, initialize its metaslabs. 1928fa9e4066Sahrens */ 192988ecc943SGeorge Wilson if (vd == vd->vdev_top && !vd->vdev_ishole && 1930560e6e96Seschrock (vd->vdev_ashift == 0 || vd->vdev_asize == 0 || 1931560e6e96Seschrock vdev_metaslab_init(vd, 0) != 0)) 1932560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1933560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1934fa9e4066Sahrens 1935fa9e4066Sahrens /* 1936fa9e4066Sahrens * If this is a leaf vdev, load its DTL. 1937fa9e4066Sahrens */ 1938560e6e96Seschrock if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0) 1939560e6e96Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, 1940560e6e96Seschrock VDEV_AUX_CORRUPT_DATA); 1941fa9e4066Sahrens } 1942fa9e4066Sahrens 194399653d4eSeschrock /* 1944fa94a07fSbrendan * The special vdev case is used for hot spares and l2cache devices. Its 1945fa94a07fSbrendan * sole purpose is to set the vdev state for the associated vdev. To do this, 1946fa94a07fSbrendan * we make sure that we can open the underlying device, then try to read the 1947fa94a07fSbrendan * label, and make sure that the label is sane and that it hasn't been 1948fa94a07fSbrendan * repurposed to another pool.
194999653d4eSeschrock */ 195099653d4eSeschrock int 1951fa94a07fSbrendan vdev_validate_aux(vdev_t *vd) 195299653d4eSeschrock { 195399653d4eSeschrock nvlist_t *label; 195499653d4eSeschrock uint64_t guid, version; 195599653d4eSeschrock uint64_t state; 195699653d4eSeschrock 1957e14bb325SJeff Bonwick if (!vdev_readable(vd)) 1958c5904d13Seschrock return (0); 1959c5904d13Seschrock 196099653d4eSeschrock if ((label = vdev_label_read_config(vd)) == NULL) { 196199653d4eSeschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 196299653d4eSeschrock VDEV_AUX_CORRUPT_DATA); 196399653d4eSeschrock return (-1); 196499653d4eSeschrock } 196599653d4eSeschrock 196699653d4eSeschrock if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || 1967e7437265Sahrens version > SPA_VERSION || 196899653d4eSeschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || 196999653d4eSeschrock guid != vd->vdev_guid || 197099653d4eSeschrock nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { 197199653d4eSeschrock vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, 197299653d4eSeschrock VDEV_AUX_CORRUPT_DATA); 197399653d4eSeschrock nvlist_free(label); 197499653d4eSeschrock return (-1); 197599653d4eSeschrock } 197699653d4eSeschrock 197799653d4eSeschrock /* 197899653d4eSeschrock * We don't actually check the pool state here. If it's in fact in 197999653d4eSeschrock * use by another pool, we update this fact on the fly when requested. 198099653d4eSeschrock */ 198199653d4eSeschrock nvlist_free(label); 198299653d4eSeschrock return (0); 198399653d4eSeschrock } 198499653d4eSeschrock 198588ecc943SGeorge Wilson void 198688ecc943SGeorge Wilson vdev_remove(vdev_t *vd, uint64_t txg) 198788ecc943SGeorge Wilson { 198888ecc943SGeorge Wilson spa_t *spa = vd->vdev_spa; 198988ecc943SGeorge Wilson objset_t *mos = spa->spa_meta_objset; 199088ecc943SGeorge Wilson dmu_tx_t *tx; 199188ecc943SGeorge Wilson 199288ecc943SGeorge Wilson tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 199388ecc943SGeorge Wilson 199488ecc943SGeorge Wilson if (vd->vdev_dtl_smo.smo_object) { 199588ecc943SGeorge Wilson ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0); 199688ecc943SGeorge Wilson (void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx); 199788ecc943SGeorge Wilson vd->vdev_dtl_smo.smo_object = 0; 199888ecc943SGeorge Wilson } 199988ecc943SGeorge Wilson 200088ecc943SGeorge Wilson if (vd->vdev_ms != NULL) { 200188ecc943SGeorge Wilson for (int m = 0; m < vd->vdev_ms_count; m++) { 200288ecc943SGeorge Wilson metaslab_t *msp = vd->vdev_ms[m]; 200388ecc943SGeorge Wilson 200488ecc943SGeorge Wilson if (msp == NULL || msp->ms_smo.smo_object == 0) 200588ecc943SGeorge Wilson continue; 200688ecc943SGeorge Wilson 200788ecc943SGeorge Wilson ASSERT3U(msp->ms_smo.smo_alloc, ==, 0); 200888ecc943SGeorge Wilson (void) dmu_object_free(mos, msp->ms_smo.smo_object, tx); 200988ecc943SGeorge Wilson msp->ms_smo.smo_object = 0; 201088ecc943SGeorge Wilson } 201188ecc943SGeorge Wilson } 201288ecc943SGeorge Wilson 201388ecc943SGeorge Wilson if (vd->vdev_ms_array) { 201488ecc943SGeorge Wilson (void) dmu_object_free(mos, vd->vdev_ms_array, tx); 201588ecc943SGeorge Wilson vd->vdev_ms_array = 0; 201688ecc943SGeorge Wilson vd->vdev_ms_shift = 0; 201788ecc943SGeorge Wilson } 201888ecc943SGeorge Wilson dmu_tx_commit(tx); 201988ecc943SGeorge Wilson } 202088ecc943SGeorge Wilson 2021fa9e4066Sahrens void 2022fa9e4066Sahrens vdev_sync_done(vdev_t *vd, uint64_t txg) 2023fa9e4066Sahrens { 2024fa9e4066Sahrens metaslab_t *msp; 202580eb36f2SGeorge Wilson boolean_t 
reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); 2026fa9e4066Sahrens 202788ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 202888ecc943SGeorge Wilson 2029fa9e4066Sahrens while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 2030fa9e4066Sahrens metaslab_sync_done(msp, txg); 203180eb36f2SGeorge Wilson 203280eb36f2SGeorge Wilson if (reassess) 203380eb36f2SGeorge Wilson metaslab_sync_reassess(vd->vdev_mg); 2034fa9e4066Sahrens } 2035fa9e4066Sahrens 2036fa9e4066Sahrens void 2037fa9e4066Sahrens vdev_sync(vdev_t *vd, uint64_t txg) 2038fa9e4066Sahrens { 2039fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 2040fa9e4066Sahrens vdev_t *lvd; 2041fa9e4066Sahrens metaslab_t *msp; 2042ecc2d604Sbonwick dmu_tx_t *tx; 2043fa9e4066Sahrens 204488ecc943SGeorge Wilson ASSERT(!vd->vdev_ishole); 204588ecc943SGeorge Wilson 2046ecc2d604Sbonwick if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) { 2047ecc2d604Sbonwick ASSERT(vd == vd->vdev_top); 2048ecc2d604Sbonwick tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2049ecc2d604Sbonwick vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 2050ecc2d604Sbonwick DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 2051ecc2d604Sbonwick ASSERT(vd->vdev_ms_array != 0); 2052ecc2d604Sbonwick vdev_config_dirty(vd); 2053ecc2d604Sbonwick dmu_tx_commit(tx); 2054ecc2d604Sbonwick } 2055fa9e4066Sahrens 20563f9d6ad7SLin Ling /* 20573f9d6ad7SLin Ling * Remove the metadata associated with this vdev once it's empty. 20583f9d6ad7SLin Ling */ 20593f9d6ad7SLin Ling if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) 206088ecc943SGeorge Wilson vdev_remove(vd, txg); 206188ecc943SGeorge Wilson 2062ecc2d604Sbonwick while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 2063fa9e4066Sahrens metaslab_sync(msp, txg); 2064ecc2d604Sbonwick (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 2065ecc2d604Sbonwick } 2066fa9e4066Sahrens 2067fa9e4066Sahrens while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 2068fa9e4066Sahrens vdev_dtl_sync(lvd, txg); 2069fa9e4066Sahrens 2070fa9e4066Sahrens (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 2071fa9e4066Sahrens } 2072fa9e4066Sahrens 2073fa9e4066Sahrens uint64_t 2074fa9e4066Sahrens vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 2075fa9e4066Sahrens { 2076fa9e4066Sahrens return (vd->vdev_ops->vdev_op_asize(vd, psize)); 2077fa9e4066Sahrens } 2078fa9e4066Sahrens 20793d7072f8Seschrock /* 20803d7072f8Seschrock * Mark the given vdev faulted. A faulted vdev behaves as if the device could 20813d7072f8Seschrock * not be opened, and no I/O is attempted. 20823d7072f8Seschrock */ 2083fa9e4066Sahrens int 2084069f55e2SEric Schrock vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) 2085fa9e4066Sahrens { 20864b964adaSGeorge Wilson vdev_t *vd, *tvd; 2087fa9e4066Sahrens 20888f18d1faSGeorge Wilson spa_vdev_state_enter(spa, SCL_NONE); 2089fa9e4066Sahrens 2090c5904d13Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2091e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2092e14bb325SJeff Bonwick 20933d7072f8Seschrock if (!vd->vdev_ops->vdev_op_leaf) 2094e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2095fa9e4066Sahrens 20964b964adaSGeorge Wilson tvd = vd->vdev_top; 20974b964adaSGeorge Wilson 2098069f55e2SEric Schrock /* 2099069f55e2SEric Schrock * We don't directly use the aux state here, but if we do a 2100069f55e2SEric Schrock * vdev_reopen(), we need this value to be present to remember why we 2101069f55e2SEric Schrock * were faulted. 
2102069f55e2SEric Schrock */ 2103069f55e2SEric Schrock vd->vdev_label_aux = aux; 2104069f55e2SEric Schrock 21053d7072f8Seschrock /* 21063d7072f8Seschrock * Faulted state takes precedence over degraded. 21073d7072f8Seschrock */ 210898d1cbfeSGeorge Wilson vd->vdev_delayed_close = B_FALSE; 21093d7072f8Seschrock vd->vdev_faulted = 1ULL; 21103d7072f8Seschrock vd->vdev_degraded = 0ULL; 2111069f55e2SEric Schrock vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); 21123d7072f8Seschrock 21133d7072f8Seschrock /* 2114c79790bcSGeorge Wilson * If this device has the only valid copy of the data, then 2115c79790bcSGeorge Wilson * back off and simply mark the vdev as degraded instead. 21163d7072f8Seschrock */ 21174b964adaSGeorge Wilson if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { 21183d7072f8Seschrock vd->vdev_degraded = 1ULL; 21193d7072f8Seschrock vd->vdev_faulted = 0ULL; 21203d7072f8Seschrock 21213d7072f8Seschrock /* 21223d7072f8Seschrock * If we reopen the device and it's not dead, only then do we 21233d7072f8Seschrock * mark it degraded. 21243d7072f8Seschrock */ 21254b964adaSGeorge Wilson vdev_reopen(tvd); 21263d7072f8Seschrock 2127069f55e2SEric Schrock if (vdev_readable(vd)) 2128069f55e2SEric Schrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); 21293d7072f8Seschrock } 21303d7072f8Seschrock 2131e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, vd, 0)); 21323d7072f8Seschrock } 21333d7072f8Seschrock 21343d7072f8Seschrock /* 21353d7072f8Seschrock * Mark the given vdev degraded. A degraded vdev is purely an indication to the 21363d7072f8Seschrock * user that something is wrong. The vdev continues to operate as normal as far 21373d7072f8Seschrock * as I/O is concerned. 21383d7072f8Seschrock */ 21393d7072f8Seschrock int 2140069f55e2SEric Schrock vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) 21413d7072f8Seschrock { 2142c5904d13Seschrock vdev_t *vd; 21430a4e9518Sgw 21448f18d1faSGeorge Wilson spa_vdev_state_enter(spa, SCL_NONE); 21453d7072f8Seschrock 2146c5904d13Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2147e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2148e14bb325SJeff Bonwick 21490e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 2150e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 21510e34b6a7Sbonwick 21523d7072f8Seschrock /* 21533d7072f8Seschrock * If the vdev is already faulted, then don't do anything. 21543d7072f8Seschrock */ 2155e14bb325SJeff Bonwick if (vd->vdev_faulted || vd->vdev_degraded) 2156e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, 0)); 21573d7072f8Seschrock 21583d7072f8Seschrock vd->vdev_degraded = 1ULL; 21593d7072f8Seschrock if (!vdev_is_dead(vd)) 21603d7072f8Seschrock vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 2161069f55e2SEric Schrock aux); 21623d7072f8Seschrock 2163e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, vd, 0)); 21643d7072f8Seschrock } 21653d7072f8Seschrock 21663d7072f8Seschrock /* 21673d7072f8Seschrock * Online the given vdev. If 'unspare' is set, it implies two things. First, 21683d7072f8Seschrock * any attached spare device should be detached when the device finishes 21693d7072f8Seschrock * resilvering. Second, the online should be treated like a 'test' online case, 21703d7072f8Seschrock * so no FMA events are generated if the device fails to open. 
21713d7072f8Seschrock */ 21723d7072f8Seschrock int 2173e14bb325SJeff Bonwick vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 21743d7072f8Seschrock { 2175573ca77eSGeorge Wilson vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 21763d7072f8Seschrock 21778f18d1faSGeorge Wilson spa_vdev_state_enter(spa, SCL_NONE); 21783d7072f8Seschrock 2179c5904d13Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2180e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENODEV)); 21813d7072f8Seschrock 21823d7072f8Seschrock if (!vd->vdev_ops->vdev_op_leaf) 2183e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2184fa9e4066Sahrens 2185573ca77eSGeorge Wilson tvd = vd->vdev_top; 2186fa9e4066Sahrens vd->vdev_offline = B_FALSE; 2187441d80aaSlling vd->vdev_tmpoffline = B_FALSE; 2188e14bb325SJeff Bonwick vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 2189e14bb325SJeff Bonwick vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 2190573ca77eSGeorge Wilson 2191573ca77eSGeorge Wilson /* XXX - L2ARC 1.0 does not support expansion */ 2192573ca77eSGeorge Wilson if (!vd->vdev_aux) { 2193573ca77eSGeorge Wilson for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2194573ca77eSGeorge Wilson pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 2195573ca77eSGeorge Wilson } 2196573ca77eSGeorge Wilson 2197573ca77eSGeorge Wilson vdev_reopen(tvd); 21983d7072f8Seschrock vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 21993d7072f8Seschrock 2200573ca77eSGeorge Wilson if (!vd->vdev_aux) { 2201573ca77eSGeorge Wilson for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2202573ca77eSGeorge Wilson pvd->vdev_expanding = B_FALSE; 2203573ca77eSGeorge Wilson } 2204573ca77eSGeorge Wilson 22053d7072f8Seschrock if (newstate) 22063d7072f8Seschrock *newstate = vd->vdev_state; 22073d7072f8Seschrock if ((flags & ZFS_ONLINE_UNSPARE) && 22083d7072f8Seschrock !vdev_is_dead(vd) && vd->vdev_parent && 22093d7072f8Seschrock vd->vdev_parent->vdev_ops == &vdev_spare_ops && 22103d7072f8Seschrock vd->vdev_parent->vdev_child[0] == vd) 22113d7072f8Seschrock vd->vdev_unspare = B_TRUE; 2212fa9e4066Sahrens 2213573ca77eSGeorge Wilson if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 2214573ca77eSGeorge Wilson 2215573ca77eSGeorge Wilson /* XXX - L2ARC 1.0 does not support expansion */ 2216573ca77eSGeorge Wilson if (vd->vdev_aux) 2217573ca77eSGeorge Wilson return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 2218573ca77eSGeorge Wilson spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2219573ca77eSGeorge Wilson } 22208ad4d6ddSJeff Bonwick return (spa_vdev_state_exit(spa, vd, 0)); 2221fa9e4066Sahrens } 2222fa9e4066Sahrens 2223a1521560SJeff Bonwick static int 2224a1521560SJeff Bonwick vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 2225fa9e4066Sahrens { 2226e6ca193dSGeorge Wilson vdev_t *vd, *tvd; 22278f18d1faSGeorge Wilson int error = 0; 22288f18d1faSGeorge Wilson uint64_t generation; 22298f18d1faSGeorge Wilson metaslab_group_t *mg; 22300a4e9518Sgw 22318f18d1faSGeorge Wilson top: 22328f18d1faSGeorge Wilson spa_vdev_state_enter(spa, SCL_ALLOC); 2233fa9e4066Sahrens 2234c5904d13Seschrock if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2235e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2236fa9e4066Sahrens 22370e34b6a7Sbonwick if (!vd->vdev_ops->vdev_op_leaf) 2238e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 22390e34b6a7Sbonwick 2240e6ca193dSGeorge Wilson tvd = vd->vdev_top; 22418f18d1faSGeorge Wilson mg = 
tvd->vdev_mg; 22428f18d1faSGeorge Wilson generation = spa->spa_config_generation + 1; 2243e6ca193dSGeorge Wilson 2244fa9e4066Sahrens /* 2245ecc2d604Sbonwick * If the device isn't already offline, try to offline it. 2246fa9e4066Sahrens */ 2247ecc2d604Sbonwick if (!vd->vdev_offline) { 2248ecc2d604Sbonwick /* 22498ad4d6ddSJeff Bonwick * If this device has the only valid copy of some data, 2250e6ca193dSGeorge Wilson * don't allow it to be offlined. Log devices are always 2251e6ca193dSGeorge Wilson * expendable. 2252ecc2d604Sbonwick */ 2253e6ca193dSGeorge Wilson if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2254e6ca193dSGeorge Wilson vdev_dtl_required(vd)) 2255e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2256fa9e4066Sahrens 22578f18d1faSGeorge Wilson /* 2258b24ab676SJeff Bonwick * If the top-level is a slog and it has had allocations 2259b24ab676SJeff Bonwick * then proceed. We check that the vdev's metaslab group 2260b24ab676SJeff Bonwick * is not NULL since it's possible that we may have just 2261b24ab676SJeff Bonwick * added this vdev but not yet initialized its metaslabs. 22628f18d1faSGeorge Wilson */ 22638f18d1faSGeorge Wilson if (tvd->vdev_islog && mg != NULL) { 22648f18d1faSGeorge Wilson /* 22658f18d1faSGeorge Wilson * Prevent any future allocations. 22668f18d1faSGeorge Wilson */ 2267a1521560SJeff Bonwick metaslab_group_passivate(mg); 22688f18d1faSGeorge Wilson (void) spa_vdev_state_exit(spa, vd, 0); 22698f18d1faSGeorge Wilson 22701195e687SMark J Musante error = spa_offline_log(spa); 22718f18d1faSGeorge Wilson 22728f18d1faSGeorge Wilson spa_vdev_state_enter(spa, SCL_ALLOC); 22738f18d1faSGeorge Wilson 22748f18d1faSGeorge Wilson /* 22758f18d1faSGeorge Wilson * Check to see if the config has changed. 22768f18d1faSGeorge Wilson */ 22778f18d1faSGeorge Wilson if (error || generation != spa->spa_config_generation) { 2278a1521560SJeff Bonwick metaslab_group_activate(mg); 22798f18d1faSGeorge Wilson if (error) 22808f18d1faSGeorge Wilson return (spa_vdev_state_exit(spa, 22818f18d1faSGeorge Wilson vd, error)); 22828f18d1faSGeorge Wilson (void) spa_vdev_state_exit(spa, vd, 0); 22838f18d1faSGeorge Wilson goto top; 22848f18d1faSGeorge Wilson } 22858f18d1faSGeorge Wilson ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0); 22868f18d1faSGeorge Wilson } 22878f18d1faSGeorge Wilson 2288ecc2d604Sbonwick /* 2289ecc2d604Sbonwick * Offline this device and reopen its top-level vdev. 2290e6ca193dSGeorge Wilson * If the top-level vdev is a log device then just offline 2291e6ca193dSGeorge Wilson * it. Otherwise, if this action results in the top-level 2292e6ca193dSGeorge Wilson * vdev becoming unusable, undo it and fail the request. 2293ecc2d604Sbonwick */ 2294ecc2d604Sbonwick vd->vdev_offline = B_TRUE; 2295e6ca193dSGeorge Wilson vdev_reopen(tvd); 2296e6ca193dSGeorge Wilson 2297e6ca193dSGeorge Wilson if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2298e6ca193dSGeorge Wilson vdev_is_dead(tvd)) { 2299ecc2d604Sbonwick vd->vdev_offline = B_FALSE; 2300e6ca193dSGeorge Wilson vdev_reopen(tvd); 2301e14bb325SJeff Bonwick return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2302ecc2d604Sbonwick } 23038f18d1faSGeorge Wilson 23048f18d1faSGeorge Wilson /* 23058f18d1faSGeorge Wilson * Add the device back into the metaslab rotor so that 23068f18d1faSGeorge Wilson * once we online the device it's open for business. 
23078f18d1faSGeorge Wilson */ 23088f18d1faSGeorge Wilson if (tvd->vdev_islog && mg != NULL) 2309a1521560SJeff Bonwick metaslab_group_activate(mg); 2310fa9e4066Sahrens } 2311fa9e4066Sahrens 2312e14bb325SJeff Bonwick vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 2313ecc2d604Sbonwick 23148f18d1faSGeorge Wilson return (spa_vdev_state_exit(spa, vd, 0)); 2315fa9e4066Sahrens } 2316fa9e4066Sahrens 2317a1521560SJeff Bonwick int 2318a1521560SJeff Bonwick vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 2319a1521560SJeff Bonwick { 2320a1521560SJeff Bonwick int error; 2321a1521560SJeff Bonwick 2322a1521560SJeff Bonwick mutex_enter(&spa->spa_vdev_top_lock); 2323a1521560SJeff Bonwick error = vdev_offline_locked(spa, guid, flags); 2324a1521560SJeff Bonwick mutex_exit(&spa->spa_vdev_top_lock); 2325a1521560SJeff Bonwick 2326a1521560SJeff Bonwick return (error); 2327a1521560SJeff Bonwick } 2328a1521560SJeff Bonwick 2329ea8dc4b6Seschrock /* 2330ea8dc4b6Seschrock * Clear the error counts associated with this vdev. Unlike vdev_online() and 2331ea8dc4b6Seschrock * vdev_offline(), we assume the spa config is locked. We also clear all 2332ea8dc4b6Seschrock * children. If 'vd' is NULL, then the user wants to clear all vdevs. 2333ea8dc4b6Seschrock */ 2334ea8dc4b6Seschrock void 2335e14bb325SJeff Bonwick vdev_clear(spa_t *spa, vdev_t *vd) 2336fa9e4066Sahrens { 2337e14bb325SJeff Bonwick vdev_t *rvd = spa->spa_root_vdev; 2338e14bb325SJeff Bonwick 2339e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2340fa9e4066Sahrens 2341ea8dc4b6Seschrock if (vd == NULL) 2342e14bb325SJeff Bonwick vd = rvd; 2343fa9e4066Sahrens 2344ea8dc4b6Seschrock vd->vdev_stat.vs_read_errors = 0; 2345ea8dc4b6Seschrock vd->vdev_stat.vs_write_errors = 0; 2346ea8dc4b6Seschrock vd->vdev_stat.vs_checksum_errors = 0; 2347fa9e4066Sahrens 2348e14bb325SJeff Bonwick for (int c = 0; c < vd->vdev_children; c++) 2349e14bb325SJeff Bonwick vdev_clear(spa, vd->vdev_child[c]); 23503d7072f8Seschrock 23513d7072f8Seschrock /* 23528a79c1b5Sek * If we're in the FAULTED state or have experienced failed I/O, then 23538a79c1b5Sek * clear the persistent state and attempt to reopen the device. We 23548a79c1b5Sek * also mark the vdev config dirty, so that the new faulted state is 23558a79c1b5Sek * written out to disk. 23563d7072f8Seschrock */ 2357e14bb325SJeff Bonwick if (vd->vdev_faulted || vd->vdev_degraded || 2358e14bb325SJeff Bonwick !vdev_readable(vd) || !vdev_writeable(vd)) { 23598a79c1b5Sek 2360096d22d4SEric Schrock /* 2361096d22d4SEric Schrock * When reopening in response to a clear event, it may be due to 2362096d22d4SEric Schrock * a fmadm repair request. In this case, if the device is 2363096d22d4SEric Schrock * still broken, we want to still post the ereport again. 2364096d22d4SEric Schrock */ 2365096d22d4SEric Schrock vd->vdev_forcefault = B_TRUE; 2366096d22d4SEric Schrock 23674b964adaSGeorge Wilson vd->vdev_faulted = vd->vdev_degraded = 0ULL; 2368e14bb325SJeff Bonwick vd->vdev_cant_read = B_FALSE; 2369e14bb325SJeff Bonwick vd->vdev_cant_write = B_FALSE; 2370e14bb325SJeff Bonwick 2371f9af39baSGeorge Wilson vdev_reopen(vd == rvd ? 
rvd : vd->vdev_top); 23723d7072f8Seschrock 2373096d22d4SEric Schrock vd->vdev_forcefault = B_FALSE; 2374096d22d4SEric Schrock 2375f9af39baSGeorge Wilson if (vd != rvd && vdev_writeable(vd->vdev_top)) 2376e14bb325SJeff Bonwick vdev_state_dirty(vd->vdev_top); 2377e14bb325SJeff Bonwick 2378e14bb325SJeff Bonwick if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 2379bb8b5132Sek spa_async_request(spa, SPA_ASYNC_RESILVER); 23803d7072f8Seschrock 23813d7072f8Seschrock spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 23823d7072f8Seschrock } 2383096d22d4SEric Schrock 2384096d22d4SEric Schrock /* 2385096d22d4SEric Schrock * When clearing a FMA-diagnosed fault, we always want to 2386096d22d4SEric Schrock * unspare the device, as we assume that the original spare was 2387096d22d4SEric Schrock * done in response to the FMA fault. 2388096d22d4SEric Schrock */ 2389096d22d4SEric Schrock if (!vdev_is_dead(vd) && vd->vdev_parent != NULL && 2390096d22d4SEric Schrock vd->vdev_parent->vdev_ops == &vdev_spare_ops && 2391096d22d4SEric Schrock vd->vdev_parent->vdev_child[0] == vd) 2392096d22d4SEric Schrock vd->vdev_unspare = B_TRUE; 2393fa9e4066Sahrens } 2394fa9e4066Sahrens 2395e14bb325SJeff Bonwick boolean_t 2396e14bb325SJeff Bonwick vdev_is_dead(vdev_t *vd) 23970a4e9518Sgw { 239888ecc943SGeorge Wilson /* 239988ecc943SGeorge Wilson * Holes and missing devices are always considered "dead". 240088ecc943SGeorge Wilson * This simplifies the code since we don't have to check for 240188ecc943SGeorge Wilson * these types of devices in the various code paths. 240288ecc943SGeorge Wilson * Instead we rely on the fact that we skip over dead devices 240388ecc943SGeorge Wilson * before issuing I/O to them. 240488ecc943SGeorge Wilson */ 240588ecc943SGeorge Wilson return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole || 240688ecc943SGeorge Wilson vd->vdev_ops == &vdev_missing_ops); 24070a4e9518Sgw } 24080a4e9518Sgw 2409e14bb325SJeff Bonwick boolean_t 2410e14bb325SJeff Bonwick vdev_readable(vdev_t *vd) 24110a4e9518Sgw { 2412e14bb325SJeff Bonwick return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 24130a4e9518Sgw } 24140a4e9518Sgw 2415e14bb325SJeff Bonwick boolean_t 2416e14bb325SJeff Bonwick vdev_writeable(vdev_t *vd) 2417fa9e4066Sahrens { 2418e14bb325SJeff Bonwick return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 2419fa9e4066Sahrens } 2420fa9e4066Sahrens 2421a31e6787SGeorge Wilson boolean_t 2422a31e6787SGeorge Wilson vdev_allocatable(vdev_t *vd) 2423a31e6787SGeorge Wilson { 24248ad4d6ddSJeff Bonwick uint64_t state = vd->vdev_state; 24258ad4d6ddSJeff Bonwick 2426a31e6787SGeorge Wilson /* 24278ad4d6ddSJeff Bonwick * We currently allow allocations from vdevs which may be in the 2428a31e6787SGeorge Wilson * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 2429a31e6787SGeorge Wilson * fails to reopen then we'll catch it later when we're holding 24308ad4d6ddSJeff Bonwick * the proper locks. Note that we have to get the vdev state 24318ad4d6ddSJeff Bonwick * in a local variable because although it changes atomically, 24328ad4d6ddSJeff Bonwick * we're asking two separate questions about it. 
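 * (The two questions being: is the state below VDEV_STATE_DEGRADED, and is it something other than VDEV_STATE_CLOSED? Both must be answered against the same snapshot of vdev_state.)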
2433a31e6787SGeorge Wilson */ 24348ad4d6ddSJeff Bonwick return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 24353f9d6ad7SLin Ling !vd->vdev_cant_write && !vd->vdev_ishole); 2436a31e6787SGeorge Wilson } 2437a31e6787SGeorge Wilson 2438e14bb325SJeff Bonwick boolean_t 2439e14bb325SJeff Bonwick vdev_accessible(vdev_t *vd, zio_t *zio) 2440fa9e4066Sahrens { 2441e14bb325SJeff Bonwick ASSERT(zio->io_vd == vd); 2442fa9e4066Sahrens 2443e14bb325SJeff Bonwick if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 2444e14bb325SJeff Bonwick return (B_FALSE); 2445fa9e4066Sahrens 2446e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_READ) 2447e14bb325SJeff Bonwick return (!vd->vdev_cant_read); 2448fa9e4066Sahrens 2449e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_WRITE) 2450e14bb325SJeff Bonwick return (!vd->vdev_cant_write); 2451fa9e4066Sahrens 2452e14bb325SJeff Bonwick return (B_TRUE); 2453fa9e4066Sahrens } 2454fa9e4066Sahrens 2455fa9e4066Sahrens /* 2456fa9e4066Sahrens * Get statistics for the given vdev. 2457fa9e4066Sahrens */ 2458fa9e4066Sahrens void 2459fa9e4066Sahrens vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2460fa9e4066Sahrens { 2461fa9e4066Sahrens vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2462fa9e4066Sahrens 2463fa9e4066Sahrens mutex_enter(&vd->vdev_stat_lock); 2464fa9e4066Sahrens bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 2465fa9e4066Sahrens vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2466fa9e4066Sahrens vs->vs_state = vd->vdev_state; 2467573ca77eSGeorge Wilson vs->vs_rsize = vdev_get_min_asize(vd); 2468573ca77eSGeorge Wilson if (vd->vdev_ops->vdev_op_leaf) 2469573ca77eSGeorge Wilson vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 2470*4263d13fSGeorge Wilson vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize; 2471fa9e4066Sahrens mutex_exit(&vd->vdev_stat_lock); 2472fa9e4066Sahrens 2473fa9e4066Sahrens /* 2474fa9e4066Sahrens * If we're getting stats on the root vdev, aggregate the I/O counts 2475fa9e4066Sahrens * over all top-level vdevs (i.e. the direct children of the root). 
2476fa9e4066Sahrens */ 2477fa9e4066Sahrens if (vd == rvd) { 2478e14bb325SJeff Bonwick for (int c = 0; c < rvd->vdev_children; c++) { 2479fa9e4066Sahrens vdev_t *cvd = rvd->vdev_child[c]; 2480fa9e4066Sahrens vdev_stat_t *cvs = &cvd->vdev_stat; 2481fa9e4066Sahrens 2482fa9e4066Sahrens mutex_enter(&vd->vdev_stat_lock); 2483e14bb325SJeff Bonwick for (int t = 0; t < ZIO_TYPES; t++) { 2484fa9e4066Sahrens vs->vs_ops[t] += cvs->vs_ops[t]; 2485fa9e4066Sahrens vs->vs_bytes[t] += cvs->vs_bytes[t]; 2486fa9e4066Sahrens } 24873f9d6ad7SLin Ling cvs->vs_scan_removing = cvd->vdev_removing; 2488fa9e4066Sahrens mutex_exit(&vd->vdev_stat_lock); 2489fa9e4066Sahrens } 2490fa9e4066Sahrens } 2491fa9e4066Sahrens } 2492fa9e4066Sahrens 2493fa94a07fSbrendan void 2494fa94a07fSbrendan vdev_clear_stats(vdev_t *vd) 2495fa94a07fSbrendan { 2496fa94a07fSbrendan mutex_enter(&vd->vdev_stat_lock); 2497fa94a07fSbrendan vd->vdev_stat.vs_space = 0; 2498fa94a07fSbrendan vd->vdev_stat.vs_dspace = 0; 2499fa94a07fSbrendan vd->vdev_stat.vs_alloc = 0; 2500fa94a07fSbrendan mutex_exit(&vd->vdev_stat_lock); 2501fa94a07fSbrendan } 2502fa94a07fSbrendan 25033f9d6ad7SLin Ling void 25043f9d6ad7SLin Ling vdev_scan_stat_init(vdev_t *vd) 25053f9d6ad7SLin Ling { 25063f9d6ad7SLin Ling vdev_stat_t *vs = &vd->vdev_stat; 25073f9d6ad7SLin Ling 25083f9d6ad7SLin Ling for (int c = 0; c < vd->vdev_children; c++) 25093f9d6ad7SLin Ling vdev_scan_stat_init(vd->vdev_child[c]); 25103f9d6ad7SLin Ling 25113f9d6ad7SLin Ling mutex_enter(&vd->vdev_stat_lock); 25123f9d6ad7SLin Ling vs->vs_scan_processed = 0; 25133f9d6ad7SLin Ling mutex_exit(&vd->vdev_stat_lock); 25143f9d6ad7SLin Ling } 25153f9d6ad7SLin Ling 2516fa9e4066Sahrens void 2517e14bb325SJeff Bonwick vdev_stat_update(zio_t *zio, uint64_t psize) 2518fa9e4066Sahrens { 25198ad4d6ddSJeff Bonwick spa_t *spa = zio->io_spa; 25208ad4d6ddSJeff Bonwick vdev_t *rvd = spa->spa_root_vdev; 2521e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; 2522fa9e4066Sahrens vdev_t *pvd; 2523fa9e4066Sahrens uint64_t txg = zio->io_txg; 2524fa9e4066Sahrens vdev_stat_t *vs = &vd->vdev_stat; 2525fa9e4066Sahrens zio_type_t type = zio->io_type; 2526fa9e4066Sahrens int flags = zio->io_flags; 2527fa9e4066Sahrens 2528e14bb325SJeff Bonwick /* 2529e14bb325SJeff Bonwick * If this i/o is a gang leader, it didn't do any actual work. 2530e14bb325SJeff Bonwick */ 2531e14bb325SJeff Bonwick if (zio->io_gang_tree) 2532e14bb325SJeff Bonwick return; 2533e14bb325SJeff Bonwick 2534fa9e4066Sahrens if (zio->io_error == 0) { 2535e14bb325SJeff Bonwick /* 2536e14bb325SJeff Bonwick * If this is a root i/o, don't count it -- we've already 2537e14bb325SJeff Bonwick * counted the top-level vdevs, and vdev_get_stats() will 2538e14bb325SJeff Bonwick * aggregate them when asked. This reduces contention on 2539e14bb325SJeff Bonwick * the root vdev_stat_lock and implicitly handles blocks 2540e14bb325SJeff Bonwick * that compress away to holes, for which there is no i/o. 2541e14bb325SJeff Bonwick * (Holes never create vdev children, so all the counters 2542e14bb325SJeff Bonwick * remain zero, which is what we want.) 2543e14bb325SJeff Bonwick * 2544e14bb325SJeff Bonwick * Note: this only applies to successful i/o (io_error == 0) 2545e14bb325SJeff Bonwick * because unlike i/o counts, errors are not additive. 2546e14bb325SJeff Bonwick * When reading a ditto block, for example, failure of 2547e14bb325SJeff Bonwick * one top-level vdev does not imply a root-level error. 
2548e14bb325SJeff Bonwick */ 2549e14bb325SJeff Bonwick if (vd == rvd) 2550e14bb325SJeff Bonwick return; 2551e14bb325SJeff Bonwick 2552e14bb325SJeff Bonwick ASSERT(vd == zio->io_vd); 25538ad4d6ddSJeff Bonwick 25548ad4d6ddSJeff Bonwick if (flags & ZIO_FLAG_IO_BYPASS) 25558ad4d6ddSJeff Bonwick return; 25568ad4d6ddSJeff Bonwick 25578ad4d6ddSJeff Bonwick mutex_enter(&vd->vdev_stat_lock); 25588ad4d6ddSJeff Bonwick 2559e14bb325SJeff Bonwick if (flags & ZIO_FLAG_IO_REPAIR) { 256044ecc532SGeorge Wilson if (flags & ZIO_FLAG_SCAN_THREAD) { 25613f9d6ad7SLin Ling dsl_scan_phys_t *scn_phys = 25623f9d6ad7SLin Ling &spa->spa_dsl_pool->dp_scan->scn_phys; 25633f9d6ad7SLin Ling uint64_t *processed = &scn_phys->scn_processed; 25643f9d6ad7SLin Ling 25653f9d6ad7SLin Ling /* XXX cleanup? */ 25663f9d6ad7SLin Ling if (vd->vdev_ops->vdev_op_leaf) 25673f9d6ad7SLin Ling atomic_add_64(processed, psize); 25683f9d6ad7SLin Ling vs->vs_scan_processed += psize; 25693f9d6ad7SLin Ling } 25703f9d6ad7SLin Ling 25718ad4d6ddSJeff Bonwick if (flags & ZIO_FLAG_SELF_HEAL) 2572e14bb325SJeff Bonwick vs->vs_self_healed += psize; 2573fa9e4066Sahrens } 25748ad4d6ddSJeff Bonwick 25758ad4d6ddSJeff Bonwick vs->vs_ops[type]++; 25768ad4d6ddSJeff Bonwick vs->vs_bytes[type] += psize; 25778ad4d6ddSJeff Bonwick 25788ad4d6ddSJeff Bonwick mutex_exit(&vd->vdev_stat_lock); 2579fa9e4066Sahrens return; 2580fa9e4066Sahrens } 2581fa9e4066Sahrens 2582fa9e4066Sahrens if (flags & ZIO_FLAG_SPECULATIVE) 2583fa9e4066Sahrens return; 2584fa9e4066Sahrens 25858956713aSEric Schrock /* 25868956713aSEric Schrock * If this is an I/O error that is going to be retried, then ignore the 25878956713aSEric Schrock * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 25888956713aSEric Schrock * hard errors, when in reality they can happen for any number of 25898956713aSEric Schrock * innocuous reasons (bus resets, MPxIO link failure, etc). 25908956713aSEric Schrock */ 25918956713aSEric Schrock if (zio->io_error == EIO && 25928956713aSEric Schrock !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 25938956713aSEric Schrock return; 25948956713aSEric Schrock 25958f18d1faSGeorge Wilson /* 25968f18d1faSGeorge Wilson * Intent log writes won't propagate their error to the root 25978f18d1faSGeorge Wilson * I/O so don't mark these types of failures as pool-level 25988f18d1faSGeorge Wilson * errors.
25998f18d1faSGeorge Wilson */ 26008f18d1faSGeorge Wilson if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 26018f18d1faSGeorge Wilson return; 26028f18d1faSGeorge Wilson 2603e14bb325SJeff Bonwick mutex_enter(&vd->vdev_stat_lock); 2604b47119fdSGeorge Wilson if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 2605e14bb325SJeff Bonwick if (zio->io_error == ECKSUM) 2606e14bb325SJeff Bonwick vs->vs_checksum_errors++; 2607e14bb325SJeff Bonwick else 2608e14bb325SJeff Bonwick vs->vs_read_errors++; 2609fa9e4066Sahrens } 2610b47119fdSGeorge Wilson if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 2611e14bb325SJeff Bonwick vs->vs_write_errors++; 2612e14bb325SJeff Bonwick mutex_exit(&vd->vdev_stat_lock); 2613fa9e4066Sahrens 26148ad4d6ddSJeff Bonwick if (type == ZIO_TYPE_WRITE && txg != 0 && 26158ad4d6ddSJeff Bonwick (!(flags & ZIO_FLAG_IO_REPAIR) || 261644ecc532SGeorge Wilson (flags & ZIO_FLAG_SCAN_THREAD) || 2617b24ab676SJeff Bonwick spa->spa_claiming)) { 26188ad4d6ddSJeff Bonwick /* 2619b24ab676SJeff Bonwick * This is either a normal write (not a repair), or it's 2620b24ab676SJeff Bonwick * a repair induced by the scrub thread, or it's a repair 2621b24ab676SJeff Bonwick * made by zil_claim() during spa_load() in the first txg. 2622b24ab676SJeff Bonwick * In the normal case, we commit the DTL change in the same 2623b24ab676SJeff Bonwick * txg as the block was born. In the scrub-induced repair 2624b24ab676SJeff Bonwick * case, we know that scrubs run in first-pass syncing context, 2625b24ab676SJeff Bonwick * so we commit the DTL change in spa_syncing_txg(spa). 2626b24ab676SJeff Bonwick * In the zil_claim() case, we commit in spa_first_txg(spa). 26278ad4d6ddSJeff Bonwick * 26288ad4d6ddSJeff Bonwick * We currently do not make DTL entries for failed spontaneous 26298ad4d6ddSJeff Bonwick * self-healing writes triggered by normal (non-scrubbing) 26308ad4d6ddSJeff Bonwick * reads, because we have no transactional context in which to 26318ad4d6ddSJeff Bonwick * do so -- and it's not clear that it'd be desirable anyway. 26328ad4d6ddSJeff Bonwick */ 26338ad4d6ddSJeff Bonwick if (vd->vdev_ops->vdev_op_leaf) { 26348ad4d6ddSJeff Bonwick uint64_t commit_txg = txg; 263544ecc532SGeorge Wilson if (flags & ZIO_FLAG_SCAN_THREAD) { 26368ad4d6ddSJeff Bonwick ASSERT(flags & ZIO_FLAG_IO_REPAIR); 26378ad4d6ddSJeff Bonwick ASSERT(spa_sync_pass(spa) == 1); 26388ad4d6ddSJeff Bonwick vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 2639b24ab676SJeff Bonwick commit_txg = spa_syncing_txg(spa); 2640b24ab676SJeff Bonwick } else if (spa->spa_claiming) { 2641b24ab676SJeff Bonwick ASSERT(flags & ZIO_FLAG_IO_REPAIR); 2642b24ab676SJeff Bonwick commit_txg = spa_first_txg(spa); 26438ad4d6ddSJeff Bonwick } 2644b24ab676SJeff Bonwick ASSERT(commit_txg >= spa_syncing_txg(spa)); 26458ad4d6ddSJeff Bonwick if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 2646fa9e4066Sahrens return; 26478ad4d6ddSJeff Bonwick for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 26488ad4d6ddSJeff Bonwick vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 26498ad4d6ddSJeff Bonwick vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 2650fa9e4066Sahrens } 26518ad4d6ddSJeff Bonwick if (vd != rvd) 26528ad4d6ddSJeff Bonwick vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 2653fa9e4066Sahrens } 2654fa9e4066Sahrens } 2655fa9e4066Sahrens 2656fa9e4066Sahrens /* 2657b24ab676SJeff Bonwick * Update the in-core space usage stats for this vdev, its metaslab class, 2658b24ab676SJeff Bonwick * and the root vdev. 
2659fa9e4066Sahrens */ 2660fa9e4066Sahrens void 2661b24ab676SJeff Bonwick vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, 2662b24ab676SJeff Bonwick int64_t space_delta) 2663fa9e4066Sahrens { 266499653d4eSeschrock int64_t dspace_delta = space_delta; 26658654d025Sperrin spa_t *spa = vd->vdev_spa; 26668654d025Sperrin vdev_t *rvd = spa->spa_root_vdev; 2667b24ab676SJeff Bonwick metaslab_group_t *mg = vd->vdev_mg; 2668b24ab676SJeff Bonwick metaslab_class_t *mc = mg ? mg->mg_class : NULL; 2669fa9e4066Sahrens 26708654d025Sperrin ASSERT(vd == vd->vdev_top); 267199653d4eSeschrock 26728654d025Sperrin /* 26738654d025Sperrin * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion 26748654d025Sperrin * factor. We must calculate this here and not at the root vdev 26758654d025Sperrin * because the root vdev's psize-to-asize is simply the max of its 26768654d025Sperrin * children's, thus not accurate enough for us. 26778654d025Sperrin */ 26788654d025Sperrin ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); 2679e6ca193dSGeorge Wilson ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 26808654d025Sperrin dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * 26818654d025Sperrin vd->vdev_deflate_ratio; 26828654d025Sperrin 26838654d025Sperrin mutex_enter(&vd->vdev_stat_lock); 26848654d025Sperrin vd->vdev_stat.vs_alloc += alloc_delta; 2685b24ab676SJeff Bonwick vd->vdev_stat.vs_space += space_delta; 26868654d025Sperrin vd->vdev_stat.vs_dspace += dspace_delta; 26878654d025Sperrin mutex_exit(&vd->vdev_stat_lock); 26888654d025Sperrin 2689b24ab676SJeff Bonwick if (mc == spa_normal_class(spa)) { 2690fa94a07fSbrendan mutex_enter(&rvd->vdev_stat_lock); 2691fa94a07fSbrendan rvd->vdev_stat.vs_alloc += alloc_delta; 2692b24ab676SJeff Bonwick rvd->vdev_stat.vs_space += space_delta; 2693fa94a07fSbrendan rvd->vdev_stat.vs_dspace += dspace_delta; 2694fa94a07fSbrendan mutex_exit(&rvd->vdev_stat_lock); 2695fa94a07fSbrendan } 2696b24ab676SJeff Bonwick 2697b24ab676SJeff Bonwick if (mc != NULL) { 2698b24ab676SJeff Bonwick ASSERT(rvd == vd->vdev_parent); 2699b24ab676SJeff Bonwick ASSERT(vd->vdev_ms_count != 0); 2700b24ab676SJeff Bonwick 2701b24ab676SJeff Bonwick metaslab_class_space_update(mc, 2702b24ab676SJeff Bonwick alloc_delta, defer_delta, space_delta, dspace_delta); 2703b24ab676SJeff Bonwick } 2704fa9e4066Sahrens } 2705fa9e4066Sahrens 2706fa9e4066Sahrens /* 2707fa9e4066Sahrens * Mark a top-level vdev's config as dirty, placing it on the dirty list 2708fa9e4066Sahrens * so that it will be written out next time the vdev configuration is synced. 2709fa9e4066Sahrens * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. 2710fa9e4066Sahrens */ 2711fa9e4066Sahrens void 2712fa9e4066Sahrens vdev_config_dirty(vdev_t *vd) 2713fa9e4066Sahrens { 2714fa9e4066Sahrens spa_t *spa = vd->vdev_spa; 2715fa9e4066Sahrens vdev_t *rvd = spa->spa_root_vdev; 2716fa9e4066Sahrens int c; 2717fa9e4066Sahrens 2718f9af39baSGeorge Wilson ASSERT(spa_writeable(spa)); 2719f9af39baSGeorge Wilson 2720c5904d13Seschrock /* 27216809eb4eSEric Schrock * If this is an aux vdev (as with l2cache and spare devices), then we 27226809eb4eSEric Schrock * update the vdev config manually and set the sync flag.
2723c5904d13Seschrock */ 2724c5904d13Seschrock if (vd->vdev_aux != NULL) { 2725c5904d13Seschrock spa_aux_vdev_t *sav = vd->vdev_aux; 2726c5904d13Seschrock nvlist_t **aux; 2727c5904d13Seschrock uint_t naux; 2728c5904d13Seschrock 2729c5904d13Seschrock for (c = 0; c < sav->sav_count; c++) { 2730c5904d13Seschrock if (sav->sav_vdevs[c] == vd) 2731c5904d13Seschrock break; 2732c5904d13Seschrock } 2733c5904d13Seschrock 2734e14bb325SJeff Bonwick if (c == sav->sav_count) { 2735e14bb325SJeff Bonwick /* 2736e14bb325SJeff Bonwick * We're being removed. There's nothing more to do. 2737e14bb325SJeff Bonwick */ 2738e14bb325SJeff Bonwick ASSERT(sav->sav_sync == B_TRUE); 2739e14bb325SJeff Bonwick return; 2740e14bb325SJeff Bonwick } 2741e14bb325SJeff Bonwick 2742c5904d13Seschrock sav->sav_sync = B_TRUE; 2743c5904d13Seschrock 27446809eb4eSEric Schrock if (nvlist_lookup_nvlist_array(sav->sav_config, 27456809eb4eSEric Schrock ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { 27466809eb4eSEric Schrock VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 27476809eb4eSEric Schrock ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); 27486809eb4eSEric Schrock } 2749c5904d13Seschrock 2750c5904d13Seschrock ASSERT(c < naux); 2751c5904d13Seschrock 2752c5904d13Seschrock /* 2753c5904d13Seschrock * Setting the nvlist in the middle of the array is a little 2754c5904d13Seschrock * sketchy, but it will work. 2755c5904d13Seschrock */ 2756c5904d13Seschrock nvlist_free(aux[c]); 27573f9d6ad7SLin Ling aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0); 2758c5904d13Seschrock 2759c5904d13Seschrock return; 2760c5904d13Seschrock } 2761c5904d13Seschrock 27625dabedeeSbonwick /* 2763e14bb325SJeff Bonwick * The dirty list is protected by the SCL_CONFIG lock. The caller 2764e14bb325SJeff Bonwick * must either hold SCL_CONFIG as writer, or must be the sync thread 2765e14bb325SJeff Bonwick * (which holds SCL_CONFIG as reader). There's only one sync thread, 27665dabedeeSbonwick * so this is sufficient to ensure mutual exclusion.
27675dabedeeSbonwick */ 2768e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2769e14bb325SJeff Bonwick (dsl_pool_sync_context(spa_get_dsl(spa)) && 2770e14bb325SJeff Bonwick spa_config_held(spa, SCL_CONFIG, RW_READER))); 27715dabedeeSbonwick 2772fa9e4066Sahrens if (vd == rvd) { 2773fa9e4066Sahrens for (c = 0; c < rvd->vdev_children; c++) 2774fa9e4066Sahrens vdev_config_dirty(rvd->vdev_child[c]); 2775fa9e4066Sahrens } else { 2776fa9e4066Sahrens ASSERT(vd == vd->vdev_top); 2777fa9e4066Sahrens 277888ecc943SGeorge Wilson if (!list_link_active(&vd->vdev_config_dirty_node) && 277988ecc943SGeorge Wilson !vd->vdev_ishole) 2780e14bb325SJeff Bonwick list_insert_head(&spa->spa_config_dirty_list, vd); 2781fa9e4066Sahrens } 2782fa9e4066Sahrens } 2783fa9e4066Sahrens 2784fa9e4066Sahrens void 2785fa9e4066Sahrens vdev_config_clean(vdev_t *vd) 2786fa9e4066Sahrens { 27875dabedeeSbonwick spa_t *spa = vd->vdev_spa; 27885dabedeeSbonwick 2789e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2790e14bb325SJeff Bonwick (dsl_pool_sync_context(spa_get_dsl(spa)) && 2791e14bb325SJeff Bonwick spa_config_held(spa, SCL_CONFIG, RW_READER))); 27925dabedeeSbonwick 2793e14bb325SJeff Bonwick ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 2794e14bb325SJeff Bonwick list_remove(&spa->spa_config_dirty_list, vd); 2795e14bb325SJeff Bonwick } 2796e14bb325SJeff Bonwick 2797e14bb325SJeff Bonwick /* 2798e14bb325SJeff Bonwick * Mark a top-level vdev's state as dirty, so that the next pass of 2799e14bb325SJeff Bonwick * spa_sync() can convert this into vdev_config_dirty(). We distinguish 2800e14bb325SJeff Bonwick * the state changes from larger config changes because they require 2801e14bb325SJeff Bonwick * much less locking, and are often needed for administrative actions. 2802e14bb325SJeff Bonwick */ 2803e14bb325SJeff Bonwick void 2804e14bb325SJeff Bonwick vdev_state_dirty(vdev_t *vd) 2805e14bb325SJeff Bonwick { 2806e14bb325SJeff Bonwick spa_t *spa = vd->vdev_spa; 2807e14bb325SJeff Bonwick 2808f9af39baSGeorge Wilson ASSERT(spa_writeable(spa)); 2809e14bb325SJeff Bonwick ASSERT(vd == vd->vdev_top); 2810e14bb325SJeff Bonwick 2811e14bb325SJeff Bonwick /* 2812e14bb325SJeff Bonwick * The state list is protected by the SCL_STATE lock. The caller 2813e14bb325SJeff Bonwick * must either hold SCL_STATE as writer, or must be the sync thread 2814e14bb325SJeff Bonwick * (which holds SCL_STATE as reader). There's only one sync thread, 2815e14bb325SJeff Bonwick * so this is sufficient to ensure mutual exclusion. 
2816e14bb325SJeff Bonwick */ 2817e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2818e14bb325SJeff Bonwick (dsl_pool_sync_context(spa_get_dsl(spa)) && 2819e14bb325SJeff Bonwick spa_config_held(spa, SCL_STATE, RW_READER))); 2820e14bb325SJeff Bonwick 2821b24ab676SJeff Bonwick if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole) 2822e14bb325SJeff Bonwick list_insert_head(&spa->spa_state_dirty_list, vd); 2823e14bb325SJeff Bonwick } 2824e14bb325SJeff Bonwick 2825e14bb325SJeff Bonwick void 2826e14bb325SJeff Bonwick vdev_state_clean(vdev_t *vd) 2827e14bb325SJeff Bonwick { 2828e14bb325SJeff Bonwick spa_t *spa = vd->vdev_spa; 2829e14bb325SJeff Bonwick 2830e14bb325SJeff Bonwick ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2831e14bb325SJeff Bonwick (dsl_pool_sync_context(spa_get_dsl(spa)) && 2832e14bb325SJeff Bonwick spa_config_held(spa, SCL_STATE, RW_READER))); 2833e14bb325SJeff Bonwick 2834e14bb325SJeff Bonwick ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 2835e14bb325SJeff Bonwick list_remove(&spa->spa_state_dirty_list, vd); 2836fa9e4066Sahrens } 2837fa9e4066Sahrens 283832b87932Sek /* 283932b87932Sek * Propagate vdev state up from children to parent. 284032b87932Sek */ 284144cd46caSbillm void 284244cd46caSbillm vdev_propagate_state(vdev_t *vd) 284344cd46caSbillm { 28448ad4d6ddSJeff Bonwick spa_t *spa = vd->vdev_spa; 28458ad4d6ddSJeff Bonwick vdev_t *rvd = spa->spa_root_vdev; 284644cd46caSbillm int degraded = 0, faulted = 0; 284744cd46caSbillm int corrupted = 0; 284844cd46caSbillm vdev_t *child; 284944cd46caSbillm 28503d7072f8Seschrock if (vd->vdev_children > 0) { 2851573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) { 28523d7072f8Seschrock child = vd->vdev_child[c]; 285351ece835Seschrock 285488ecc943SGeorge Wilson /* 285588ecc943SGeorge Wilson * Don't factor holes into the decision. 285688ecc943SGeorge Wilson */ 285788ecc943SGeorge Wilson if (child->vdev_ishole) 285888ecc943SGeorge Wilson continue; 285988ecc943SGeorge Wilson 2860e14bb325SJeff Bonwick if (!vdev_readable(child) || 28618ad4d6ddSJeff Bonwick (!vdev_writeable(child) && spa_writeable(spa))) { 286251ece835Seschrock /* 286351ece835Seschrock * Root special: if there is a top-level log 286451ece835Seschrock * device, treat the root vdev as if it were 286551ece835Seschrock * degraded. 286651ece835Seschrock */ 286751ece835Seschrock if (child->vdev_islog && vd == rvd) 286851ece835Seschrock degraded++; 286951ece835Seschrock else 287051ece835Seschrock faulted++; 287151ece835Seschrock } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 28723d7072f8Seschrock degraded++; 287351ece835Seschrock } 287444cd46caSbillm 28753d7072f8Seschrock if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 28763d7072f8Seschrock corrupted++; 28773d7072f8Seschrock } 287844cd46caSbillm 28793d7072f8Seschrock vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 28803d7072f8Seschrock 28813d7072f8Seschrock /* 2882e14bb325SJeff Bonwick * Root special: if there is a top-level vdev that cannot be 28833d7072f8Seschrock * opened due to corrupted metadata, then propagate the root 28843d7072f8Seschrock * vdev's aux state as 'corrupt' rather than 'insufficient 28853d7072f8Seschrock * replicas'. 
28863d7072f8Seschrock */ 28873d7072f8Seschrock if (corrupted && vd == rvd && 28883d7072f8Seschrock rvd->vdev_state == VDEV_STATE_CANT_OPEN) 28893d7072f8Seschrock vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 28903d7072f8Seschrock VDEV_AUX_CORRUPT_DATA); 28913d7072f8Seschrock } 28923d7072f8Seschrock 289351ece835Seschrock if (vd->vdev_parent) 28943d7072f8Seschrock vdev_propagate_state(vd->vdev_parent); 289544cd46caSbillm } 289644cd46caSbillm 2897fa9e4066Sahrens /* 2898ea8dc4b6Seschrock * Set a vdev's state. If this is during an open, we don't update the parent 2899ea8dc4b6Seschrock * state, because we're in the process of opening children depth-first. 2900ea8dc4b6Seschrock * Otherwise, we propagate the change to the parent. 2901ea8dc4b6Seschrock * 2902ea8dc4b6Seschrock * If this routine places a device in a faulted state, an appropriate ereport is 2903ea8dc4b6Seschrock * generated. 2904fa9e4066Sahrens */ 2905fa9e4066Sahrens void 2906ea8dc4b6Seschrock vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2907fa9e4066Sahrens { 2908560e6e96Seschrock uint64_t save_state; 2909c5904d13Seschrock spa_t *spa = vd->vdev_spa; 2910ea8dc4b6Seschrock 2911ea8dc4b6Seschrock if (state == vd->vdev_state) { 2912ea8dc4b6Seschrock vd->vdev_stat.vs_aux = aux; 2913fa9e4066Sahrens return; 2914ea8dc4b6Seschrock } 2915ea8dc4b6Seschrock 2916560e6e96Seschrock save_state = vd->vdev_state; 2917fa9e4066Sahrens 2918fa9e4066Sahrens vd->vdev_state = state; 2919fa9e4066Sahrens vd->vdev_stat.vs_aux = aux; 2920fa9e4066Sahrens 29213d7072f8Seschrock /* 29223d7072f8Seschrock * If we are setting the vdev state to anything but an open state, then 292398d1cbfeSGeorge Wilson * always close the underlying device unless the device has requested 292498d1cbfeSGeorge Wilson * a delayed close (i.e. we're about to remove or fault the device). 292598d1cbfeSGeorge Wilson * Otherwise, we keep accessible but invalid devices open forever. 292698d1cbfeSGeorge Wilson * We don't call vdev_close() itself, because that implies some extra 292798d1cbfeSGeorge Wilson * checks (offline, etc) that we don't want here. This is limited to 292898d1cbfeSGeorge Wilson * leaf devices, because otherwise closing the device will affect other 292998d1cbfeSGeorge Wilson * children. 293098d1cbfeSGeorge Wilson */ 293198d1cbfeSGeorge Wilson if (!vd->vdev_delayed_close && vdev_is_dead(vd) && 293298d1cbfeSGeorge Wilson vd->vdev_ops->vdev_op_leaf) 29333d7072f8Seschrock vd->vdev_ops->vdev_op_close(vd); 29343d7072f8Seschrock 2935069f55e2SEric Schrock /* 2936069f55e2SEric Schrock * If we have brought this vdev back into service, we need 2937069f55e2SEric Schrock * to notify fmd so that it can gracefully repair any outstanding 2938069f55e2SEric Schrock * cases due to a missing device. We do this in all cases, even those 2939069f55e2SEric Schrock * that probably don't correlate to a repaired fault. This is sure to 2940069f55e2SEric Schrock * catch all cases, and we let the zfs-retire agent sort it out. If 2941069f55e2SEric Schrock * this is a transient state it's OK, as the retire agent will 2942069f55e2SEric Schrock * double-check the state of the vdev before repairing it. 
2943069f55e2SEric Schrock */ 2944069f55e2SEric Schrock if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf && 2945069f55e2SEric Schrock vd->vdev_prevstate != state) 2946069f55e2SEric Schrock zfs_post_state_change(spa, vd); 2947069f55e2SEric Schrock 29483d7072f8Seschrock if (vd->vdev_removed && 29493d7072f8Seschrock state == VDEV_STATE_CANT_OPEN && 29503d7072f8Seschrock (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 29513d7072f8Seschrock /* 29523d7072f8Seschrock * If the previous state is set to VDEV_STATE_REMOVED, then this 29533d7072f8Seschrock * device was previously marked removed and someone attempted to 29543d7072f8Seschrock * reopen it. If this failed due to a nonexistent device, then 29553d7072f8Seschrock * keep the device in the REMOVED state. We also let this be if 29563d7072f8Seschrock * it is one of our special test online cases, which is only 29573d7072f8Seschrock * attempting to online the device and shouldn't generate an FMA 29583d7072f8Seschrock * fault. 29593d7072f8Seschrock */ 29603d7072f8Seschrock vd->vdev_state = VDEV_STATE_REMOVED; 29613d7072f8Seschrock vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 29623d7072f8Seschrock } else if (state == VDEV_STATE_REMOVED) { 29633d7072f8Seschrock vd->vdev_removed = B_TRUE; 29643d7072f8Seschrock } else if (state == VDEV_STATE_CANT_OPEN) { 2965ea8dc4b6Seschrock /* 2966cb04b873SMark J Musante * If we fail to open a vdev during an import or recovery, we 2967cb04b873SMark J Musante * mark it as "not available", which signifies that it was 2968cb04b873SMark J Musante * never there to begin with. Failure to open such a device 2969cb04b873SMark J Musante * is not considered an error. 2970ea8dc4b6Seschrock */ 2971cb04b873SMark J Musante if ((spa_load_state(spa) == SPA_LOAD_IMPORT || 2972cb04b873SMark J Musante spa_load_state(spa) == SPA_LOAD_RECOVER) && 2973560e6e96Seschrock vd->vdev_ops->vdev_op_leaf) 2974560e6e96Seschrock vd->vdev_not_present = 1; 2975560e6e96Seschrock 2976560e6e96Seschrock /* 2977560e6e96Seschrock * Post the appropriate ereport. If the 'prevstate' field is 2978560e6e96Seschrock * set to something other than VDEV_STATE_UNKNOWN, it indicates 2979560e6e96Seschrock * that this is part of a vdev_reopen(). In this case, we don't 2980560e6e96Seschrock * want to post the ereport if the device was already in the 2981560e6e96Seschrock * CANT_OPEN state beforehand. 29823d7072f8Seschrock * 29833d7072f8Seschrock * If the 'checkremove' flag is set, then this is an attempt to 29843d7072f8Seschrock * online the device in response to an insertion event. If we 29853d7072f8Seschrock * hit this case, then we have detected an insertion event for a 29863d7072f8Seschrock * faulted or offline device that wasn't in the removed state. 29873d7072f8Seschrock * In this scenario, we don't post an ereport because we are 29883d7072f8Seschrock * about to replace the device, or attempt an online with 29893d7072f8Seschrock * vdev_forcefault, which will generate the fault for us. 
2990560e6e96Seschrock */ 29913d7072f8Seschrock if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 29923d7072f8Seschrock !vd->vdev_not_present && !vd->vdev_checkremove && 2993c5904d13Seschrock vd != spa->spa_root_vdev) { 2994ea8dc4b6Seschrock const char *class; 2995ea8dc4b6Seschrock 2996ea8dc4b6Seschrock switch (aux) { 2997ea8dc4b6Seschrock case VDEV_AUX_OPEN_FAILED: 2998ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 2999ea8dc4b6Seschrock break; 3000ea8dc4b6Seschrock case VDEV_AUX_CORRUPT_DATA: 3001ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 3002ea8dc4b6Seschrock break; 3003ea8dc4b6Seschrock case VDEV_AUX_NO_REPLICAS: 3004ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 3005ea8dc4b6Seschrock break; 3006ea8dc4b6Seschrock case VDEV_AUX_BAD_GUID_SUM: 3007ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 3008ea8dc4b6Seschrock break; 3009ea8dc4b6Seschrock case VDEV_AUX_TOO_SMALL: 3010ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 3011ea8dc4b6Seschrock break; 3012ea8dc4b6Seschrock case VDEV_AUX_BAD_LABEL: 3013ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 3014ea8dc4b6Seschrock break; 3015ea8dc4b6Seschrock default: 3016ea8dc4b6Seschrock class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 3017ea8dc4b6Seschrock } 3018ea8dc4b6Seschrock 3019c5904d13Seschrock zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 3020ea8dc4b6Seschrock } 3021ea8dc4b6Seschrock 30223d7072f8Seschrock /* Erase any notion of persistent removed state */ 30233d7072f8Seschrock vd->vdev_removed = B_FALSE; 30243d7072f8Seschrock } else { 30253d7072f8Seschrock vd->vdev_removed = B_FALSE; 30263d7072f8Seschrock } 3027ea8dc4b6Seschrock 30288b33d774STim Haley if (!isopen && vd->vdev_parent) 30298b33d774STim Haley vdev_propagate_state(vd->vdev_parent); 3030fa9e4066Sahrens } 303115e6edf1Sgw 303215e6edf1Sgw /* 303315e6edf1Sgw * Check the vdev configuration to ensure that it's capable of supporting 303415e6edf1Sgw * a root pool. Currently, we do not support RAID-Z or partial configuration. 303515e6edf1Sgw * In addition, only a single top-level vdev is allowed and none of the leaves 303615e6edf1Sgw * can be wholedisks. 303715e6edf1Sgw */ 303815e6edf1Sgw boolean_t 303915e6edf1Sgw vdev_is_bootable(vdev_t *vd) 304015e6edf1Sgw { 304115e6edf1Sgw if (!vd->vdev_ops->vdev_op_leaf) { 304215e6edf1Sgw char *vdev_type = vd->vdev_ops->vdev_op_type; 304315e6edf1Sgw 304415e6edf1Sgw if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 304515e6edf1Sgw vd->vdev_children > 1) { 304615e6edf1Sgw return (B_FALSE); 304715e6edf1Sgw } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || 304815e6edf1Sgw strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { 304915e6edf1Sgw return (B_FALSE); 305015e6edf1Sgw } 305115e6edf1Sgw } else if (vd->vdev_wholedisk == 1) { 305215e6edf1Sgw return (B_FALSE); 305315e6edf1Sgw } 305415e6edf1Sgw 3055573ca77eSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) { 305615e6edf1Sgw if (!vdev_is_bootable(vd->vdev_child[c])) 305715e6edf1Sgw return (B_FALSE); 305815e6edf1Sgw } 305915e6edf1Sgw return (B_TRUE); 306015e6edf1Sgw } 3061e6ca193dSGeorge Wilson 306288ecc943SGeorge Wilson /* 306388ecc943SGeorge Wilson * Load the state from the original vdev tree (ovd) which 306488ecc943SGeorge Wilson * we've retrieved from the MOS config object. If the original 30654b964adaSGeorge Wilson * vdev was offline or faulted then we transfer that state to the 30664b964adaSGeorge Wilson * device in the current vdev tree (nvd). 
306788ecc943SGeorge Wilson */ 3068e6ca193dSGeorge Wilson void 306988ecc943SGeorge Wilson vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) 3070e6ca193dSGeorge Wilson { 307188ecc943SGeorge Wilson spa_t *spa = nvd->vdev_spa; 3072e6ca193dSGeorge Wilson 30734b964adaSGeorge Wilson ASSERT(nvd->vdev_top->vdev_islog); 307488ecc943SGeorge Wilson ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 307588ecc943SGeorge Wilson ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid); 3076e6ca193dSGeorge Wilson 307788ecc943SGeorge Wilson for (int c = 0; c < nvd->vdev_children; c++) 307888ecc943SGeorge Wilson vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]); 3079e6ca193dSGeorge Wilson 30804b964adaSGeorge Wilson if (nvd->vdev_ops->vdev_op_leaf) { 3081e6ca193dSGeorge Wilson /* 30824b964adaSGeorge Wilson * Restore the persistent vdev state 3083e6ca193dSGeorge Wilson */ 308488ecc943SGeorge Wilson nvd->vdev_offline = ovd->vdev_offline; 30854b964adaSGeorge Wilson nvd->vdev_faulted = ovd->vdev_faulted; 30864b964adaSGeorge Wilson nvd->vdev_degraded = ovd->vdev_degraded; 30874b964adaSGeorge Wilson nvd->vdev_removed = ovd->vdev_removed; 3088e6ca193dSGeorge Wilson } 3089e6ca193dSGeorge Wilson } 3090573ca77eSGeorge Wilson 30914b964adaSGeorge Wilson /* 30924b964adaSGeorge Wilson * Determine if a log device has valid content. If the vdev was 30934b964adaSGeorge Wilson * removed or faulted in the MOS config then we know that 30944b964adaSGeorge Wilson * the content on the log device has already been written to the pool. 30954b964adaSGeorge Wilson */ 30964b964adaSGeorge Wilson boolean_t 30974b964adaSGeorge Wilson vdev_log_state_valid(vdev_t *vd) 30984b964adaSGeorge Wilson { 30994b964adaSGeorge Wilson if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && 31004b964adaSGeorge Wilson !vd->vdev_removed) 31014b964adaSGeorge Wilson return (B_TRUE); 31024b964adaSGeorge Wilson 31034b964adaSGeorge Wilson for (int c = 0; c < vd->vdev_children; c++) 31044b964adaSGeorge Wilson if (vdev_log_state_valid(vd->vdev_child[c])) 31054b964adaSGeorge Wilson return (B_TRUE); 31064b964adaSGeorge Wilson 31074b964adaSGeorge Wilson return (B_FALSE); 31084b964adaSGeorge Wilson } 31094b964adaSGeorge Wilson 3110573ca77eSGeorge Wilson /* 3111573ca77eSGeorge Wilson * Expand a vdev if possible. 3112573ca77eSGeorge Wilson */ 3113573ca77eSGeorge Wilson void 3114573ca77eSGeorge Wilson vdev_expand(vdev_t *vd, uint64_t txg) 3115573ca77eSGeorge Wilson { 3116573ca77eSGeorge Wilson ASSERT(vd->vdev_top == vd); 3117573ca77eSGeorge Wilson ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3118573ca77eSGeorge Wilson 3119573ca77eSGeorge Wilson if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) { 3120573ca77eSGeorge Wilson VERIFY(vdev_metaslab_init(vd, txg) == 0); 3121573ca77eSGeorge Wilson vdev_config_dirty(vd); 3122573ca77eSGeorge Wilson } 3123573ca77eSGeorge Wilson } 31241195e687SMark J Musante 31251195e687SMark J Musante /* 31261195e687SMark J Musante * Split a vdev. 
31271195e687SMark J Musante */ 31281195e687SMark J Musante void 31291195e687SMark J Musante vdev_split(vdev_t *vd) 31301195e687SMark J Musante { 31311195e687SMark J Musante vdev_t *cvd, *pvd = vd->vdev_parent; 31321195e687SMark J Musante 31331195e687SMark J Musante vdev_remove_child(pvd, vd); 31341195e687SMark J Musante vdev_compact_children(pvd); 31351195e687SMark J Musante 31361195e687SMark J Musante cvd = pvd->vdev_child[0]; 31371195e687SMark J Musante if (pvd->vdev_children == 1) { 31381195e687SMark J Musante vdev_remove_parent(cvd); 31391195e687SMark J Musante cvd->vdev_splitting = B_TRUE; 31401195e687SMark J Musante } 31411195e687SMark J Musante vdev_propagate_state(cvd); 31421195e687SMark J Musante } 3143