/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return (pvd->vdev_min_asize / pvd->vdev_children);

	return (pvd->vdev_min_asize);
}

void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf)
		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
}

/*
 * Remove any holes in the child array.
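 * The remaining children are repacked to the front of the array and their
 * vdev_id fields are renumbered to match their new positions.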
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
		    &vd->vdev_dtl_lock);
	}
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EINVAL);

	if ((ops = vdev_getops(type)) == NULL)
		return (EINVAL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (EINVAL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (EINVAL);
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (EINVAL);

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (ENOTSUP);

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			/*
			 * Currently, we can only support 3 parity devices.
			 */
			if (nparity == 0 || nparity > 3)
				return (EINVAL);
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (ENOTSUP);
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (ENOTSUP);
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (EINVAL);
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the
	 * device was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_smo.smo_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		/*
		 * When importing a pool, we want to ignore the persistent
		 * fault state, as the diagnosis made on another system may
		 * not be valid in the current context.
		 */
		if (spa->spa_load_state == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		space_map_unload(&vd->vdev_dtl[t]);
		space_map_destroy(&vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	metaslab_class_t *mc;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
		return (0);

	/*
	 * Compute the raidz-deflation ratio.  Note, we hard-code
	 * in 128k (1 << 17) because it is the current "typical" blocksize.
	 * Even if SPA_MAXBLOCKSIZE changes, this algorithm must never change,
	 * or we will inconsistently account for existing bp's.
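	 *
	 * For example (assuming SPA_MINBLOCKSHIFT is 9, i.e. 512-byte
	 * sectors), a vdev where asize equals psize gets a deflate ratio of
	 * 131072 / (131072 >> 9) = 512, while RAID-Z vdevs get a smaller
	 * value that accounts for their parity overhead.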
	 */
	vd->vdev_deflate_ratio = (1 << 17) /
	    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);

	ASSERT(oldc <= newc);

	if (vd->vdev_islog)
		mc = spa->spa_log_class;
	else
		mc = spa->spa_normal_class;

	if (vd->vdev_mg == NULL)
		vd->vdev_mg = metaslab_group_create(mc, vd);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		space_map_obj_t smo = { 0, 0, 0 };
		if (txg == 0) {
			uint64_t object = 0;
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error)
				return (error);
			if (object != 0) {
				dmu_buf_t *db;
				error = dmu_bonus_hold(mos, object, FTAG, &db);
				if (error)
					return (error);
				ASSERT3U(db->db_size, >=, sizeof (smo));
				bcopy(db->db_data, &smo, sizeof (smo));
				ASSERT3U(smo.smo_object, ==, object);
				dmu_buf_rele(db, FTAG);
			}
		}
		vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo,
		    m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);
	}

	return (0);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			if (vd->vdev_ms[m] != NULL)
				metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_data,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			zio_buf_free(zio->io_data, zio->io_size);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		zio_buf_free(zio->io_data, zio->io_size);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = ENXIO;
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		while ((pio = zio_walk_parents(zio)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = ENXIO;

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible by reading and writing
 * to several known locations: the pad regions of each vdev label
 * but the first (which we leave alone in case it contains a VTOC).
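 * Each probe reads VDEV_PAD_SIZE bytes from the vl_pad2 region of labels
 * 1 through 3 and, if the pool is writeable, writes the same data back;
 * vdev_probe_done() then folds the results into vdev_cant_read and
 * vdev_cant_write.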
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)),
		    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * in order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock
	 */
	if (vdev_uses_zvols(vd)) {
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
		return;
	}
	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != NULL);

	taskq_destroy(tq);
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (ENXIO);
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vd->vdev_state = VDEV_STATE_HEALTHY;
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (EINVAL);
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
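		 * (ashift is roughly the log2 of the device's sector size;
		 * if it has grown since the vdev was labeled, existing
		 * allocations may no longer be properly aligned, so we
		 * refuse to open the device.)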
		 */
		if (ashift > vd->vdev_top->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}
	}

	/*
	 * If all children are healthy and the asize has increased,
	 * then we've experienced dynamic LUN growth.  If automatic
	 * expansion is enabled then use the additional space.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY && asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_IO_FAILURE);
		return (error);
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid, top_guid;
	uint64_t state;

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (EBADF);

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {

		if ((label = vdev_label_read_config(vd)) == NULL) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If this vdev just became a top-level vdev because its
		 * sibling was detached, it will have adopted the parent's
		 * vdev guid -- but the label may or may not be on disk yet.
		 * Fortunately, either version of the label will have the
		 * same top guid, so if we're a top-level vdev, we can
		 * safely compare to that instead.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
		    &guid) != 0 ||
		    nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
		    &top_guid) != 0 ||
		    (vd->vdev_guid != guid &&
		    (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		nvlist_free(label);

		/*
		 * If spa->spa_load_verbatim is true, no need to check the
		 * state of the pool.
		 */
		if (!spa->spa_load_verbatim &&
		    spa->spa_load_state == SPA_LOAD_OPEN &&
		    state != POOL_STATE_ACTIVE)
			return (EBADF);

		/*
		 * If we were able to open and validate a vdev that was
		 * previously marked permanently unavailable, clear that state
		 * now.
		 */
		if (vd->vdev_not_present)
			vd->vdev_not_present = 0;
	}

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    vd->vdev_aux == &spa->spa_l2cache &&
		    !l2arc_vdev_present(vd))
			l2arc_add_vdev(spa, vd);
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}

void
vdev_metaslab_set_size(vdev_t *vd)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
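	 * The shift is clamped below at SPA_MAXBLOCKSHIFT so that even the
	 * smallest metaslab can hold at least one maximum-sized block.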
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}

void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
	ASSERT(vd == vd->vdev_top);
	ASSERT(ISP2(flags));

	if (flags & VDD_METASLAB)
		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);

	if (flags & VDD_DTL)
		(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);

	(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}

/*
 * DTLs.
 *
 * A vdev's DTL (dirty time log) is the set of transaction groups for which
 * the vdev has less than perfect replication.  There are four kinds of DTL:
 *
 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
 *
 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
 *
 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
 *	scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
 *	txgs that was scrubbed.
 *
 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
 *	persistent errors or just some device being offline.
 *	Unlike the other three, the DTL_OUTAGE map is not generally
 *	maintained; it's only computed when needed, typically to
 *	determine whether a device can be detached.
 *
 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
 * either has the data or it doesn't.
 *
 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
 * if any child is less than fully replicated, then so is its parent.
 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
 * comprising only those txgs which appear in more than 'maxfaults' children;
 * those are the txgs we don't have enough replication to read.  For example,
 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
 * two child DTL_MISSING maps.
 *
 * It should be clear from the above that to compute the DTLs and outage maps
 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
 * Therefore, that is all we keep on disk.  When loading the pool, or after
 * a configuration change, we generate all other DTLs from first principles.
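 *
 * To make the 'maxfaults' rule concrete: for a raidz2 vdev,
 * vdev_dtl_reassess() below uses minref = nparity + 1 = 3, so a txg enters
 * the parent's DTL_MISSING only when three or more children are missing it;
 * for an N-way mirror, minref = N, so every child must be missing the txg.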
 */
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (!space_map_contains(sm, txg, size))
		space_map_add(sm, txg, size);
	mutex_exit(sm->sm_lock);
}

boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t dirty = B_FALSE;

	ASSERT(t < DTL_TYPES);
	ASSERT(vd != vd->vdev_spa->spa_root_vdev);

	mutex_enter(sm->sm_lock);
	if (sm->sm_space != 0)
		dirty = space_map_contains(sm, txg, size);
	mutex_exit(sm->sm_lock);

	return (dirty);
}

boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
	space_map_t *sm = &vd->vdev_dtl[t];
	boolean_t empty;

	mutex_enter(sm->sm_lock);
	empty = (sm->sm_space == 0);
	mutex_exit(sm->sm_lock);

	return (empty);
}

/*
 * Reassess DTLs after a config change or scrub completion.
 */
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
{
	spa_t *spa = vd->vdev_spa;
	avl_tree_t reftree;
	int minref;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_dtl_reassess(vd->vdev_child[c], txg,
		    scrub_txg, scrub_done);

	if (vd == spa->spa_root_vdev)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		mutex_enter(&vd->vdev_dtl_lock);
		if (scrub_txg != 0 &&
		    (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
			/* XXX should check scrub_done? */
			/*
			 * We completed a scrub up to scrub_txg.  If we
			 * did it without rebooting, then the scrub dtl
			 * will be valid, so excise the old region and
			 * fold in the scrub dtl.  Otherwise, leave the
			 * dtl as-is if there was an error.
			 *
			 * There's a little trick here: to excise the beginning
			 * of the DTL_MISSING map, we put it into a reference
			 * tree and then add a segment with refcnt -1 that
			 * covers the range [0, scrub_txg).  This means
			 * that each txg in that range has refcnt -1 or 0.
			 * We then add DTL_SCRUB with a refcnt of 2, so that
			 * entries in the range [0, scrub_txg) will have a
			 * positive refcnt -- either 1 or 2.  We then convert
			 * the reference tree into the new DTL_MISSING map.
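			 *
			 * For example, if DTL_MISSING is [5,10) and DTL_SCRUB
			 * is [6,8) with scrub_txg = 20, the reference tree
			 * holds +1 over [5,10), -1 over [0,20) and +2 over
			 * [6,8); only [6,8) remains positive, so it becomes
			 * the new DTL_MISSING -- exactly the txgs the scrub
			 * could not repair.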
			 */
			space_map_ref_create(&reftree);
			space_map_ref_add_map(&reftree,
			    &vd->vdev_dtl[DTL_MISSING], 1);
			space_map_ref_add_seg(&reftree, 0, scrub_txg, -1);
			space_map_ref_add_map(&reftree,
			    &vd->vdev_dtl[DTL_SCRUB], 2);
			space_map_ref_generate_map(&reftree,
			    &vd->vdev_dtl[DTL_MISSING], 1);
			space_map_ref_destroy(&reftree);
		}
		space_map_vacate(&vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
		space_map_walk(&vd->vdev_dtl[DTL_MISSING],
		    space_map_add, &vd->vdev_dtl[DTL_PARTIAL]);
		if (scrub_done)
			space_map_vacate(&vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
		space_map_vacate(&vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
		if (!vdev_readable(vd))
			space_map_add(&vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
		else
			space_map_walk(&vd->vdev_dtl[DTL_MISSING],
			    space_map_add, &vd->vdev_dtl[DTL_OUTAGE]);
		mutex_exit(&vd->vdev_dtl_lock);

		if (txg != 0)
			vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
		return;
	}

	mutex_enter(&vd->vdev_dtl_lock);
	for (int t = 0; t < DTL_TYPES; t++) {
		if (t == DTL_SCRUB)
			continue;			/* leaf vdevs only */
		if (t == DTL_PARTIAL)
			minref = 1;			/* i.e. non-zero */
		else if (vd->vdev_nparity != 0)
			minref = vd->vdev_nparity + 1;	/* RAID-Z */
		else
			minref = vd->vdev_children;	/* any kind of mirror */
		space_map_ref_create(&reftree);
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];
			mutex_enter(&cvd->vdev_dtl_lock);
			space_map_ref_add_map(&reftree, &cvd->vdev_dtl[t], 1);
			mutex_exit(&cvd->vdev_dtl_lock);
		}
		space_map_ref_generate_map(&reftree, &vd->vdev_dtl[t], minref);
		space_map_ref_destroy(&reftree);
	}
	mutex_exit(&vd->vdev_dtl_lock);
}

static int
vdev_dtl_load(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl_smo;
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *db;
	int error;

	ASSERT(vd->vdev_children == 0);

	if (smo->smo_object == 0)
		return (0);

	if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0)
		return (error);

	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(db->db_data, smo, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	mutex_enter(&vd->vdev_dtl_lock);
	error = space_map_load(&vd->vdev_dtl[DTL_MISSING],
	    NULL, SM_ALLOC, smo, mos);
	mutex_exit(&vd->vdev_dtl_lock);

	return (error);
}

void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl_smo;
	space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
	objset_t *mos = spa->spa_meta_objset;
	space_map_t smsync;
	kmutex_t smlock;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	if (vd->vdev_detached) {
		if (smo->smo_object != 0) {
			int err = dmu_object_free(mos, smo->smo_object, tx);
			ASSERT3U(err, ==, 0);
			smo->smo_object = 0;
		}
		dmu_tx_commit(tx);
		return;
	}

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		vdev_config_dirty(vd->vdev_top);
	}

	mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);

	space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
	    &smlock);

	mutex_enter(&smlock);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_walk(sm, space_map_add, &smsync);
	mutex_exit(&vd->vdev_dtl_lock);

	space_map_truncate(smo, mos, tx);
	space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);

	space_map_destroy(&smsync);

	mutex_exit(&smlock);
	mutex_destroy(&smlock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

/*
 * Determine whether the specified vdev can be offlined/detached/removed
 * without losing data.
 */
boolean_t
vdev_dtl_required(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *tvd = vd->vdev_top;
	uint8_t cant_read = vd->vdev_cant_read;
	boolean_t required;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	if (vd == spa->spa_root_vdev || vd == tvd)
		return (B_TRUE);

	/*
	 * Temporarily mark the device as unreadable, and then determine
	 * whether this results in any DTL outages in the top-level vdev.
	 * If not, we can safely offline/detach/remove the device.
	 */
	vd->vdev_cant_read = B_TRUE;
	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
	required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
	vd->vdev_cant_read = cant_read;
	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);

	return (required);
}

/*
 * Determine if resilver is needed, and if so the txg range.
 */
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
	boolean_t needed = B_FALSE;
	uint64_t thismin = UINT64_MAX;
	uint64_t thismax = 0;

	if (vd->vdev_children == 0) {
		mutex_enter(&vd->vdev_dtl_lock);
		if (vd->vdev_dtl[DTL_MISSING].sm_space != 0 &&
		    vdev_writeable(vd)) {
			space_seg_t *ss;

			ss = avl_first(&vd->vdev_dtl[DTL_MISSING].sm_root);
			thismin = ss->ss_start - 1;
			ss = avl_last(&vd->vdev_dtl[DTL_MISSING].sm_root);
			thismax = ss->ss_end;
			needed = B_TRUE;
		}
		mutex_exit(&vd->vdev_dtl_lock);
	} else {
		for (int c = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];
			uint64_t cmin, cmax;

			if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
				thismin = MIN(thismin, cmin);
				thismax = MAX(thismax, cmax);
				needed = B_TRUE;
			}
		}
	}

	if (needed && minp) {
		*minp = thismin;
		*maxp = thismax;
	}
	return (needed);
}

void
vdev_load(vdev_t *vd)
{
	/*
	 * Recursively load all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_load(vd->vdev_child[c]);

	/*
	 * If this is a top-level vdev, initialize its metaslabs.
	 */
	if (vd == vd->vdev_top &&
	    (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
	    vdev_metaslab_init(vd, 0) != 0))
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);

	/*
	 * If this is a leaf vdev, load its DTL.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
}

/*
 * The special vdev case is used for hot spares and l2cache devices.  Its
 * sole purpose is to set the vdev state for the associated vdev.  To do this,
 * we make sure that we can open the underlying device, then try to read the
 * label, and make sure that the label is sane and that it hasn't been
 * repurposed to another pool.
 */
int
vdev_validate_aux(vdev_t *vd)
{
	nvlist_t *label;
	uint64_t guid, version;
	uint64_t state;

	if (!vdev_readable(vd))
		return (0);

	if ((label = vdev_label_read_config(vd)) == NULL) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		return (-1);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
	    version > SPA_VERSION ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
	    guid != vd->vdev_guid ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		return (-1);
	}

	/*
	 * We don't actually check the pool state here.  If it's in fact in
	 * use by another pool, we update this fact on the fly when requested.
	 */
	nvlist_free(label);
	return (0);
}

void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
	metaslab_t *msp;

	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
		metaslab_sync_done(msp, txg);
}

void
vdev_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *lvd;
	metaslab_t *msp;
	dmu_tx_t *tx;

	if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
		ASSERT(vd == vd->vdev_top);
		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
		ASSERT(vd->vdev_ms_array != 0);
		vdev_config_dirty(vd);
		dmu_tx_commit(tx);
	}

	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
		metaslab_sync(msp, txg);
		(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
	}

	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
		vdev_dtl_sync(lvd, txg);

	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
}

uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
	return (vd->vdev_ops->vdev_op_asize(vd, psize));
}

/*
 * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
 * not be opened, and no I/O is attempted.
 */
int
vdev_fault(spa_t *spa, uint64_t guid)
{
	vdev_t *vd;

	spa_vdev_state_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	/*
	 * Faulted state takes precedence over degraded.
	 */
	vd->vdev_faulted = 1ULL;
	vd->vdev_degraded = 0ULL;
	vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);

	/*
	 * If marking the vdev as faulted causes the top-level vdev to become
	 * unavailable, then back off and simply mark the vdev as degraded
	 * instead.
	 */
	if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
		vd->vdev_degraded = 1ULL;
		vd->vdev_faulted = 0ULL;

		/*
		 * If we reopen the device and it's not dead, only then do we
		 * mark it degraded.
		 */
		vdev_reopen(vd);

		if (vdev_readable(vd)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_ERR_EXCEEDED);
		}
	}

	return (spa_vdev_state_exit(spa, vd, 0));
}

/*
 * Mark the given vdev degraded.  A degraded vdev is purely an indication to the
 * user that something is wrong.  The vdev continues to operate as normal as far
 * as I/O is concerned.
 */
int
vdev_degrade(spa_t *spa, uint64_t guid)
{
	vdev_t *vd;

	spa_vdev_state_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	/*
	 * If the vdev is already faulted, then don't do anything.
	 */
	if (vd->vdev_faulted || vd->vdev_degraded)
		return (spa_vdev_state_exit(spa, NULL, 0));

	vd->vdev_degraded = 1ULL;
	if (!vdev_is_dead(vd))
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);

	return (spa_vdev_state_exit(spa, vd, 0));
}

/*
 * Online the given vdev.  If 'unspare' is set, it implies two things.  First,
 * any attached spare device should be detached when the device finishes
 * resilvering.  Second, the online should be treated like a 'test' online case,
 * so no FMA events are generated if the device fails to open.
 */
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
	vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;

	spa_vdev_state_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	tvd = vd->vdev_top;
	vd->vdev_offline = B_FALSE;
	vd->vdev_tmpoffline = B_FALSE;
	vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
	vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);

	/* XXX - L2ARC 1.0 does not support expansion */
	if (!vd->vdev_aux) {
		for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
			pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
	}

	vdev_reopen(tvd);
	vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;

	if (!vd->vdev_aux) {
		for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
			pvd->vdev_expanding = B_FALSE;
	}

	if (newstate)
		*newstate = vd->vdev_state;
	if ((flags & ZFS_ONLINE_UNSPARE) &&
	    !vdev_is_dead(vd) && vd->vdev_parent &&
	    vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
	    vd->vdev_parent->vdev_child[0] == vd)
		vd->vdev_unspare = B_TRUE;

	if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {

		/* XXX - L2ARC 1.0 does not support expansion */
		if (vd->vdev_aux)
			return (spa_vdev_state_exit(spa, vd, ENOTSUP));
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
	}
	return (spa_vdev_state_exit(spa, vd, 0));
}

int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
	vdev_t *vd, *tvd;
	int error;

	spa_vdev_state_enter(spa);

spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2004 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2005 2006 if (!vd->vdev_ops->vdev_op_leaf) 2007 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2008 2009 tvd = vd->vdev_top; 2010 2011 /* 2012 * If the device isn't already offline, try to offline it. 2013 */ 2014 if (!vd->vdev_offline) { 2015 /* 2016 * If this device has the only valid copy of some data, 2017 * don't allow it to be offlined. Log devices are always 2018 * expendable. 2019 */ 2020 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2021 vdev_dtl_required(vd)) 2022 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2023 2024 /* 2025 * Offline this device and reopen its top-level vdev. 2026 * If the top-level vdev is a log device then just offline 2027 * it. Otherwise, if this action results in the top-level 2028 * vdev becoming unusable, undo it and fail the request. 2029 */ 2030 vd->vdev_offline = B_TRUE; 2031 vdev_reopen(tvd); 2032 2033 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 2034 vdev_is_dead(tvd)) { 2035 vd->vdev_offline = B_FALSE; 2036 vdev_reopen(tvd); 2037 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 2038 } 2039 } 2040 2041 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 2042 2043 if (!tvd->vdev_islog || !vdev_is_dead(tvd)) 2044 return (spa_vdev_state_exit(spa, vd, 0)); 2045 2046 (void) spa_vdev_state_exit(spa, vd, 0); 2047 2048 error = dmu_objset_find(spa_name(spa), zil_vdev_offline, 2049 NULL, DS_FIND_CHILDREN); 2050 if (error) { 2051 (void) vdev_online(spa, guid, 0, NULL); 2052 return (error); 2053 } 2054 /* 2055 * If we successfully offlined the log device then we need to 2056 * sync out the current txg so that the "stubby" block can be 2057 * removed by zil_sync(). 2058 */ 2059 txg_wait_synced(spa->spa_dsl_pool, 0); 2060 return (0); 2061 } 2062 2063 /* 2064 * Clear the error counts associated with this vdev. Unlike vdev_online() and 2065 * vdev_offline(), we assume the spa config is locked. We also clear all 2066 * children. If 'vd' is NULL, then the user wants to clear all vdevs. 2067 */ 2068 void 2069 vdev_clear(spa_t *spa, vdev_t *vd) 2070 { 2071 vdev_t *rvd = spa->spa_root_vdev; 2072 2073 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2074 2075 if (vd == NULL) 2076 vd = rvd; 2077 2078 vd->vdev_stat.vs_read_errors = 0; 2079 vd->vdev_stat.vs_write_errors = 0; 2080 vd->vdev_stat.vs_checksum_errors = 0; 2081 2082 for (int c = 0; c < vd->vdev_children; c++) 2083 vdev_clear(spa, vd->vdev_child[c]); 2084 2085 /* 2086 * If we're in the FAULTED state or have experienced failed I/O, then 2087 * clear the persistent state and attempt to reopen the device. We 2088 * also mark the vdev config dirty, so that the new faulted state is 2089 * written out to disk. 
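 *
 * As the header comment notes, the spa config is assumed to be locked here.
 * An illustrative caller-side sketch (an assumption, not copied from the
 * real callers) that would satisfy the ASSERT above:
 *
 *	spa_config_enter(spa, SCL_STATE_ALL, FTAG, RW_WRITER);
 *	vdev_clear(spa, NULL);	(passing NULL clears every vdev in the pool)
 *	spa_config_exit(spa, SCL_STATE_ALL, FTAG);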
2090 */ 2091 if (vd->vdev_faulted || vd->vdev_degraded || 2092 !vdev_readable(vd) || !vdev_writeable(vd)) { 2093 2094 vd->vdev_faulted = vd->vdev_degraded = 0; 2095 vd->vdev_cant_read = B_FALSE; 2096 vd->vdev_cant_write = B_FALSE; 2097 2098 vdev_reopen(vd); 2099 2100 if (vd != rvd) 2101 vdev_state_dirty(vd->vdev_top); 2102 2103 if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) 2104 spa_async_request(spa, SPA_ASYNC_RESILVER); 2105 2106 spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); 2107 } 2108 } 2109 2110 boolean_t 2111 vdev_is_dead(vdev_t *vd) 2112 { 2113 return (vd->vdev_state < VDEV_STATE_DEGRADED); 2114 } 2115 2116 boolean_t 2117 vdev_readable(vdev_t *vd) 2118 { 2119 return (!vdev_is_dead(vd) && !vd->vdev_cant_read); 2120 } 2121 2122 boolean_t 2123 vdev_writeable(vdev_t *vd) 2124 { 2125 return (!vdev_is_dead(vd) && !vd->vdev_cant_write); 2126 } 2127 2128 boolean_t 2129 vdev_allocatable(vdev_t *vd) 2130 { 2131 uint64_t state = vd->vdev_state; 2132 2133 /* 2134 * We currently allow allocations from vdevs which may be in the 2135 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device 2136 * fails to reopen then we'll catch it later when we're holding 2137 * the proper locks. Note that we have to get the vdev state 2138 * in a local variable because although it changes atomically, 2139 * we're asking two separate questions about it. 2140 */ 2141 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && 2142 !vd->vdev_cant_write); 2143 } 2144 2145 boolean_t 2146 vdev_accessible(vdev_t *vd, zio_t *zio) 2147 { 2148 ASSERT(zio->io_vd == vd); 2149 2150 if (vdev_is_dead(vd) || vd->vdev_remove_wanted) 2151 return (B_FALSE); 2152 2153 if (zio->io_type == ZIO_TYPE_READ) 2154 return (!vd->vdev_cant_read); 2155 2156 if (zio->io_type == ZIO_TYPE_WRITE) 2157 return (!vd->vdev_cant_write); 2158 2159 return (B_TRUE); 2160 } 2161 2162 /* 2163 * Get statistics for the given vdev. 2164 */ 2165 void 2166 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) 2167 { 2168 vdev_t *rvd = vd->vdev_spa->spa_root_vdev; 2169 2170 mutex_enter(&vd->vdev_stat_lock); 2171 bcopy(&vd->vdev_stat, vs, sizeof (*vs)); 2172 vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; 2173 vs->vs_timestamp = gethrtime() - vs->vs_timestamp; 2174 vs->vs_state = vd->vdev_state; 2175 vs->vs_rsize = vdev_get_min_asize(vd); 2176 if (vd->vdev_ops->vdev_op_leaf) 2177 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 2178 mutex_exit(&vd->vdev_stat_lock); 2179 2180 /* 2181 * If we're getting stats on the root vdev, aggregate the I/O counts 2182 * over all top-level vdevs (i.e. the direct children of the root). 2183 */ 2184 if (vd == rvd) { 2185 for (int c = 0; c < rvd->vdev_children; c++) { 2186 vdev_t *cvd = rvd->vdev_child[c]; 2187 vdev_stat_t *cvs = &cvd->vdev_stat; 2188 2189 mutex_enter(&vd->vdev_stat_lock); 2190 for (int t = 0; t < ZIO_TYPES; t++) { 2191 vs->vs_ops[t] += cvs->vs_ops[t]; 2192 vs->vs_bytes[t] += cvs->vs_bytes[t]; 2193 } 2194 vs->vs_scrub_examined += cvs->vs_scrub_examined; 2195 mutex_exit(&vd->vdev_stat_lock); 2196 } 2197 } 2198 } 2199 2200 void 2201 vdev_clear_stats(vdev_t *vd) 2202 { 2203 mutex_enter(&vd->vdev_stat_lock); 2204 vd->vdev_stat.vs_space = 0; 2205 vd->vdev_stat.vs_dspace = 0; 2206 vd->vdev_stat.vs_alloc = 0; 2207 mutex_exit(&vd->vdev_stat_lock); 2208 } 2209 2210 void 2211 vdev_stat_update(zio_t *zio, uint64_t psize) 2212 { 2213 spa_t *spa = zio->io_spa; 2214 vdev_t *rvd = spa->spa_root_vdev; 2215 vdev_t *vd = zio->io_vd ? 
zio->io_vd : rvd; 2216 vdev_t *pvd; 2217 uint64_t txg = zio->io_txg; 2218 vdev_stat_t *vs = &vd->vdev_stat; 2219 zio_type_t type = zio->io_type; 2220 int flags = zio->io_flags; 2221 2222 /* 2223 * If this i/o is a gang leader, it didn't do any actual work. 2224 */ 2225 if (zio->io_gang_tree) 2226 return; 2227 2228 if (zio->io_error == 0) { 2229 /* 2230 * If this is a root i/o, don't count it -- we've already 2231 * counted the top-level vdevs, and vdev_get_stats() will 2232 * aggregate them when asked. This reduces contention on 2233 * the root vdev_stat_lock and implicitly handles blocks 2234 * that compress away to holes, for which there is no i/o. 2235 * (Holes never create vdev children, so all the counters 2236 * remain zero, which is what we want.) 2237 * 2238 * Note: this only applies to successful i/o (io_error == 0) 2239 * because unlike i/o counts, errors are not additive. 2240 * When reading a ditto block, for example, failure of 2241 * one top-level vdev does not imply a root-level error. 2242 */ 2243 if (vd == rvd) 2244 return; 2245 2246 ASSERT(vd == zio->io_vd); 2247 2248 if (flags & ZIO_FLAG_IO_BYPASS) 2249 return; 2250 2251 mutex_enter(&vd->vdev_stat_lock); 2252 2253 if (flags & ZIO_FLAG_IO_REPAIR) { 2254 if (flags & ZIO_FLAG_SCRUB_THREAD) 2255 vs->vs_scrub_repaired += psize; 2256 if (flags & ZIO_FLAG_SELF_HEAL) 2257 vs->vs_self_healed += psize; 2258 } 2259 2260 vs->vs_ops[type]++; 2261 vs->vs_bytes[type] += psize; 2262 2263 mutex_exit(&vd->vdev_stat_lock); 2264 return; 2265 } 2266 2267 if (flags & ZIO_FLAG_SPECULATIVE) 2268 return; 2269 2270 /* 2271 * If this is an I/O error that is going to be retried, then ignore the 2272 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as 2273 * hard errors, when in reality they can happen for any number of 2274 * innocuous reasons (bus resets, MPxIO link failure, etc). 2275 */ 2276 if (zio->io_error == EIO && 2277 !(zio->io_flags & ZIO_FLAG_IO_RETRY)) 2278 return; 2279 2280 mutex_enter(&vd->vdev_stat_lock); 2281 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) { 2282 if (zio->io_error == ECKSUM) 2283 vs->vs_checksum_errors++; 2284 else 2285 vs->vs_read_errors++; 2286 } 2287 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd)) 2288 vs->vs_write_errors++; 2289 mutex_exit(&vd->vdev_stat_lock); 2290 2291 if (type == ZIO_TYPE_WRITE && txg != 0 && 2292 (!(flags & ZIO_FLAG_IO_REPAIR) || 2293 (flags & ZIO_FLAG_SCRUB_THREAD))) { 2294 /* 2295 * This is either a normal write (not a repair), or it's a 2296 * repair induced by the scrub thread. In the normal case, 2297 * we commit the DTL change in the same txg as the block 2298 * was born. In the scrub-induced repair case, we know that 2299 * scrubs run in first-pass syncing context, so we commit 2300 * the DTL change in spa->spa_syncing_txg. 2301 * 2302 * We currently do not make DTL entries for failed spontaneous 2303 * self-healing writes triggered by normal (non-scrubbing) 2304 * reads, because we have no transactional context in which to 2305 * do so -- and it's not clear that it'd be desirable anyway. 
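 *
 * A worked example (illustrative): a normal write (not a repair) to a leaf
 * of a mirror fails in txg 37.  commit_txg stays 37; assuming txg 37 is not
 * already in the leaf's DTL_MISSING map, the leaf and every ancestor below
 * the root get a DTL_PARTIAL entry for txg 37, the top-level vdev is dirtied
 * for txg 37, and the leaf itself gets a DTL_MISSING entry for txg 37.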
2306 */ 2307 if (vd->vdev_ops->vdev_op_leaf) { 2308 uint64_t commit_txg = txg; 2309 if (flags & ZIO_FLAG_SCRUB_THREAD) { 2310 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 2311 ASSERT(spa_sync_pass(spa) == 1); 2312 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 2313 commit_txg = spa->spa_syncing_txg; 2314 } 2315 ASSERT(commit_txg >= spa->spa_syncing_txg); 2316 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 2317 return; 2318 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 2319 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 2320 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 2321 } 2322 if (vd != rvd) 2323 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 2324 } 2325 } 2326 2327 void 2328 vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete) 2329 { 2330 vdev_stat_t *vs = &vd->vdev_stat; 2331 2332 for (int c = 0; c < vd->vdev_children; c++) 2333 vdev_scrub_stat_update(vd->vdev_child[c], type, complete); 2334 2335 mutex_enter(&vd->vdev_stat_lock); 2336 2337 if (type == POOL_SCRUB_NONE) { 2338 /* 2339 * Update completion and end time. Leave everything else alone 2340 * so we can report what happened during the previous scrub. 2341 */ 2342 vs->vs_scrub_complete = complete; 2343 vs->vs_scrub_end = gethrestime_sec(); 2344 } else { 2345 vs->vs_scrub_type = type; 2346 vs->vs_scrub_complete = 0; 2347 vs->vs_scrub_examined = 0; 2348 vs->vs_scrub_repaired = 0; 2349 vs->vs_scrub_start = gethrestime_sec(); 2350 vs->vs_scrub_end = 0; 2351 } 2352 2353 mutex_exit(&vd->vdev_stat_lock); 2354 } 2355 2356 /* 2357 * Update the in-core space usage stats for this vdev and the root vdev. 2358 */ 2359 void 2360 vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta, 2361 boolean_t update_root) 2362 { 2363 int64_t dspace_delta = space_delta; 2364 spa_t *spa = vd->vdev_spa; 2365 vdev_t *rvd = spa->spa_root_vdev; 2366 2367 ASSERT(vd == vd->vdev_top); 2368 2369 /* 2370 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion 2371 * factor. We must calculate this here and not at the root vdev 2372 * because the root vdev's psize-to-asize is simply the max of its 2373 * children's, thus not accurate enough for us. 2374 */ 2375 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); 2376 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 2377 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * 2378 vd->vdev_deflate_ratio; 2379 2380 mutex_enter(&vd->vdev_stat_lock); 2381 vd->vdev_stat.vs_space += space_delta; 2382 vd->vdev_stat.vs_alloc += alloc_delta; 2383 vd->vdev_stat.vs_dspace += dspace_delta; 2384 mutex_exit(&vd->vdev_stat_lock); 2385 2386 if (update_root) { 2387 ASSERT(rvd == vd->vdev_parent); 2388 ASSERT(vd->vdev_ms_count != 0); 2389 2390 /* 2391 * Don't count non-normal (e.g. intent log) space as part of 2392 * the pool's capacity. 2393 */ 2394 if (vd->vdev_mg->mg_class != spa->spa_normal_class) 2395 return; 2396 2397 mutex_enter(&rvd->vdev_stat_lock); 2398 rvd->vdev_stat.vs_space += space_delta; 2399 rvd->vdev_stat.vs_alloc += alloc_delta; 2400 rvd->vdev_stat.vs_dspace += dspace_delta; 2401 mutex_exit(&rvd->vdev_stat_lock); 2402 } 2403 } 2404 2405 /* 2406 * Mark a top-level vdev's config as dirty, placing it on the dirty list 2407 * so that it will be written out next time the vdev configuration is synced. 2408 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
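 *
 * An illustrative sketch of the calling convention (an assumption, not copied
 * from an actual caller); as described below, the caller either holds
 * SCL_CONFIG as writer or is the sync thread:
 *
 *	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
 *	vdev_config_dirty(tvd);		(tvd is a top-level vdev)
 *	spa_config_exit(spa, SCL_CONFIG, FTAG);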
2409 */ 2410 void 2411 vdev_config_dirty(vdev_t *vd) 2412 { 2413 spa_t *spa = vd->vdev_spa; 2414 vdev_t *rvd = spa->spa_root_vdev; 2415 int c; 2416 2417 /* 2418 * If this is an aux vdev (as with l2cache and spare devices), then we 2419 * update the vdev config manually and set the sync flag. 2420 */ 2421 if (vd->vdev_aux != NULL) { 2422 spa_aux_vdev_t *sav = vd->vdev_aux; 2423 nvlist_t **aux; 2424 uint_t naux; 2425 2426 for (c = 0; c < sav->sav_count; c++) { 2427 if (sav->sav_vdevs[c] == vd) 2428 break; 2429 } 2430 2431 if (c == sav->sav_count) { 2432 /* 2433 * We're being removed. There's nothing more to do. 2434 */ 2435 ASSERT(sav->sav_sync == B_TRUE); 2436 return; 2437 } 2438 2439 sav->sav_sync = B_TRUE; 2440 2441 if (nvlist_lookup_nvlist_array(sav->sav_config, 2442 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { 2443 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, 2444 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); 2445 } 2446 2447 ASSERT(c < naux); 2448 2449 /* 2450 * Setting the nvlist in the middle of the array is a little 2451 * sketchy, but it will work. 2452 */ 2453 nvlist_free(aux[c]); 2454 aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE); 2455 2456 return; 2457 } 2458 2459 /* 2460 * The dirty list is protected by the SCL_CONFIG lock. The caller 2461 * must either hold SCL_CONFIG as writer, or must be the sync thread 2462 * (which holds SCL_CONFIG as reader). There's only one sync thread, 2463 * so this is sufficient to ensure mutual exclusion. 2464 */ 2465 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2466 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2467 spa_config_held(spa, SCL_CONFIG, RW_READER))); 2468 2469 if (vd == rvd) { 2470 for (c = 0; c < rvd->vdev_children; c++) 2471 vdev_config_dirty(rvd->vdev_child[c]); 2472 } else { 2473 ASSERT(vd == vd->vdev_top); 2474 2475 if (!list_link_active(&vd->vdev_config_dirty_node)) 2476 list_insert_head(&spa->spa_config_dirty_list, vd); 2477 } 2478 } 2479 2480 void 2481 vdev_config_clean(vdev_t *vd) 2482 { 2483 spa_t *spa = vd->vdev_spa; 2484 2485 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 2486 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2487 spa_config_held(spa, SCL_CONFIG, RW_READER))); 2488 2489 ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 2490 list_remove(&spa->spa_config_dirty_list, vd); 2491 } 2492 2493 /* 2494 * Mark a top-level vdev's state as dirty, so that the next pass of 2495 * spa_sync() can convert this into vdev_config_dirty(). We distinguish 2496 * the state changes from larger config changes because they require 2497 * much less locking, and are often needed for administrative actions. 2498 */ 2499 void 2500 vdev_state_dirty(vdev_t *vd) 2501 { 2502 spa_t *spa = vd->vdev_spa; 2503 2504 ASSERT(vd == vd->vdev_top); 2505 2506 /* 2507 * The state list is protected by the SCL_STATE lock. The caller 2508 * must either hold SCL_STATE as writer, or must be the sync thread 2509 * (which holds SCL_STATE as reader). There's only one sync thread, 2510 * so this is sufficient to ensure mutual exclusion.
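 *
 * A concrete in-file example of a legitimate caller: vdev_clear() above,
 * which asserts SCL_STATE_ALL as writer, calls vdev_state_dirty(vd->vdev_top)
 * after clearing a faulted or degraded device.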
2511 */ 2512 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2513 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2514 spa_config_held(spa, SCL_STATE, RW_READER))); 2515 2516 if (!list_link_active(&vd->vdev_state_dirty_node)) 2517 list_insert_head(&spa->spa_state_dirty_list, vd); 2518 } 2519 2520 void 2521 vdev_state_clean(vdev_t *vd) 2522 { 2523 spa_t *spa = vd->vdev_spa; 2524 2525 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 2526 (dsl_pool_sync_context(spa_get_dsl(spa)) && 2527 spa_config_held(spa, SCL_STATE, RW_READER))); 2528 2529 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 2530 list_remove(&spa->spa_state_dirty_list, vd); 2531 } 2532 2533 /* 2534 * Propagate vdev state up from children to parent. 2535 */ 2536 void 2537 vdev_propagate_state(vdev_t *vd) 2538 { 2539 spa_t *spa = vd->vdev_spa; 2540 vdev_t *rvd = spa->spa_root_vdev; 2541 int degraded = 0, faulted = 0; 2542 int corrupted = 0; 2543 vdev_t *child; 2544 2545 if (vd->vdev_children > 0) { 2546 for (int c = 0; c < vd->vdev_children; c++) { 2547 child = vd->vdev_child[c]; 2548 2549 if (!vdev_readable(child) || 2550 (!vdev_writeable(child) && spa_writeable(spa))) { 2551 /* 2552 * Root special: if there is a top-level log 2553 * device, treat the root vdev as if it were 2554 * degraded. 2555 */ 2556 if (child->vdev_islog && vd == rvd) 2557 degraded++; 2558 else 2559 faulted++; 2560 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 2561 degraded++; 2562 } 2563 2564 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 2565 corrupted++; 2566 } 2567 2568 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 2569 2570 /* 2571 * Root special: if there is a top-level vdev that cannot be 2572 * opened due to corrupted metadata, then propagate the root 2573 * vdev's aux state as 'corrupt' rather than 'insufficient 2574 * replicas'. 2575 */ 2576 if (corrupted && vd == rvd && 2577 rvd->vdev_state == VDEV_STATE_CANT_OPEN) 2578 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 2579 VDEV_AUX_CORRUPT_DATA); 2580 } 2581 2582 if (vd->vdev_parent) 2583 vdev_propagate_state(vd->vdev_parent); 2584 } 2585 2586 /* 2587 * Set a vdev's state. If this is during an open, we don't update the parent 2588 * state, because we're in the process of opening children depth-first. 2589 * Otherwise, we propagate the change to the parent. 2590 * 2591 * If this routine places a device in a faulted state, an appropriate ereport is 2592 * generated. 2593 */ 2594 void 2595 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 2596 { 2597 uint64_t save_state; 2598 spa_t *spa = vd->vdev_spa; 2599 2600 if (state == vd->vdev_state) { 2601 vd->vdev_stat.vs_aux = aux; 2602 return; 2603 } 2604 2605 save_state = vd->vdev_state; 2606 2607 vd->vdev_state = state; 2608 vd->vdev_stat.vs_aux = aux; 2609 2610 /* 2611 * If we are setting the vdev state to anything but an open state, then 2612 * always close the underlying device. Otherwise, we keep accessible 2613 * but invalid devices open forever. We don't call vdev_close() itself, 2614 * because that implies some extra checks (offline, etc) that we don't 2615 * want here. This is limited to leaf devices, because otherwise 2616 * closing the device will affect other children. 
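 *
 * As a concrete illustration: vdev_is_dead() above is simply
 * 'vdev_state < VDEV_STATE_DEGRADED', so this immediate close applies to any
 * leaf that has just been moved to a state below DEGRADED, while DEGRADED
 * and healthier states leave the device open.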
2617 */ 2618 if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf) 2619 vd->vdev_ops->vdev_op_close(vd); 2620 2621 if (vd->vdev_removed && 2622 state == VDEV_STATE_CANT_OPEN && 2623 (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { 2624 /* 2625 * If the previous state is set to VDEV_STATE_REMOVED, then this 2626 * device was previously marked removed and someone attempted to 2627 * reopen it. If this failed due to a nonexistent device, then 2628 * keep the device in the REMOVED state. We also let this be if 2629 * it is one of our special test online cases, which is only 2630 * attempting to online the device and shouldn't generate an FMA 2631 * fault. 2632 */ 2633 vd->vdev_state = VDEV_STATE_REMOVED; 2634 vd->vdev_stat.vs_aux = VDEV_AUX_NONE; 2635 } else if (state == VDEV_STATE_REMOVED) { 2636 /* 2637 * Indicate to the ZFS DE that this device has been removed, and 2638 * any recent errors should be ignored. 2639 */ 2640 zfs_post_remove(spa, vd); 2641 vd->vdev_removed = B_TRUE; 2642 } else if (state == VDEV_STATE_CANT_OPEN) { 2643 /* 2644 * If we fail to open a vdev during an import, we mark it as 2645 * "not available", which signifies that it was never there to 2646 * begin with. Failure to open such a device is not considered 2647 * an error. 2648 */ 2649 if (spa->spa_load_state == SPA_LOAD_IMPORT && 2650 vd->vdev_ops->vdev_op_leaf) 2651 vd->vdev_not_present = 1; 2652 2653 /* 2654 * Post the appropriate ereport. If the 'prevstate' field is 2655 * set to something other than VDEV_STATE_UNKNOWN, it indicates 2656 * that this is part of a vdev_reopen(). In this case, we don't 2657 * want to post the ereport if the device was already in the 2658 * CANT_OPEN state beforehand. 2659 * 2660 * If the 'checkremove' flag is set, then this is an attempt to 2661 * online the device in response to an insertion event. If we 2662 * hit this case, then we have detected an insertion event for a 2663 * faulted or offline device that wasn't in the removed state. 2664 * In this scenario, we don't post an ereport because we are 2665 * about to replace the device, or attempt an online with 2666 * vdev_forcefault, which will generate the fault for us. 2667 */ 2668 if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && 2669 !vd->vdev_not_present && !vd->vdev_checkremove && 2670 vd != spa->spa_root_vdev) { 2671 const char *class; 2672 2673 switch (aux) { 2674 case VDEV_AUX_OPEN_FAILED: 2675 class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; 2676 break; 2677 case VDEV_AUX_CORRUPT_DATA: 2678 class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; 2679 break; 2680 case VDEV_AUX_NO_REPLICAS: 2681 class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; 2682 break; 2683 case VDEV_AUX_BAD_GUID_SUM: 2684 class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; 2685 break; 2686 case VDEV_AUX_TOO_SMALL: 2687 class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; 2688 break; 2689 case VDEV_AUX_BAD_LABEL: 2690 class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; 2691 break; 2692 case VDEV_AUX_IO_FAILURE: 2693 class = FM_EREPORT_ZFS_IO_FAILURE; 2694 break; 2695 default: 2696 class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; 2697 } 2698 2699 zfs_ereport_post(class, spa, vd, NULL, save_state, 0); 2700 } 2701 2702 /* Erase any notion of persistent removed state */ 2703 vd->vdev_removed = B_FALSE; 2704 } else { 2705 vd->vdev_removed = B_FALSE; 2706 } 2707 2708 if (!isopen && vd->vdev_parent) 2709 vdev_propagate_state(vd->vdev_parent); 2710 } 2711 2712 /* 2713 * Check the vdev configuration to ensure that it's capable of supporting 2714 * a root pool. 
Currently, we do not support RAID-Z or partial configuration. 2715 * In addition, only a single top-level vdev is allowed and none of the leaves 2716 * can be wholedisks. 2717 */ 2718 boolean_t 2719 vdev_is_bootable(vdev_t *vd) 2720 { 2721 if (!vd->vdev_ops->vdev_op_leaf) { 2722 char *vdev_type = vd->vdev_ops->vdev_op_type; 2723 2724 if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && 2725 vd->vdev_children > 1) { 2726 return (B_FALSE); 2727 } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || 2728 strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { 2729 return (B_FALSE); 2730 } 2731 } else if (vd->vdev_wholedisk == 1) { 2732 return (B_FALSE); 2733 } 2734 2735 for (int c = 0; c < vd->vdev_children; c++) { 2736 if (!vdev_is_bootable(vd->vdev_child[c])) 2737 return (B_FALSE); 2738 } 2739 return (B_TRUE); 2740 } 2741 2742 void 2743 vdev_load_log_state(vdev_t *vd, nvlist_t *nv) 2744 { 2745 uint_t children; 2746 nvlist_t **child; 2747 uint64_t val; 2748 spa_t *spa = vd->vdev_spa; 2749 2750 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2751 &child, &children) == 0) { 2752 for (int c = 0; c < children; c++) 2753 vdev_load_log_state(vd->vdev_child[c], child[c]); 2754 } 2755 2756 if (vd->vdev_ops->vdev_op_leaf && nvlist_lookup_uint64(nv, 2757 ZPOOL_CONFIG_OFFLINE, &val) == 0 && val) { 2758 2759 /* 2760 * It would be nice to call vdev_offline() 2761 * directly but the pool isn't fully loaded and 2762 * the txg threads have not been started yet. 2763 */ 2764 spa_config_enter(spa, SCL_STATE_ALL, FTAG, RW_WRITER); 2765 vd->vdev_offline = val; 2766 vdev_reopen(vd->vdev_top); 2767 spa_config_exit(spa, SCL_STATE_ALL, FTAG); 2768 } 2769 } 2770 2771 /* 2772 * Expand a vdev if possible. 2773 */ 2774 void 2775 vdev_expand(vdev_t *vd, uint64_t txg) 2776 { 2777 ASSERT(vd->vdev_top == vd); 2778 ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); 2779 2780 if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) { 2781 VERIFY(vdev_metaslab_init(vd, txg) == 0); 2782 vdev_config_dirty(vd); 2783 } 2784 } 2785
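/*
 * Illustrative sketch (not part of the original file): the common pattern that
 * the state-changing entry points above (vdev_degrade(), vdev_online(),
 * vdev_offline()) all follow.  The function name below is hypothetical and the
 * snippet is disabled; it only documents the pattern.
 */
#if 0
static int
example_vdev_state_change(spa_t *spa, uint64_t guid)
{
	vdev_t *vd;

	spa_vdev_state_enter(spa);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		return (spa_vdev_state_exit(spa, NULL, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

	/* ... manipulate vd->vdev_* state and reopen if needed ... */

	return (spa_vdev_state_exit(spa, vd, 0));
}
#endif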