/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	&vdev_indirect_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/* maximum number of metaslabs per top-level vdev */
int vdev_max_ms_count = 200;

/* minimum number of metaslabs per top-level vdev */
int vdev_min_ms_count = 16;

/* see comment in vdev_metaslab_set_size() */
int vdev_default_ms_shift = 29;

boolean_t vdev_validate_skip = B_FALSE;

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
int vdev_dtl_sm_blksz = (1 << 12);

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
int vdev_standard_sm_blksz = (1 << 17);

/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	if (vd->vdev_path != NULL) {
		zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
		    vd->vdev_path, buf);
	} else {
		zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
		    vd->vdev_ops->vdev_op_type,
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)vd->vdev_guid, buf);
	}
}

void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
	char state[20];

	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
		zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
		    vd->vdev_ops->vdev_op_type);
		return;
	}

	switch (vd->vdev_state) {
	case VDEV_STATE_UNKNOWN:
		(void) snprintf(state, sizeof (state), "unknown");
		break;
	case VDEV_STATE_CLOSED:
		(void) snprintf(state, sizeof (state), "closed");
		break;
	case VDEV_STATE_OFFLINE:
		(void) snprintf(state, sizeof (state), "offline");
		break;
	case VDEV_STATE_REMOVED:
		(void) snprintf(state, sizeof (state), "removed");
		break;
	case VDEV_STATE_CANT_OPEN:
		(void) snprintf(state, sizeof (state), "can't open");
		break;
	case VDEV_STATE_FAULTED:
		(void) snprintf(state, sizeof (state), "faulted");
		break;
	case VDEV_STATE_DEGRADED:
		(void) snprintf(state, sizeof (state), "degraded");
		break;
	case VDEV_STATE_HEALTHY:
		(void) snprintf(state, sizeof (state), "healthy");
		break;
	default:
		(void) snprintf(state, sizeof (state), "<state %u>",
		    (uint_t)vd->vdev_state);
	}

	zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
	    "", vd->vdev_id, vd->vdev_ops->vdev_op_type,
	    vd->vdev_islog ? " (log)" : "",
	    (u_longlong_t)vd->vdev_guid,
	    vd->vdev_path ? vd->vdev_path : "N/A", state);

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
		    pvd->vdev_children);

	return (pvd->vdev_min_asize);
}
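
/*
 * Editor's illustration (not part of the original source): with an ashift
 * of 12 (4 KB sectors), vdev_default_asize() rounds a psize of 5000 bytes
 * up to 8192 via P2ROUNDUP() and then takes the MAX of that value and
 * vdev_psize_to_asize() of each child.  In vdev_get_min_asize(), the raidz
 * branch performs a ceiling division: for a 3-child raidz parent, each
 * child must provide at least one third of the parent's vdev_min_asize,
 * rounded up.
 */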

void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

static int
vdev_count_leaves_impl(vdev_t *vd)
{
	int n = 0;

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (int c = 0; c < vd->vdev_children; c++)
		n += vdev_count_leaves_impl(vd->vdev_child[c]);

	return (n);
}

int
vdev_count_leaves(spa_t *spa)
{
	return (vdev_count_leaves_impl(spa->spa_root_vdev));
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;
	spa_t *spa = cvd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;
	vdev_indirect_config_t *vic;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
	vic = &vd->vdev_indirect_config;

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
		spa->spa_load_guid = spa_generate_guid(NULL);
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			guid = spa_generate_guid(NULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			guid = spa_generate_guid(spa);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);
	vic->vic_prev_indirect_vdev = UINT64_MAX;

	rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
	vd->vdev_obsolete_segments = range_tree_create(NULL, NULL);

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);
	for (int t = 0; t < DTL_TYPES; t++) {
		vd->vdev_dtl[t] = range_tree_create(NULL, NULL);
	}
	txg_list_create(&vd->vdev_ms_list, spa,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list, spa,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;
	vdev_indirect_config_t *vic;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (SET_ERROR(EINVAL));

	if ((ops = vdev_getops(type)) == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (SET_ERROR(ENOTSUP));

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (SET_ERROR(ENOTSUP));

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
				return (SET_ERROR(EINVAL));
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (SET_ERROR(ENOTSUP));
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (SET_ERROR(ENOTSUP));
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (SET_ERROR(EINVAL));
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);
	vic = &vd->vdev_indirect_config;

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	ASSERT0(vic->vic_mapping_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
	    &vic->vic_mapping_object);
	ASSERT0(vic->vic_births_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
	    &vic->vic_births_object);
	ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
	    &vic->vic_prev_indirect_vdev);

	/*
	 * Look for the 'not present' flag.  This will only be set if the
	 * device was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
		    &vd->vdev_removing);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
		    &vd->vdev_top_zap);
	} else {
		ASSERT0(vd->vdev_top_zap);
	}

	if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
		    alloctype == VDEV_ALLOC_ADD ||
		    alloctype == VDEV_ALLOC_SPLIT ||
		    alloctype == VDEV_ALLOC_ROOTPOOL);
		vd->vdev_mg = metaslab_group_create(islog ?
		    spa_log_class(spa) : spa_normal_class(spa), vd);
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv,
		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
	} else {
		ASSERT0(vd->vdev_leaf_zap);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
		    &vd->vdev_resilver_txg);

		/*
		 * When importing a pool, we want to ignore the persistent
		 * fault state, as the diagnosis made on another system may
		 * not be valid in the current context.  Local vdevs will
		 * remain in the faulted state.
		 */
		if (spa_load_state(spa) == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);

			if (vd->vdev_faulted || vd->vdev_degraded) {
				char *aux;

				vd->vdev_label_aux =
				    VDEV_AUX_ERR_EXCEEDED;
				if (nvlist_lookup_string(nv,
				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
				    strcmp(aux, "external") == 0)
					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
			}
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
	}

	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_close(vd->vdev_dtl_sm);
	for (int t = 0; t < DTL_TYPES; t++) {
		range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
		range_tree_destroy(vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	EQUIV(vd->vdev_indirect_births != NULL,
	    vd->vdev_indirect_mapping != NULL);
	if (vd->vdev_indirect_births != NULL) {
		vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
		vdev_indirect_births_close(vd->vdev_indirect_births);
	}

	if (vd->vdev_obsolete_sm != NULL) {
		ASSERT(vd->vdev_removing ||
		    vd->vdev_ops == &vdev_indirect_ops);
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
	}
	range_tree_destroy(vd->vdev_obsolete_segments);
	rw_destroy(&vd->vdev_indirect_rwlock);
	mutex_destroy(&vd->vdev_obsolete_lock);

	mutex_destroy(&vd->vdev_queue_lock);
	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;
	tvd->vdev_top_zap = svd->vdev_top_zap;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;
	svd->vdev_top_zap = 0;

	if (tvd->vdev_mg)
		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
	svd->vdev_checkpoint_sm = NULL;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_max_asize = cvd->vdev_max_asize;
	mvd->vdev_psize = cvd->vdev_psize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_orig_guid = cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	ASSERT(oldc <= newc);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;

	for (m = oldc; m < newc; m++) {
		uint64_t object = 0;

		/*
		 * vdev_ms_array may be 0 if we are creating the "fake"
		 * metaslabs for an indirect vdev for zdb's leak detection.
		 * See zdb_leak_init().
		 */
		if (txg == 0 && vd->vdev_ms_array != 0) {
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error != 0) {
				vdev_dbgmsg(vd, "unable to read the metaslab "
				    "array [error=%d]", error);
				return (error);
			}
		}

		error = metaslab_init(vd->vdev_mg, m, object, txg,
		    &(vd->vdev_ms[m]));
		if (error != 0) {
			vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
			    error);
			return (error);
		}
	}

	if (txg == 0)
		spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

	/*
	 * If the vdev is being removed we don't activate
	 * the metaslabs since we want to ensure that no new
	 * allocations are performed on this device.
	 */
	if (oldc == 0 && !vd->vdev_removing)
		metaslab_group_activate(vd->vdev_mg);

	if (txg == 0)
		spa_config_exit(spa, SCL_ALLOC, FTAG);

	return (0);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
	if (vd->vdev_checkpoint_sm != NULL) {
		ASSERT(spa_feature_is_active(vd->vdev_spa,
		    SPA_FEATURE_POOL_CHECKPOINT));
		space_map_close(vd->vdev_checkpoint_sm);
		/*
		 * Even though we close the space map, we need to set its
		 * pointer to NULL.  The reason is that vdev_metaslab_fini()
		 * may be called multiple times for certain operations
		 * (i.e. when destroying a pool) so we need to ensure that
		 * this clause never executes twice.  This logic is similar
		 * to the one used for the vdev_ms clause below.
		 */
		vd->vdev_checkpoint_sm = NULL;
	}

	if (vd->vdev_ms != NULL) {
		uint64_t count = vd->vdev_ms_count;

		metaslab_group_passivate(vd->vdev_mg);
		for (uint64_t m = 0; m < count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];

			if (msp != NULL)
				metaslab_fini(msp);
		}
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;

		vd->vdev_ms_count = 0;
	}
	ASSERT0(vd->vdev_ms_count);
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_abd,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			abd_free(zio->io_abd);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		abd_free(zio->io_abd);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			vdev_dbgmsg(vd, "failed probe");
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = SET_ERROR(ENXIO);
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(zio, &zl)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = SET_ERROR(ENXIO);

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible.
 *
 * Read and write to several known locations: the pad regions of each
 * vdev label but the first, which we leave alone in case it contains
 * a VTOC.
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		/*
		 * We can't change the vdev state in this context, so we
		 * kick off an async task to do it on our behalf.
		 */
		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
		    abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}
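
/*
 * Editor's note (illustrative, not part of the original source): both
 * calling conventions of vdev_probe() appear later in this file.  A
 * synchronous probe passes a NULL zio and waits on the returned probe zio,
 * as vdev_open() does:
 *
 *	error = zio_wait(vdev_probe(vd, NULL));
 *
 * An asynchronous probe passes the failing zio; that zio becomes a parent
 * of the shared probe zio, vdev_probe() returns NULL, and the caller's
 * error is settled in vdev_probe_done() when the probe completes.
 */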

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * In order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock.
	 */
	if (vdev_uses_zvols(vd)) {
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
		return;
	}
	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != NULL);

	taskq_destroy(tq);
}

/*
 * Compute the raidz-deflation ratio.  Note, we hard-code
 * in 128k (1 << 17) because it is the "typical" blocksize.
 * Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
 * otherwise it would inconsistently account for existing bp's.
 */
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
	if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
		vd->vdev_deflate_ratio = (1 << 17) /
		    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
	}
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t max_osize = 0;
	uint64_t asize, max_asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	/*
	 * If this vdev is not removed, check its fault status.  If it's
	 * faulted, bail out of the open.
	 */
	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (SET_ERROR(ENXIO));
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (SET_ERROR(ENXIO));
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);

	/*
	 * Reset the vdev_reopening flag so that we actually close
	 * the vdev on error.
	 */
	vd->vdev_reopening = B_FALSE;
	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
			    vd->vdev_stat.vs_aux);
		} else {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    vd->vdev_stat.vs_aux);
		}
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	/*
	 * Recheck the faulted flag now that we have confirmed that
	 * the vdev is accessible.  If we're faulted, bail.
	 */
	if (vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (SET_ERROR(ENXIO));
	}

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
	}

	/*
	 * For hole or missing vdevs we just return success.
	 */
	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
		return (0);

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
	max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (SET_ERROR(EOVERFLOW));
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
		    VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (SET_ERROR(EOVERFLOW));
		}
		psize = 0;
		asize = osize;
		max_asize = max_osize;
	}

	vd->vdev_psize = psize;

	/*
	 * Make sure the allocatable size hasn't shrunk too much.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (SET_ERROR(EINVAL));
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_max_asize = max_asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
	} else {
		/*
		 * Detect if the alignment requirement has increased.
		 * We don't want to make the pool unavailable, just
		 * issue a warning instead.
		 */
		if (ashift > vd->vdev_top->vdev_ashift &&
		    vd->vdev_ops->vdev_op_leaf) {
			cmn_err(CE_WARN,
			    "Disk, '%s', has a block alignment that is "
			    "larger than the pool's alignment\n",
			    vd->vdev_path);
		}
		vd->vdev_max_asize = max_asize;
	}

	/*
	 * If all children are healthy we update asize if either:
	 * The asize has increased, due to a device expansion caused by dynamic
	 * LUN growth or vdev replacement, and automatic expansion is enabled;
	 * making the additional space available.
	 *
	 * The asize has decreased, due to a device shrink usually caused by a
	 * vdev replace with a smaller device.  This ensures that calculations
	 * based on max_asize and asize, e.g. esize, are always valid.  It's
	 * safe to do this as we've already validated that asize is greater
	 * than vdev_min_asize.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY &&
	    ((asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand)) ||
	    (asize < vd->vdev_asize)))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (error);
	}

	/*
	 * Track the min and max ashift values for normal data devices.
	 */
	if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
	    !vd->vdev_islog && vd->vdev_aux == NULL) {
		if (vd->vdev_ashift > spa->spa_max_ashift)
			spa->spa_max_ashift = vd->vdev_ashift;
		if (vd->vdev_ashift < spa->spa_min_ashift)
			spa->spa_min_ashift = vd->vdev_ashift;
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid = 0, aux_guid = 0, top_guid;
	uint64_t state;
	nvlist_t *nvl;
	uint64_t txg;

	if (vdev_validate_skip)
		return (0);

	for (uint64_t c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (SET_ERROR(EBADF));

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
		return (0);

	/*
	 * If we are performing an extreme rewind, we allow for a label that
	 * was modified at a point after the current txg.
	 * If config lock is not held do not check for the txg.  spa_sync could
	 * be updating the vdev's label before updating spa_last_synced_txg.
	 */
	if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
	    spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
		txg = UINT64_MAX;
	else
		txg = spa_last_synced_txg(spa);

	if ((label = vdev_label_read_config(vd, txg)) == NULL) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		vdev_dbgmsg(vd, "vdev_validate: failed reading config");
		return (0);
	}

	/*
	 * Determine if this vdev has been split off into another
	 * pool.  If so, then refuse to open it.
	 */
	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
	    &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_SPLIT_POOL);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
		return (0);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_POOL_GUID);
		return (0);
	}

	/*
	 * If config is not trusted then ignore the spa guid check.  This is
	 * necessary because if the machine crashed during a re-guid the new
	 * guid might have been written to all of the vdev labels, but not the
	 * cached config.  The check will be performed again once we have the
	 * trusted config from the MOS.
	 */
	if (spa->spa_trust_config && guid != spa_guid(spa)) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
		    "match config (%llu != %llu)", (u_longlong_t)guid,
		    (u_longlong_t)spa_guid(spa));
		return (0);
	}

	if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
	    != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
	    &aux_guid) != 0)
		aux_guid = 0;

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_GUID);
		return (0);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
	    != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_TOP_GUID);
		return (0);
	}

	/*
	 * If this vdev just became a top-level vdev because its sibling was
	 * detached, it will have adopted the parent's vdev guid -- but the
	 * label may or may not be on disk yet.  Fortunately, either version
	 * of the label will have the same top guid, so if we're a top-level
	 * vdev, we can safely compare to that instead.
	 * However, if the config comes from a cachefile that failed to update
	 * after the detach, a top-level vdev will appear as a non top-level
	 * vdev in the config.  Also relax the constraints if we perform an
	 * extreme rewind.
	 *
	 * If we split this vdev off instead, then we also check the
	 * original pool's guid.  We don't want to consider the vdev
	 * corrupt if it is partway through a split operation.
	 */
	if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
		boolean_t mismatch = B_FALSE;
		if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
			if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
				mismatch = B_TRUE;
		} else {
			if (vd->vdev_guid != top_guid &&
			    vd->vdev_top->vdev_guid != guid)
				mismatch = B_TRUE;
		}

		if (mismatch) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			vdev_dbgmsg(vd, "vdev_validate: config guid "
			    "doesn't match label guid");
			vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
			    (u_longlong_t)vd->vdev_guid,
			    (u_longlong_t)vd->vdev_top->vdev_guid);
			vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
			    "aux_guid %llu", (u_longlong_t)guid,
			    (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
			return (0);
		}
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
	    &state) != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_POOL_STATE);
		return (0);
	}

	nvlist_free(label);

	/*
	 * If this is a verbatim import, no need to check the
	 * state of the pool.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
	    spa_load_state(spa) == SPA_LOAD_OPEN &&
	    state != POOL_STATE_ACTIVE) {
		vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
		    "for spa %s", (u_longlong_t)state, spa->spa_name);
		return (SET_ERROR(EBADF));
	}

	/*
	 * If we were able to open and validate a vdev that was
	 * previously marked permanently unavailable, clear that state
	 * now.
	 */
	if (vd->vdev_not_present)
		vd->vdev_not_present = 0;

	return (0);
}

static void
vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
{
	if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
		if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
			zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
			    "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
			    dvd->vdev_path, svd->vdev_path);
			spa_strfree(dvd->vdev_path);
			dvd->vdev_path = spa_strdup(svd->vdev_path);
		}
	} else if (svd->vdev_path != NULL) {
		dvd->vdev_path = spa_strdup(svd->vdev_path);
		zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
		    (u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
	}
}

/*
 * Recursively copy vdev paths from one vdev to another.  Source and
 * destination vdev trees must have the same geometry, otherwise an error is
 * returned.  Intended to copy paths from userland config into MOS config.
 */
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
	if ((svd->vdev_ops == &vdev_missing_ops) ||
	    (svd->vdev_ishole && dvd->vdev_ishole) ||
	    (dvd->vdev_ops == &vdev_indirect_ops))
		return (0);

	if (svd->vdev_ops != dvd->vdev_ops) {
		vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
		    svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
		return (SET_ERROR(EINVAL));
	}

	if (svd->vdev_guid != dvd->vdev_guid) {
		vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
		    "%llu)", (u_longlong_t)svd->vdev_guid,
		    (u_longlong_t)dvd->vdev_guid);
		return (SET_ERROR(EINVAL));
	}

	if (svd->vdev_children != dvd->vdev_children) {
		vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
		    "%llu != %llu", (u_longlong_t)svd->vdev_children,
		    (u_longlong_t)dvd->vdev_children);
		return (SET_ERROR(EINVAL));
	}

	for (uint64_t i = 0; i < svd->vdev_children; i++) {
		int error = vdev_copy_path_strict(svd->vdev_child[i],
		    dvd->vdev_child[i]);
		if (error != 0)
			return (error);
	}

	if (svd->vdev_ops->vdev_op_leaf)
		vdev_copy_path_impl(svd, dvd);

	return (0);
}

static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
	ASSERT(stvd->vdev_top == stvd);
	ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);

	for (uint64_t i = 0; i < dvd->vdev_children; i++) {
		vdev_copy_path_search(stvd, dvd->vdev_child[i]);
	}

	if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
		return;

	/*
	 * The idea here is that while a vdev can shift positions within
	 * a top vdev (when replacing, attaching mirror, etc.) it cannot
	 * step outside of it.
	 */
	vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);

	if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
		return;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	vdev_copy_path_impl(vd, dvd);
}

/*
 * Recursively copy vdev paths from one root vdev to another.  Source and
 * destination vdev trees may differ in geometry.  For each destination leaf
 * vdev, search a vdev with the same guid and top vdev id in the source.
 * Intended to copy paths from userland config into MOS config.
 */
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
	uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
	ASSERT(srvd->vdev_ops == &vdev_root_ops);
	ASSERT(drvd->vdev_ops == &vdev_root_ops);

	for (uint64_t i = 0; i < children; i++) {
		vdev_copy_path_search(srvd->vdev_child[i],
		    drvd->vdev_child[i]);
	}
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *pvd = vd->vdev_parent;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	/*
	 * If our parent is reopening, then we are as well, unless we are
	 * going offline.
	 */
	if (pvd != NULL && pvd->vdev_reopening)
		vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_hold(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_is_root(spa));
	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
		return;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_hold(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_hold(vd);
}

void
vdev_rele(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_is_root(spa));
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_rele(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_rele(vd);
}

/*
 * Reopen all interior vdevs and any unopened leaves.  We don't actually
 * reopen leaf vdevs which had previously been opened as they might deadlock
 * on the spa_config_lock.  Instead we only obtain the leaf's physical size.
 * If the leaf has never been opened then open it, as usual.
 */
void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	/* set the reopening flag unless we're taking the vdev offline */
	vd->vdev_reopening = !vd->vdev_offline;
	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
	if (vd->vdev_aux) {
		(void) vdev_validate_aux(vd);
		if (vdev_readable(vd) && vdev_writeable(vd) &&
		    vd->vdev_aux == &spa->spa_l2cache &&
		    !l2arc_vdev_present(vd))
			l2arc_add_vdev(spa, vd);
	} else {
		(void) vdev_validate(vd);
	}

	/*
	 * Reassess parent vdev's health.
	 */
	vdev_propagate_state(vd);
}

int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively load DTLs and initialize all labels.
	 */
	if ((error = vdev_dtl_load(vd)) != 0 ||
	    (error = vdev_label_init(vd, txg, isreplacing ?
	    VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}
1991 */ 1992 ms_shift = vdev_default_ms_shift; 1993 1994 if ((asize >> ms_shift) < vdev_min_ms_count) { 1995 /* 1996 * For devices that are less than 8G we want to have 1997 * exactly 16 metaslabs. We don't want less as integer 1998 * division rounds down, so less metaslabs mean more 1999 * wasted space. We don't want more as these vdevs are 2000 * small and in the likely event that we are running 2001 * out of space, the SPA will have a hard time finding 2002 * space due to fragmentation. 2003 */ 2004 ms_shift = highbit64(asize / vdev_min_ms_count); 2005 ms_shift = MAX(ms_shift, SPA_MAXBLOCKSHIFT); 2006 2007 } else if ((asize >> ms_shift) > vdev_max_ms_count) { 2008 ms_shift = highbit64(asize / vdev_max_ms_count); 2009 } 2010 2011 vd->vdev_ms_shift = ms_shift; 2012 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT); 2013 } 2014 2015 void 2016 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) 2017 { 2018 ASSERT(vd == vd->vdev_top); 2019 /* indirect vdevs don't have metaslabs or dtls */ 2020 ASSERT(vdev_is_concrete(vd) || flags == 0); 2021 ASSERT(ISP2(flags)); 2022 ASSERT(spa_writeable(vd->vdev_spa)); 2023 2024 if (flags & VDD_METASLAB) 2025 (void) txg_list_add(&vd->vdev_ms_list, arg, txg); 2026 2027 if (flags & VDD_DTL) 2028 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); 2029 2030 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); 2031 } 2032 2033 void 2034 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg) 2035 { 2036 for (int c = 0; c < vd->vdev_children; c++) 2037 vdev_dirty_leaves(vd->vdev_child[c], flags, txg); 2038 2039 if (vd->vdev_ops->vdev_op_leaf) 2040 vdev_dirty(vd->vdev_top, flags, vd, txg); 2041 } 2042 2043 /* 2044 * DTLs. 2045 * 2046 * A vdev's DTL (dirty time log) is the set of transaction groups for which 2047 * the vdev has less than perfect replication. There are four kinds of DTL: 2048 * 2049 * DTL_MISSING: txgs for which the vdev has no valid copies of the data 2050 * 2051 * DTL_PARTIAL: txgs for which data is available, but not fully replicated 2052 * 2053 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon 2054 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of 2055 * txgs that was scrubbed. 2056 * 2057 * DTL_OUTAGE: txgs which cannot currently be read, whether due to 2058 * persistent errors or just some device being offline. 2059 * Unlike the other three, the DTL_OUTAGE map is not generally 2060 * maintained; it's only computed when needed, typically to 2061 * determine whether a device can be detached. 2062 * 2063 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device 2064 * either has the data or it doesn't. 2065 * 2066 * For interior vdevs such as mirror and RAID-Z the picture is more complex. 2067 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because 2068 * if any child is less than fully replicated, then so is its parent. 2069 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, 2070 * comprising only those txgs which appear in 'maxfaults' or more children; 2071 * those are the txgs we don't have enough replication to read. For example, 2072 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); 2073 * thus, its DTL_MISSING consists of the set of txgs that appear in more than 2074 * two child DTL_MISSING maps. 2075 * 2076 * It should be clear from the above that to compute the DTLs and outage maps 2077 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. 
2078 * Therefore, that is all we keep on disk. When loading the pool, or after
2079 * a configuration change, we generate all other DTLs from first principles.
2080 */
2081 void
2082 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2083 {
2084 range_tree_t *rt = vd->vdev_dtl[t];
2085
2086 ASSERT(t < DTL_TYPES);
2087 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2088 ASSERT(spa_writeable(vd->vdev_spa));
2089
2090 mutex_enter(&vd->vdev_dtl_lock);
2091 if (!range_tree_contains(rt, txg, size))
2092 range_tree_add(rt, txg, size);
2093 mutex_exit(&vd->vdev_dtl_lock);
2094 }
2095
2096 boolean_t
2097 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2098 {
2099 range_tree_t *rt = vd->vdev_dtl[t];
2100 boolean_t dirty = B_FALSE;
2101
2102 ASSERT(t < DTL_TYPES);
2103 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2104
2105 /*
2106 * While we are loading the pool, the DTLs have not been loaded yet.
2107 * Ignore the DTLs and try all devices. This avoids a recursive
2108 * mutex enter on the vdev_dtl_lock, and also makes us try hard
2109 * when loading the pool (relying on the checksum to ensure that
2110 * we get the right data -- note that while loading, we are
2111 * only reading the MOS, which is always checksummed).
2112 */
2113 if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE)
2114 return (B_FALSE);
2115
2116 mutex_enter(&vd->vdev_dtl_lock);
2117 if (!range_tree_is_empty(rt))
2118 dirty = range_tree_contains(rt, txg, size);
2119 mutex_exit(&vd->vdev_dtl_lock);
2120
2121 return (dirty);
2122 }
2123
2124 boolean_t
2125 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2126 {
2127 range_tree_t *rt = vd->vdev_dtl[t];
2128 boolean_t empty;
2129
2130 mutex_enter(&vd->vdev_dtl_lock);
2131 empty = range_tree_is_empty(rt);
2132 mutex_exit(&vd->vdev_dtl_lock);
2133
2134 return (empty);
2135 }
2136
2137 /*
2138 * Returns the lowest txg in the DTL range.
2139 */
2140 static uint64_t
2141 vdev_dtl_min(vdev_t *vd)
2142 {
2143 range_seg_t *rs;
2144
2145 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2146 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2147 ASSERT0(vd->vdev_children);
2148
2149 rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2150 return (rs->rs_start - 1);
2151 }
2152
2153 /*
2154 * Returns the highest txg in the DTL.
2155 */
2156 static uint64_t
2157 vdev_dtl_max(vdev_t *vd)
2158 {
2159 range_seg_t *rs;
2160
2161 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2162 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2163 ASSERT0(vd->vdev_children);
2164
2165 rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2166 return (rs->rs_end);
2167 }
2168
2169 /*
2170 * Determine if a resilvering vdev should remove any DTL entries from
2171 * its range. If the vdev was resilvering for the entire duration of the
2172 * scan then it should excise that range from its DTLs. Otherwise, this
2173 * vdev is considered partially resilvered and should leave its DTL
2174 * entries intact. The comment in vdev_dtl_reassess() describes how we
2175 * excise the DTLs.
2176 */
2177 static boolean_t
2178 vdev_dtl_should_excise(vdev_t *vd)
2179 {
2180 spa_t *spa = vd->vdev_spa;
2181 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2182
2183 ASSERT0(scn->scn_phys.scn_errors);
2184 ASSERT0(vd->vdev_children);
2185
2186 if (vd->vdev_state < VDEV_STATE_DEGRADED)
2187 return (B_FALSE);
2188
2189 if (vd->vdev_resilver_txg == 0 ||
2190 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
2191 return (B_TRUE);
2192
2193 /*
2194 * When a resilver is initiated the scan will assign the scn_max_txg
2195 * value to the highest txg value that exists in all DTLs. If this
2196 * device's max DTL is not part of this scan (i.e. it is not in
2197 * the range (scn_min_txg, scn_max_txg]) then it is not eligible
2198 * for excision.
2199 */
2200 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
2201 ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
2202 ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
2203 ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
2204 return (B_TRUE);
2205 }
2206 return (B_FALSE);
2207 }
2208
2209 /*
2210 * Reassess DTLs after a config change or scrub completion.
2211 */
2212 void
2213 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
2214 {
2215 spa_t *spa = vd->vdev_spa;
2216 avl_tree_t reftree;
2217 int minref;
2218
2219 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2220
2221 for (int c = 0; c < vd->vdev_children; c++)
2222 vdev_dtl_reassess(vd->vdev_child[c], txg,
2223 scrub_txg, scrub_done);
2224
2225 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
2226 return;
2227
2228 if (vd->vdev_ops->vdev_op_leaf) {
2229 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2230
2231 mutex_enter(&vd->vdev_dtl_lock);
2232
2233 /*
2234 * If we've completed a scan cleanly then determine
2235 * if this vdev should remove any DTLs. We only want to
2236 * excise regions on vdevs that were available during
2237 * the entire duration of this scan.
2238 */
2239 if (scrub_txg != 0 &&
2240 (spa->spa_scrub_started ||
2241 (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
2242 vdev_dtl_should_excise(vd)) {
2243 /*
2244 * We completed a scrub up to scrub_txg. If we
2245 * did it without rebooting, then the scrub dtl
2246 * will be valid, so excise the old region and
2247 * fold in the scrub dtl. Otherwise, leave the
2248 * dtl as-is if there was an error.
2249 *
2250 * There's a little trick here: to excise the beginning
2251 * of the DTL_MISSING map, we put it into a reference
2252 * tree and then add a segment with refcnt -1 that
2253 * covers the range [0, scrub_txg). This means
2254 * that each txg in that range has refcnt -1 or 0.
2255 * We then add DTL_SCRUB with a refcnt of 2, so that
2256 * entries in the range [0, scrub_txg) will have a
2257 * positive refcnt -- either 1 or 2. We then convert
2258 * the reference tree into the new DTL_MISSING map.
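 *
 * Spelling out the resulting refcounts (an editorial illustration of
 * the scheme above, not additional logic): a txg below scrub_txg that
 * was in DTL_MISSING but not in DTL_SCRUB ends up at 1 - 1 = 0 and is
 * excised; one that was in both ends up at 1 - 1 + 2 = 2 and is kept;
 * one that was only in DTL_SCRUB ends up at -1 + 2 = 1 and is added;
 * and a txg at or above scrub_txg that was in DTL_MISSING keeps its
 * refcnt of 1 and is kept. Generating the map with a minref of 1
 * therefore yields exactly the txgs that are still missing after the
 * scrub.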
2259 */ 2260 space_reftree_create(&reftree); 2261 space_reftree_add_map(&reftree, 2262 vd->vdev_dtl[DTL_MISSING], 1); 2263 space_reftree_add_seg(&reftree, 0, scrub_txg, -1); 2264 space_reftree_add_map(&reftree, 2265 vd->vdev_dtl[DTL_SCRUB], 2); 2266 space_reftree_generate_map(&reftree, 2267 vd->vdev_dtl[DTL_MISSING], 1); 2268 space_reftree_destroy(&reftree); 2269 } 2270 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 2271 range_tree_walk(vd->vdev_dtl[DTL_MISSING], 2272 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]); 2273 if (scrub_done) 2274 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 2275 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 2276 if (!vdev_readable(vd)) 2277 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 2278 else 2279 range_tree_walk(vd->vdev_dtl[DTL_MISSING], 2280 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]); 2281 2282 /* 2283 * If the vdev was resilvering and no longer has any 2284 * DTLs then reset its resilvering flag. 2285 */ 2286 if (vd->vdev_resilver_txg != 0 && 2287 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && 2288 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) 2289 vd->vdev_resilver_txg = 0; 2290 2291 mutex_exit(&vd->vdev_dtl_lock); 2292 2293 if (txg != 0) 2294 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 2295 return; 2296 } 2297 2298 mutex_enter(&vd->vdev_dtl_lock); 2299 for (int t = 0; t < DTL_TYPES; t++) { 2300 /* account for child's outage in parent's missing map */ 2301 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; 2302 if (t == DTL_SCRUB) 2303 continue; /* leaf vdevs only */ 2304 if (t == DTL_PARTIAL) 2305 minref = 1; /* i.e. non-zero */ 2306 else if (vd->vdev_nparity != 0) 2307 minref = vd->vdev_nparity + 1; /* RAID-Z */ 2308 else 2309 minref = vd->vdev_children; /* any kind of mirror */ 2310 space_reftree_create(&reftree); 2311 for (int c = 0; c < vd->vdev_children; c++) { 2312 vdev_t *cvd = vd->vdev_child[c]; 2313 mutex_enter(&cvd->vdev_dtl_lock); 2314 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1); 2315 mutex_exit(&cvd->vdev_dtl_lock); 2316 } 2317 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref); 2318 space_reftree_destroy(&reftree); 2319 } 2320 mutex_exit(&vd->vdev_dtl_lock); 2321 } 2322 2323 int 2324 vdev_dtl_load(vdev_t *vd) 2325 { 2326 spa_t *spa = vd->vdev_spa; 2327 objset_t *mos = spa->spa_meta_objset; 2328 int error = 0; 2329 2330 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) { 2331 ASSERT(vdev_is_concrete(vd)); 2332 2333 error = space_map_open(&vd->vdev_dtl_sm, mos, 2334 vd->vdev_dtl_object, 0, -1ULL, 0); 2335 if (error) 2336 return (error); 2337 ASSERT(vd->vdev_dtl_sm != NULL); 2338 2339 mutex_enter(&vd->vdev_dtl_lock); 2340 2341 /* 2342 * Now that we've opened the space_map we need to update 2343 * the in-core DTL. 
2344 */ 2345 space_map_update(vd->vdev_dtl_sm); 2346 2347 error = space_map_load(vd->vdev_dtl_sm, 2348 vd->vdev_dtl[DTL_MISSING], SM_ALLOC); 2349 mutex_exit(&vd->vdev_dtl_lock); 2350 2351 return (error); 2352 } 2353 2354 for (int c = 0; c < vd->vdev_children; c++) { 2355 error = vdev_dtl_load(vd->vdev_child[c]); 2356 if (error != 0) 2357 break; 2358 } 2359 2360 return (error); 2361 } 2362 2363 void 2364 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx) 2365 { 2366 spa_t *spa = vd->vdev_spa; 2367 2368 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx)); 2369 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 2370 zapobj, tx)); 2371 } 2372 2373 uint64_t 2374 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx) 2375 { 2376 spa_t *spa = vd->vdev_spa; 2377 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA, 2378 DMU_OT_NONE, 0, tx); 2379 2380 ASSERT(zap != 0); 2381 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 2382 zap, tx)); 2383 2384 return (zap); 2385 } 2386 2387 void 2388 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx) 2389 { 2390 if (vd->vdev_ops != &vdev_hole_ops && 2391 vd->vdev_ops != &vdev_missing_ops && 2392 vd->vdev_ops != &vdev_root_ops && 2393 !vd->vdev_top->vdev_removing) { 2394 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) { 2395 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx); 2396 } 2397 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) { 2398 vd->vdev_top_zap = vdev_create_link_zap(vd, tx); 2399 } 2400 } 2401 for (uint64_t i = 0; i < vd->vdev_children; i++) { 2402 vdev_construct_zaps(vd->vdev_child[i], tx); 2403 } 2404 } 2405 2406 void 2407 vdev_dtl_sync(vdev_t *vd, uint64_t txg) 2408 { 2409 spa_t *spa = vd->vdev_spa; 2410 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING]; 2411 objset_t *mos = spa->spa_meta_objset; 2412 range_tree_t *rtsync; 2413 dmu_tx_t *tx; 2414 uint64_t object = space_map_object(vd->vdev_dtl_sm); 2415 2416 ASSERT(vdev_is_concrete(vd)); 2417 ASSERT(vd->vdev_ops->vdev_op_leaf); 2418 2419 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2420 2421 if (vd->vdev_detached || vd->vdev_top->vdev_removing) { 2422 mutex_enter(&vd->vdev_dtl_lock); 2423 space_map_free(vd->vdev_dtl_sm, tx); 2424 space_map_close(vd->vdev_dtl_sm); 2425 vd->vdev_dtl_sm = NULL; 2426 mutex_exit(&vd->vdev_dtl_lock); 2427 2428 /* 2429 * We only destroy the leaf ZAP for detached leaves or for 2430 * removed log devices. Removed data devices handle leaf ZAP 2431 * cleanup later, once cancellation is no longer possible. 
2432 */ 2433 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached || 2434 vd->vdev_top->vdev_islog)) { 2435 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx); 2436 vd->vdev_leaf_zap = 0; 2437 } 2438 2439 dmu_tx_commit(tx); 2440 return; 2441 } 2442 2443 if (vd->vdev_dtl_sm == NULL) { 2444 uint64_t new_object; 2445 2446 new_object = space_map_alloc(mos, vdev_dtl_sm_blksz, tx); 2447 VERIFY3U(new_object, !=, 0); 2448 2449 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object, 2450 0, -1ULL, 0)); 2451 ASSERT(vd->vdev_dtl_sm != NULL); 2452 } 2453 2454 rtsync = range_tree_create(NULL, NULL); 2455 2456 mutex_enter(&vd->vdev_dtl_lock); 2457 range_tree_walk(rt, range_tree_add, rtsync); 2458 mutex_exit(&vd->vdev_dtl_lock); 2459 2460 space_map_truncate(vd->vdev_dtl_sm, vdev_dtl_sm_blksz, tx); 2461 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, tx); 2462 range_tree_vacate(rtsync, NULL, NULL); 2463 2464 range_tree_destroy(rtsync); 2465 2466 /* 2467 * If the object for the space map has changed then dirty 2468 * the top level so that we update the config. 2469 */ 2470 if (object != space_map_object(vd->vdev_dtl_sm)) { 2471 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, " 2472 "new object %llu", (u_longlong_t)txg, spa_name(spa), 2473 (u_longlong_t)object, 2474 (u_longlong_t)space_map_object(vd->vdev_dtl_sm)); 2475 vdev_config_dirty(vd->vdev_top); 2476 } 2477 2478 dmu_tx_commit(tx); 2479 2480 mutex_enter(&vd->vdev_dtl_lock); 2481 space_map_update(vd->vdev_dtl_sm); 2482 mutex_exit(&vd->vdev_dtl_lock); 2483 } 2484 2485 /* 2486 * Determine whether the specified vdev can be offlined/detached/removed 2487 * without losing data. 2488 */ 2489 boolean_t 2490 vdev_dtl_required(vdev_t *vd) 2491 { 2492 spa_t *spa = vd->vdev_spa; 2493 vdev_t *tvd = vd->vdev_top; 2494 uint8_t cant_read = vd->vdev_cant_read; 2495 boolean_t required; 2496 2497 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2498 2499 if (vd == spa->spa_root_vdev || vd == tvd) 2500 return (B_TRUE); 2501 2502 /* 2503 * Temporarily mark the device as unreadable, and then determine 2504 * whether this results in any DTL outages in the top-level vdev. 2505 * If not, we can safely offline/detach/remove the device. 2506 */ 2507 vd->vdev_cant_read = B_TRUE; 2508 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 2509 required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 2510 vd->vdev_cant_read = cant_read; 2511 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 2512 2513 if (!required && zio_injection_enabled) 2514 required = !!zio_handle_device_injection(vd, NULL, ECHILD); 2515 2516 return (required); 2517 } 2518 2519 /* 2520 * Determine if resilver is needed, and if so the txg range. 
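 * (Illustration with hypothetical numbers, not taken from a real pool:
 * if a leaf's DTL_MISSING consists of the single range [100, 200),
 * *minp is set to 99 -- vdev_dtl_min() returns the first missing txg
 * minus one -- and *maxp is set to 200.)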
2521 */
2522 boolean_t
2523 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
2524 {
2525 boolean_t needed = B_FALSE;
2526 uint64_t thismin = UINT64_MAX;
2527 uint64_t thismax = 0;
2528
2529 if (vd->vdev_children == 0) {
2530 mutex_enter(&vd->vdev_dtl_lock);
2531 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2532 vdev_writeable(vd)) {
2533
2534 thismin = vdev_dtl_min(vd);
2535 thismax = vdev_dtl_max(vd);
2536 needed = B_TRUE;
2537 }
2538 mutex_exit(&vd->vdev_dtl_lock);
2539 } else {
2540 for (int c = 0; c < vd->vdev_children; c++) {
2541 vdev_t *cvd = vd->vdev_child[c];
2542 uint64_t cmin, cmax;
2543
2544 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
2545 thismin = MIN(thismin, cmin);
2546 thismax = MAX(thismax, cmax);
2547 needed = B_TRUE;
2548 }
2549 }
2550 }
2551
2552 if (needed && minp) {
2553 *minp = thismin;
2554 *maxp = thismax;
2555 }
2556 return (needed);
2557 }
2558
2559 /*
2560 * Gets the checkpoint space map object from the vdev's ZAP.
2561 * Returns the spacemap object, or 0 if it wasn't in the ZAP
2562 * or the ZAP doesn't exist yet.
2563 */
2564 int
2565 vdev_checkpoint_sm_object(vdev_t *vd)
2566 {
2567 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
2568 if (vd->vdev_top_zap == 0) {
2569 return (0);
2570 }
2571
2572 uint64_t sm_obj = 0;
2573 int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
2574 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj);
2575
2576 ASSERT(err == 0 || err == ENOENT);
2577
2578 return (sm_obj);
2579 }
2580
2581 int
2582 vdev_load(vdev_t *vd)
2583 {
2584 int error = 0;
2585 /*
2586 * Recursively load all children.
2587 */
2588 for (int c = 0; c < vd->vdev_children; c++) {
2589 error = vdev_load(vd->vdev_child[c]);
2590 if (error != 0) {
2591 return (error);
2592 }
2593 }
2594
2595 vdev_set_deflate_ratio(vd);
2596
2597 /*
2598 * If this is a top-level vdev, initialize its metaslabs.
2599 */
2600 if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
2601 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
2602 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2603 VDEV_AUX_CORRUPT_DATA);
2604 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
2605 "asize=%llu", (u_longlong_t)vd->vdev_ashift,
2606 (u_longlong_t)vd->vdev_asize);
2607 return (SET_ERROR(ENXIO));
2608 } else if ((error = vdev_metaslab_init(vd, 0)) != 0) {
2609 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
2610 "[error=%d]", error);
2611 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2612 VDEV_AUX_CORRUPT_DATA);
2613 return (error);
2614 }
2615
2616 uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd);
2617 if (checkpoint_sm_obj != 0) {
2618 objset_t *mos = spa_meta_objset(vd->vdev_spa);
2619 ASSERT(vd->vdev_asize != 0);
2620 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
2621
2622 if ((error = space_map_open(&vd->vdev_checkpoint_sm,
2623 mos, checkpoint_sm_obj, 0, vd->vdev_asize,
2624 vd->vdev_ashift))) {
2625 vdev_dbgmsg(vd, "vdev_load: space_map_open "
2626 "failed for checkpoint spacemap (obj %llu) "
2627 "[error=%d]",
2628 (u_longlong_t)checkpoint_sm_obj, error);
2629 return (error);
2630 }
2631 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2632 space_map_update(vd->vdev_checkpoint_sm);
2633
2634 /*
2635 * Since the checkpoint_sm contains free entries
2636 * exclusively we can use sm_alloc to indicate the
2637 * cumulative checkpointed space that has been freed.
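 * For example (a hypothetical figure, for illustration only): if 2GB
 * of checkpointed space has been freed from this vdev, sm_alloc will
 * be -2GB and vs_checkpoint_space below will be reported as 2GB.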
2638 */
2639 vd->vdev_stat.vs_checkpoint_space =
2640 -vd->vdev_checkpoint_sm->sm_alloc;
2641 vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
2642 vd->vdev_stat.vs_checkpoint_space;
2643 }
2644 }
2645
2646 /*
2647 * If this is a leaf vdev, load its DTL.
2648 */
2649 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
2650 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2651 VDEV_AUX_CORRUPT_DATA);
2652 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
2653 "[error=%d]", error);
2654 return (error);
2655 }
2656
2657 uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
2658 if (obsolete_sm_object != 0) {
2659 objset_t *mos = vd->vdev_spa->spa_meta_objset;
2660 ASSERT(vd->vdev_asize != 0);
2661 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
2662
2663 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
2664 obsolete_sm_object, 0, vd->vdev_asize, 0))) {
2665 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2666 VDEV_AUX_CORRUPT_DATA);
2667 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
2668 "obsolete spacemap (obj %llu) [error=%d]",
2669 (u_longlong_t)obsolete_sm_object, error);
2670 return (error);
2671 }
2672 space_map_update(vd->vdev_obsolete_sm);
2673 }
2674
2675 return (0);
2676 }
2677
2678 /*
2679 * The special vdev case is used for hot spares and l2cache devices. Its
2680 * sole purpose is to set the vdev state for the associated vdev. To do this,
2681 * we make sure that we can open the underlying device, then try to read the
2682 * label, and make sure that the label is sane and that it hasn't been
2683 * repurposed to another pool.
2684 */
2685 int
2686 vdev_validate_aux(vdev_t *vd)
2687 {
2688 nvlist_t *label;
2689 uint64_t guid, version;
2690 uint64_t state;
2691
2692 if (!vdev_readable(vd))
2693 return (0);
2694
2695 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
2696 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2697 VDEV_AUX_CORRUPT_DATA);
2698 return (-1);
2699 }
2700
2701 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
2702 !SPA_VERSION_IS_SUPPORTED(version) ||
2703 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
2704 guid != vd->vdev_guid ||
2705 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
2706 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2707 VDEV_AUX_CORRUPT_DATA);
2708 nvlist_free(label);
2709 return (-1);
2710 }
2711
2712 /*
2713 * We don't actually check the pool state here. If it's in fact in
2714 * use by another pool, we update this fact on the fly when requested.
2715 */
2716 nvlist_free(label);
2717 return (0);
2718 }
2719
2720 /*
2721 * Free the objects used to store this vdev's spacemaps, and the array
2722 * that points to them.
2723 */ 2724 void 2725 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx) 2726 { 2727 if (vd->vdev_ms_array == 0) 2728 return; 2729 2730 objset_t *mos = vd->vdev_spa->spa_meta_objset; 2731 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift; 2732 size_t array_bytes = array_count * sizeof (uint64_t); 2733 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP); 2734 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0, 2735 array_bytes, smobj_array, 0)); 2736 2737 for (uint64_t i = 0; i < array_count; i++) { 2738 uint64_t smobj = smobj_array[i]; 2739 if (smobj == 0) 2740 continue; 2741 2742 space_map_free_obj(mos, smobj, tx); 2743 } 2744 2745 kmem_free(smobj_array, array_bytes); 2746 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx)); 2747 vd->vdev_ms_array = 0; 2748 } 2749 2750 static void 2751 vdev_remove_empty(vdev_t *vd, uint64_t txg) 2752 { 2753 spa_t *spa = vd->vdev_spa; 2754 dmu_tx_t *tx; 2755 2756 ASSERT(vd == vd->vdev_top); 2757 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 2758 2759 if (vd->vdev_ms != NULL) { 2760 metaslab_group_t *mg = vd->vdev_mg; 2761 2762 metaslab_group_histogram_verify(mg); 2763 metaslab_class_histogram_verify(mg->mg_class); 2764 2765 for (int m = 0; m < vd->vdev_ms_count; m++) { 2766 metaslab_t *msp = vd->vdev_ms[m]; 2767 2768 if (msp == NULL || msp->ms_sm == NULL) 2769 continue; 2770 2771 mutex_enter(&msp->ms_lock); 2772 /* 2773 * If the metaslab was not loaded when the vdev 2774 * was removed then the histogram accounting may 2775 * not be accurate. Update the histogram information 2776 * here so that we ensure that the metaslab group 2777 * and metaslab class are up-to-date. 2778 */ 2779 metaslab_group_histogram_remove(mg, msp); 2780 2781 VERIFY0(space_map_allocated(msp->ms_sm)); 2782 space_map_close(msp->ms_sm); 2783 msp->ms_sm = NULL; 2784 mutex_exit(&msp->ms_lock); 2785 } 2786 2787 if (vd->vdev_checkpoint_sm != NULL) { 2788 ASSERT(spa_has_checkpoint(spa)); 2789 space_map_close(vd->vdev_checkpoint_sm); 2790 vd->vdev_checkpoint_sm = NULL; 2791 } 2792 2793 metaslab_group_histogram_verify(mg); 2794 metaslab_class_histogram_verify(mg->mg_class); 2795 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) 2796 ASSERT0(mg->mg_histogram[i]); 2797 } 2798 2799 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 2800 vdev_destroy_spacemaps(vd, tx); 2801 2802 if (vd->vdev_islog && vd->vdev_top_zap != 0) { 2803 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx); 2804 vd->vdev_top_zap = 0; 2805 } 2806 dmu_tx_commit(tx); 2807 } 2808 2809 void 2810 vdev_sync_done(vdev_t *vd, uint64_t txg) 2811 { 2812 metaslab_t *msp; 2813 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); 2814 2815 ASSERT(vdev_is_concrete(vd)); 2816 2817 while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 2818 metaslab_sync_done(msp, txg); 2819 2820 if (reassess) 2821 metaslab_sync_reassess(vd->vdev_mg); 2822 } 2823 2824 void 2825 vdev_sync(vdev_t *vd, uint64_t txg) 2826 { 2827 spa_t *spa = vd->vdev_spa; 2828 vdev_t *lvd; 2829 metaslab_t *msp; 2830 dmu_tx_t *tx; 2831 2832 if (range_tree_space(vd->vdev_obsolete_segments) > 0) { 2833 dmu_tx_t *tx; 2834 2835 ASSERT(vd->vdev_removing || 2836 vd->vdev_ops == &vdev_indirect_ops); 2837 2838 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2839 vdev_indirect_sync_obsolete(vd, tx); 2840 dmu_tx_commit(tx); 2841 2842 /* 2843 * If the vdev is indirect, it can't have dirty 2844 * metaslabs or DTLs. 
2845 */ 2846 if (vd->vdev_ops == &vdev_indirect_ops) { 2847 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg)); 2848 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg)); 2849 return; 2850 } 2851 } 2852 2853 ASSERT(vdev_is_concrete(vd)); 2854 2855 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 && 2856 !vd->vdev_removing) { 2857 ASSERT(vd == vd->vdev_top); 2858 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 2859 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2860 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 2861 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 2862 ASSERT(vd->vdev_ms_array != 0); 2863 vdev_config_dirty(vd); 2864 dmu_tx_commit(tx); 2865 } 2866 2867 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 2868 metaslab_sync(msp, txg); 2869 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 2870 } 2871 2872 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 2873 vdev_dtl_sync(lvd, txg); 2874 2875 /* 2876 * Remove the metadata associated with this vdev once it's empty. 2877 * Note that this is typically used for log/cache device removal; 2878 * we don't empty toplevel vdevs when removing them. But if 2879 * a toplevel happens to be emptied, this is not harmful. 2880 */ 2881 if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) { 2882 vdev_remove_empty(vd, txg); 2883 } 2884 2885 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 2886 } 2887 2888 uint64_t 2889 vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 2890 { 2891 return (vd->vdev_ops->vdev_op_asize(vd, psize)); 2892 } 2893 2894 /* 2895 * Mark the given vdev faulted. A faulted vdev behaves as if the device could 2896 * not be opened, and no I/O is attempted. 2897 */ 2898 int 2899 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) 2900 { 2901 vdev_t *vd, *tvd; 2902 2903 spa_vdev_state_enter(spa, SCL_NONE); 2904 2905 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2906 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2907 2908 if (!vd->vdev_ops->vdev_op_leaf) 2909 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2910 2911 tvd = vd->vdev_top; 2912 2913 /* 2914 * We don't directly use the aux state here, but if we do a 2915 * vdev_reopen(), we need this value to be present to remember why we 2916 * were faulted. 2917 */ 2918 vd->vdev_label_aux = aux; 2919 2920 /* 2921 * Faulted state takes precedence over degraded. 2922 */ 2923 vd->vdev_delayed_close = B_FALSE; 2924 vd->vdev_faulted = 1ULL; 2925 vd->vdev_degraded = 0ULL; 2926 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); 2927 2928 /* 2929 * If this device has the only valid copy of the data, then 2930 * back off and simply mark the vdev as degraded instead. 2931 */ 2932 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { 2933 vd->vdev_degraded = 1ULL; 2934 vd->vdev_faulted = 0ULL; 2935 2936 /* 2937 * If we reopen the device and it's not dead, only then do we 2938 * mark it degraded. 2939 */ 2940 vdev_reopen(tvd); 2941 2942 if (vdev_readable(vd)) 2943 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); 2944 } 2945 2946 return (spa_vdev_state_exit(spa, vd, 0)); 2947 } 2948 2949 /* 2950 * Mark the given vdev degraded. A degraded vdev is purely an indication to the 2951 * user that something is wrong. The vdev continues to operate as normal as far 2952 * as I/O is concerned. 
2953 */ 2954 int 2955 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) 2956 { 2957 vdev_t *vd; 2958 2959 spa_vdev_state_enter(spa, SCL_NONE); 2960 2961 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2962 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 2963 2964 if (!vd->vdev_ops->vdev_op_leaf) 2965 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 2966 2967 /* 2968 * If the vdev is already faulted, then don't do anything. 2969 */ 2970 if (vd->vdev_faulted || vd->vdev_degraded) 2971 return (spa_vdev_state_exit(spa, NULL, 0)); 2972 2973 vd->vdev_degraded = 1ULL; 2974 if (!vdev_is_dead(vd)) 2975 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 2976 aux); 2977 2978 return (spa_vdev_state_exit(spa, vd, 0)); 2979 } 2980 2981 /* 2982 * Online the given vdev. 2983 * 2984 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached 2985 * spare device should be detached when the device finishes resilvering. 2986 * Second, the online should be treated like a 'test' online case, so no FMA 2987 * events are generated if the device fails to open. 2988 */ 2989 int 2990 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 2991 { 2992 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 2993 boolean_t wasoffline; 2994 vdev_state_t oldstate; 2995 2996 spa_vdev_state_enter(spa, SCL_NONE); 2997 2998 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 2999 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3000 3001 if (!vd->vdev_ops->vdev_op_leaf) 3002 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3003 3004 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline); 3005 oldstate = vd->vdev_state; 3006 3007 tvd = vd->vdev_top; 3008 vd->vdev_offline = B_FALSE; 3009 vd->vdev_tmpoffline = B_FALSE; 3010 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 3011 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 3012 3013 /* XXX - L2ARC 1.0 does not support expansion */ 3014 if (!vd->vdev_aux) { 3015 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3016 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 3017 } 3018 3019 vdev_reopen(tvd); 3020 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 3021 3022 if (!vd->vdev_aux) { 3023 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3024 pvd->vdev_expanding = B_FALSE; 3025 } 3026 3027 if (newstate) 3028 *newstate = vd->vdev_state; 3029 if ((flags & ZFS_ONLINE_UNSPARE) && 3030 !vdev_is_dead(vd) && vd->vdev_parent && 3031 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 3032 vd->vdev_parent->vdev_child[0] == vd) 3033 vd->vdev_unspare = B_TRUE; 3034 3035 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 3036 3037 /* XXX - L2ARC 1.0 does not support expansion */ 3038 if (vd->vdev_aux) 3039 return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 3040 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3041 } 3042 3043 if (wasoffline || 3044 (oldstate < VDEV_STATE_DEGRADED && 3045 vd->vdev_state >= VDEV_STATE_DEGRADED)) 3046 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE); 3047 3048 return (spa_vdev_state_exit(spa, vd, 0)); 3049 } 3050 3051 static int 3052 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 3053 { 3054 vdev_t *vd, *tvd; 3055 int error = 0; 3056 uint64_t generation; 3057 metaslab_group_t *mg; 3058 3059 top: 3060 spa_vdev_state_enter(spa, SCL_ALLOC); 3061 3062 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3063 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3064 3065 if (!vd->vdev_ops->vdev_op_leaf) 3066 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 
3067 3068 tvd = vd->vdev_top; 3069 mg = tvd->vdev_mg; 3070 generation = spa->spa_config_generation + 1; 3071 3072 /* 3073 * If the device isn't already offline, try to offline it. 3074 */ 3075 if (!vd->vdev_offline) { 3076 /* 3077 * If this device has the only valid copy of some data, 3078 * don't allow it to be offlined. Log devices are always 3079 * expendable. 3080 */ 3081 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3082 vdev_dtl_required(vd)) 3083 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 3084 3085 /* 3086 * If the top-level is a slog and it has had allocations 3087 * then proceed. We check that the vdev's metaslab group 3088 * is not NULL since it's possible that we may have just 3089 * added this vdev but not yet initialized its metaslabs. 3090 */ 3091 if (tvd->vdev_islog && mg != NULL) { 3092 /* 3093 * Prevent any future allocations. 3094 */ 3095 metaslab_group_passivate(mg); 3096 (void) spa_vdev_state_exit(spa, vd, 0); 3097 3098 error = spa_reset_logs(spa); 3099 3100 /* 3101 * If the log device was successfully reset but has 3102 * checkpointed data, do not offline it. 3103 */ 3104 if (error == 0 && 3105 tvd->vdev_checkpoint_sm != NULL) { 3106 ASSERT3U(tvd->vdev_checkpoint_sm->sm_alloc, 3107 !=, 0); 3108 error = ZFS_ERR_CHECKPOINT_EXISTS; 3109 } 3110 3111 spa_vdev_state_enter(spa, SCL_ALLOC); 3112 3113 /* 3114 * Check to see if the config has changed. 3115 */ 3116 if (error || generation != spa->spa_config_generation) { 3117 metaslab_group_activate(mg); 3118 if (error) 3119 return (spa_vdev_state_exit(spa, 3120 vd, error)); 3121 (void) spa_vdev_state_exit(spa, vd, 0); 3122 goto top; 3123 } 3124 ASSERT0(tvd->vdev_stat.vs_alloc); 3125 } 3126 3127 /* 3128 * Offline this device and reopen its top-level vdev. 3129 * If the top-level vdev is a log device then just offline 3130 * it. Otherwise, if this action results in the top-level 3131 * vdev becoming unusable, undo it and fail the request. 3132 */ 3133 vd->vdev_offline = B_TRUE; 3134 vdev_reopen(tvd); 3135 3136 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3137 vdev_is_dead(tvd)) { 3138 vd->vdev_offline = B_FALSE; 3139 vdev_reopen(tvd); 3140 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 3141 } 3142 3143 /* 3144 * Add the device back into the metaslab rotor so that 3145 * once we online the device it's open for business. 3146 */ 3147 if (tvd->vdev_islog && mg != NULL) 3148 metaslab_group_activate(mg); 3149 } 3150 3151 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 3152 3153 return (spa_vdev_state_exit(spa, vd, 0)); 3154 } 3155 3156 int 3157 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 3158 { 3159 int error; 3160 3161 mutex_enter(&spa->spa_vdev_top_lock); 3162 error = vdev_offline_locked(spa, guid, flags); 3163 mutex_exit(&spa->spa_vdev_top_lock); 3164 3165 return (error); 3166 } 3167 3168 /* 3169 * Clear the error counts associated with this vdev. Unlike vdev_online() and 3170 * vdev_offline(), we assume the spa config is locked. We also clear all 3171 * children. If 'vd' is NULL, then the user wants to clear all vdevs. 
3172 */
3173 void
3174 vdev_clear(spa_t *spa, vdev_t *vd)
3175 {
3176 vdev_t *rvd = spa->spa_root_vdev;
3177
3178 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3179
3180 if (vd == NULL)
3181 vd = rvd;
3182
3183 vd->vdev_stat.vs_read_errors = 0;
3184 vd->vdev_stat.vs_write_errors = 0;
3185 vd->vdev_stat.vs_checksum_errors = 0;
3186
3187 for (int c = 0; c < vd->vdev_children; c++)
3188 vdev_clear(spa, vd->vdev_child[c]);
3189
3190 /*
3191 * It makes no sense to "clear" an indirect vdev.
3192 */
3193 if (!vdev_is_concrete(vd))
3194 return;
3195
3196 /*
3197 * If we're in the FAULTED state or have experienced failed I/O, then
3198 * clear the persistent state and attempt to reopen the device. We
3199 * also mark the vdev config dirty, so that the new faulted state is
3200 * written out to disk.
3201 */
3202 if (vd->vdev_faulted || vd->vdev_degraded ||
3203 !vdev_readable(vd) || !vdev_writeable(vd)) {
3204
3205 /*
3206 * When reopening in response to a clear event, it may be due to
3207 * a fmadm repair request. In this case, if the device is
3208 * still broken, we want to post the ereport again.
3209 */
3210 vd->vdev_forcefault = B_TRUE;
3211
3212 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
3213 vd->vdev_cant_read = B_FALSE;
3214 vd->vdev_cant_write = B_FALSE;
3215
3216 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
3217
3218 vd->vdev_forcefault = B_FALSE;
3219
3220 if (vd != rvd && vdev_writeable(vd->vdev_top))
3221 vdev_state_dirty(vd->vdev_top);
3222
3223 if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
3224 spa_async_request(spa, SPA_ASYNC_RESILVER);
3225
3226 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
3227 }
3228
3229 /*
3230 * When clearing a FMA-diagnosed fault, we always want to
3231 * unspare the device, as we assume that the original spare was
3232 * done in response to the FMA fault.
3233 */
3234 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
3235 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3236 vd->vdev_parent->vdev_child[0] == vd)
3237 vd->vdev_unspare = B_TRUE;
3238 }
3239
3240 boolean_t
3241 vdev_is_dead(vdev_t *vd)
3242 {
3243 /*
3244 * Holes and missing devices are always considered "dead".
3245 * This simplifies the code since we don't have to check for
3246 * these types of devices in the various code paths.
3247 * Instead we rely on the fact that we skip over dead devices
3248 * before issuing I/O to them.
3249 */
3250 return (vd->vdev_state < VDEV_STATE_DEGRADED ||
3251 vd->vdev_ops == &vdev_hole_ops ||
3252 vd->vdev_ops == &vdev_missing_ops);
3253 }
3254
3255 boolean_t
3256 vdev_readable(vdev_t *vd)
3257 {
3258 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
3259 }
3260
3261 boolean_t
3262 vdev_writeable(vdev_t *vd)
3263 {
3264 return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
3265 vdev_is_concrete(vd));
3266 }
3267
3268 boolean_t
3269 vdev_allocatable(vdev_t *vd)
3270 {
3271 uint64_t state = vd->vdev_state;
3272
3273 /*
3274 * We currently allow allocations from vdevs which may be in the
3275 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
3276 * fails to reopen then we'll catch it later when we're holding
3277 * the proper locks. Note that we have to get the vdev state
3278 * in a local variable because although it changes atomically,
3279 * we're asking two separate questions about it.
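 * (Namely: is the state below VDEV_STATE_DEGRADED, and is it
 * something other than VDEV_STATE_CLOSED -- see the return
 * expression below.)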
3280 */
3281 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
3282 !vd->vdev_cant_write && vdev_is_concrete(vd) &&
3283 vd->vdev_mg->mg_initialized);
3284 }
3285
3286 boolean_t
3287 vdev_accessible(vdev_t *vd, zio_t *zio)
3288 {
3289 ASSERT(zio->io_vd == vd);
3290
3291 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
3292 return (B_FALSE);
3293
3294 if (zio->io_type == ZIO_TYPE_READ)
3295 return (!vd->vdev_cant_read);
3296
3297 if (zio->io_type == ZIO_TYPE_WRITE)
3298 return (!vd->vdev_cant_write);
3299
3300 return (B_TRUE);
3301 }
3302
3303 boolean_t
3304 vdev_is_spacemap_addressable(vdev_t *vd)
3305 {
3306 /*
3307 * Assuming 47 bits of the space map entry dedicated for the entry's
3308 * offset (see description in space_map.h), we calculate the maximum
3309 * address that can be described by a space map entry for the given
3310 * device.
3311 */
3312 uint64_t shift = vd->vdev_ashift + 47;
3313
3314 if (shift >= 63) /* detect potential overflow */
3315 return (B_TRUE);
3316
3317 return (vd->vdev_asize < (1ULL << shift));
3318 }
3319
3320 /*
3321 * Get statistics for the given vdev.
3322 */
3323 void
3324 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
3325 {
3326 spa_t *spa = vd->vdev_spa;
3327 vdev_t *rvd = spa->spa_root_vdev;
3328 vdev_t *tvd = vd->vdev_top;
3329
3330 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3331
3332 mutex_enter(&vd->vdev_stat_lock);
3333 bcopy(&vd->vdev_stat, vs, sizeof (*vs));
3334 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
3335 vs->vs_state = vd->vdev_state;
3336 vs->vs_rsize = vdev_get_min_asize(vd);
3337 if (vd->vdev_ops->vdev_op_leaf)
3338 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
3339 /*
3340 * Report expandable space on top-level, non-auxiliary devices only.
3341 * The expandable space is reported in terms of metaslab sized units
3342 * since that determines how much space the pool can expand.
3343 */
3344 if (vd->vdev_aux == NULL && tvd != NULL) {
3345 vs->vs_esize = P2ALIGN(vd->vdev_max_asize - vd->vdev_asize -
3346 spa->spa_bootsize, 1ULL << tvd->vdev_ms_shift);
3347 }
3348 if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
3349 vdev_is_concrete(vd)) {
3350 vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
3351 }
3352
3353 /*
3354 * If we're getting stats on the root vdev, aggregate the I/O counts
3355 * over all top-level vdevs (i.e. the direct children of the root).
3356 */
3357 if (vd == rvd) {
3358 for (int c = 0; c < rvd->vdev_children; c++) {
3359 vdev_t *cvd = rvd->vdev_child[c];
3360 vdev_stat_t *cvs = &cvd->vdev_stat;
3361
3362 for (int t = 0; t < ZIO_TYPES; t++) {
3363 vs->vs_ops[t] += cvs->vs_ops[t];
3364 vs->vs_bytes[t] += cvs->vs_bytes[t];
3365 }
3366 cvs->vs_scan_removing = cvd->vdev_removing;
3367 }
3368 }
3369 mutex_exit(&vd->vdev_stat_lock);
3370 }
3371
3372 void
3373 vdev_clear_stats(vdev_t *vd)
3374 {
3375 mutex_enter(&vd->vdev_stat_lock);
3376 vd->vdev_stat.vs_space = 0;
3377 vd->vdev_stat.vs_dspace = 0;
3378 vd->vdev_stat.vs_alloc = 0;
3379 mutex_exit(&vd->vdev_stat_lock);
3380 }
3381
3382 void
3383 vdev_scan_stat_init(vdev_t *vd)
3384 {
3385 vdev_stat_t *vs = &vd->vdev_stat;
3386
3387 for (int c = 0; c < vd->vdev_children; c++)
3388 vdev_scan_stat_init(vd->vdev_child[c]);
3389
3390 mutex_enter(&vd->vdev_stat_lock);
3391 vs->vs_scan_processed = 0;
3392 mutex_exit(&vd->vdev_stat_lock);
3393 }
3394
3395 void
3396 vdev_stat_update(zio_t *zio, uint64_t psize)
3397 {
3398 spa_t *spa = zio->io_spa;
3399 vdev_t *rvd = spa->spa_root_vdev;
3400 vdev_t *vd = zio->io_vd ?
zio->io_vd : rvd;
3401 vdev_t *pvd;
3402 uint64_t txg = zio->io_txg;
3403 vdev_stat_t *vs = &vd->vdev_stat;
3404 zio_type_t type = zio->io_type;
3405 int flags = zio->io_flags;
3406
3407 /*
3408 * If this i/o is a gang leader, it didn't do any actual work.
3409 */
3410 if (zio->io_gang_tree)
3411 return;
3412
3413 if (zio->io_error == 0) {
3414 /*
3415 * If this is a root i/o, don't count it -- we've already
3416 * counted the top-level vdevs, and vdev_get_stats() will
3417 * aggregate them when asked. This reduces contention on
3418 * the root vdev_stat_lock and implicitly handles blocks
3419 * that compress away to holes, for which there is no i/o.
3420 * (Holes never create vdev children, so all the counters
3421 * remain zero, which is what we want.)
3422 *
3423 * Note: this only applies to successful i/o (io_error == 0)
3424 * because unlike i/o counts, errors are not additive.
3425 * When reading a ditto block, for example, failure of
3426 * one top-level vdev does not imply a root-level error.
3427 */
3428 if (vd == rvd)
3429 return;
3430
3431 ASSERT(vd == zio->io_vd);
3432
3433 if (flags & ZIO_FLAG_IO_BYPASS)
3434 return;
3435
3436 mutex_enter(&vd->vdev_stat_lock);
3437
3438 if (flags & ZIO_FLAG_IO_REPAIR) {
3439 if (flags & ZIO_FLAG_SCAN_THREAD) {
3440 dsl_scan_phys_t *scn_phys =
3441 &spa->spa_dsl_pool->dp_scan->scn_phys;
3442 uint64_t *processed = &scn_phys->scn_processed;
3443
3444 /* XXX cleanup? */
3445 if (vd->vdev_ops->vdev_op_leaf)
3446 atomic_add_64(processed, psize);
3447 vs->vs_scan_processed += psize;
3448 }
3449
3450 if (flags & ZIO_FLAG_SELF_HEAL)
3451 vs->vs_self_healed += psize;
3452 }
3453
3454 vs->vs_ops[type]++;
3455 vs->vs_bytes[type] += psize;
3456
3457 mutex_exit(&vd->vdev_stat_lock);
3458 return;
3459 }
3460
3461 if (flags & ZIO_FLAG_SPECULATIVE)
3462 return;
3463
3464 /*
3465 * If this is an I/O error that is going to be retried, then ignore the
3466 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
3467 * hard errors, when in reality they can happen for any number of
3468 * innocuous reasons (bus resets, MPxIO link failure, etc).
3469 */
3470 if (zio->io_error == EIO &&
3471 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
3472 return;
3473
3474 /*
3475 * Intent log writes won't propagate their error to the root
3476 * I/O so don't mark these types of failures as pool-level
3477 * errors.
3478 */
3479 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
3480 return;
3481
3482 mutex_enter(&vd->vdev_stat_lock);
3483 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
3484 if (zio->io_error == ECKSUM)
3485 vs->vs_checksum_errors++;
3486 else
3487 vs->vs_read_errors++;
3488 }
3489 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
3490 vs->vs_write_errors++;
3491 mutex_exit(&vd->vdev_stat_lock);
3492
3493 if (spa->spa_load_state == SPA_LOAD_NONE &&
3494 type == ZIO_TYPE_WRITE && txg != 0 &&
3495 (!(flags & ZIO_FLAG_IO_REPAIR) ||
3496 (flags & ZIO_FLAG_SCAN_THREAD) ||
3497 spa->spa_claiming)) {
3498 /*
3499 * This is either a normal write (not a repair), or it's
3500 * a repair induced by the scrub thread, or it's a repair
3501 * made by zil_claim() during spa_load() in the first txg.
3502 * In the normal case, we commit the DTL change in the same
3503 * txg as the block was born. In the scrub-induced repair
3504 * case, we know that scrubs run in first-pass syncing context,
3505 * so we commit the DTL change in spa_syncing_txg(spa).
3506 * In the zil_claim() case, we commit in spa_first_txg(spa).
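 *
 * Summarizing the commit txg chosen below (a restatement for clarity,
 * not new behavior): an ordinary write uses the block's birth txg,
 * a scan-thread repair uses spa_syncing_txg(spa), and a zil_claim()
 * repair uses spa_first_txg(spa).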
3507 * 3508 * We currently do not make DTL entries for failed spontaneous 3509 * self-healing writes triggered by normal (non-scrubbing) 3510 * reads, because we have no transactional context in which to 3511 * do so -- and it's not clear that it'd be desirable anyway. 3512 */ 3513 if (vd->vdev_ops->vdev_op_leaf) { 3514 uint64_t commit_txg = txg; 3515 if (flags & ZIO_FLAG_SCAN_THREAD) { 3516 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 3517 ASSERT(spa_sync_pass(spa) == 1); 3518 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); 3519 commit_txg = spa_syncing_txg(spa); 3520 } else if (spa->spa_claiming) { 3521 ASSERT(flags & ZIO_FLAG_IO_REPAIR); 3522 commit_txg = spa_first_txg(spa); 3523 } 3524 ASSERT(commit_txg >= spa_syncing_txg(spa)); 3525 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) 3526 return; 3527 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3528 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); 3529 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); 3530 } 3531 if (vd != rvd) 3532 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); 3533 } 3534 } 3535 3536 /* 3537 * Update the in-core space usage stats for this vdev, its metaslab class, 3538 * and the root vdev. 3539 */ 3540 void 3541 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, 3542 int64_t space_delta) 3543 { 3544 int64_t dspace_delta = space_delta; 3545 spa_t *spa = vd->vdev_spa; 3546 vdev_t *rvd = spa->spa_root_vdev; 3547 metaslab_group_t *mg = vd->vdev_mg; 3548 metaslab_class_t *mc = mg ? mg->mg_class : NULL; 3549 3550 ASSERT(vd == vd->vdev_top); 3551 3552 /* 3553 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion 3554 * factor. We must calculate this here and not at the root vdev 3555 * because the root vdev's psize-to-asize is simply the max of its 3556 * childrens', thus not accurate enough for us. 3557 */ 3558 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); 3559 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); 3560 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * 3561 vd->vdev_deflate_ratio; 3562 3563 mutex_enter(&vd->vdev_stat_lock); 3564 vd->vdev_stat.vs_alloc += alloc_delta; 3565 vd->vdev_stat.vs_space += space_delta; 3566 vd->vdev_stat.vs_dspace += dspace_delta; 3567 mutex_exit(&vd->vdev_stat_lock); 3568 3569 if (mc == spa_normal_class(spa)) { 3570 mutex_enter(&rvd->vdev_stat_lock); 3571 rvd->vdev_stat.vs_alloc += alloc_delta; 3572 rvd->vdev_stat.vs_space += space_delta; 3573 rvd->vdev_stat.vs_dspace += dspace_delta; 3574 mutex_exit(&rvd->vdev_stat_lock); 3575 } 3576 3577 if (mc != NULL) { 3578 ASSERT(rvd == vd->vdev_parent); 3579 ASSERT(vd->vdev_ms_count != 0); 3580 3581 metaslab_class_space_update(mc, 3582 alloc_delta, defer_delta, space_delta, dspace_delta); 3583 } 3584 } 3585 3586 /* 3587 * Mark a top-level vdev's config as dirty, placing it on the dirty list 3588 * so that it will be written out next time the vdev configuration is synced. 3589 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. 3590 */ 3591 void 3592 vdev_config_dirty(vdev_t *vd) 3593 { 3594 spa_t *spa = vd->vdev_spa; 3595 vdev_t *rvd = spa->spa_root_vdev; 3596 int c; 3597 3598 ASSERT(spa_writeable(spa)); 3599 3600 /* 3601 * If this is an aux vdev (as with l2cache and spare devices), then we 3602 * update the vdev config manually and set the sync flag. 
3603 */
3604 if (vd->vdev_aux != NULL) {
3605 spa_aux_vdev_t *sav = vd->vdev_aux;
3606 nvlist_t **aux;
3607 uint_t naux;
3608
3609 for (c = 0; c < sav->sav_count; c++) {
3610 if (sav->sav_vdevs[c] == vd)
3611 break;
3612 }
3613
3614 if (c == sav->sav_count) {
3615 /*
3616 * We're being removed. There's nothing more to do.
3617 */
3618 ASSERT(sav->sav_sync == B_TRUE);
3619 return;
3620 }
3621
3622 sav->sav_sync = B_TRUE;
3623
3624 if (nvlist_lookup_nvlist_array(sav->sav_config,
3625 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
3626 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
3627 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
3628 }
3629
3630 ASSERT(c < naux);
3631
3632 /*
3633 * Setting the nvlist in the middle of the array is a little
3634 * sketchy, but it will work.
3635 */
3636 nvlist_free(aux[c]);
3637 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
3638
3639 return;
3640 }
3641
3642 /*
3643 * The dirty list is protected by the SCL_CONFIG lock. The caller
3644 * must either hold SCL_CONFIG as writer, or must be the sync thread
3645 * (which holds SCL_CONFIG as reader). There's only one sync thread,
3646 * so this is sufficient to ensure mutual exclusion.
3647 */
3648 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
3649 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3650 spa_config_held(spa, SCL_CONFIG, RW_READER)));
3651
3652 if (vd == rvd) {
3653 for (c = 0; c < rvd->vdev_children; c++)
3654 vdev_config_dirty(rvd->vdev_child[c]);
3655 } else {
3656 ASSERT(vd == vd->vdev_top);
3657
3658 if (!list_link_active(&vd->vdev_config_dirty_node) &&
3659 vdev_is_concrete(vd)) {
3660 list_insert_head(&spa->spa_config_dirty_list, vd);
3661 }
3662 }
3663 }
3664
3665 void
3666 vdev_config_clean(vdev_t *vd)
3667 {
3668 spa_t *spa = vd->vdev_spa;
3669
3670 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
3671 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
3672 spa_config_held(spa, SCL_CONFIG, RW_READER)));
3673
3674 ASSERT(list_link_active(&vd->vdev_config_dirty_node));
3675 list_remove(&spa->spa_config_dirty_list, vd);
3676 }
3677
3678 /*
3679 * Mark a top-level vdev's state as dirty, so that the next pass of
3680 * spa_sync() can convert this into vdev_config_dirty(). We distinguish
3681 * the state changes from larger config changes because they require
3682 * much less locking, and are often needed for administrative actions.
3683 */
3684 void
3685 vdev_state_dirty(vdev_t *vd)
3686 {
3687 spa_t *spa = vd->vdev_spa;
3688
3689 ASSERT(spa_writeable(spa));
3690 ASSERT(vd == vd->vdev_top);
3691
3692 /*
3693 * The state list is protected by the SCL_STATE lock. The caller
3694 * must either hold SCL_STATE as writer, or must be the sync thread
3695 * (which holds SCL_STATE as reader). There's only one sync thread,
3696 * so this is sufficient to ensure mutual exclusion.
3697 */ 3698 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 3699 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3700 spa_config_held(spa, SCL_STATE, RW_READER))); 3701 3702 if (!list_link_active(&vd->vdev_state_dirty_node) && 3703 vdev_is_concrete(vd)) 3704 list_insert_head(&spa->spa_state_dirty_list, vd); 3705 } 3706 3707 void 3708 vdev_state_clean(vdev_t *vd) 3709 { 3710 spa_t *spa = vd->vdev_spa; 3711 3712 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 3713 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3714 spa_config_held(spa, SCL_STATE, RW_READER))); 3715 3716 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 3717 list_remove(&spa->spa_state_dirty_list, vd); 3718 } 3719 3720 /* 3721 * Propagate vdev state up from children to parent. 3722 */ 3723 void 3724 vdev_propagate_state(vdev_t *vd) 3725 { 3726 spa_t *spa = vd->vdev_spa; 3727 vdev_t *rvd = spa->spa_root_vdev; 3728 int degraded = 0, faulted = 0; 3729 int corrupted = 0; 3730 vdev_t *child; 3731 3732 if (vd->vdev_children > 0) { 3733 for (int c = 0; c < vd->vdev_children; c++) { 3734 child = vd->vdev_child[c]; 3735 3736 /* 3737 * Don't factor holes or indirect vdevs into the 3738 * decision. 3739 */ 3740 if (!vdev_is_concrete(child)) 3741 continue; 3742 3743 if (!vdev_readable(child) || 3744 (!vdev_writeable(child) && spa_writeable(spa))) { 3745 /* 3746 * Root special: if there is a top-level log 3747 * device, treat the root vdev as if it were 3748 * degraded. 3749 */ 3750 if (child->vdev_islog && vd == rvd) 3751 degraded++; 3752 else 3753 faulted++; 3754 } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { 3755 degraded++; 3756 } 3757 3758 if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) 3759 corrupted++; 3760 } 3761 3762 vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); 3763 3764 /* 3765 * Root special: if there is a top-level vdev that cannot be 3766 * opened due to corrupted metadata, then propagate the root 3767 * vdev's aux state as 'corrupt' rather than 'insufficient 3768 * replicas'. 3769 */ 3770 if (corrupted && vd == rvd && 3771 rvd->vdev_state == VDEV_STATE_CANT_OPEN) 3772 vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, 3773 VDEV_AUX_CORRUPT_DATA); 3774 } 3775 3776 if (vd->vdev_parent) 3777 vdev_propagate_state(vd->vdev_parent); 3778 } 3779 3780 /* 3781 * Set a vdev's state. If this is during an open, we don't update the parent 3782 * state, because we're in the process of opening children depth-first. 3783 * Otherwise, we propagate the change to the parent. 3784 * 3785 * If this routine places a device in a faulted state, an appropriate ereport is 3786 * generated. 3787 */ 3788 void 3789 vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) 3790 { 3791 uint64_t save_state; 3792 spa_t *spa = vd->vdev_spa; 3793 3794 if (state == vd->vdev_state) { 3795 vd->vdev_stat.vs_aux = aux; 3796 return; 3797 } 3798 3799 save_state = vd->vdev_state; 3800 3801 vd->vdev_state = state; 3802 vd->vdev_stat.vs_aux = aux; 3803 3804 /* 3805 * If we are setting the vdev state to anything but an open state, then 3806 * always close the underlying device unless the device has requested 3807 * a delayed close (i.e. we're about to remove or fault the device). 3808 * Otherwise, we keep accessible but invalid devices open forever. 3809 * We don't call vdev_close() itself, because that implies some extra 3810 * checks (offline, etc) that we don't want here. This is limited to 3811 * leaf devices, because otherwise closing the device will affect other 3812 * children. 
	 */
	if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
	    vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_close(vd);

	/*
	 * If we have brought this vdev back into service, we need
	 * to notify fmd so that it can gracefully repair any outstanding
	 * cases due to a missing device. We do this in all cases, even those
	 * that probably don't correlate to a repaired fault. This is sure to
	 * catch all cases, and we let the zfs-retire agent sort it out. If
	 * this is a transient state it's OK, as the retire agent will
	 * double-check the state of the vdev before repairing it.
	 */
	if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
	    vd->vdev_prevstate != state)
		zfs_post_state_change(spa, vd);

	if (vd->vdev_removed &&
	    state == VDEV_STATE_CANT_OPEN &&
	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
		/*
		 * If the previous state is set to VDEV_STATE_REMOVED, then
		 * this device was previously marked removed and someone
		 * attempted to reopen it. If this failed due to a nonexistent
		 * device, then keep the device in the REMOVED state. We also
		 * leave the device in the REMOVED state if this is one of our
		 * special test online cases, which are only attempting to
		 * online the device and shouldn't generate an FMA fault.
		 */
		vd->vdev_state = VDEV_STATE_REMOVED;
		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	} else if (state == VDEV_STATE_REMOVED) {
		vd->vdev_removed = B_TRUE;
	} else if (state == VDEV_STATE_CANT_OPEN) {
		/*
		 * If we fail to open a vdev during an import or recovery, we
		 * mark it as "not available", which signifies that it was
		 * never there to begin with. Failure to open such a device
		 * is not considered an error.
		 */
		if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
		    spa_load_state(spa) == SPA_LOAD_RECOVER) &&
		    vd->vdev_ops->vdev_op_leaf)
			vd->vdev_not_present = 1;

		/*
		 * Post the appropriate ereport. If the 'prevstate' field is
		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
		 * that this is part of a vdev_reopen(). In this case, we don't
		 * want to post the ereport if the device was already in the
		 * CANT_OPEN state beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event. If we
		 * hit this case, then we have detected an insertion event for a
		 * faulted or offline device that wasn't in the removed state.
		 * In this scenario, we don't post an ereport because we are
		 * about to replace the device, or attempt an online with
		 * vdev_forcefault, which will generate the fault for us.
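		 *
		 * In short, the check below posts an ereport only when the
		 * state actually changed (or a fault was forced), the device
		 * was genuinely present, no removal-check online is in
		 * progress, and the vdev is not the root vdev itself.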
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	if (!isopen && vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}

boolean_t
vdev_children_are_offline(vdev_t *vd)
{
	ASSERT(!vd->vdev_ops->vdev_op_leaf);

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
			return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Check the vdev configuration to ensure that it's capable of supporting
 * a root pool. We do not support partial configuration.
 * In addition, only a single top-level vdev is allowed.
 */
boolean_t
vdev_is_bootable(vdev_t *vd)
{
	if (!vd->vdev_ops->vdev_op_leaf) {
		char *vdev_type = vd->vdev_ops->vdev_op_type;

		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
		    vd->vdev_children > 1) {
			return (B_FALSE);
		} else if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0 ||
		    strcmp(vdev_type, VDEV_TYPE_INDIRECT) == 0) {
			return (B_FALSE);
		}
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (!vdev_is_bootable(vd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

boolean_t
vdev_is_concrete(vdev_t *vd)
{
	vdev_ops_t *ops = vd->vdev_ops;
	if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
	    ops == &vdev_missing_ops || ops == &vdev_root_ops) {
		return (B_FALSE);
	} else {
		return (B_TRUE);
	}
}

/*
 * Determine if a log device has valid content. If the vdev was
 * removed or faulted in the MOS config then we know that
 * the content on the log device has already been written to the pool.
 */
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
	if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
	    !vd->vdev_removed)
		return (B_TRUE);

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_log_state_valid(vd->vdev_child[c]))
			return (B_TRUE);

	return (B_FALSE);
}

/*
 * Expand a vdev if possible.
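 *
 * In practice (callers are outside this function), expansion re-derives the
 * deflate ratio and, if the device's allocatable size now spans more
 * metaslabs than are currently initialized, creates the new metaslabs and
 * dirties the config; this is the path taken when a top-level vdev grows,
 * e.g. via autoexpand or 'zpool online -e', though the exact callers are
 * not shown here.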
 */
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
	ASSERT(vd->vdev_top == vd);
	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	vdev_set_deflate_ratio(vd);

	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
	    vdev_is_concrete(vd)) {
		VERIFY(vdev_metaslab_init(vd, txg) == 0);
		vdev_config_dirty(vd);
	}
}

/*
 * Split a vdev.
 */
void
vdev_split(vdev_t *vd)
{
	vdev_t *cvd, *pvd = vd->vdev_parent;

	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	cvd = pvd->vdev_child[0];
	if (pvd->vdev_children == 1) {
		vdev_remove_parent(cvd);
		cvd->vdev_splitting = B_TRUE;
	}
	vdev_propagate_state(cvd);
}

void
vdev_deadman(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		vdev_deadman(cvd);
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		vdev_queue_t *vq = &vd->vdev_queue;

		mutex_enter(&vq->vq_lock);
		if (avl_numnodes(&vq->vq_active_tree) > 0) {
			spa_t *spa = vd->vdev_spa;
			zio_t *fio;
			uint64_t delta;

			/*
			 * Look at the head of all the pending queues; if any
			 * I/O has been outstanding for longer than the
			 * spa_deadman_synctime, we panic the system.
			 */
			fio = avl_first(&vq->vq_active_tree);
			delta = gethrtime() - fio->io_timestamp;
			if (delta > spa_deadman_synctime(spa)) {
				vdev_dbgmsg(vd, "SLOW IO: zio timestamp "
				    "%lluns, delta %lluns, last io %lluns",
				    (u_longlong_t)fio->io_timestamp,
				    (u_longlong_t)delta,
				    (u_longlong_t)vq->vq_io_complete_ts);
				fm_panic("I/O to pool '%s' appears to be "
				    "hung.", spa_name(spa));
			}
		}
		mutex_exit(&vq->vq_lock);
	}
}
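
/*
 * Usage note (illustrative sketch, assuming the usual spa deadman wiring):
 * vdev_deadman() is expected to be driven periodically from the spa deadman
 * cycle, roughly
 *
 *	spa_deadman(spa)
 *		-> vdev_deadman(spa->spa_root_vdev)
 *
 * so a single hung leaf I/O older than spa_deadman_synctime(spa) is enough
 * to trigger the fm_panic() above.
 */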