/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2017 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	&vdev_indirect_ops,
	NULL
};

/* maximum scrub/resilver I/O queue per leaf vdev */
int zfs_scrub_limit = 10;

/* default target for number of metaslabs per top-level vdev */
int zfs_vdev_default_ms_count = 200;

/* minimum number of metaslabs per top-level vdev */
int zfs_vdev_min_ms_count = 16;

/* practical upper limit of total metaslabs per top-level vdev */
int zfs_vdev_ms_count_limit = 1ULL << 17;

/* lower limit for metaslab size (512M) */
int zfs_vdev_default_ms_shift = 29;

/* upper limit for metaslab size (16G) */
int zfs_vdev_max_ms_shift = 34;

boolean_t vdev_validate_skip = B_FALSE;

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
int vdev_dtl_sm_blksz = (1 << 12);

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
int vdev_standard_sm_blksz = (1 << 17);

int zfs_ashift_min;
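
/*
 * vdev_dbgmsg() below is used like printf() throughout this file; e.g.
 * vdev_metaslab_init() reports a failure as
 *
 *	vdev_dbgmsg(vd, "metaslab_init failed [error=%d]", error);
 *
 * which is logged as "disk vdev '<path>': metaslab_init failed [error=5]"
 * when vdev_path is set (the path and errno here are hypothetical), or
 * with the vdev's id and guid when it is not.
 */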

/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	if (vd->vdev_path != NULL) {
		zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
		    vd->vdev_path, buf);
	} else {
		zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
		    vd->vdev_ops->vdev_op_type,
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)vd->vdev_guid, buf);
	}
}

void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
	char state[20];

	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
		zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
		    vd->vdev_ops->vdev_op_type);
		return;
	}

	switch (vd->vdev_state) {
	case VDEV_STATE_UNKNOWN:
		(void) snprintf(state, sizeof (state), "unknown");
		break;
	case VDEV_STATE_CLOSED:
		(void) snprintf(state, sizeof (state), "closed");
		break;
	case VDEV_STATE_OFFLINE:
		(void) snprintf(state, sizeof (state), "offline");
		break;
	case VDEV_STATE_REMOVED:
		(void) snprintf(state, sizeof (state), "removed");
		break;
	case VDEV_STATE_CANT_OPEN:
		(void) snprintf(state, sizeof (state), "can't open");
		break;
	case VDEV_STATE_FAULTED:
		(void) snprintf(state, sizeof (state), "faulted");
		break;
	case VDEV_STATE_DEGRADED:
		(void) snprintf(state, sizeof (state), "degraded");
		break;
	case VDEV_STATE_HEALTHY:
		(void) snprintf(state, sizeof (state), "healthy");
		break;
	default:
		(void) snprintf(state, sizeof (state), "<state %u>",
		    (uint_t)vd->vdev_state);
	}

	zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
	    "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
	    vd->vdev_islog ? " (log)" : "",
	    (u_longlong_t)vd->vdev_guid,
	    vd->vdev_path ? vd->vdev_path : "N/A", state);

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/* ARGSUSED */
void
vdev_default_xlate(vdev_t *vd, const range_seg_t *in, range_seg_t *res)
{
	res->rs_start = in->rs_start;
	res->rs_end = in->rs_end;
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}
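
/*
 * Illustration (values hypothetical): with ashift 12 (4K sectors), a
 * psize of 10,000 bytes rounds up to P2ROUNDUP(10000, 4096) = 12,288.
 * For a mirror, each child reports its own asize for that psize and the
 * parent takes the maximum.
 */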

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	/*
	 * The allocatable space for a raidz vdev is N * sizeof(smallest child),
	 * so each child must provide at least 1/Nth of its asize.
	 */
	if (pvd->vdev_ops == &vdev_raidz_ops)
		return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
		    pvd->vdev_children);

	return (pvd->vdev_min_asize);
}
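
/*
 * Worked example (values hypothetical): for a 5-child raidz parent with
 * a vdev_min_asize of 10GB, each child must provide at least
 * (10GB + 5 - 1) / 5 = 2GB.  The "+ children - 1" above is a ceiling
 * division, so the children's minimums always cover the parent's.
 */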

void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

static int
vdev_count_leaves_impl(vdev_t *vd)
{
	int n = 0;

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (int c = 0; c < vd->vdev_children; c++)
		n += vdev_count_leaves_impl(vd->vdev_child[c]);

	return (n);
}

int
vdev_count_leaves(spa_t *spa)
{
	return (vdev_count_leaves_impl(spa->spa_root_vdev));
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;
	spa_t *spa = cvd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf) {
		list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
		cvd->vdev_spa->spa_leaf_list_gen++;
	}
}
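
/*
 * A vdev's guid sum covers its own guid plus those of its entire
 * subtree, so the root vdev's sum describes every device in the pool.
 * That sum is recorded in the uberblock and verified at import, which
 * is how a pool with a missing or foreign device is detected -- and why
 * vdev_add_child() and vdev_remove_child() must walk every ancestor.
 */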

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	if (cvd->vdev_ops->vdev_op_leaf) {
		spa_t *spa = cvd->vdev_spa;
		list_remove(&spa->spa_leaf_list, cvd);
		spa->spa_leaf_list_gen++;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (int c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;
	vdev_indirect_config_t *vic;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
	vic = &vd->vdev_indirect_config;

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
		spa->spa_load_guid = spa_generate_guid(NULL);
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			guid = spa_generate_guid(NULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			guid = spa_generate_guid(spa);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);
	vic->vic_prev_indirect_vdev = UINT64_MAX;

	rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
	vd->vdev_obsolete_segments = range_tree_create(NULL, NULL);

	list_link_init(&vd->vdev_leaf_node);
	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < DTL_TYPES; t++) {
		vd->vdev_dtl[t] = range_tree_create(NULL, NULL);
	}
	txg_list_create(&vd->vdev_ms_list, spa,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list, spa,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog, nparity;
	vdev_t *vd;
	vdev_indirect_config_t *vic;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (SET_ERROR(EINVAL));

	if ((ops = vdev_getops(type)) == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (SET_ERROR(ENOTSUP));

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (SET_ERROR(ENOTSUP));

	/*
	 * Set the nparity property for RAID-Z vdevs.
	 */
	nparity = -1ULL;
	if (ops == &vdev_raidz_ops) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    &nparity) == 0) {
			if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
				return (SET_ERROR(EINVAL));
			/*
			 * Previous versions could only support 1 or 2 parity
			 * devices.
			 */
			if (nparity > 1 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ2)
				return (SET_ERROR(ENOTSUP));
			if (nparity > 2 &&
			    spa_version(spa) < SPA_VERSION_RAIDZ3)
				return (SET_ERROR(ENOTSUP));
		} else {
			/*
			 * We require the parity to be specified for SPAs that
			 * support multiple parity levels.
			 */
			if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
				return (SET_ERROR(EINVAL));
			/*
			 * Otherwise, we default to 1 parity device for RAID-Z.
			 */
			nparity = 1;
		}
	} else {
		nparity = 0;
	}
	ASSERT(nparity != -1ULL);

	vd = vdev_alloc_common(spa, id, guid, ops);
	vic = &vd->vdev_indirect_config;

	vd->vdev_islog = islog;
	vd->vdev_nparity = nparity;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	ASSERT0(vic->vic_mapping_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
	    &vic->vic_mapping_object);
	ASSERT0(vic->vic_births_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
	    &vic->vic_births_object);
	ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
	    &vic->vic_prev_indirect_vdev);

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
		    &vd->vdev_removing);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
		    &vd->vdev_top_zap);
	} else {
		ASSERT0(vd->vdev_top_zap);
	}

	if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
		    alloctype == VDEV_ALLOC_ADD ||
		    alloctype == VDEV_ALLOC_SPLIT ||
		    alloctype == VDEV_ALLOC_ROOTPOOL);
		vd->vdev_mg = metaslab_group_create(islog ?
		    spa_log_class(spa) : spa_normal_class(spa), vd,
		    spa->spa_alloc_count);
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv,
		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
	} else {
		ASSERT0(vd->vdev_leaf_zap);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
		    &vd->vdev_resilver_txg);

		/*
		 * When importing a pool, we want to ignore the persistent fault
		 * state, as the diagnosis made on another system may not be
		 * valid in the current context.  Local vdevs will
		 * remain in the faulted state.
		 */
		if (spa_load_state(spa) == SPA_LOAD_OPEN) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);

			if (vd->vdev_faulted || vd->vdev_degraded) {
				char *aux;

				vd->vdev_label_aux =
				    VDEV_AUX_ERR_EXCEEDED;
				if (nvlist_lookup_string(nv,
				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
				    strcmp(aux, "external") == 0)
					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
			}
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
	ASSERT(vd->vdev_initialize_thread == NULL);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
	}

	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);
	ASSERT(!list_link_active(&vd->vdev_leaf_node));

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);
	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_close(vd->vdev_dtl_sm);
	for (int t = 0; t < DTL_TYPES; t++) {
		range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
		range_tree_destroy(vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	EQUIV(vd->vdev_indirect_births != NULL,
	    vd->vdev_indirect_mapping != NULL);
	if (vd->vdev_indirect_births != NULL) {
		vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
		vdev_indirect_births_close(vd->vdev_indirect_births);
	}

	if (vd->vdev_obsolete_sm != NULL) {
		ASSERT(vd->vdev_removing ||
		    vd->vdev_ops == &vdev_indirect_ops);
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
	}
	range_tree_destroy(vd->vdev_obsolete_segments);
	rw_destroy(&vd->vdev_indirect_rwlock);
	mutex_destroy(&vd->vdev_obsolete_lock);

	mutex_destroy(&vd->vdev_queue_lock);
	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);
	mutex_destroy(&vd->vdev_initialize_lock);
	mutex_destroy(&vd->vdev_initialize_io_lock);
	cv_destroy(&vd->vdev_initialize_io_cv);
	cv_destroy(&vd->vdev_initialize_cv);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;
	tvd->vdev_top_zap = svd->vdev_top_zap;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;
	svd->vdev_top_zap = 0;

	if (tvd->vdev_mg)
		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
	svd->vdev_checkpoint_sm = NULL;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	/*
	 * State which may be set on a top-level vdev that's in the
	 * process of being removed.
	 */
	ASSERT0(tvd->vdev_indirect_config.vic_births_object);
	ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
	ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
	ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
	ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
	ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
	ASSERT0(tvd->vdev_removing);
	tvd->vdev_removing = svd->vdev_removing;
	tvd->vdev_indirect_config = svd->vdev_indirect_config;
	tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
	tvd->vdev_indirect_births = svd->vdev_indirect_births;
	range_tree_swap(&svd->vdev_obsolete_segments,
	    &tvd->vdev_obsolete_segments);
	tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
	svd->vdev_indirect_config.vic_mapping_object = 0;
	svd->vdev_indirect_config.vic_births_object = 0;
	svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
	svd->vdev_indirect_mapping = NULL;
	svd->vdev_indirect_births = NULL;
	svd->vdev_obsolete_sm = NULL;
	svd->vdev_removing = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_max_asize = cvd->vdev_max_asize;
	mvd->vdev_psize = cvd->vdev_psize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_orig_guid = cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}
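
/*
 * A note on the guid arithmetic above: cvd_guid + (mvd_guid - cvd_guid)
 * is exactly mvd's guid, so the child adopts the parent's identity, and
 * applying the same delta to vdev_guid_sum keeps the subtree sums
 * consistent without re-walking the tree.  The pre-detach guid is saved
 * in vdev_orig_guid so that vdev_validate() can still match a config
 * written before the detach completed (see its aux_guid handling).
 */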

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	uint64_t m;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	metaslab_t **mspp;
	int error;

	ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	/*
	 * This vdev is not being allocated from yet or is a hole.
	 */
	if (vd->vdev_ms_shift == 0)
		return (0);

	ASSERT(!vd->vdev_ishole);

	ASSERT(oldc <= newc);

	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);

	if (oldc != 0) {
		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
		kmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
	}

	vd->vdev_ms = mspp;
	vd->vdev_ms_count = newc;
	for (m = oldc; m < newc; m++) {
		uint64_t object = 0;

		/*
		 * vdev_ms_array may be 0 if we are creating the "fake"
		 * metaslabs for an indirect vdev for zdb's leak detection.
		 * See zdb_leak_init().
		 */
		if (txg == 0 && vd->vdev_ms_array != 0) {
			error = dmu_read(mos, vd->vdev_ms_array,
			    m * sizeof (uint64_t), sizeof (uint64_t), &object,
			    DMU_READ_PREFETCH);
			if (error != 0) {
				vdev_dbgmsg(vd, "unable to read the metaslab "
				    "array [error=%d]", error);
				return (error);
			}
		}

		error = metaslab_init(vd->vdev_mg, m, object, txg,
		    &(vd->vdev_ms[m]));
		if (error != 0) {
			vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
			    error);
			return (error);
		}
	}

	if (txg == 0)
		spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);

	/*
	 * If the vdev is being removed we don't activate
	 * the metaslabs since we want to ensure that no new
	 * allocations are performed on this device.
	 */
	if (oldc == 0 && !vd->vdev_removing)
		metaslab_group_activate(vd->vdev_mg);

	if (txg == 0)
		spa_config_exit(spa, SCL_ALLOC, FTAG);

	return (0);
}
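
/*
 * Illustration (values hypothetical): growing a top-level vdev with
 * ms_shift 33 (8GB metaslabs) from 1TB to 2TB doubles newc from 128 to
 * 256.  The loop above initializes only the new metaslabs [oldc, newc);
 * the ASSERT relies on the fact that a vdev never loses metaslabs once
 * they have been allocated.
 */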

void
vdev_metaslab_fini(vdev_t *vd)
{
	if (vd->vdev_checkpoint_sm != NULL) {
		ASSERT(spa_feature_is_active(vd->vdev_spa,
		    SPA_FEATURE_POOL_CHECKPOINT));
		space_map_close(vd->vdev_checkpoint_sm);
		/*
		 * Even though we close the space map, we need to set its
		 * pointer to NULL. The reason is that vdev_metaslab_fini()
		 * may be called multiple times for certain operations
		 * (i.e. when destroying a pool) so we need to ensure that
		 * this clause never executes twice. This logic is similar
		 * to the one used for the vdev_ms clause below.
		 */
		vd->vdev_checkpoint_sm = NULL;
	}

	if (vd->vdev_ms != NULL) {
		uint64_t count = vd->vdev_ms_count;

		metaslab_group_passivate(vd->vdev_mg);
		for (uint64_t m = 0; m < count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];

			if (msp != NULL)
				metaslab_fini(msp);
		}
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;

		vd->vdev_ms_count = 0;
	}
	ASSERT0(vd->vdev_ms_count);
}

typedef struct vdev_probe_stats {
	boolean_t	vps_readable;
	boolean_t	vps_writeable;
	int		vps_flags;
} vdev_probe_stats_t;

static void
vdev_probe_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_probe_stats_t *vps = zio->io_private;

	ASSERT(vd->vdev_probe_zio != NULL);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_error == 0)
			vps->vps_readable = 1;
		if (zio->io_error == 0 && spa_writeable(spa)) {
			zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
			    zio->io_offset, zio->io_size, zio->io_abd,
			    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
			    ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
		} else {
			abd_free(zio->io_abd);
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_error == 0)
			vps->vps_writeable = 1;
		abd_free(zio->io_abd);
	} else if (zio->io_type == ZIO_TYPE_NULL) {
		zio_t *pio;

		vd->vdev_cant_read |= !vps->vps_readable;
		vd->vdev_cant_write |= !vps->vps_writeable;

		if (vdev_readable(vd) &&
		    (vdev_writeable(vd) || !spa_writeable(spa))) {
			zio->io_error = 0;
		} else {
			ASSERT(zio->io_error != 0);
			vdev_dbgmsg(vd, "failed probe");
			zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
			    spa, vd, NULL, 0, 0);
			zio->io_error = SET_ERROR(ENXIO);
		}

		mutex_enter(&vd->vdev_probe_lock);
		ASSERT(vd->vdev_probe_zio == zio);
		vd->vdev_probe_zio = NULL;
		mutex_exit(&vd->vdev_probe_lock);

		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(zio, &zl)) != NULL)
			if (!vdev_accessible(vd, pio))
				pio->io_error = SET_ERROR(ENXIO);

		kmem_free(vps, sizeof (*vps));
	}
}

/*
 * Determine whether this device is accessible.
 *
 * Read and write to several known locations: the pad regions of each
 * vdev label but the first, which we leave alone in case it contains
 * a VTOC.
 */
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
	spa_t *spa = vd->vdev_spa;
	vdev_probe_stats_t *vps = NULL;
	zio_t *pio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * Don't probe the probe.
	 */
	if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
		return (NULL);

	/*
	 * To prevent 'probe storms' when a device fails, we create
	 * just one probe i/o at a time.  All zios that want to probe
	 * this vdev will become parents of the probe io.
	 */
	mutex_enter(&vd->vdev_probe_lock);

	if ((pio = vd->vdev_probe_zio) == NULL) {
		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);

		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
		    ZIO_FLAG_TRYHARD;

		if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
			/*
			 * vdev_cant_read and vdev_cant_write can only
			 * transition from TRUE to FALSE when we have the
			 * SCL_ZIO lock as writer; otherwise they can only
			 * transition from FALSE to TRUE.  This ensures that
			 * any zio looking at these values can assume that
			 * failures persist for the life of the I/O.  That's
			 * important because when a device has intermittent
			 * connectivity problems, we want to ensure that
			 * they're ascribed to the device (ENXIO) and not
			 * the zio (EIO).
			 *
			 * Since we hold SCL_ZIO as writer here, clear both
			 * values so the probe can reevaluate from first
			 * principles.
			 */
			vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
			vd->vdev_cant_read = B_FALSE;
			vd->vdev_cant_write = B_FALSE;
		}

		vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
		    vdev_probe_done, vps,
		    vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);

		/*
		 * We can't change the vdev state in this context, so we
		 * kick off an async task to do it on our behalf.
		 */
		if (zio != NULL) {
			vd->vdev_probe_wanted = B_TRUE;
			spa_async_request(spa, SPA_ASYNC_PROBE);
		}
	}

	if (zio != NULL)
		zio_add_child(zio, pio);

	mutex_exit(&vd->vdev_probe_lock);

	if (vps == NULL) {
		ASSERT(zio != NULL);
		return (NULL);
	}

	for (int l = 1; l < VDEV_LABELS; l++) {
		zio_nowait(zio_read_phys(pio, vd,
		    vdev_label_offset(vd->vdev_psize, l,
		    offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
		    abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
		    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
		    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
	}

	if (zio == NULL)
		return (pio);

	zio_nowait(pio);
	return (NULL);
}
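
/*
 * vdev_probe() is used in two ways.  vdev_open() below issues a
 * synchronous probe before declaring a leaf usable:
 *
 *	error = zio_wait(vdev_probe(vd, NULL));
 *
 * The zio pipeline instead passes the zio that just failed; that zio
 * becomes a parent of the shared probe and inherits ENXIO if the
 * device proves inaccessible (see vdev_probe_done() above).
 */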

static void
vdev_open_child(void *arg)
{
	vdev_t *vd = arg;

	vd->vdev_open_thread = curthread;
	vd->vdev_open_error = vdev_open(vd);
	vd->vdev_open_thread = NULL;
}

boolean_t
vdev_uses_zvols(vdev_t *vd)
{
	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
	    strlen(ZVOL_DIR)) == 0)
		return (B_TRUE);
	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_uses_zvols(vd->vdev_child[c]))
			return (B_TRUE);
	return (B_FALSE);
}

void
vdev_open_children(vdev_t *vd)
{
	taskq_t *tq;
	int children = vd->vdev_children;

	/*
	 * In order to handle pools on top of zvols, do the opens
	 * in a single thread so that the same thread holds the
	 * spa_namespace_lock.
	 */
	if (vdev_uses_zvols(vd)) {
		for (int c = 0; c < children; c++)
			vd->vdev_child[c]->vdev_open_error =
			    vdev_open(vd->vdev_child[c]);
		return;
	}
	tq = taskq_create("vdev_open", children, minclsyspri,
	    children, children, TASKQ_PREPOPULATE);

	for (int c = 0; c < children; c++)
		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
		    TQ_SLEEP) != TASKQID_INVALID);

	taskq_destroy(tq);
}

/*
 * Compute the raidz-deflation ratio.  Note, we hard-code
 * in 128k (1 << 17) because it is the "typical" blocksize.
 * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
 * otherwise it would inconsistently account for existing bp's.
 */
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
	if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
		vd->vdev_deflate_ratio = (1 << 17) /
		    (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
	}
}
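
/*
 * Illustration (values hypothetical): for a plain disk, asize equals
 * psize, so the ratio is 131072 / (131072 >> 9) = 512, i.e. no
 * deflation.  A raidz top-level vdev yields a smaller ratio because
 * parity sectors inflate the 128K allocation; the ratio is later used
 * to convert raw space into the "deflated" space that block-pointer
 * accounting sees.
 */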

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int error;
	uint64_t osize = 0;
	uint64_t max_osize = 0;
	uint64_t asize, max_asize, psize;
	uint64_t ashift = 0;

	ASSERT(vd->vdev_open_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	vd->vdev_cant_read = B_FALSE;
	vd->vdev_cant_write = B_FALSE;
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	/*
	 * If this vdev is not removed, check its fault status.  If it's
	 * faulted, bail out of the open.
	 */
	if (!vd->vdev_removed && vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (SET_ERROR(ENXIO));
	} else if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (SET_ERROR(ENXIO));
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);

	/*
	 * Reset the vdev_reopening flag so that we actually close
	 * the vdev on error.
	 */
	vd->vdev_reopening = B_FALSE;
	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, NULL, ENXIO);

	if (error) {
		if (vd->vdev_removed &&
		    vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
			vd->vdev_removed = B_FALSE;

		if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
			    vd->vdev_stat.vs_aux);
		} else {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    vd->vdev_stat.vs_aux);
		}
		return (error);
	}

	vd->vdev_removed = B_FALSE;

	/*
	 * Recheck the faulted flag now that we have confirmed that
	 * the vdev is accessible.  If we're faulted, bail.
	 */
	if (vd->vdev_faulted) {
		ASSERT(vd->vdev_children == 0);
		ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
		    vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    vd->vdev_label_aux);
		return (SET_ERROR(ENXIO));
	}

	if (vd->vdev_degraded) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
		    VDEV_AUX_ERR_EXCEEDED);
	} else {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
	}

	/*
	 * For hole or missing vdevs we just return success.
	 */
	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
		return (0);

	for (int c = 0; c < vd->vdev_children; c++) {
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}
	}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
	max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (SET_ERROR(EOVERFLOW));
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
		    VDEV_LABEL_END_SIZE);
	} else {
		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (SET_ERROR(EOVERFLOW));
		}
		psize = 0;
		asize = osize;
		max_asize = max_osize;
	}

	vd->vdev_psize = psize;
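
	/*
	 * (The overhead subtracted above reflects the standard on-disk
	 * layout: two 256K labels plus the boot block at the front of a
	 * leaf device, and two more labels at the end.)
	 */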

	/*
	 * Make sure the allocatable size hasn't shrunk too much.
	 */
	if (asize < vd->vdev_min_asize) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		return (SET_ERROR(EINVAL));
	}

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 * For testing purposes, a higher ashift can be requested.
		 */
		vd->vdev_asize = asize;
		vd->vdev_max_asize = max_asize;
		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
		vd->vdev_ashift = MAX(zfs_ashift_min, vd->vdev_ashift);
	} else {
		/*
		 * Detect if the alignment requirement has increased.
		 * We don't want to make the pool unavailable, just
		 * issue a warning instead.
		 */
		if (ashift > vd->vdev_top->vdev_ashift &&
		    vd->vdev_ops->vdev_op_leaf) {
			cmn_err(CE_WARN,
			    "Disk, '%s', has a block alignment that is "
			    "larger than the pool's alignment\n",
			    vd->vdev_path);
		}
		vd->vdev_max_asize = max_asize;
	}

	/*
	 * If all children are healthy, we update asize if either:
	 *
	 * The asize has increased, due to a device expansion caused by
	 * dynamic LUN growth or vdev replacement, and automatic expansion
	 * is enabled; making the additional space available.
	 *
	 * The asize has decreased, due to a device shrink usually caused by
	 * a vdev replace with a smaller device.  This ensures that
	 * calculations based on max_asize and asize (e.g. esize) are always
	 * valid.  It's safe to do this as we've already validated that asize
	 * is greater than vdev_min_asize.
	 */
	if (vd->vdev_state == VDEV_STATE_HEALTHY &&
	    ((asize > vd->vdev_asize &&
	    (vd->vdev_expanding || spa->spa_autoexpand)) ||
	    (asize < vd->vdev_asize)))
		vd->vdev_asize = asize;

	vdev_set_min_asize(vd);

	/*
	 * Ensure we can issue some IO before declaring the
	 * vdev open for business.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
		    VDEV_AUX_ERR_EXCEEDED);
		return (error);
	}

	/*
	 * Track the min and max ashift values for normal data devices.
	 */
	if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
	    !vd->vdev_islog && vd->vdev_aux == NULL) {
		if (vd->vdev_ashift > spa->spa_max_ashift)
			spa->spa_max_ashift = vd->vdev_ashift;
		if (vd->vdev_ashift < spa->spa_min_ashift)
			spa->spa_min_ashift = vd->vdev_ashift;
	}

	/*
	 * If a leaf vdev has a DTL, and seems healthy, then kick off a
	 * resilver.  But don't do this if we are doing a reopen for a scrub,
	 * since this would just restart the scrub we are already doing.
	 */
	if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
	    vdev_resilver_needed(vd, NULL, NULL))
		spa_async_request(spa, SPA_ASYNC_RESILVER);

	return (0);
}

/*
 * Called once the vdevs are all opened, this routine validates the label
 * contents.  This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
int
vdev_validate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	uint64_t guid = 0, aux_guid = 0, top_guid;
	uint64_t state;
	nvlist_t *nvl;
	uint64_t txg;

	if (vdev_validate_skip)
		return (0);

	for (uint64_t c = 0; c < vd->vdev_children; c++)
		if (vdev_validate(vd->vdev_child[c]) != 0)
			return (SET_ERROR(EBADF));

	/*
	 * If the device has already failed, or was marked offline, don't do
	 * any further validation.  Otherwise, label I/O will fail and we will
	 * overwrite the previous state.
	 */
	if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
		return (0);

	/*
	 * If we are performing an extreme rewind, we allow for a label that
	 * was modified at a point after the current txg.
	 * If config lock is not held do not check for the txg. spa_sync could
	 * be updating the vdev's label before updating spa_last_synced_txg.
	 */
	if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
	    spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
		txg = UINT64_MAX;
	else
		txg = spa_last_synced_txg(spa);

	if ((label = vdev_label_read_config(vd, txg)) == NULL) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LABEL);
		vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
		    "txg %llu", (u_longlong_t)txg);
		return (0);
	}

	/*
	 * Determine if this vdev has been split off into another
	 * pool.  If so, then refuse to open it.
	 */
	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
	    &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_SPLIT_POOL);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
		return (0);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_POOL_GUID);
		return (0);
	}

	/*
	 * If config is not trusted then ignore the spa guid check. This is
	 * necessary because if the machine crashed during a re-guid the new
	 * guid might have been written to all of the vdev labels, but not the
	 * cached config. The check will be performed again once we have the
	 * trusted config from the MOS.
	 */
	if (spa->spa_trust_config && guid != spa_guid(spa)) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
		    "match config (%llu != %llu)", (u_longlong_t)guid,
		    (u_longlong_t)spa_guid(spa));
		return (0);
	}

	if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
	    != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
	    &aux_guid) != 0)
		aux_guid = 0;

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_GUID);
		return (0);
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
	    != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_TOP_GUID);
		return (0);
	}

	/*
	 * If this vdev just became a top-level vdev because its sibling was
	 * detached, it will have adopted the parent's vdev guid -- but the
	 * label may or may not be on disk yet.  Fortunately, either version
	 * of the label will have the same top guid, so if we're a top-level
	 * vdev, we can safely compare to that instead.
	 * However, if the config comes from a cachefile that failed to update
	 * after the detach, a top-level vdev will appear as a non top-level
	 * vdev in the config.  Also relax the constraints if we perform an
	 * extreme rewind.
	 *
	 * If we split this vdev off instead, then we also check the
	 * original pool's guid.  We don't want to consider the vdev
	 * corrupt if it is partway through a split operation.
	 */
	if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
		boolean_t mismatch = B_FALSE;
		if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
			if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
				mismatch = B_TRUE;
		} else {
			if (vd->vdev_guid != top_guid &&
			    vd->vdev_top->vdev_guid != guid)
				mismatch = B_TRUE;
		}

		if (mismatch) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			vdev_dbgmsg(vd, "vdev_validate: config guid "
			    "doesn't match label guid");
			vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
			    (u_longlong_t)vd->vdev_guid,
			    (u_longlong_t)vd->vdev_top->vdev_guid);
			vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
			    "aux_guid %llu", (u_longlong_t)guid,
			    (u_longlong_t)top_guid, (u_longlong_t)aux_guid);
			return (0);
		}
	}

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
	    &state) != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		nvlist_free(label);
		vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
		    ZPOOL_CONFIG_POOL_STATE);
		return (0);
	}

	nvlist_free(label);
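
	/*
	 * Note that most failure paths above deliberately return 0 after
	 * setting VDEV_STATE_CANT_OPEN: per the block comment preceding
	 * vdev_validate(), only evidence that the pool was destroyed or
	 * exported (checked below) is reported to the caller, as EBADF.
	 */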

	/*
	 * If this is a verbatim import, no need to check the
	 * state of the pool.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
	    spa_load_state(spa) == SPA_LOAD_OPEN &&
	    state != POOL_STATE_ACTIVE) {
		vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
		    "for spa %s", (u_longlong_t)state, spa->spa_name);
		return (SET_ERROR(EBADF));
	}

	/*
	 * If we were able to open and validate a vdev that was
	 * previously marked permanently unavailable, clear that state
	 * now.
	 */
	if (vd->vdev_not_present)
		vd->vdev_not_present = 0;

	return (0);
}

static void
vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
{
	if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
		if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
			zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
			    "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
			    dvd->vdev_path, svd->vdev_path);
			spa_strfree(dvd->vdev_path);
			dvd->vdev_path = spa_strdup(svd->vdev_path);
		}
	} else if (svd->vdev_path != NULL) {
		dvd->vdev_path = spa_strdup(svd->vdev_path);
		zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
		    (u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
	}
}

/*
 * Recursively copy vdev paths from one vdev to another.  The source and
 * destination vdev trees must have the same geometry; otherwise an error
 * is returned.  Intended to copy paths from userland config into MOS config.
 */
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
	if ((svd->vdev_ops == &vdev_missing_ops) ||
	    (svd->vdev_ishole && dvd->vdev_ishole) ||
	    (dvd->vdev_ops == &vdev_indirect_ops))
		return (0);

	if (svd->vdev_ops != dvd->vdev_ops) {
		vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
		    svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
		return (SET_ERROR(EINVAL));
	}

	if (svd->vdev_guid != dvd->vdev_guid) {
		vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
		    "%llu)", (u_longlong_t)svd->vdev_guid,
		    (u_longlong_t)dvd->vdev_guid);
		return (SET_ERROR(EINVAL));
	}

	if (svd->vdev_children != dvd->vdev_children) {
		vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
		    "%llu != %llu", (u_longlong_t)svd->vdev_children,
		    (u_longlong_t)dvd->vdev_children);
		return (SET_ERROR(EINVAL));
	}

	for (uint64_t i = 0; i < svd->vdev_children; i++) {
		int error = vdev_copy_path_strict(svd->vdev_child[i],
		    dvd->vdev_child[i]);
		if (error != 0)
			return (error);
	}

	if (svd->vdev_ops->vdev_op_leaf)
		vdev_copy_path_impl(svd, dvd);

	return (0);
}

static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
	ASSERT(stvd->vdev_top == stvd);
	ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);

	for (uint64_t i = 0; i < dvd->vdev_children; i++) {
		vdev_copy_path_search(stvd, dvd->vdev_child[i]);
	}

	if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
		return;

	/*
	 * The idea here is that while a vdev can shift positions within
	 * a top vdev (when replacing, attaching mirror, etc.) it cannot
	 * step outside of it.
	 */
	vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);

	if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
		return;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	vdev_copy_path_impl(vd, dvd);
}

/*
 * Recursively copy vdev paths from one root vdev to another.  Source and
 * destination vdev trees may differ in geometry.  For each destination leaf
 * vdev, search a vdev with the same guid and top vdev id in the source.
 * Intended to copy paths from userland config into MOS config.
 */
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
	uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
	ASSERT(srvd->vdev_ops == &vdev_root_ops);
	ASSERT(drvd->vdev_ops == &vdev_root_ops);

	for (uint64_t i = 0; i < children; i++) {
		vdev_copy_path_search(srvd->vdev_child[i],
		    drvd->vdev_child[i]);
	}
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *pvd = vd->vdev_parent;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	/*
	 * If our parent is reopening, then we are as well, unless we are
	 * going offline.
	 */
	if (pvd != NULL && pvd->vdev_reopening)
		vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);

	vd->vdev_ops->vdev_op_close(vd);

	vdev_cache_purge(vd);

	/*
	 * We record the previous state before we close it, so that if we are
	 * doing a reopen(), we don't generate FMA ereports if we notice that
	 * it's still faulted.
	 */
	vd->vdev_prevstate = vd->vdev_state;

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_hold(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_is_root(spa));
	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
		return;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_hold(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_hold(vd);
}

void
vdev_rele(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_is_root(spa));
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_rele(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_rele(vd);
}

/*
 * Reopen all interior vdevs and any unopened leaves.  We don't actually
 * reopen leaf vdevs which had previously been opened as they might deadlock
 * on the spa_config_lock.  Instead we only obtain the leaf's physical size.
 * If the leaf has never been opened then open it, as usual.
 */
void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	/* set the reopening flag unless we're taking the vdev offline */
	vd->vdev_reopening = !vd->vdev_offline;
	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Call vdev_validate() here to make sure we have the same device.
	 * Otherwise, a device with an invalid label could be successfully
	 * opened in response to vdev_reopen().
	 */
1999 */
2000 if (vd->vdev_aux) {
2001 (void) vdev_validate_aux(vd);
2002 if (vdev_readable(vd) && vdev_writeable(vd) &&
2003 vd->vdev_aux == &spa->spa_l2cache &&
2004 !l2arc_vdev_present(vd))
2005 l2arc_add_vdev(spa, vd);
2006 } else {
2007 (void) vdev_validate(vd);
2008 }
2009
2010 /*
2011 * Reassess parent vdev's health.
2012 */
2013 vdev_propagate_state(vd);
2014 }
2015
2016 int
2017 vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
2018 {
2019 int error;
2020
2021 /*
2022 * Normally, partial opens (e.g. of a mirror) are allowed.
2023 * For a create, however, we want to fail the request if
2024 * there are any components we can't open.
2025 */
2026 error = vdev_open(vd);
2027
2028 if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
2029 vdev_close(vd);
2030 return (error ? error : ENXIO);
2031 }
2032
2033 /*
2034 * Recursively load DTLs and initialize all labels.
2035 */
2036 if ((error = vdev_dtl_load(vd)) != 0 ||
2037 (error = vdev_label_init(vd, txg, isreplacing ?
2038 VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
2039 vdev_close(vd);
2040 return (error);
2041 }
2042
2043 return (0);
2044 }
2045
2046 void
2047 vdev_metaslab_set_size(vdev_t *vd)
2048 {
2049 uint64_t asize = vd->vdev_asize;
2050 uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
2051 uint64_t ms_shift;
2052
2053 /*
2054 * There are two dimensions to the metaslab sizing calculation:
2055 * the size of the metaslab and the count of metaslabs per vdev.
2056 *
2057 * The default values used below are a good balance between memory
2058 * usage (larger metaslab size means more memory needed for loaded
2059 * metaslabs; more metaslabs means more memory needed for the
2060 * metaslab_t structs), metaslab load time (larger metaslabs take
2061 * longer to load), and metaslab sync time (more metaslabs means
2062 * more time spent syncing all of them).
2063 *
2064 * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
2065 * The ranges of the dimensions are as follows:
2066 *
2067 * 2^29 <= ms_size <= 2^34
2068 * 16 <= ms_count <= 131,072
2069 *
2070 * On the lower end of vdev sizes, we aim for metaslab sizes of
2071 * at least 512MB (2^29) to minimize fragmentation effects when
2072 * testing with smaller devices. However, the count constraint
2073 * of at least 16 metaslabs will override this minimum size goal.
2074 *
2075 * On the upper end of vdev sizes, we aim for a maximum metaslab
2076 * size of 16GB. However, we will cap the total count to 2^17
2077 * metaslabs to keep our memory footprint in check and let the
2078 * metaslab size grow from there if that limit is hit.
2079 *
2080 * The net effect of applying the above constraints is summarized below.
2081 *
2082 * vdev size      metaslab count
2083 * --------------|-----------------
2084 *  < 8GB          ~16
2085 *  8GB   - 100GB  one per 512MB
2086 *  100GB - 3TB    ~200
2087 *  3TB   - 2PB    one per 16GB
2088 *  > 2PB          ~131,072
2089 * --------------------------------
2090 *
2091 * Finally, note that all of the above calculate the initial
2092 * number of metaslabs. Expanding a top-level vdev will result
2093 * in additional metaslabs being allocated, making it possible
2094 * to exceed the zfs_vdev_ms_count_limit.
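 *
 * As a worked example (illustrative, assuming the default tunables
 * above): a 1TB vdev yields ms_count = 1TB >> 29 = 2048, which is
 * above the target of 200, so ms_shift = highbit64(1TB / 200) = 33.
 * That gives 8GB metaslabs and 1TB >> 33 = 128 of them, consistent
 * with the table above.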
2095 */
2096
2097 if (ms_count < zfs_vdev_min_ms_count)
2098 ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
2099 else if (ms_count > zfs_vdev_default_ms_count)
2100 ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
2101 else
2102 ms_shift = zfs_vdev_default_ms_shift;
2103
2104 if (ms_shift < SPA_MAXBLOCKSHIFT) {
2105 ms_shift = SPA_MAXBLOCKSHIFT;
2106 } else if (ms_shift > zfs_vdev_max_ms_shift) {
2107 ms_shift = zfs_vdev_max_ms_shift;
2108 /* cap the total count to constrain memory footprint */
2109 if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
2110 ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
2111 }
2112
2113 vd->vdev_ms_shift = ms_shift;
2114 ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
2115 }
2116
2117 void
2118 vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
2119 {
2120 ASSERT(vd == vd->vdev_top);
2121 /* indirect vdevs don't have metaslabs or dtls */
2122 ASSERT(vdev_is_concrete(vd) || flags == 0);
2123 ASSERT(ISP2(flags));
2124 ASSERT(spa_writeable(vd->vdev_spa));
2125
2126 if (flags & VDD_METASLAB)
2127 (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
2128
2129 if (flags & VDD_DTL)
2130 (void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
2131
2132 (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
2133 }
2134
2135 void
2136 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
2137 {
2138 for (int c = 0; c < vd->vdev_children; c++)
2139 vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
2140
2141 if (vd->vdev_ops->vdev_op_leaf)
2142 vdev_dirty(vd->vdev_top, flags, vd, txg);
2143 }
2144
2145 /*
2146 * DTLs.
2147 *
2148 * A vdev's DTL (dirty time log) is the set of transaction groups for which
2149 * the vdev has less than perfect replication. There are four kinds of DTL:
2150 *
2151 * DTL_MISSING: txgs for which the vdev has no valid copies of the data
2152 *
2153 * DTL_PARTIAL: txgs for which data is available, but not fully replicated
2154 *
2155 * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
2156 * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
2157 * txgs that was scrubbed.
2158 *
2159 * DTL_OUTAGE: txgs which cannot currently be read, whether due to
2160 * persistent errors or just some device being offline.
2161 * Unlike the other three, the DTL_OUTAGE map is not generally
2162 * maintained; it's only computed when needed, typically to
2163 * determine whether a device can be detached.
2164 *
2165 * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
2166 * either has the data or it doesn't.
2167 *
2168 * For interior vdevs such as mirror and RAID-Z the picture is more complex.
2169 * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
2170 * if any child is less than fully replicated, then so is its parent.
2171 * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
2172 * comprising only those txgs which appear in more than 'maxfaults' children;
2173 * those are the txgs we don't have enough replication to read. For example,
2174 * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
2175 * thus, its DTL_MISSING consists of the set of txgs that appear in more than
2176 * two child DTL_MISSING maps.
2177 *
2178 * It should be clear from the above that to compute the DTLs and outage maps
2179 * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
2180 * Therefore, that is all we keep on disk.
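 * For instance (illustrative): in a 2-way mirror where one child was
 * offline for txgs [100, 200), the mirror's DTL_PARTIAL contains
 * [100, 200) but its DTL_MISSING is empty, because the other child
 * (minref == vdev_children for a mirror, see vdev_dtl_reassess() below)
 * still holds every txg.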
 * When loading the pool, or after
2181 * a configuration change, we generate all other DTLs from first principles.
2182 */
2183 void
2184 vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2185 {
2186 range_tree_t *rt = vd->vdev_dtl[t];
2187
2188 ASSERT(t < DTL_TYPES);
2189 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2190 ASSERT(spa_writeable(vd->vdev_spa));
2191
2192 mutex_enter(&vd->vdev_dtl_lock);
2193 if (!range_tree_contains(rt, txg, size))
2194 range_tree_add(rt, txg, size);
2195 mutex_exit(&vd->vdev_dtl_lock);
2196 }
2197
2198 boolean_t
2199 vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
2200 {
2201 range_tree_t *rt = vd->vdev_dtl[t];
2202 boolean_t dirty = B_FALSE;
2203
2204 ASSERT(t < DTL_TYPES);
2205 ASSERT(vd != vd->vdev_spa->spa_root_vdev);
2206
2207 /*
2208 * While we are loading the pool, the DTLs have not been loaded yet.
2209 * Ignore the DTLs and try all devices. This avoids a recursive
2210 * mutex enter on the vdev_dtl_lock, and also makes us try hard
2211 * when loading the pool (relying on the checksum to ensure that
2212 * we get the right data -- note that while loading, we are
2213 * only reading the MOS, which is always checksummed).
2214 */
2215 if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE)
2216 return (B_FALSE);
2217
2218 mutex_enter(&vd->vdev_dtl_lock);
2219 if (!range_tree_is_empty(rt))
2220 dirty = range_tree_contains(rt, txg, size);
2221 mutex_exit(&vd->vdev_dtl_lock);
2222
2223 return (dirty);
2224 }
2225
2226 boolean_t
2227 vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
2228 {
2229 range_tree_t *rt = vd->vdev_dtl[t];
2230 boolean_t empty;
2231
2232 mutex_enter(&vd->vdev_dtl_lock);
2233 empty = range_tree_is_empty(rt);
2234 mutex_exit(&vd->vdev_dtl_lock);
2235
2236 return (empty);
2237 }
2238
2239 /*
2240 * Returns the lowest txg in the DTL range.
2241 */
2242 static uint64_t
2243 vdev_dtl_min(vdev_t *vd)
2244 {
2245 range_seg_t *rs;
2246
2247 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2248 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2249 ASSERT0(vd->vdev_children);
2250
2251 rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2252 return (rs->rs_start - 1);
2253 }
2254
2255 /*
2256 * Returns the highest txg in the DTL.
2257 */
2258 static uint64_t
2259 vdev_dtl_max(vdev_t *vd)
2260 {
2261 range_seg_t *rs;
2262
2263 ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
2264 ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
2265 ASSERT0(vd->vdev_children);
2266
2267 rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
2268 return (rs->rs_end);
2269 }
2270
2271 /*
2272 * Determine if a resilvering vdev should remove any DTL entries from
2273 * its range. If the vdev was resilvering for the entire duration of the
2274 * scan then it should excise that range from its DTLs. Otherwise, this
2275 * vdev is considered partially resilvered and should leave its DTL
2276 * entries intact. The comment in vdev_dtl_reassess() describes how we
2277 * excise the DTLs.
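 * For example (illustrative): if a scan covered txgs (0, 500] but this
 * vdev was faulted again at txg 600 while the scan was running, its
 * DTL max (600) lies outside the scanned range, so the entries are
 * kept and a follow-up resilver remains necessary.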
2278 */
2279 static boolean_t
2280 vdev_dtl_should_excise(vdev_t *vd)
2281 {
2282 spa_t *spa = vd->vdev_spa;
2283 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2284
2285 ASSERT0(scn->scn_phys.scn_errors);
2286 ASSERT0(vd->vdev_children);
2287
2288 if (vd->vdev_state < VDEV_STATE_DEGRADED)
2289 return (B_FALSE);
2290
2291 if (vd->vdev_resilver_txg == 0 ||
2292 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
2293 return (B_TRUE);
2294
2295 /*
2296 * When a resilver is initiated the scan will assign the scn_max_txg
2297 * value to the highest txg value that exists in all DTLs. If this
2298 * device's max DTL is not part of this scan (i.e. it is not in
2299 * the range (scn_min_txg, scn_max_txg]) then it is not eligible
2300 * for excision.
2301 */
2302 if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
2303 ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
2304 ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
2305 ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
2306 return (B_TRUE);
2307 }
2308 return (B_FALSE);
2309 }
2310
2311 /*
2312 * Reassess DTLs after a config change or scrub completion.
2313 */
2314 void
2315 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
2316 {
2317 spa_t *spa = vd->vdev_spa;
2318 avl_tree_t reftree;
2319 int minref;
2320
2321 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
2322
2323 for (int c = 0; c < vd->vdev_children; c++)
2324 vdev_dtl_reassess(vd->vdev_child[c], txg,
2325 scrub_txg, scrub_done);
2326
2327 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
2328 return;
2329
2330 if (vd->vdev_ops->vdev_op_leaf) {
2331 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
2332
2333 mutex_enter(&vd->vdev_dtl_lock);
2334
2335 /*
2336 * If we've completed a scan cleanly then determine
2337 * if this vdev should remove any DTLs. We only want to
2338 * excise regions on vdevs that were available during
2339 * the entire duration of this scan.
2340 */
2341 if (scrub_txg != 0 &&
2342 (spa->spa_scrub_started ||
2343 (scn != NULL && scn->scn_phys.scn_errors == 0)) &&
2344 vdev_dtl_should_excise(vd)) {
2345 /*
2346 * We completed a scrub up to scrub_txg. If we
2347 * did it without rebooting, then the scrub dtl
2348 * will be valid, so excise the old region and
2349 * fold in the scrub dtl. Otherwise, leave the
2350 * dtl as-is if there was an error.
2351 *
2352 * There's a little trick here: to excise the beginning
2353 * of the DTL_MISSING map, we put it into a reference
2354 * tree and then add a segment with refcnt -1 that
2355 * covers the range [0, scrub_txg). This means
2356 * that each txg in that range has refcnt -1 or 0.
2357 * We then add DTL_SCRUB with a refcnt of 2, so that
2358 * entries in the range [0, scrub_txg) will have a
2359 * positive refcnt -- either 1 or 2. We then convert
2360 * the reference tree into the new DTL_MISSING map.
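 *
 * A worked example (illustrative): suppose DTL_MISSING is
 * [10, 50), scrub_txg is 40, and DTL_SCRUB is [20, 30).
 * The reference tree then holds refcnt 0 over [10, 20),
 * 2 over [20, 30), 0 over [30, 40), and 1 over [40, 50);
 * generating with minref 1 yields [20, 30) and [40, 50) --
 * the txgs the scrub could not repair plus the txgs it
 * never covered.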
2361 */ 2362 space_reftree_create(&reftree); 2363 space_reftree_add_map(&reftree, 2364 vd->vdev_dtl[DTL_MISSING], 1); 2365 space_reftree_add_seg(&reftree, 0, scrub_txg, -1); 2366 space_reftree_add_map(&reftree, 2367 vd->vdev_dtl[DTL_SCRUB], 2); 2368 space_reftree_generate_map(&reftree, 2369 vd->vdev_dtl[DTL_MISSING], 1); 2370 space_reftree_destroy(&reftree); 2371 } 2372 range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); 2373 range_tree_walk(vd->vdev_dtl[DTL_MISSING], 2374 range_tree_add, vd->vdev_dtl[DTL_PARTIAL]); 2375 if (scrub_done) 2376 range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL); 2377 range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); 2378 if (!vdev_readable(vd)) 2379 range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); 2380 else 2381 range_tree_walk(vd->vdev_dtl[DTL_MISSING], 2382 range_tree_add, vd->vdev_dtl[DTL_OUTAGE]); 2383 2384 /* 2385 * If the vdev was resilvering and no longer has any 2386 * DTLs then reset its resilvering flag. 2387 */ 2388 if (vd->vdev_resilver_txg != 0 && 2389 range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && 2390 range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) 2391 vd->vdev_resilver_txg = 0; 2392 2393 mutex_exit(&vd->vdev_dtl_lock); 2394 2395 if (txg != 0) 2396 vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); 2397 return; 2398 } 2399 2400 mutex_enter(&vd->vdev_dtl_lock); 2401 for (int t = 0; t < DTL_TYPES; t++) { 2402 /* account for child's outage in parent's missing map */ 2403 int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; 2404 if (t == DTL_SCRUB) 2405 continue; /* leaf vdevs only */ 2406 if (t == DTL_PARTIAL) 2407 minref = 1; /* i.e. non-zero */ 2408 else if (vd->vdev_nparity != 0) 2409 minref = vd->vdev_nparity + 1; /* RAID-Z */ 2410 else 2411 minref = vd->vdev_children; /* any kind of mirror */ 2412 space_reftree_create(&reftree); 2413 for (int c = 0; c < vd->vdev_children; c++) { 2414 vdev_t *cvd = vd->vdev_child[c]; 2415 mutex_enter(&cvd->vdev_dtl_lock); 2416 space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1); 2417 mutex_exit(&cvd->vdev_dtl_lock); 2418 } 2419 space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref); 2420 space_reftree_destroy(&reftree); 2421 } 2422 mutex_exit(&vd->vdev_dtl_lock); 2423 } 2424 2425 int 2426 vdev_dtl_load(vdev_t *vd) 2427 { 2428 spa_t *spa = vd->vdev_spa; 2429 objset_t *mos = spa->spa_meta_objset; 2430 int error = 0; 2431 2432 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) { 2433 ASSERT(vdev_is_concrete(vd)); 2434 2435 error = space_map_open(&vd->vdev_dtl_sm, mos, 2436 vd->vdev_dtl_object, 0, -1ULL, 0); 2437 if (error) 2438 return (error); 2439 ASSERT(vd->vdev_dtl_sm != NULL); 2440 2441 mutex_enter(&vd->vdev_dtl_lock); 2442 2443 /* 2444 * Now that we've opened the space_map we need to update 2445 * the in-core DTL. 
2446 */ 2447 space_map_update(vd->vdev_dtl_sm); 2448 2449 error = space_map_load(vd->vdev_dtl_sm, 2450 vd->vdev_dtl[DTL_MISSING], SM_ALLOC); 2451 mutex_exit(&vd->vdev_dtl_lock); 2452 2453 return (error); 2454 } 2455 2456 for (int c = 0; c < vd->vdev_children; c++) { 2457 error = vdev_dtl_load(vd->vdev_child[c]); 2458 if (error != 0) 2459 break; 2460 } 2461 2462 return (error); 2463 } 2464 2465 void 2466 vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx) 2467 { 2468 spa_t *spa = vd->vdev_spa; 2469 2470 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx)); 2471 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 2472 zapobj, tx)); 2473 } 2474 2475 uint64_t 2476 vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx) 2477 { 2478 spa_t *spa = vd->vdev_spa; 2479 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA, 2480 DMU_OT_NONE, 0, tx); 2481 2482 ASSERT(zap != 0); 2483 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, 2484 zap, tx)); 2485 2486 return (zap); 2487 } 2488 2489 void 2490 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx) 2491 { 2492 if (vd->vdev_ops != &vdev_hole_ops && 2493 vd->vdev_ops != &vdev_missing_ops && 2494 vd->vdev_ops != &vdev_root_ops && 2495 !vd->vdev_top->vdev_removing) { 2496 if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) { 2497 vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx); 2498 } 2499 if (vd == vd->vdev_top && vd->vdev_top_zap == 0) { 2500 vd->vdev_top_zap = vdev_create_link_zap(vd, tx); 2501 } 2502 } 2503 for (uint64_t i = 0; i < vd->vdev_children; i++) { 2504 vdev_construct_zaps(vd->vdev_child[i], tx); 2505 } 2506 } 2507 2508 void 2509 vdev_dtl_sync(vdev_t *vd, uint64_t txg) 2510 { 2511 spa_t *spa = vd->vdev_spa; 2512 range_tree_t *rt = vd->vdev_dtl[DTL_MISSING]; 2513 objset_t *mos = spa->spa_meta_objset; 2514 range_tree_t *rtsync; 2515 dmu_tx_t *tx; 2516 uint64_t object = space_map_object(vd->vdev_dtl_sm); 2517 2518 ASSERT(vdev_is_concrete(vd)); 2519 ASSERT(vd->vdev_ops->vdev_op_leaf); 2520 2521 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2522 2523 if (vd->vdev_detached || vd->vdev_top->vdev_removing) { 2524 mutex_enter(&vd->vdev_dtl_lock); 2525 space_map_free(vd->vdev_dtl_sm, tx); 2526 space_map_close(vd->vdev_dtl_sm); 2527 vd->vdev_dtl_sm = NULL; 2528 mutex_exit(&vd->vdev_dtl_lock); 2529 2530 /* 2531 * We only destroy the leaf ZAP for detached leaves or for 2532 * removed log devices. Removed data devices handle leaf ZAP 2533 * cleanup later, once cancellation is no longer possible. 
2534 */ 2535 if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached || 2536 vd->vdev_top->vdev_islog)) { 2537 vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx); 2538 vd->vdev_leaf_zap = 0; 2539 } 2540 2541 dmu_tx_commit(tx); 2542 return; 2543 } 2544 2545 if (vd->vdev_dtl_sm == NULL) { 2546 uint64_t new_object; 2547 2548 new_object = space_map_alloc(mos, vdev_dtl_sm_blksz, tx); 2549 VERIFY3U(new_object, !=, 0); 2550 2551 VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object, 2552 0, -1ULL, 0)); 2553 ASSERT(vd->vdev_dtl_sm != NULL); 2554 } 2555 2556 rtsync = range_tree_create(NULL, NULL); 2557 2558 mutex_enter(&vd->vdev_dtl_lock); 2559 range_tree_walk(rt, range_tree_add, rtsync); 2560 mutex_exit(&vd->vdev_dtl_lock); 2561 2562 space_map_truncate(vd->vdev_dtl_sm, vdev_dtl_sm_blksz, tx); 2563 space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx); 2564 range_tree_vacate(rtsync, NULL, NULL); 2565 2566 range_tree_destroy(rtsync); 2567 2568 /* 2569 * If the object for the space map has changed then dirty 2570 * the top level so that we update the config. 2571 */ 2572 if (object != space_map_object(vd->vdev_dtl_sm)) { 2573 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, " 2574 "new object %llu", (u_longlong_t)txg, spa_name(spa), 2575 (u_longlong_t)object, 2576 (u_longlong_t)space_map_object(vd->vdev_dtl_sm)); 2577 vdev_config_dirty(vd->vdev_top); 2578 } 2579 2580 dmu_tx_commit(tx); 2581 2582 mutex_enter(&vd->vdev_dtl_lock); 2583 space_map_update(vd->vdev_dtl_sm); 2584 mutex_exit(&vd->vdev_dtl_lock); 2585 } 2586 2587 /* 2588 * Determine whether the specified vdev can be offlined/detached/removed 2589 * without losing data. 2590 */ 2591 boolean_t 2592 vdev_dtl_required(vdev_t *vd) 2593 { 2594 spa_t *spa = vd->vdev_spa; 2595 vdev_t *tvd = vd->vdev_top; 2596 uint8_t cant_read = vd->vdev_cant_read; 2597 boolean_t required; 2598 2599 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); 2600 2601 if (vd == spa->spa_root_vdev || vd == tvd) 2602 return (B_TRUE); 2603 2604 /* 2605 * Temporarily mark the device as unreadable, and then determine 2606 * whether this results in any DTL outages in the top-level vdev. 2607 * If not, we can safely offline/detach/remove the device. 2608 */ 2609 vd->vdev_cant_read = B_TRUE; 2610 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 2611 required = !vdev_dtl_empty(tvd, DTL_OUTAGE); 2612 vd->vdev_cant_read = cant_read; 2613 vdev_dtl_reassess(tvd, 0, 0, B_FALSE); 2614 2615 if (!required && zio_injection_enabled) 2616 required = !!zio_handle_device_injection(vd, NULL, ECHILD); 2617 2618 return (required); 2619 } 2620 2621 /* 2622 * Determine if resilver is needed, and if so the txg range. 
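 * (For example: for a mirror with one child that has outstanding
 * DTL_MISSING entries, the range reported here is the union, across
 * children, of each child's [vdev_dtl_min(), vdev_dtl_max()] span.)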
2623 */
2624 boolean_t
2625 vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
2626 {
2627 boolean_t needed = B_FALSE;
2628 uint64_t thismin = UINT64_MAX;
2629 uint64_t thismax = 0;
2630
2631 if (vd->vdev_children == 0) {
2632 mutex_enter(&vd->vdev_dtl_lock);
2633 if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
2634 vdev_writeable(vd)) {
2635
2636 thismin = vdev_dtl_min(vd);
2637 thismax = vdev_dtl_max(vd);
2638 needed = B_TRUE;
2639 }
2640 mutex_exit(&vd->vdev_dtl_lock);
2641 } else {
2642 for (int c = 0; c < vd->vdev_children; c++) {
2643 vdev_t *cvd = vd->vdev_child[c];
2644 uint64_t cmin, cmax;
2645
2646 if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
2647 thismin = MIN(thismin, cmin);
2648 thismax = MAX(thismax, cmax);
2649 needed = B_TRUE;
2650 }
2651 }
2652 }
2653
2654 if (needed && minp) {
2655 *minp = thismin;
2656 *maxp = thismax;
2657 }
2658 return (needed);
2659 }
2660
2661 /*
2662 * Gets the checkpoint space map object from the vdev's ZAP.
2663 * Returns the spacemap object, or 0 if it wasn't in the ZAP
2664 * or the ZAP doesn't exist yet.
2665 */
2666 int
2667 vdev_checkpoint_sm_object(vdev_t *vd)
2668 {
2669 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
2670 if (vd->vdev_top_zap == 0) {
2671 return (0);
2672 }
2673
2674 uint64_t sm_obj = 0;
2675 int err = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
2676 VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, &sm_obj);
2677
2678 ASSERT(err == 0 || err == ENOENT);
2679
2680 return (sm_obj);
2681 }
2682
2683 int
2684 vdev_load(vdev_t *vd)
2685 {
2686 int error = 0;
2687 /*
2688 * Recursively load all children.
2689 */
2690 for (int c = 0; c < vd->vdev_children; c++) {
2691 error = vdev_load(vd->vdev_child[c]);
2692 if (error != 0) {
2693 return (error);
2694 }
2695 }
2696
2697 vdev_set_deflate_ratio(vd);
2698
2699 /*
2700 * If this is a top-level vdev, initialize its metaslabs.
2701 */
2702 if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
2703 if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
2704 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2705 VDEV_AUX_CORRUPT_DATA);
2706 vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
2707 "asize=%llu", (u_longlong_t)vd->vdev_ashift,
2708 (u_longlong_t)vd->vdev_asize);
2709 return (SET_ERROR(ENXIO));
2710 } else if ((error = vdev_metaslab_init(vd, 0)) != 0) {
2711 vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
2712 "[error=%d]", error);
2713 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2714 VDEV_AUX_CORRUPT_DATA);
2715 return (error);
2716 }
2717
2718 uint64_t checkpoint_sm_obj = vdev_checkpoint_sm_object(vd);
2719 if (checkpoint_sm_obj != 0) {
2720 objset_t *mos = spa_meta_objset(vd->vdev_spa);
2721 ASSERT(vd->vdev_asize != 0);
2722 ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
2723
2724 if ((error = space_map_open(&vd->vdev_checkpoint_sm,
2725 mos, checkpoint_sm_obj, 0, vd->vdev_asize,
2726 vd->vdev_ashift))) {
2727 vdev_dbgmsg(vd, "vdev_load: space_map_open "
2728 "failed for checkpoint spacemap (obj %llu) "
2729 "[error=%d]",
2730 (u_longlong_t)checkpoint_sm_obj, error);
2731 return (error);
2732 }
2733 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2734 space_map_update(vd->vdev_checkpoint_sm);
2735
2736 /*
2737 * Since the checkpoint_sm contains free entries
2738 * exclusively we can use sm_alloc to indicate the
2739 * cumulative checkpointed space that has been freed.
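 * For example (illustrative): after 1GB of checkpointed space
 * has been freed on this vdev, sm_alloc sits at -1GB, so
 * vs_checkpoint_space below reports a positive 1GB.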
2740 */
2741 vd->vdev_stat.vs_checkpoint_space =
2742 -vd->vdev_checkpoint_sm->sm_alloc;
2743 vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
2744 vd->vdev_stat.vs_checkpoint_space;
2745 }
2746 }
2747
2748 /*
2749 * If this is a leaf vdev, load its DTL.
2750 */
2751 if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
2752 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2753 VDEV_AUX_CORRUPT_DATA);
2754 vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
2755 "[error=%d]", error);
2756 return (error);
2757 }
2758
2759 uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
2760 if (obsolete_sm_object != 0) {
2761 objset_t *mos = vd->vdev_spa->spa_meta_objset;
2762 ASSERT(vd->vdev_asize != 0);
2763 ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
2764
2765 if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
2766 obsolete_sm_object, 0, vd->vdev_asize, 0))) {
2767 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2768 VDEV_AUX_CORRUPT_DATA);
2769 vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
2770 "obsolete spacemap (obj %llu) [error=%d]",
2771 (u_longlong_t)obsolete_sm_object, error);
2772 return (error);
2773 }
2774 space_map_update(vd->vdev_obsolete_sm);
2775 }
2776
2777 return (0);
2778 }
2779
2780 /*
2781 * The special vdev case is used for hot spares and l2cache devices. Its
2782 * sole purpose is to set the vdev state for the associated vdev. To do this,
2783 * we make sure that we can open the underlying device, then try to read the
2784 * label, and make sure that the label is sane and that it hasn't been
2785 * repurposed to another pool.
2786 */
2787 int
2788 vdev_validate_aux(vdev_t *vd)
2789 {
2790 nvlist_t *label;
2791 uint64_t guid, version;
2792 uint64_t state;
2793
2794 if (!vdev_readable(vd))
2795 return (0);
2796
2797 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
2798 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2799 VDEV_AUX_CORRUPT_DATA);
2800 return (-1);
2801 }
2802
2803 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
2804 !SPA_VERSION_IS_SUPPORTED(version) ||
2805 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
2806 guid != vd->vdev_guid ||
2807 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
2808 vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
2809 VDEV_AUX_CORRUPT_DATA);
2810 nvlist_free(label);
2811 return (-1);
2812 }
2813
2814 /*
2815 * We don't actually check the pool state here. If it's in fact in
2816 * use by another pool, we update this fact on the fly when requested.
2817 */
2818 nvlist_free(label);
2819 return (0);
2820 }
2821
2822 /*
2823 * Free the objects used to store this vdev's spacemaps, and the array
2824 * that points to them.
2825 */ 2826 void 2827 vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx) 2828 { 2829 if (vd->vdev_ms_array == 0) 2830 return; 2831 2832 objset_t *mos = vd->vdev_spa->spa_meta_objset; 2833 uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift; 2834 size_t array_bytes = array_count * sizeof (uint64_t); 2835 uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP); 2836 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0, 2837 array_bytes, smobj_array, 0)); 2838 2839 for (uint64_t i = 0; i < array_count; i++) { 2840 uint64_t smobj = smobj_array[i]; 2841 if (smobj == 0) 2842 continue; 2843 2844 space_map_free_obj(mos, smobj, tx); 2845 } 2846 2847 kmem_free(smobj_array, array_bytes); 2848 VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx)); 2849 vd->vdev_ms_array = 0; 2850 } 2851 2852 static void 2853 vdev_remove_empty_log(vdev_t *vd, uint64_t txg) 2854 { 2855 spa_t *spa = vd->vdev_spa; 2856 2857 ASSERT(vd->vdev_islog); 2858 ASSERT(vd == vd->vdev_top); 2859 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 2860 2861 if (vd->vdev_ms != NULL) { 2862 metaslab_group_t *mg = vd->vdev_mg; 2863 2864 metaslab_group_histogram_verify(mg); 2865 metaslab_class_histogram_verify(mg->mg_class); 2866 2867 for (int m = 0; m < vd->vdev_ms_count; m++) { 2868 metaslab_t *msp = vd->vdev_ms[m]; 2869 2870 if (msp == NULL || msp->ms_sm == NULL) 2871 continue; 2872 2873 mutex_enter(&msp->ms_lock); 2874 /* 2875 * If the metaslab was not loaded when the vdev 2876 * was removed then the histogram accounting may 2877 * not be accurate. Update the histogram information 2878 * here so that we ensure that the metaslab group 2879 * and metaslab class are up-to-date. 2880 */ 2881 metaslab_group_histogram_remove(mg, msp); 2882 2883 VERIFY0(space_map_allocated(msp->ms_sm)); 2884 space_map_close(msp->ms_sm); 2885 msp->ms_sm = NULL; 2886 mutex_exit(&msp->ms_lock); 2887 } 2888 2889 if (vd->vdev_checkpoint_sm != NULL) { 2890 ASSERT(spa_has_checkpoint(spa)); 2891 space_map_close(vd->vdev_checkpoint_sm); 2892 vd->vdev_checkpoint_sm = NULL; 2893 } 2894 2895 metaslab_group_histogram_verify(mg); 2896 metaslab_class_histogram_verify(mg->mg_class); 2897 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) 2898 ASSERT0(mg->mg_histogram[i]); 2899 } 2900 2901 dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 2902 2903 vdev_destroy_spacemaps(vd, tx); 2904 if (vd->vdev_top_zap != 0) { 2905 vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx); 2906 vd->vdev_top_zap = 0; 2907 } 2908 2909 dmu_tx_commit(tx); 2910 } 2911 2912 void 2913 vdev_sync_done(vdev_t *vd, uint64_t txg) 2914 { 2915 metaslab_t *msp; 2916 boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); 2917 2918 ASSERT(vdev_is_concrete(vd)); 2919 2920 while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) 2921 != NULL) 2922 metaslab_sync_done(msp, txg); 2923 2924 if (reassess) 2925 metaslab_sync_reassess(vd->vdev_mg); 2926 } 2927 2928 void 2929 vdev_sync(vdev_t *vd, uint64_t txg) 2930 { 2931 spa_t *spa = vd->vdev_spa; 2932 vdev_t *lvd; 2933 metaslab_t *msp; 2934 dmu_tx_t *tx; 2935 2936 if (range_tree_space(vd->vdev_obsolete_segments) > 0) { 2937 dmu_tx_t *tx; 2938 2939 ASSERT(vd->vdev_removing || 2940 vd->vdev_ops == &vdev_indirect_ops); 2941 2942 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2943 vdev_indirect_sync_obsolete(vd, tx); 2944 dmu_tx_commit(tx); 2945 2946 /* 2947 * If the vdev is indirect, it can't have dirty 2948 * metaslabs or DTLs. 
2949 */ 2950 if (vd->vdev_ops == &vdev_indirect_ops) { 2951 ASSERT(txg_list_empty(&vd->vdev_ms_list, txg)); 2952 ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg)); 2953 return; 2954 } 2955 } 2956 2957 ASSERT(vdev_is_concrete(vd)); 2958 2959 if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 && 2960 !vd->vdev_removing) { 2961 ASSERT(vd == vd->vdev_top); 2962 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 2963 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 2964 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, 2965 DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); 2966 ASSERT(vd->vdev_ms_array != 0); 2967 vdev_config_dirty(vd); 2968 dmu_tx_commit(tx); 2969 } 2970 2971 while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { 2972 metaslab_sync(msp, txg); 2973 (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); 2974 } 2975 2976 while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) 2977 vdev_dtl_sync(lvd, txg); 2978 2979 /* 2980 * If this is an empty log device being removed, destroy the 2981 * metadata associated with it. 2982 */ 2983 if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) 2984 vdev_remove_empty_log(vd, txg); 2985 2986 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); 2987 } 2988 2989 uint64_t 2990 vdev_psize_to_asize(vdev_t *vd, uint64_t psize) 2991 { 2992 return (vd->vdev_ops->vdev_op_asize(vd, psize)); 2993 } 2994 2995 /* 2996 * Mark the given vdev faulted. A faulted vdev behaves as if the device could 2997 * not be opened, and no I/O is attempted. 2998 */ 2999 int 3000 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) 3001 { 3002 vdev_t *vd, *tvd; 3003 3004 spa_vdev_state_enter(spa, SCL_NONE); 3005 3006 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3007 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3008 3009 if (!vd->vdev_ops->vdev_op_leaf) 3010 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3011 3012 tvd = vd->vdev_top; 3013 3014 /* 3015 * We don't directly use the aux state here, but if we do a 3016 * vdev_reopen(), we need this value to be present to remember why we 3017 * were faulted. 3018 */ 3019 vd->vdev_label_aux = aux; 3020 3021 /* 3022 * Faulted state takes precedence over degraded. 3023 */ 3024 vd->vdev_delayed_close = B_FALSE; 3025 vd->vdev_faulted = 1ULL; 3026 vd->vdev_degraded = 0ULL; 3027 vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); 3028 3029 /* 3030 * If this device has the only valid copy of the data, then 3031 * back off and simply mark the vdev as degraded instead. 3032 */ 3033 if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { 3034 vd->vdev_degraded = 1ULL; 3035 vd->vdev_faulted = 0ULL; 3036 3037 /* 3038 * If we reopen the device and it's not dead, only then do we 3039 * mark it degraded. 3040 */ 3041 vdev_reopen(tvd); 3042 3043 if (vdev_readable(vd)) 3044 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); 3045 } 3046 3047 return (spa_vdev_state_exit(spa, vd, 0)); 3048 } 3049 3050 /* 3051 * Mark the given vdev degraded. A degraded vdev is purely an indication to the 3052 * user that something is wrong. The vdev continues to operate as normal as far 3053 * as I/O is concerned. 
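 * (Contrast with vdev_fault() above: a faulted vdev stops servicing
 * I/O entirely, while a degraded vdev -- e.g. one accumulating
 * checksum errors -- keeps servicing reads and writes.)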
3054 */ 3055 int 3056 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) 3057 { 3058 vdev_t *vd; 3059 3060 spa_vdev_state_enter(spa, SCL_NONE); 3061 3062 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3063 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3064 3065 if (!vd->vdev_ops->vdev_op_leaf) 3066 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3067 3068 /* 3069 * If the vdev is already faulted, then don't do anything. 3070 */ 3071 if (vd->vdev_faulted || vd->vdev_degraded) 3072 return (spa_vdev_state_exit(spa, NULL, 0)); 3073 3074 vd->vdev_degraded = 1ULL; 3075 if (!vdev_is_dead(vd)) 3076 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, 3077 aux); 3078 3079 return (spa_vdev_state_exit(spa, vd, 0)); 3080 } 3081 3082 /* 3083 * Online the given vdev. 3084 * 3085 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached 3086 * spare device should be detached when the device finishes resilvering. 3087 * Second, the online should be treated like a 'test' online case, so no FMA 3088 * events are generated if the device fails to open. 3089 */ 3090 int 3091 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) 3092 { 3093 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; 3094 boolean_t wasoffline; 3095 vdev_state_t oldstate; 3096 3097 spa_vdev_state_enter(spa, SCL_NONE); 3098 3099 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3100 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3101 3102 if (!vd->vdev_ops->vdev_op_leaf) 3103 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3104 3105 wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline); 3106 oldstate = vd->vdev_state; 3107 3108 tvd = vd->vdev_top; 3109 vd->vdev_offline = B_FALSE; 3110 vd->vdev_tmpoffline = B_FALSE; 3111 vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); 3112 vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); 3113 3114 /* XXX - L2ARC 1.0 does not support expansion */ 3115 if (!vd->vdev_aux) { 3116 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3117 pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND); 3118 } 3119 3120 vdev_reopen(tvd); 3121 vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; 3122 3123 if (!vd->vdev_aux) { 3124 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) 3125 pvd->vdev_expanding = B_FALSE; 3126 } 3127 3128 if (newstate) 3129 *newstate = vd->vdev_state; 3130 if ((flags & ZFS_ONLINE_UNSPARE) && 3131 !vdev_is_dead(vd) && vd->vdev_parent && 3132 vd->vdev_parent->vdev_ops == &vdev_spare_ops && 3133 vd->vdev_parent->vdev_child[0] == vd) 3134 vd->vdev_unspare = B_TRUE; 3135 3136 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { 3137 3138 /* XXX - L2ARC 1.0 does not support expansion */ 3139 if (vd->vdev_aux) 3140 return (spa_vdev_state_exit(spa, vd, ENOTSUP)); 3141 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 3142 } 3143 3144 /* Restart initializing if necessary */ 3145 mutex_enter(&vd->vdev_initialize_lock); 3146 if (vdev_writeable(vd) && 3147 vd->vdev_initialize_thread == NULL && 3148 vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) { 3149 (void) vdev_initialize(vd); 3150 } 3151 mutex_exit(&vd->vdev_initialize_lock); 3152 3153 if (wasoffline || 3154 (oldstate < VDEV_STATE_DEGRADED && 3155 vd->vdev_state >= VDEV_STATE_DEGRADED)) 3156 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE); 3157 3158 return (spa_vdev_state_exit(spa, vd, 0)); 3159 } 3160 3161 static int 3162 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) 3163 { 3164 vdev_t *vd, *tvd; 3165 int error = 0; 3166 uint64_t 
generation; 3167 metaslab_group_t *mg; 3168 3169 top: 3170 spa_vdev_state_enter(spa, SCL_ALLOC); 3171 3172 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 3173 return (spa_vdev_state_exit(spa, NULL, ENODEV)); 3174 3175 if (!vd->vdev_ops->vdev_op_leaf) 3176 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 3177 3178 tvd = vd->vdev_top; 3179 mg = tvd->vdev_mg; 3180 generation = spa->spa_config_generation + 1; 3181 3182 /* 3183 * If the device isn't already offline, try to offline it. 3184 */ 3185 if (!vd->vdev_offline) { 3186 /* 3187 * If this device has the only valid copy of some data, 3188 * don't allow it to be offlined. Log devices are always 3189 * expendable. 3190 */ 3191 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3192 vdev_dtl_required(vd)) 3193 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 3194 3195 /* 3196 * If the top-level is a slog and it has had allocations 3197 * then proceed. We check that the vdev's metaslab group 3198 * is not NULL since it's possible that we may have just 3199 * added this vdev but not yet initialized its metaslabs. 3200 */ 3201 if (tvd->vdev_islog && mg != NULL) { 3202 /* 3203 * Prevent any future allocations. 3204 */ 3205 metaslab_group_passivate(mg); 3206 (void) spa_vdev_state_exit(spa, vd, 0); 3207 3208 error = spa_reset_logs(spa); 3209 3210 /* 3211 * If the log device was successfully reset but has 3212 * checkpointed data, do not offline it. 3213 */ 3214 if (error == 0 && 3215 tvd->vdev_checkpoint_sm != NULL) { 3216 ASSERT3U(tvd->vdev_checkpoint_sm->sm_alloc, 3217 !=, 0); 3218 error = ZFS_ERR_CHECKPOINT_EXISTS; 3219 } 3220 3221 spa_vdev_state_enter(spa, SCL_ALLOC); 3222 3223 /* 3224 * Check to see if the config has changed. 3225 */ 3226 if (error || generation != spa->spa_config_generation) { 3227 metaslab_group_activate(mg); 3228 if (error) 3229 return (spa_vdev_state_exit(spa, 3230 vd, error)); 3231 (void) spa_vdev_state_exit(spa, vd, 0); 3232 goto top; 3233 } 3234 ASSERT0(tvd->vdev_stat.vs_alloc); 3235 } 3236 3237 /* 3238 * Offline this device and reopen its top-level vdev. 3239 * If the top-level vdev is a log device then just offline 3240 * it. Otherwise, if this action results in the top-level 3241 * vdev becoming unusable, undo it and fail the request. 3242 */ 3243 vd->vdev_offline = B_TRUE; 3244 vdev_reopen(tvd); 3245 3246 if (!tvd->vdev_islog && vd->vdev_aux == NULL && 3247 vdev_is_dead(tvd)) { 3248 vd->vdev_offline = B_FALSE; 3249 vdev_reopen(tvd); 3250 return (spa_vdev_state_exit(spa, NULL, EBUSY)); 3251 } 3252 3253 /* 3254 * Add the device back into the metaslab rotor so that 3255 * once we online the device it's open for business. 3256 */ 3257 if (tvd->vdev_islog && mg != NULL) 3258 metaslab_group_activate(mg); 3259 } 3260 3261 vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); 3262 3263 return (spa_vdev_state_exit(spa, vd, 0)); 3264 } 3265 3266 int 3267 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) 3268 { 3269 int error; 3270 3271 mutex_enter(&spa->spa_vdev_top_lock); 3272 error = vdev_offline_locked(spa, guid, flags); 3273 mutex_exit(&spa->spa_vdev_top_lock); 3274 3275 return (error); 3276 } 3277 3278 /* 3279 * Clear the error counts associated with this vdev. Unlike vdev_online() and 3280 * vdev_offline(), we assume the spa config is locked. We also clear all 3281 * children. If 'vd' is NULL, then the user wants to clear all vdevs. 
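 * (This is the path used by administrative clears such as "zpool clear":
 * the error counters are zeroed recursively and any faulted or degraded
 * state is cleared below, assuming the device is reachable again.)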
3282 */
3283 void
3284 vdev_clear(spa_t *spa, vdev_t *vd)
3285 {
3286 vdev_t *rvd = spa->spa_root_vdev;
3287
3288 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3289
3290 if (vd == NULL)
3291 vd = rvd;
3292
3293 vd->vdev_stat.vs_read_errors = 0;
3294 vd->vdev_stat.vs_write_errors = 0;
3295 vd->vdev_stat.vs_checksum_errors = 0;
3296
3297 for (int c = 0; c < vd->vdev_children; c++)
3298 vdev_clear(spa, vd->vdev_child[c]);
3299
3300 /*
3301 * It makes no sense to "clear" an indirect vdev.
3302 */
3303 if (!vdev_is_concrete(vd))
3304 return;
3305
3306 /*
3307 * If we're in the FAULTED state or have experienced failed I/O, then
3308 * clear the persistent state and attempt to reopen the device. We
3309 * also mark the vdev config dirty, so that the new faulted state is
3310 * written out to disk.
3311 */
3312 if (vd->vdev_faulted || vd->vdev_degraded ||
3313 !vdev_readable(vd) || !vdev_writeable(vd)) {
3314
3315 /*
3316 * When reopening in response to a clear event, it may be due to
3317 * a fmadm repair request. In this case, if the device is
3318 * still broken, we still want to post the ereport again.
3319 */
3320 vd->vdev_forcefault = B_TRUE;
3321
3322 vd->vdev_faulted = vd->vdev_degraded = 0ULL;
3323 vd->vdev_cant_read = B_FALSE;
3324 vd->vdev_cant_write = B_FALSE;
3325
3326 vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
3327
3328 vd->vdev_forcefault = B_FALSE;
3329
3330 if (vd != rvd && vdev_writeable(vd->vdev_top))
3331 vdev_state_dirty(vd->vdev_top);
3332
3333 if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
3334 spa_async_request(spa, SPA_ASYNC_RESILVER);
3335
3336 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
3337 }
3338
3339 /*
3340 * When clearing a FMA-diagnosed fault, we always want to
3341 * unspare the device, as we assume that the original spare was
3342 * done in response to the FMA fault.
3343 */
3344 if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
3345 vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
3346 vd->vdev_parent->vdev_child[0] == vd)
3347 vd->vdev_unspare = B_TRUE;
3348 }
3349
3350 boolean_t
3351 vdev_is_dead(vdev_t *vd)
3352 {
3353 /*
3354 * Holes and missing devices are always considered "dead".
3355 * This simplifies the code since we don't have to check for
3356 * these types of devices in the various code paths.
3357 * Instead we rely on the fact that we skip over dead devices
3358 * before issuing I/O to them.
3359 */
3360 return (vd->vdev_state < VDEV_STATE_DEGRADED ||
3361 vd->vdev_ops == &vdev_hole_ops ||
3362 vd->vdev_ops == &vdev_missing_ops);
3363 }
3364
3365 boolean_t
3366 vdev_readable(vdev_t *vd)
3367 {
3368 return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
3369 }
3370
3371 boolean_t
3372 vdev_writeable(vdev_t *vd)
3373 {
3374 return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
3375 vdev_is_concrete(vd));
3376 }
3377
3378 boolean_t
3379 vdev_allocatable(vdev_t *vd)
3380 {
3381 uint64_t state = vd->vdev_state;
3382
3383 /*
3384 * We currently allow allocations from vdevs which may be in the
3385 * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
3386 * fails to reopen then we'll catch it later when we're holding
3387 * the proper locks. Note that we have to get the vdev state
3388 * in a local variable because although it changes atomically,
3389 * we're asking two separate questions about it.
3390 */
3391 return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
3392 !vd->vdev_cant_write && vdev_is_concrete(vd) &&
3393 vd->vdev_mg->mg_initialized);
3394 }
3395
3396 boolean_t
3397 vdev_accessible(vdev_t *vd, zio_t *zio)
3398 {
3399 ASSERT(zio->io_vd == vd);
3400
3401 if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
3402 return (B_FALSE);
3403
3404 if (zio->io_type == ZIO_TYPE_READ)
3405 return (!vd->vdev_cant_read);
3406
3407 if (zio->io_type == ZIO_TYPE_WRITE)
3408 return (!vd->vdev_cant_write);
3409
3410 return (B_TRUE);
3411 }
3412
3413 boolean_t
3414 vdev_is_spacemap_addressable(vdev_t *vd)
3415 {
3416 if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
3417 return (B_TRUE);
3418
3419 /*
3420 * If double-word space map entries are not enabled we assume
3421 * 47 bits of the space map entry are dedicated to the entry's
3422 * offset (see SM_OFFSET_BITS in space_map.h). We then use that
3423 * to calculate the maximum address that can be described by a
3424 * space map entry for the given device.
3425 */
3426 uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
3427
3428 if (shift >= 63) /* detect potential overflow */
3429 return (B_TRUE);
3430
3431 return (vd->vdev_asize < (1ULL << shift));
3432 }
3433
3434 /*
3435 * Get statistics for the given vdev.
3436 */
3437 void
3438 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
3439 {
3440 spa_t *spa = vd->vdev_spa;
3441 vdev_t *rvd = spa->spa_root_vdev;
3442 vdev_t *tvd = vd->vdev_top;
3443
3444 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3445
3446 mutex_enter(&vd->vdev_stat_lock);
3447 bcopy(&vd->vdev_stat, vs, sizeof (*vs));
3448 vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
3449 vs->vs_state = vd->vdev_state;
3450 vs->vs_rsize = vdev_get_min_asize(vd);
3451 if (vd->vdev_ops->vdev_op_leaf) {
3452 vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
3453 /*
3454 * Report initializing progress. Since we don't have the
3455 * initializing locks held, this is only an estimate (although a
3456 * fairly accurate one).
3457 */
3458 vs->vs_initialize_bytes_done = vd->vdev_initialize_bytes_done;
3459 vs->vs_initialize_bytes_est = vd->vdev_initialize_bytes_est;
3460 vs->vs_initialize_state = vd->vdev_initialize_state;
3461 vs->vs_initialize_action_time = vd->vdev_initialize_action_time;
3462 }
3463 /*
3464 * Report expandable space on top-level, non-auxiliary devices only.
3465 * The expandable space is reported in terms of metaslab sized units
3466 * since that determines how much space the pool can expand.
3467 */
3468 if (vd->vdev_aux == NULL && tvd != NULL) {
3469 vs->vs_esize = P2ALIGN(vd->vdev_max_asize - vd->vdev_asize -
3470 spa->spa_bootsize, 1ULL << tvd->vdev_ms_shift);
3471 }
3472 if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
3473 vdev_is_concrete(vd)) {
3474 vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
3475 }
3476
3477 /*
3478 * If we're getting stats on the root vdev, aggregate the I/O counts
3479 * over all top-level vdevs (i.e. the direct children of the root).
3480 */
3481 if (vd == rvd) {
3482 for (int c = 0; c < rvd->vdev_children; c++) {
3483 vdev_t *cvd = rvd->vdev_child[c];
3484 vdev_stat_t *cvs = &cvd->vdev_stat;
3485
3486 for (int t = 0; t < ZIO_TYPES; t++) {
3487 vs->vs_ops[t] += cvs->vs_ops[t];
3488 vs->vs_bytes[t] += cvs->vs_bytes[t];
3489 }
3490 cvs->vs_scan_removing = cvd->vdev_removing;
3491 }
3492 }
3493 mutex_exit(&vd->vdev_stat_lock);
3494 }
3495
3496 void
3497 vdev_clear_stats(vdev_t *vd)
3498 {
3499 mutex_enter(&vd->vdev_stat_lock);
3500 vd->vdev_stat.vs_space = 0;
3501 vd->vdev_stat.vs_dspace = 0;
3502 vd->vdev_stat.vs_alloc = 0;
3503 mutex_exit(&vd->vdev_stat_lock);
3504 }
3505
3506 void
3507 vdev_scan_stat_init(vdev_t *vd)
3508 {
3509 vdev_stat_t *vs = &vd->vdev_stat;
3510
3511 for (int c = 0; c < vd->vdev_children; c++)
3512 vdev_scan_stat_init(vd->vdev_child[c]);
3513
3514 mutex_enter(&vd->vdev_stat_lock);
3515 vs->vs_scan_processed = 0;
3516 mutex_exit(&vd->vdev_stat_lock);
3517 }
3518
3519 void
3520 vdev_stat_update(zio_t *zio, uint64_t psize)
3521 {
3522 spa_t *spa = zio->io_spa;
3523 vdev_t *rvd = spa->spa_root_vdev;
3524 vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
3525 vdev_t *pvd;
3526 uint64_t txg = zio->io_txg;
3527 vdev_stat_t *vs = &vd->vdev_stat;
3528 zio_type_t type = zio->io_type;
3529 int flags = zio->io_flags;
3530
3531 /*
3532 * If this i/o is a gang leader, it didn't do any actual work.
3533 */
3534 if (zio->io_gang_tree)
3535 return;
3536
3537 if (zio->io_error == 0) {
3538 /*
3539 * If this is a root i/o, don't count it -- we've already
3540 * counted the top-level vdevs, and vdev_get_stats() will
3541 * aggregate them when asked. This reduces contention on
3542 * the root vdev_stat_lock and implicitly handles blocks
3543 * that compress away to holes, for which there is no i/o.
3544 * (Holes never create vdev children, so all the counters
3545 * remain zero, which is what we want.)
3546 *
3547 * Note: this only applies to successful i/o (io_error == 0)
3548 * because unlike i/o counts, errors are not additive.
3549 * When reading a ditto block, for example, failure of
3550 * one top-level vdev does not imply a root-level error.
3551 */
3552 if (vd == rvd)
3553 return;
3554
3555 ASSERT(vd == zio->io_vd);
3556
3557 if (flags & ZIO_FLAG_IO_BYPASS)
3558 return;
3559
3560 mutex_enter(&vd->vdev_stat_lock);
3561
3562 if (flags & ZIO_FLAG_IO_REPAIR) {
3563 if (flags & ZIO_FLAG_SCAN_THREAD) {
3564 dsl_scan_phys_t *scn_phys =
3565 &spa->spa_dsl_pool->dp_scan->scn_phys;
3566 uint64_t *processed = &scn_phys->scn_processed;
3567
3568 /* XXX cleanup? */
3569 if (vd->vdev_ops->vdev_op_leaf)
3570 atomic_add_64(processed, psize);
3571 vs->vs_scan_processed += psize;
3572 }
3573
3574 if (flags & ZIO_FLAG_SELF_HEAL)
3575 vs->vs_self_healed += psize;
3576 }
3577
3578 vs->vs_ops[type]++;
3579 vs->vs_bytes[type] += psize;
3580
3581 mutex_exit(&vd->vdev_stat_lock);
3582 return;
3583 }
3584
3585 if (flags & ZIO_FLAG_SPECULATIVE)
3586 return;
3587
3588 /*
3589 * If this is an I/O error that is going to be retried, then ignore the
3590 * error. Otherwise, the user may interpret B_FAILFAST I/O errors as
3591 * hard errors, when in reality they can happen for any number of
3592 * innocuous reasons (bus resets, MPxIO link failure, etc.).
3593 */
3594 if (zio->io_error == EIO &&
3595 !(zio->io_flags & ZIO_FLAG_IO_RETRY))
3596 return;
3597
3598 /*
3599 * Intent log writes won't propagate their error to the root
3600 * I/O so don't mark these types of failures as pool-level
3601 * errors.
3602 */
3603 if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
3604 return;
3605
3606 mutex_enter(&vd->vdev_stat_lock);
3607 if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
3608 if (zio->io_error == ECKSUM)
3609 vs->vs_checksum_errors++;
3610 else
3611 vs->vs_read_errors++;
3612 }
3613 if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
3614 vs->vs_write_errors++;
3615 mutex_exit(&vd->vdev_stat_lock);
3616
3617 if (spa->spa_load_state == SPA_LOAD_NONE &&
3618 type == ZIO_TYPE_WRITE && txg != 0 &&
3619 (!(flags & ZIO_FLAG_IO_REPAIR) ||
3620 (flags & ZIO_FLAG_SCAN_THREAD) ||
3621 spa->spa_claiming)) {
3622 /*
3623 * This is either a normal write (not a repair), or it's
3624 * a repair induced by the scrub thread, or it's a repair
3625 * made by zil_claim() during spa_load() in the first txg.
3626 * In the normal case, we commit the DTL change in the same
3627 * txg as the block was born. In the scrub-induced repair
3628 * case, we know that scrubs run in first-pass syncing context,
3629 * so we commit the DTL change in spa_syncing_txg(spa).
3630 * In the zil_claim() case, we commit in spa_first_txg(spa).
3631 *
3632 * We currently do not make DTL entries for failed spontaneous
3633 * self-healing writes triggered by normal (non-scrubbing)
3634 * reads, because we have no transactional context in which to
3635 * do so -- and it's not clear that it'd be desirable anyway.
3636 */
3637 if (vd->vdev_ops->vdev_op_leaf) {
3638 uint64_t commit_txg = txg;
3639 if (flags & ZIO_FLAG_SCAN_THREAD) {
3640 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
3641 ASSERT(spa_sync_pass(spa) == 1);
3642 vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
3643 commit_txg = spa_syncing_txg(spa);
3644 } else if (spa->spa_claiming) {
3645 ASSERT(flags & ZIO_FLAG_IO_REPAIR);
3646 commit_txg = spa_first_txg(spa);
3647 }
3648 ASSERT(commit_txg >= spa_syncing_txg(spa));
3649 if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
3650 return;
3651 for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
3652 vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
3653 vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
3654 }
3655 if (vd != rvd)
3656 vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
3657 }
3658 }
3659
3660 /*
3661 * Update the in-core space usage stats for this vdev, its metaslab class,
3662 * and the root vdev.
3663 */
3664 void
3665 vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
3666 int64_t space_delta)
3667 {
3668 int64_t dspace_delta = space_delta;
3669 spa_t *spa = vd->vdev_spa;
3670 vdev_t *rvd = spa->spa_root_vdev;
3671 metaslab_group_t *mg = vd->vdev_mg;
3672 metaslab_class_t *mc = mg ? mg->mg_class : NULL;
3673
3674 ASSERT(vd == vd->vdev_top);
3675
3676 /*
3677 * Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
3678 * factor. We must calculate this here and not at the root vdev
3679 * because the root vdev's psize-to-asize is simply the max of its
3680 * children's, thus not accurate enough for us.
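 * For example (an illustrative sketch, assuming vdev_deflate_ratio is
 * derived from how 128K of psize maps to asize, in 512-byte units, as
 * done by vdev_set_deflate_ratio()): a RAID-Z group that expands 128K
 * of psize into 160K of asize has a deflate ratio of about 409/512,
 * so 160K of raw space_delta is counted as roughly 128K of dspace below.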
3681 */
3682 ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
3683 ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
3684 dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
3685 vd->vdev_deflate_ratio;
3686
3687 mutex_enter(&vd->vdev_stat_lock);
3688 vd->vdev_stat.vs_alloc += alloc_delta;
3689 vd->vdev_stat.vs_space += space_delta;
3690 vd->vdev_stat.vs_dspace += dspace_delta;
3691 mutex_exit(&vd->vdev_stat_lock);
3692
3693 if (mc == spa_normal_class(spa)) {
3694 mutex_enter(&rvd->vdev_stat_lock);
3695 rvd->vdev_stat.vs_alloc += alloc_delta;
3696 rvd->vdev_stat.vs_space += space_delta;
3697 rvd->vdev_stat.vs_dspace += dspace_delta;
3698 mutex_exit(&rvd->vdev_stat_lock);
3699 }
3700
3701 if (mc != NULL) {
3702 ASSERT(rvd == vd->vdev_parent);
3703 ASSERT(vd->vdev_ms_count != 0);
3704
3705 metaslab_class_space_update(mc,
3706 alloc_delta, defer_delta, space_delta, dspace_delta);
3707 }
3708 }
3709
3710 /*
3711 * Mark a top-level vdev's config as dirty, placing it on the dirty list
3712 * so that it will be written out next time the vdev configuration is synced.
3713 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
3714 */
3715 void
3716 vdev_config_dirty(vdev_t *vd)
3717 {
3718 spa_t *spa = vd->vdev_spa;
3719 vdev_t *rvd = spa->spa_root_vdev;
3720 int c;
3721
3722 ASSERT(spa_writeable(spa));
3723
3724 /*
3725 * If this is an aux vdev (as with l2cache and spare devices), then we
3726 * update the vdev config manually and set the sync flag.
3727 */
3728 if (vd->vdev_aux != NULL) {
3729 spa_aux_vdev_t *sav = vd->vdev_aux;
3730 nvlist_t **aux;
3731 uint_t naux;
3732
3733 for (c = 0; c < sav->sav_count; c++) {
3734 if (sav->sav_vdevs[c] == vd)
3735 break;
3736 }
3737
3738 if (c == sav->sav_count) {
3739 /*
3740 * We're being removed. There's nothing more to do.
3741 */
3742 ASSERT(sav->sav_sync == B_TRUE);
3743 return;
3744 }
3745
3746 sav->sav_sync = B_TRUE;
3747
3748 if (nvlist_lookup_nvlist_array(sav->sav_config,
3749 ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
3750 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
3751 ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
3752 }
3753
3754 ASSERT(c < naux);
3755
3756 /*
3757 * Setting the nvlist in the middle of the array is a little
3758 * sketchy, but it will work.
3759 */
3760 nvlist_free(aux[c]);
3761 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
3762
3763 return;
3764 }
3765
3766 /*
3767 * The dirty list is protected by the SCL_CONFIG lock. The caller
3768 * must either hold SCL_CONFIG as writer, or must be the sync thread
3769 * (which holds SCL_CONFIG as reader). There's only one sync thread,
3770 * so this is sufficient to ensure mutual exclusion.
3771 */ 3772 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 3773 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3774 spa_config_held(spa, SCL_CONFIG, RW_READER))); 3775 3776 if (vd == rvd) { 3777 for (c = 0; c < rvd->vdev_children; c++) 3778 vdev_config_dirty(rvd->vdev_child[c]); 3779 } else { 3780 ASSERT(vd == vd->vdev_top); 3781 3782 if (!list_link_active(&vd->vdev_config_dirty_node) && 3783 vdev_is_concrete(vd)) { 3784 list_insert_head(&spa->spa_config_dirty_list, vd); 3785 } 3786 } 3787 } 3788 3789 void 3790 vdev_config_clean(vdev_t *vd) 3791 { 3792 spa_t *spa = vd->vdev_spa; 3793 3794 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || 3795 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3796 spa_config_held(spa, SCL_CONFIG, RW_READER))); 3797 3798 ASSERT(list_link_active(&vd->vdev_config_dirty_node)); 3799 list_remove(&spa->spa_config_dirty_list, vd); 3800 } 3801 3802 /* 3803 * Mark a top-level vdev's state as dirty, so that the next pass of 3804 * spa_sync() can convert this into vdev_config_dirty(). We distinguish 3805 * the state changes from larger config changes because they require 3806 * much less locking, and are often needed for administrative actions. 3807 */ 3808 void 3809 vdev_state_dirty(vdev_t *vd) 3810 { 3811 spa_t *spa = vd->vdev_spa; 3812 3813 ASSERT(spa_writeable(spa)); 3814 ASSERT(vd == vd->vdev_top); 3815 3816 /* 3817 * The state list is protected by the SCL_STATE lock. The caller 3818 * must either hold SCL_STATE as writer, or must be the sync thread 3819 * (which holds SCL_STATE as reader). There's only one sync thread, 3820 * so this is sufficient to ensure mutual exclusion. 3821 */ 3822 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 3823 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3824 spa_config_held(spa, SCL_STATE, RW_READER))); 3825 3826 if (!list_link_active(&vd->vdev_state_dirty_node) && 3827 vdev_is_concrete(vd)) 3828 list_insert_head(&spa->spa_state_dirty_list, vd); 3829 } 3830 3831 void 3832 vdev_state_clean(vdev_t *vd) 3833 { 3834 spa_t *spa = vd->vdev_spa; 3835 3836 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || 3837 (dsl_pool_sync_context(spa_get_dsl(spa)) && 3838 spa_config_held(spa, SCL_STATE, RW_READER))); 3839 3840 ASSERT(list_link_active(&vd->vdev_state_dirty_node)); 3841 list_remove(&spa->spa_state_dirty_list, vd); 3842 } 3843 3844 /* 3845 * Propagate vdev state up from children to parent. 3846 */ 3847 void 3848 vdev_propagate_state(vdev_t *vd) 3849 { 3850 spa_t *spa = vd->vdev_spa; 3851 vdev_t *rvd = spa->spa_root_vdev; 3852 int degraded = 0, faulted = 0; 3853 int corrupted = 0; 3854 vdev_t *child; 3855 3856 if (vd->vdev_children > 0) { 3857 for (int c = 0; c < vd->vdev_children; c++) { 3858 child = vd->vdev_child[c]; 3859 3860 /* 3861 * Don't factor holes or indirect vdevs into the 3862 * decision. 3863 */ 3864 if (!vdev_is_concrete(child)) 3865 continue; 3866 3867 if (!vdev_readable(child) || 3868 (!vdev_writeable(child) && spa_writeable(spa))) { 3869 /* 3870 * Root special: if there is a top-level log 3871 * device, treat the root vdev as if it were 3872 * degraded. 
/*
 * Propagate vdev state up from children to parent.
 */
void
vdev_propagate_state(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int degraded = 0, faulted = 0;
	int corrupted = 0;
	vdev_t *child;

	if (vd->vdev_children > 0) {
		for (int c = 0; c < vd->vdev_children; c++) {
			child = vd->vdev_child[c];

			/*
			 * Don't factor holes or indirect vdevs into the
			 * decision.
			 */
			if (!vdev_is_concrete(child))
				continue;

			if (!vdev_readable(child) ||
			    (!vdev_writeable(child) && spa_writeable(spa))) {
				/*
				 * Root special: if there is a top-level log
				 * device, treat the root vdev as if it were
				 * degraded.
				 */
				if (child->vdev_islog && vd == rvd)
					degraded++;
				else
					faulted++;
			} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
				degraded++;
			}

			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
				corrupted++;
		}

		vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);

		/*
		 * Root special: if there is a top-level vdev that cannot be
		 * opened due to corrupted metadata, then propagate the root
		 * vdev's aux state as 'corrupt' rather than 'insufficient
		 * replicas'.
		 */
		if (corrupted && vd == rvd &&
		    rvd->vdev_state == VDEV_STATE_CANT_OPEN)
			vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
	}

	if (vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}
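/*
 * How the (faulted, degraded) counts are interpreted is up to each
 * vdev_op_state_change callback.  As an illustration, the mirror ops
 * map them approximately as:
 *
 *	faulted == vdev_children	-> CANT_OPEN (no replicas)
 *	faulted + degraded > 0		-> DEGRADED
 *	otherwise			-> HEALTHY
 *
 * so a two-way mirror with one unreadable child reports DEGRADED rather
 * than failing outright.
 */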
/*
 * Set a vdev's state.  If this is during an open, we don't update the parent
 * state, because we're in the process of opening children depth-first.
 * Otherwise, we propagate the change to the parent.
 *
 * If this routine places a device in a faulted state, an appropriate ereport is
 * generated.
 */
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
	uint64_t save_state;
	spa_t *spa = vd->vdev_spa;

	if (state == vd->vdev_state) {
		vd->vdev_stat.vs_aux = aux;
		return;
	}

	save_state = vd->vdev_state;

	vd->vdev_state = state;
	vd->vdev_stat.vs_aux = aux;

	/*
	 * If we are setting the vdev state to anything but an open state, then
	 * always close the underlying device unless the device has requested
	 * a delayed close (i.e. we're about to remove or fault the device).
	 * Otherwise, we keep accessible but invalid devices open forever.
	 * We don't call vdev_close() itself, because that implies some extra
	 * checks (offline, etc) that we don't want here.  This is limited to
	 * leaf devices, because otherwise closing the device will affect other
	 * children.
	 */
	if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
	    vd->vdev_ops->vdev_op_leaf)
		vd->vdev_ops->vdev_op_close(vd);

	/*
	 * If we have brought this vdev back into service, we need
	 * to notify fmd so that it can gracefully repair any outstanding
	 * cases due to a missing device.  We do this in all cases, even those
	 * that probably don't correlate to a repaired fault.  This is sure to
	 * catch all cases, and we let the zfs-retire agent sort it out.  If
	 * this is a transient state it's OK, as the retire agent will
	 * double-check the state of the vdev before repairing it.
	 */
	if (state == VDEV_STATE_HEALTHY && vd->vdev_ops->vdev_op_leaf &&
	    vd->vdev_prevstate != state)
		zfs_post_state_change(spa, vd);

	if (vd->vdev_removed &&
	    state == VDEV_STATE_CANT_OPEN &&
	    (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
		/*
		 * If the previous state is set to VDEV_STATE_REMOVED, then this
		 * device was previously marked removed and someone attempted to
		 * reopen it.  If this failed due to a nonexistent device, then
		 * keep the device in the REMOVED state.  We also leave it in
		 * the REMOVED state if this is one of our special test online
		 * cases, which are only attempting to online the device and
		 * shouldn't generate an FMA fault.
		 */
		vd->vdev_state = VDEV_STATE_REMOVED;
		vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
	} else if (state == VDEV_STATE_REMOVED) {
		vd->vdev_removed = B_TRUE;
	} else if (state == VDEV_STATE_CANT_OPEN) {
		/*
		 * If we fail to open a vdev during an import or recovery, we
		 * mark it as "not available", which signifies that it was
		 * never there to begin with.  Failure to open such a device
		 * is not considered an error.
		 */
		if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
		    spa_load_state(spa) == SPA_LOAD_RECOVER) &&
		    vd->vdev_ops->vdev_op_leaf)
			vd->vdev_not_present = 1;

		/*
		 * Post the appropriate ereport.  If the 'prevstate' field is
		 * set to something other than VDEV_STATE_UNKNOWN, it indicates
		 * that this is part of a vdev_reopen().  In this case, we don't
		 * want to post the ereport if the device was already in the
		 * CANT_OPEN state beforehand.
		 *
		 * If the 'checkremove' flag is set, then this is an attempt to
		 * online the device in response to an insertion event.  If we
		 * hit this case, then we have detected an insertion event for a
		 * faulted or offline device that wasn't in the removed state.
		 * In this scenario, we don't post an ereport because we are
		 * about to replace the device, or attempt an online with
		 * vdev_forcefault, which will generate the fault for us.
		 */
		if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
		    !vd->vdev_not_present && !vd->vdev_checkremove &&
		    vd != spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
		}

		/* Erase any notion of persistent removed state */
		vd->vdev_removed = B_FALSE;
	} else {
		vd->vdev_removed = B_FALSE;
	}

	if (!isopen && vd->vdev_parent)
		vdev_propagate_state(vd->vdev_parent);
}

/*
 * Return B_TRUE if every child of this interior vdev is offline.
 */
boolean_t
vdev_children_are_offline(vdev_t *vd)
{
	ASSERT(!vd->vdev_ops->vdev_op_leaf);

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
			return (B_FALSE);
	}

	return (B_TRUE);
}
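/*
 * Illustrative outcomes of the checks below: a root pool on a single
 * disk or a single mirror is accepted; a pool striped across two
 * top-level vdevs fails the children-count check on the root vdev; and
 * a pool containing a missing or indirect (removed) top-level vdev
 * fails the type checks.
 */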
/*
 * Check the vdev configuration to ensure that it's capable of supporting
 * a root pool.  We do not support partial configuration.
 * In addition, only a single top-level vdev is allowed.
 */
boolean_t
vdev_is_bootable(vdev_t *vd)
{
	if (!vd->vdev_ops->vdev_op_leaf) {
		char *vdev_type = vd->vdev_ops->vdev_op_type;

		if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
		    vd->vdev_children > 1) {
			return (B_FALSE);
		} else if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0 ||
		    strcmp(vdev_type, VDEV_TYPE_INDIRECT) == 0) {
			return (B_FALSE);
		}
	}

	for (int c = 0; c < vd->vdev_children; c++) {
		if (!vdev_is_bootable(vd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * A vdev is "concrete" if it is backed by real storage; holes, missing
 * devices, indirect (removed) vdevs, and the root vdev are not.
 */
boolean_t
vdev_is_concrete(vdev_t *vd)
{
	vdev_ops_t *ops = vd->vdev_ops;
	if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
	    ops == &vdev_missing_ops || ops == &vdev_root_ops) {
		return (B_FALSE);
	} else {
		return (B_TRUE);
	}
}

/*
 * Determine if a log device has valid content.  If the vdev was
 * removed or faulted in the MOS config then we know that
 * the content on the log device has already been written to the pool.
 */
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
	if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
	    !vd->vdev_removed)
		return (B_TRUE);

	for (int c = 0; c < vd->vdev_children; c++)
		if (vdev_log_state_valid(vd->vdev_child[c]))
			return (B_TRUE);

	return (B_FALSE);
}

/*
 * Expand a vdev if possible.
 */
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
	ASSERT(vd->vdev_top == vd);
	ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(vdev_is_concrete(vd));

	vdev_set_deflate_ratio(vd);

	if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
		VERIFY(vdev_metaslab_init(vd, txg) == 0);
		vdev_config_dirty(vd);
	}
}

/*
 * Split a vdev.
 */
void
vdev_split(vdev_t *vd)
{
	vdev_t *cvd, *pvd = vd->vdev_parent;

	vdev_remove_child(pvd, vd);
	vdev_compact_children(pvd);

	cvd = pvd->vdev_child[0];
	if (pvd->vdev_children == 1) {
		vdev_remove_parent(cvd);
		cvd->vdev_splitting = B_TRUE;
	}
	vdev_propagate_state(cvd);
}

/*
 * Check each leaf vdev for I/Os that appear to be hung; if one is found,
 * panic the system so the hang can be diagnosed.
 */
void
vdev_deadman(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		vdev_deadman(cvd);
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		vdev_queue_t *vq = &vd->vdev_queue;

		mutex_enter(&vq->vq_lock);
		if (avl_numnodes(&vq->vq_active_tree) > 0) {
			spa_t *spa = vd->vdev_spa;
			zio_t *fio;
			uint64_t delta;

			/*
			 * Look at the head of the active queue; if any I/O
			 * has been outstanding for longer than the
			 * spa_deadman_synctime, we panic the system.
			 */
			fio = avl_first(&vq->vq_active_tree);
			delta = gethrtime() - fio->io_timestamp;
			if (delta > spa_deadman_synctime(spa)) {
				vdev_dbgmsg(vd, "SLOW IO: zio timestamp "
				    "%lluns, delta %lluns, last io %lluns",
				    (u_longlong_t)fio->io_timestamp,
				    (u_longlong_t)delta,
				    (u_longlong_t)vq->vq_io_complete_ts);
				fm_panic("I/O to pool '%s' appears to be "
				    "hung.", spa_name(spa));
			}
		}
		mutex_exit(&vq->vq_lock);
	}
}
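/*
 * Context (an assumption about code elsewhere in the codebase, not
 * something this file guarantees): vdev_deadman() is expected to be
 * driven by the spa deadman timer (spa_deadman() in spa_misc.c), which
 * invokes it on spa_root_vdev; the recursion above thus visits every
 * leaf in the pool each time the timer fires.
 */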