/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device management.
 */

static vdev_ops_t *vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	NULL
};

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	vdev_ops_t *ops, **opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_ashift);
	uint64_t csize;
	uint64_t c;

	for (c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}
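
/*
 * For example, with a (hypothetical) vdev_ashift of 9 (512-byte
 * sectors), a psize of 1000 bytes rounds up to
 * P2ROUNDUP(1000, 512) == 1024 allocated bytes, and a mirror then
 * returns the MAX of that value across all of its children.
 */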

/*
 * Get the replaceable or attachable device size.
 * If the parent is a mirror or raidz, the replaceable size is the minimum
 * psize of all its children.  For the rest, just return our own psize.
 *
 * e.g.
 *			psize	rsize
 *	root		-	-
 *	mirror/raidz	-	-
 *	    disk1	20g	20g
 *	    disk2	40g	20g
 *	disk3		80g	80g
 */
uint64_t
vdev_get_rsize(vdev_t *vd)
{
	vdev_t *pvd, *cvd;
	uint64_t c, rsize;

	pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL or the root, just return our own psize.
	 */
	if (pvd == NULL || pvd->vdev_parent == NULL)
		return (vd->vdev_psize);

	rsize = 0;

	for (c = 0; c < pvd->vdev_children; c++) {
		cvd = pvd->vdev_child[c];
		/*
		 * The -1/+1 dance makes the initial rsize of 0 wrap to
		 * UINT64_MAX, so the first child's psize always wins and
		 * rsize converges to the minimum psize of all children.
		 */
		rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1;
	}

	return (rsize);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	if (vdev < rvd->vdev_children)
		return (rvd->vdev_child[vdev]);

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	int c;
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, RW_WRITER));
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_zalloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		bcopy(pvd->vdev_child, newchild, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc, c;

	ASSERT(spa_config_held(pvd->vdev_spa, RW_WRITER));

	for (c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);

	for (c = newc = 0; c < oldc; c++) {
		if ((cvd = pvd->vdev_child[c]) != NULL) {
			newchild[newc] = cvd;
			cvd->vdev_id = newc++;
		}
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}
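
/*
 * The guid sum bookkeeping above maintains the invariant that every
 * vdev's vdev_guid_sum equals its own guid plus the guid sums of all
 * of its children; e.g. for a hypothetical mirror m with children
 * c1 and c2:
 *
 *	m->vdev_guid_sum == m->vdev_guid +
 *	    c1->vdev_guid_sum + c2->vdev_guid_sum
 *
 * The root vdev's guid sum is compared against the sum recorded in
 * the uberblock, which is how a missing or substituted device is
 * detected at pool open (VDEV_AUX_BAD_GUID_SUM).
 */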

/*
 * Allocate and minimally initialize a vdev_t.
 */
static vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
	}

	if (guid == 0) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			while (guid == 0 || spa_guid_exists(guid, 0))
				guid = spa_get_random(-1ULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			while (guid == 0 ||
			    spa_guid_exists(spa_guid(spa), guid))
				guid = spa_get_random(-1ULL);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;

	mutex_init(&vd->vdev_dirty_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
	space_map_create(&vd->vdev_dtl_map, 0, -1ULL, 0, &vd->vdev_dtl_lock);
	space_map_create(&vd->vdev_dtl_scrub, 0, -1ULL, 0, &vd->vdev_dtl_lock);
	txg_list_create(&vd->vdev_ms_list,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();

	return (vd);
}

/*
 * Free a vdev_t that has been removed from service.
 */
static void
vdev_free_common(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);
	mutex_enter(&vd->vdev_dtl_lock);
	space_map_vacate(&vd->vdev_dtl_map, NULL, NULL);
	space_map_destroy(&vd->vdev_dtl_map);
	space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
	space_map_destroy(&vd->vdev_dtl_scrub);
	mutex_exit(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_dirty_lock);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}
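
/*
 * A rough sketch of the common leaf vdev lifecycle; the exact call
 * sites live in spa.c and vary by operation:
 *
 *	vd = vdev_alloc(spa, nv, parent, id, alloctype);
 *	error = vdev_open(vd);		CLOSED -> HEALTHY
 *	... I/O flows through vdev_io_start()/vdev_io_done() ...
 *	vdev_close(vd);			HEALTHY -> CLOSED
 *	vdev_free(vd);			implies vdev_close()
 *
 * vdev_free() also removes the vdev from its parent's child array,
 * so callers need not invoke vdev_remove_child() themselves.
 */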

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
vdev_t *
vdev_alloc(spa_t *spa, nvlist_t *nv, vdev_t *parent, uint_t id, int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, offline = 0;
	vdev_t *vd;

	ASSERT(spa_config_held(spa, RW_WRITER));

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (NULL);

	if ((ops = vdev_getops(type)) == NULL)
		return (NULL);

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (NULL);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (NULL);
	}

	vd = vdev_alloc_common(spa, id, guid, ops);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT,
		    &vd->vdev_ashift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object
	 * and the offline state.
	 */
	vd->vdev_offline = B_FALSE;
	if (vd->vdev_ops->vdev_op_leaf && alloctype == VDEV_ALLOC_LOAD) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
		    &vd->vdev_dtl.smo_object);

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &offline)
		    == 0)
			vd->vdev_offline = offline;
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	return (vd);
}

void
vdev_free(vdev_t *vd)
{
	int c;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	/*
	 * It's possible to free a vdev that's been added to the dirty
	 * list when in the middle of spa_vdev_add().  Handle that case
	 * correctly here.
	 */
	if (vd->vdev_is_dirty)
		vdev_config_clean(vd);

	/*
	 * Free all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	vdev_free_common(vd);
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_mg->mg_vd = tvd;
	tvd->vdev_ms = svd->vdev_ms;
	tvd->vdev_smo = svd->vdev_smo;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;
	svd->vdev_smo = NULL;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
		tvd->vdev_dirty[t] = svd->vdev_dirty[t];
		svd->vdev_dirty[t] = 0;
	}

	if (svd->vdev_is_dirty) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	tvd->vdev_reopen_wanted = svd->vdev_reopen_wanted;
	svd->vdev_reopen_wanted = 0;
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, RW_WRITER));

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_state = cvd->vdev_state;

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}
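
/*
 * For example, when a device attach uses this to start replacing an
 * existing leaf, the tree changes shape as follows (the new disk2 is
 * added as the replacing vdev's second child by the caller):
 *
 *	root			root
 *	    disk1	==>	    replacing
 *				        disk1
 *				        disk2
 *
 * vdev_remove_parent() below performs the inverse collapse once the
 * replacing vdev is down to a single child.
 */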

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, RW_WRITER));

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops);

	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}
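
/*
 * Example of the metaslab arithmetic used below: a (hypothetical)
 * top-level vdev with a vdev_asize of 128GB and a vdev_ms_shift of 30
 * is carved into 128GB >> 30 == 128 metaslabs of 1GB each, where
 * metaslab c covers the range [c << 30, (c + 1) << 30).
 */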

int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	metaslab_class_t *mc = spa_metaslab_class_select(spa);
	uint64_t c;
	uint64_t oldc = vd->vdev_ms_count;
	uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
	space_map_obj_t *smo = vd->vdev_smo;
	metaslab_t **mspp = vd->vdev_ms;
	int ret;

	if (vd->vdev_ms_shift == 0)	/* not being allocated from yet */
		return (0);

	dprintf("%s oldc %llu newc %llu\n", vdev_description(vd), oldc, newc);

	ASSERT(oldc <= newc);

	vd->vdev_smo = kmem_zalloc(newc * sizeof (*smo), KM_SLEEP);
	vd->vdev_ms = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
	vd->vdev_ms_count = newc;

	if (vd->vdev_mg == NULL) {
		if (txg == 0) {
			dmu_buf_t *db;
			uint64_t *ms_array;

			ms_array = kmem_zalloc(newc * sizeof (uint64_t),
			    KM_SLEEP);

			if ((ret = dmu_read(spa->spa_meta_objset,
			    vd->vdev_ms_array, 0,
			    newc * sizeof (uint64_t), ms_array)) != 0) {
				kmem_free(ms_array, newc * sizeof (uint64_t));
				goto error;
			}

			for (c = 0; c < newc; c++) {
				if (ms_array[c] == 0)
					continue;
				if ((ret = dmu_bonus_hold(
				    spa->spa_meta_objset, ms_array[c],
				    FTAG, &db)) != 0) {
					kmem_free(ms_array,
					    newc * sizeof (uint64_t));
					goto error;
				}
				ASSERT3U(db->db_size, ==, sizeof (*smo));
				bcopy(db->db_data, &vd->vdev_smo[c],
				    db->db_size);
				ASSERT3U(vd->vdev_smo[c].smo_object, ==,
				    ms_array[c]);
				dmu_buf_rele(db, FTAG);
			}
			kmem_free(ms_array, newc * sizeof (uint64_t));
		}
		vd->vdev_mg = metaslab_group_create(mc, vd);
	}

	for (c = 0; c < oldc; c++) {
		vd->vdev_smo[c] = smo[c];
		vd->vdev_ms[c] = mspp[c];
		mspp[c]->ms_smo = &vd->vdev_smo[c];
	}

	for (c = oldc; c < newc; c++)
		metaslab_init(vd->vdev_mg, &vd->vdev_smo[c], &vd->vdev_ms[c],
		    c << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg);

	if (oldc != 0) {
		kmem_free(smo, oldc * sizeof (*smo));
		kmem_free(mspp, oldc * sizeof (*mspp));
	}

	return (0);

error:
	/*
	 * On error, undo any partial progress we may have made, and restore the
	 * old metaslab values.
	 */
	kmem_free(vd->vdev_smo, newc * sizeof (*smo));
	kmem_free(vd->vdev_ms, newc * sizeof (*mspp));

	vd->vdev_smo = smo;
	vd->vdev_ms = mspp;
	vd->vdev_ms_count = oldc;

	return (ret);
}

void
vdev_metaslab_fini(vdev_t *vd)
{
	uint64_t m;
	uint64_t count = vd->vdev_ms_count;

	if (vd->vdev_ms != NULL) {
		for (m = 0; m < count; m++)
			metaslab_fini(vd->vdev_ms[m]);
		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
		vd->vdev_ms = NULL;
	}

	if (vd->vdev_smo != NULL) {
		kmem_free(vd->vdev_smo, count * sizeof (space_map_obj_t));
		vd->vdev_smo = NULL;
	}
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
	int error;
	vdev_knob_t *vk;
	int c;
	uint64_t osize = 0;
	uint64_t asize, psize;
	uint64_t ashift = -1ULL;

	ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
	    vd->vdev_state == VDEV_STATE_CANT_OPEN ||
	    vd->vdev_state == VDEV_STATE_OFFLINE);

	if (vd->vdev_fault_mode == VDEV_FAULT_COUNT)
		vd->vdev_fault_arg >>= 1;
	else
		vd->vdev_fault_mode = VDEV_FAULT_NONE;

	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;

	for (vk = vdev_knob_next(NULL); vk != NULL; vk = vdev_knob_next(vk)) {
		uint64_t *valp = (uint64_t *)((char *)vd + vk->vk_offset);

		*valp = vk->vk_default;
		*valp = MAX(*valp, vk->vk_min);
		*valp = MIN(*valp, vk->vk_max);
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		vdev_cache_init(vd);
		vdev_queue_init(vd);
		vd->vdev_cache_active = B_TRUE;
	}

	if (vd->vdev_offline) {
		ASSERT(vd->vdev_children == 0);
		vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
		return (ENXIO);
	}

	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);

	if (zio_injection_enabled && error == 0)
		error = zio_handle_device_injection(vd, ENXIO);

	dprintf("%s = %d, osize %llu, state = %d\n",
	    vdev_description(vd), error, osize, vd->vdev_state);

	if (error) {
		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    vd->vdev_stat.vs_aux);
		return (error);
	}

	vd->vdev_state = VDEV_STATE_HEALTHY;

	for (c = 0; c < vd->vdev_children; c++)
		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
			    VDEV_AUX_NONE);
			break;
		}

	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));

	if (vd->vdev_children == 0) {
		if (osize < SPA_MINDEVSIZE) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = osize;
		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
	} else {
		if (osize < SPA_MINDEVSIZE -
		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_TOO_SMALL);
			return (EOVERFLOW);
		}
		psize = 0;
		asize = osize;
	}

	vd->vdev_psize = psize;

	if (vd->vdev_asize == 0) {
		/*
		 * This is the first-ever open, so use the computed values.
		 */
		vd->vdev_asize = asize;
		vd->vdev_ashift = ashift;
	} else {
		/*
		 * Make sure the alignment requirement hasn't increased.
		 */
		if (ashift > vd->vdev_ashift) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * Make sure the device hasn't shrunk.
		 */
		if (asize < vd->vdev_asize) {
			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_BAD_LABEL);
			return (EINVAL);
		}

		/*
		 * If all children are healthy and the asize has increased,
		 * then we've experienced dynamic LUN growth.
		 */
		if (vd->vdev_state == VDEV_STATE_HEALTHY &&
		    asize > vd->vdev_asize) {
			vd->vdev_asize = asize;
		}
	}

	/*
	 * If we were able to open a vdev that was marked permanently
	 * unavailable, clear that state now.
	 */
	if (vd->vdev_not_present)
		vd->vdev_not_present = 0;

	/*
	 * This allows the ZFS DE to close cases appropriately.  If a device
	 * goes away and later returns, we want to close the associated case.
	 * But it's not enough to simply post this only when a device goes from
	 * CANT_OPEN -> HEALTHY.  If we reboot the system and the device is
	 * back, we also need to close the case (otherwise we will try to replay
	 * it).  So we have to post this notifier every time.  Since this only
	 * occurs during pool open or error recovery, this should not be an
	 * issue.
	 */
	zfs_post_ok(vd->vdev_spa, vd);

	return (0);
}

/*
 * Close a virtual device.
 */
void
vdev_close(vdev_t *vd)
{
	vd->vdev_ops->vdev_op_close(vd);

	if (vd->vdev_cache_active) {
		vdev_cache_fini(vd);
		vdev_queue_fini(vd);
		vd->vdev_cache_active = B_FALSE;
	}

	if (vd->vdev_offline)
		vd->vdev_state = VDEV_STATE_OFFLINE;
	else
		vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}

void
vdev_reopen(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	ASSERT(spa_config_held(spa, RW_WRITER));

	if (vd == rvd) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_reopen(rvd->vdev_child[c]);
		return;
	}

	/* only valid for top-level vdevs */
	ASSERT3P(vd, ==, vd->vdev_top);

	vdev_close(vd);
	(void) vdev_open(vd);

	/*
	 * Reassess root vdev's health.
	 */
	rvd->vdev_state = VDEV_STATE_HEALTHY;
	for (c = 0; c < rvd->vdev_children; c++) {
		uint64_t state = rvd->vdev_child[c]->vdev_state;
		rvd->vdev_state = MIN(rvd->vdev_state, state);
	}
}

int
vdev_create(vdev_t *vd, uint64_t txg)
{
	int error;

	/*
	 * Normally, partial opens (e.g. of a mirror) are allowed.
	 * For a create, however, we want to fail the request if
	 * there are any components we can't open.
	 */
	error = vdev_open(vd);

	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
		vdev_close(vd);
		return (error ? error : ENXIO);
	}

	/*
	 * Recursively initialize all labels.
	 */
	if ((error = vdev_label_init(vd, txg)) != 0) {
		vdev_close(vd);
		return (error);
	}

	return (0);
}
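
/*
 * Example of the sizing heuristic in vdev_init() below: for a
 * (hypothetical) 100GB top-level vdev, asize / 200 is 512MB, so
 * highbit() yields a vdev_ms_shift of 30 and the vdev ends up with
 * one hundred 1GB metaslabs.
 */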

/*
 * This is the latter half of vdev_create().  It is distinct because it
 * involves initiating transactions in order to do metaslab creation.
 * For creation, we want to try to create all vdevs at once and then undo it
 * if anything fails; this is much harder if we have pending transactions.
 */
void
vdev_init(vdev_t *vd, uint64_t txg)
{
	/*
	 * Aim for roughly 200 metaslabs per vdev.
	 */
	vd->vdev_ms_shift = highbit(vd->vdev_asize / 200);
	vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);

	/*
	 * Initialize the vdev's metaslabs.  This can't fail because
	 * there's nothing to read when creating all new metaslabs.
	 */
	VERIFY(vdev_metaslab_init(vd, txg) == 0);
}

void
vdev_dirty(vdev_t *vd, uint8_t flags, uint64_t txg)
{
	vdev_t *tvd = vd->vdev_top;

	mutex_enter(&tvd->vdev_dirty_lock);
	if ((tvd->vdev_dirty[txg & TXG_MASK] & flags) != flags) {
		tvd->vdev_dirty[txg & TXG_MASK] |= flags;
		(void) txg_list_add(&tvd->vdev_spa->spa_vdev_txg_list,
		    tvd, txg);
	}
	mutex_exit(&tvd->vdev_dirty_lock);
}

void
vdev_dtl_dirty(space_map_t *sm, uint64_t txg, uint64_t size)
{
	mutex_enter(sm->sm_lock);
	if (!space_map_contains(sm, txg, size))
		space_map_add(sm, txg, size);
	mutex_exit(sm->sm_lock);
}

int
vdev_dtl_contains(space_map_t *sm, uint64_t txg, uint64_t size)
{
	int dirty;

	/*
	 * Quick test without the lock -- covers the common case that
	 * there are no dirty time segments.
	 */
	if (sm->sm_space == 0)
		return (0);

	mutex_enter(sm->sm_lock);
	dirty = space_map_contains(sm, txg, size);
	mutex_exit(sm->sm_lock);

	return (dirty);
}
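
/*
 * Example: if a mirror child was unreachable while txgs 100 through
 * 150 were written, vdev_dtl_dirty() records those single-txg
 * entries, which coalesce into the segment [100, 151) in the
 * device's vdev_dtl_map; vdev_dtl_contains(&vd->vdev_dtl_map, 120, 1)
 * then returns nonzero until a resilver repairs and excises that
 * range.
 */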

/*
 * Reassess DTLs after a config change or scrub completion.
 */
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
{
	spa_t *spa = vd->vdev_spa;
	int c;

	ASSERT(spa_config_held(spa, RW_WRITER));

	if (vd->vdev_children == 0) {
		mutex_enter(&vd->vdev_dtl_lock);
		/*
		 * We've successfully scrubbed everything up to scrub_txg.
		 * Therefore, excise all old DTLs up to that point, then
		 * fold in the DTLs for everything we couldn't scrub.
		 */
		if (scrub_txg != 0) {
			space_map_excise(&vd->vdev_dtl_map, 0, scrub_txg);
			space_map_union(&vd->vdev_dtl_map, &vd->vdev_dtl_scrub);
		}
		if (scrub_done)
			space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
		mutex_exit(&vd->vdev_dtl_lock);
		if (txg != 0) {
			vdev_t *tvd = vd->vdev_top;
			vdev_dirty(tvd, VDD_DTL, txg);
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, txg);
		}
		return;
	}

	/*
	 * Make sure the DTLs are always correct under the scrub lock.
	 */
	if (vd == spa->spa_root_vdev)
		mutex_enter(&spa->spa_scrub_lock);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_vacate(&vd->vdev_dtl_map, NULL, NULL);
	space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
	mutex_exit(&vd->vdev_dtl_lock);

	for (c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];
		vdev_dtl_reassess(cvd, txg, scrub_txg, scrub_done);
		mutex_enter(&vd->vdev_dtl_lock);
		space_map_union(&vd->vdev_dtl_map, &cvd->vdev_dtl_map);
		space_map_union(&vd->vdev_dtl_scrub, &cvd->vdev_dtl_scrub);
		mutex_exit(&vd->vdev_dtl_lock);
	}

	if (vd == spa->spa_root_vdev)
		mutex_exit(&spa->spa_scrub_lock);
}

static int
vdev_dtl_load(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl;
	dmu_buf_t *db;
	int error;

	ASSERT(vd->vdev_children == 0);

	if (smo->smo_object == 0)
		return (0);

	if ((error = dmu_bonus_hold(spa->spa_meta_objset, smo->smo_object,
	    FTAG, &db)) != 0)
		return (error);
	ASSERT3U(db->db_size, ==, sizeof (*smo));
	bcopy(db->db_data, smo, db->db_size);
	dmu_buf_rele(db, FTAG);

	mutex_enter(&vd->vdev_dtl_lock);
	error = space_map_load(&vd->vdev_dtl_map, smo, SM_ALLOC,
	    spa->spa_meta_objset, smo->smo_objsize, smo->smo_alloc);
	mutex_exit(&vd->vdev_dtl_lock);

	return (error);
}

/*
 * Write a leaf vdev's DTL to disk: allocate the space map object if
 * needed, free the old on-disk representation, and sync a fresh copy
 * of the in-core map along with its bonus-buffer header.
 */
void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl;
	space_map_t *sm = &vd->vdev_dtl_map;
	space_map_t smsync;
	kmutex_t smlock;
	avl_tree_t *t = &sm->sm_root;
	space_seg_t *ss;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	dprintf("%s in txg %llu pass %d\n",
	    vdev_description(vd), (u_longlong_t)txg, spa_sync_pass(spa));

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	if (vd->vdev_detached) {
		if (smo->smo_object != 0) {
			int err = dmu_object_free(spa->spa_meta_objset,
			    smo->smo_object, tx);
			ASSERT3U(err, ==, 0);
			smo->smo_object = 0;
		}
		dmu_tx_commit(tx);
		return;
	}

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		vdev_config_dirty(vd->vdev_top);
	}

	VERIFY(0 == dmu_free_range(spa->spa_meta_objset, smo->smo_object,
	    0, smo->smo_objsize, tx));

	mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);

	space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
	    &smlock);

	mutex_enter(&smlock);

	mutex_enter(&vd->vdev_dtl_lock);
	for (ss = avl_first(t); ss != NULL; ss = AVL_NEXT(t, ss))
		space_map_add(&smsync, ss->ss_start, ss->ss_end - ss->ss_start);
	mutex_exit(&vd->vdev_dtl_lock);

	smo->smo_objsize = 0;
	smo->smo_alloc = smsync.sm_space;

	space_map_sync(&smsync, NULL, smo, SM_ALLOC, spa->spa_meta_objset, tx);
	space_map_destroy(&smsync);

	mutex_exit(&smlock);
	mutex_destroy(&smlock);

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, smo->smo_object,
	    FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, ==, sizeof (*smo));
	bcopy(smo, db->db_data, db->db_size);
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

int
vdev_load(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	int c, error;
	nvlist_t *label;
	uint64_t guid, state;

	dprintf("loading %s\n", vdev_description(vd));

	/*
	 * Recursively load all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		if ((error = vdev_load(vd->vdev_child[c])) != 0)
			return (error);

	/*
	 * If this is a leaf vdev, make sure it agrees with its disk labels.
	 */
	if (vd->vdev_ops->vdev_op_leaf) {

		if (vdev_is_dead(vd))
			return (0);

		/*
		 * XXX state transitions don't propagate to parent here.
		 * Also, merely setting the state isn't sufficient because
		 * it's not persistent; a vdev_reopen() would make us
		 * forget all about it.
		 */
		if ((label = vdev_label_read_config(vd)) == NULL) {
			dprintf("can't load label config\n");
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
		    &guid) != 0 || guid != spa_guid(spa)) {
			dprintf("bad or missing pool GUID (%llu)\n", guid);
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) ||
		    guid != vd->vdev_guid) {
			dprintf("bad or missing vdev guid (%llu != %llu)\n",
			    guid, vd->vdev_guid);
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		/*
		 * If we find a vdev with a matching pool guid and vdev guid,
		 * but the pool state is not active, it indicates that the user
		 * exported or destroyed the pool without affecting the config
		 * cache (if / was mounted readonly, for example).  In this
		 * case, immediately return EBADF so the caller can remove it
		 * from the config.
		 */
		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    &state)) {
			dprintf("missing pool state\n");
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			nvlist_free(label);
			return (0);
		}

		if (state != POOL_STATE_ACTIVE &&
		    (spa->spa_load_state == SPA_LOAD_OPEN ||
		    state != POOL_STATE_EXPORTED)) {
			dprintf("pool state not active (%llu)\n", state);
			nvlist_free(label);
			return (EBADF);
		}

		nvlist_free(label);
	}

	/*
	 * If this is a top-level vdev, initialize its metaslabs.
	 */
	if (vd == vd->vdev_top) {

		if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			return (0);
		}

		if ((error = vdev_metaslab_init(vd, 0)) != 0) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			return (0);
		}
	}

	/*
	 * If this is a leaf vdev, load its DTL.
	 */
	if (vd->vdev_ops->vdev_op_leaf) {
		error = vdev_dtl_load(vd);
		if (error) {
			dprintf("can't load DTL for %s, error %d\n",
			    vdev_description(vd), error);
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			return (0);
		}
	}

	return (0);
}

void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
	metaslab_t *msp;

	dprintf("%s txg %llu\n", vdev_description(vd), txg);

	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
		metaslab_sync_done(msp, txg);
}

void
vdev_add_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	ASSERT(vd == vd->vdev_top);

	if (vd->vdev_ms_array == 0)
		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);

	ASSERT(vd->vdev_ms_array != 0);

	vdev_config_dirty(vd);

	dmu_tx_commit(tx);
}

void
vdev_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *lvd;
	metaslab_t *msp;
	uint8_t *dirtyp = &vd->vdev_dirty[txg & TXG_MASK];
	uint8_t dirty = *dirtyp;

	mutex_enter(&vd->vdev_dirty_lock);
	*dirtyp &= ~(VDD_ALLOC | VDD_FREE | VDD_ADD | VDD_DTL);
	mutex_exit(&vd->vdev_dirty_lock);

	dprintf("%s txg %llu pass %d\n",
	    vdev_description(vd), (u_longlong_t)txg, spa_sync_pass(spa));

	if (dirty & VDD_ADD)
		vdev_add_sync(vd, txg);

	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL)
		metaslab_sync(msp, txg);

	while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
		vdev_dtl_sync(lvd, txg);

	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
}

uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
	return (vd->vdev_ops->vdev_op_asize(vd, psize));
}

void
vdev_io_start(zio_t *zio)
{
	zio->io_vd->vdev_ops->vdev_op_io_start(zio);
}

void
vdev_io_done(zio_t *zio)
{
	zio->io_vd->vdev_ops->vdev_op_io_done(zio);
}

const char *
vdev_description(vdev_t *vd)
{
	if (vd == NULL || vd->vdev_ops == NULL)
		return ("<unknown>");

	if (vd->vdev_path != NULL)
		return (vd->vdev_path);

	if (vd->vdev_parent == NULL)
		return (spa_name(vd->vdev_spa));

	return (vd->vdev_ops->vdev_op_type);
}

int
vdev_online(spa_t *spa, uint64_t guid)
{
	vdev_t *rvd, *vd;
	uint64_t txg;

	txg = spa_vdev_enter(spa);

	rvd = spa->spa_root_vdev;

	if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	dprintf("ONLINE: %s\n", vdev_description(vd));

	vd->vdev_offline = B_FALSE;
	vd->vdev_tmpoffline = B_FALSE;
	vdev_reopen(vd->vdev_top);

	vdev_config_dirty(vd->vdev_top);

	(void) spa_vdev_exit(spa, NULL, txg, 0);

	VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);

	return (0);
}
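
/*
 * Both vdev_online() above and vdev_offline() below bracket their
 * work with spa_vdev_enter()/spa_vdev_exit(), which take the pool
 * configuration lock as writer and, on exit, wait for the named txg
 * to sync.  The error passed to spa_vdev_exit() becomes the caller's
 * return value, so e.g.
 *
 *	return (spa_vdev_exit(spa, NULL, txg, ENODEV));
 *
 * both drops the locks and reports the lookup failure.
 */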

int
vdev_offline(spa_t *spa, uint64_t guid, int istmp)
{
	vdev_t *rvd, *vd;
	uint64_t txg;

	txg = spa_vdev_enter(spa);

	rvd = spa->spa_root_vdev;

	if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL)
		return (spa_vdev_exit(spa, NULL, txg, ENODEV));

	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));

	dprintf("OFFLINE: %s\n", vdev_description(vd));

	/* vdev is already offlined, do nothing */
	if (vd->vdev_offline)
		return (spa_vdev_exit(spa, NULL, txg, 0));

	/*
	 * If this device's top-level vdev has a non-empty DTL,
	 * don't allow the device to be offlined.
	 *
	 * XXX -- we should make this more precise by allowing the offline
	 * as long as the remaining devices don't have any DTL holes.
	 */
	if (vd->vdev_top->vdev_dtl_map.sm_space != 0)
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));

	/*
	 * Set this device to offline state and reopen its top-level vdev.
	 * If this action results in the top-level vdev becoming unusable,
	 * undo it and fail the request.
	 */
	vd->vdev_offline = B_TRUE;
	vdev_reopen(vd->vdev_top);
	if (vdev_is_dead(vd->vdev_top)) {
		vd->vdev_offline = B_FALSE;
		vdev_reopen(vd->vdev_top);
		return (spa_vdev_exit(spa, NULL, txg, EBUSY));
	}

	vd->vdev_tmpoffline = istmp;
	if (!istmp)
		vdev_config_dirty(vd->vdev_top);

	return (spa_vdev_exit(spa, NULL, txg, 0));
}

/*
 * Clear the error counts associated with this vdev.  Unlike vdev_online() and
 * vdev_offline(), we assume the spa config is locked.  We also clear all
 * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
 */
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		vd = spa->spa_root_vdev;

	vd->vdev_stat.vs_read_errors = 0;
	vd->vdev_stat.vs_write_errors = 0;
	vd->vdev_stat.vs_checksum_errors = 0;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_clear(spa, vd->vdev_child[c]);
}

int
vdev_is_dead(vdev_t *vd)
{
	return (vd->vdev_state <= VDEV_STATE_CANT_OPEN);
}

int
vdev_error_inject(vdev_t *vd, zio_t *zio)
{
	int error = 0;

	if (vd->vdev_fault_mode == VDEV_FAULT_NONE)
		return (0);

	if (((1ULL << zio->io_type) & vd->vdev_fault_mask) == 0)
		return (0);

	switch (vd->vdev_fault_mode) {
	case VDEV_FAULT_RANDOM:
		if (spa_get_random(vd->vdev_fault_arg) == 0)
			error = EIO;
		break;

	case VDEV_FAULT_COUNT:
		if ((int64_t)--vd->vdev_fault_arg <= 0)
			vd->vdev_fault_mode = VDEV_FAULT_NONE;
		error = EIO;
		break;
	}

	if (error != 0) {
		dprintf("returning %d for type %d on %s state %d offset %llx\n",
		    error, zio->io_type, vdev_description(vd),
		    vd->vdev_state, zio->io_offset);
	}

	return (error);
}
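
/*
 * Example: setting (hypothetically)
 *
 *	vd->vdev_fault_mode = VDEV_FAULT_COUNT;
 *	vd->vdev_fault_arg = 10;
 *	vd->vdev_fault_mask = 1ULL << ZIO_TYPE_READ;
 *
 * makes the next 10 reads on vd fail with EIO, after which the fault
 * mode clears itself.  Note that vdev_open() halves vdev_fault_arg on
 * each open, so a pending fault count decays across reopens.
 */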

/*
 * Get statistics for the given vdev.
 */
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
	int c, t;

	mutex_enter(&vd->vdev_stat_lock);
	bcopy(&vd->vdev_stat, vs, sizeof (*vs));
	vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
	vs->vs_state = vd->vdev_state;
	vs->vs_rsize = vdev_get_rsize(vd);
	mutex_exit(&vd->vdev_stat_lock);

	/*
	 * If we're getting stats on the root vdev, aggregate the I/O counts
	 * over all top-level vdevs (i.e. the direct children of the root).
	 */
	if (vd == rvd) {
		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *cvd = rvd->vdev_child[c];
			vdev_stat_t *cvs = &cvd->vdev_stat;

			mutex_enter(&vd->vdev_stat_lock);
			for (t = 0; t < ZIO_TYPES; t++) {
				vs->vs_ops[t] += cvs->vs_ops[t];
				vs->vs_bytes[t] += cvs->vs_bytes[t];
			}
			vs->vs_read_errors += cvs->vs_read_errors;
			vs->vs_write_errors += cvs->vs_write_errors;
			vs->vs_checksum_errors += cvs->vs_checksum_errors;
			vs->vs_scrub_examined += cvs->vs_scrub_examined;
			vs->vs_scrub_errors += cvs->vs_scrub_errors;
			mutex_exit(&vd->vdev_stat_lock);
		}
	}
}

void
vdev_stat_update(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *pvd;
	uint64_t txg = zio->io_txg;
	vdev_stat_t *vs = &vd->vdev_stat;
	zio_type_t type = zio->io_type;
	int flags = zio->io_flags;

	if (zio->io_error == 0) {
		if (!(flags & ZIO_FLAG_IO_BYPASS)) {
			mutex_enter(&vd->vdev_stat_lock);
			vs->vs_ops[type]++;
			vs->vs_bytes[type] += zio->io_size;
			mutex_exit(&vd->vdev_stat_lock);
		}
		if ((flags & ZIO_FLAG_IO_REPAIR) &&
		    zio->io_delegate_list == NULL) {
			mutex_enter(&vd->vdev_stat_lock);
			if (flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))
				vs->vs_scrub_repaired += zio->io_size;
			else
				vs->vs_self_healed += zio->io_size;
			mutex_exit(&vd->vdev_stat_lock);
		}
		return;
	}

	if (flags & ZIO_FLAG_SPECULATIVE)
		return;

	if (!vdev_is_dead(vd)) {
		mutex_enter(&vd->vdev_stat_lock);
		if (type == ZIO_TYPE_READ) {
			if (zio->io_error == ECKSUM)
				vs->vs_checksum_errors++;
			else
				vs->vs_read_errors++;
		}
		if (type == ZIO_TYPE_WRITE)
			vs->vs_write_errors++;
		mutex_exit(&vd->vdev_stat_lock);
	}

	if (type == ZIO_TYPE_WRITE) {
		if (txg == 0 || vd->vdev_children != 0)
			return;
		if (flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
			ASSERT(flags & ZIO_FLAG_IO_REPAIR);
			for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
				vdev_dtl_dirty(&pvd->vdev_dtl_scrub, txg, 1);
		}
		if (!(flags & ZIO_FLAG_IO_REPAIR)) {
			vdev_t *tvd = vd->vdev_top;
			if (vdev_dtl_contains(&vd->vdev_dtl_map, txg, 1))
				return;
			vdev_dirty(tvd, VDD_DTL, txg);
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, txg);
			for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
				vdev_dtl_dirty(&pvd->vdev_dtl_map, txg, 1);
		}
	}
}

void
vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete)
{
	int c;
	vdev_stat_t *vs = &vd->vdev_stat;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_scrub_stat_update(vd->vdev_child[c], type, complete);

	mutex_enter(&vd->vdev_stat_lock);

	if (type == POOL_SCRUB_NONE) {
		/*
		 * Update completion and end time.  Leave everything else alone
		 * so we can report what happened during the previous scrub.
		 */
		vs->vs_scrub_complete = complete;
		vs->vs_scrub_end = gethrestime_sec();
	} else {
		vs->vs_scrub_type = type;
		vs->vs_scrub_complete = 0;
		vs->vs_scrub_examined = 0;
		vs->vs_scrub_repaired = 0;
		vs->vs_scrub_errors = 0;
		vs->vs_scrub_start = gethrestime_sec();
		vs->vs_scrub_end = 0;
	}

	mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Update the in-core space usage stats for this vdev and the root vdev.
 */
void
vdev_space_update(vdev_t *vd, uint64_t space_delta, uint64_t alloc_delta)
{
	ASSERT(vd == vd->vdev_top);

	do {
		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_space += space_delta;
		vd->vdev_stat.vs_alloc += alloc_delta;
		mutex_exit(&vd->vdev_stat_lock);
	} while ((vd = vd->vdev_parent) != NULL);
}

/*
 * Various knobs to tune a vdev.
 */
static vdev_knob_t vdev_knob[] = {
	{
		"cache_size",
		"size of the read-ahead cache",
		0,
		1ULL << 30,
		10ULL << 20,
		offsetof(struct vdev, vdev_cache.vc_size)
	},
	{
		"cache_bshift",
		"log2 of cache blocksize",
		SPA_MINBLOCKSHIFT,
		SPA_MAXBLOCKSHIFT,
		16,
		offsetof(struct vdev, vdev_cache.vc_bshift)
	},
	{
		"cache_max",
		"largest block size to cache",
		0,
		SPA_MAXBLOCKSIZE,
		1ULL << 14,
		offsetof(struct vdev, vdev_cache.vc_max)
	},
	{
		"min_pending",
		"minimum pending I/Os to the disk",
		1,
		10000,
		2,
		offsetof(struct vdev, vdev_queue.vq_min_pending)
	},
	{
		"max_pending",
		"maximum pending I/Os to the disk",
		1,
		10000,
		35,
		offsetof(struct vdev, vdev_queue.vq_max_pending)
	},
	{
		"scrub_limit",
		"maximum scrub/resilver I/O queue",
		0,
		10000,
		70,
		offsetof(struct vdev, vdev_queue.vq_scrub_limit)
	},
	{
		"agg_limit",
		"maximum size of aggregated I/Os",
		0,
		SPA_MAXBLOCKSIZE,
		SPA_MAXBLOCKSIZE,
		offsetof(struct vdev, vdev_queue.vq_agg_limit)
	},
	{
		"time_shift",
		"deadline = pri + (lbolt >> time_shift)",
		0,
		63,
		4,
		offsetof(struct vdev, vdev_queue.vq_time_shift)
	},
	{
		"ramp_rate",
		"exponential I/O issue ramp-up rate",
		1,
		10000,
		2,
		offsetof(struct vdev, vdev_queue.vq_ramp_rate)
	},
};

vdev_knob_t *
vdev_knob_next(vdev_knob_t *vk)
{
	if (vk == NULL)
		return (vdev_knob);

	if (++vk == vdev_knob + sizeof (vdev_knob) / sizeof (vdev_knob_t))
		return (NULL);

	return (vk);
}
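
/*
 * Example: the defaults above mean the read-ahead cache holds up to
 * 10MB (cache_size) of 64KB blocks (1 << cache_bshift == 64K) and
 * only caches reads of 16KB (cache_max) or smaller.  vdev_open()
 * applies these knobs by iterating with vdev_knob_next() and clamping
 * each field to its [vk_min, vk_max] range.
 */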

/*
 * Mark a top-level vdev's config as dirty, placing it on the dirty list
 * so that it will be written out next time the vdev configuration is synced.
 * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
 */
void
vdev_config_dirty(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int c;

	if (vd == rvd) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_config_dirty(rvd->vdev_child[c]);
	} else {
		ASSERT(vd == vd->vdev_top);

		if (!vd->vdev_is_dirty) {
			list_insert_head(&spa->spa_dirty_list, vd);
			vd->vdev_is_dirty = B_TRUE;
		}
	}
}

void
vdev_config_clean(vdev_t *vd)
{
	ASSERT(vd->vdev_is_dirty);

	list_remove(&vd->vdev_spa->spa_dirty_list, vd);
	vd->vdev_is_dirty = B_FALSE;
}

/*
 * Set a vdev's state.  If this is during an open, we don't update the parent
 * state, because we're in the process of opening children depth-first.
 * Otherwise, we propagate the change to the parent.
 *
 * If this routine places a device in a faulted state, an appropriate ereport is
 * generated.
 */
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
	uint64_t prev_state;

	if (state == vd->vdev_state) {
		vd->vdev_stat.vs_aux = aux;
		return;
	}

	prev_state = vd->vdev_state;

	vd->vdev_state = state;
	vd->vdev_stat.vs_aux = aux;

	if (state == VDEV_STATE_CANT_OPEN) {
		/*
		 * If we fail to open a vdev during an import, we mark it as
		 * "not available", which signifies that it was never there to
		 * begin with.  Failure to open such a device is not considered
		 * an error.
		 */
		if (!vd->vdev_not_present &&
		    vd != vd->vdev_spa->spa_root_vdev) {
			const char *class;

			switch (aux) {
			case VDEV_AUX_OPEN_FAILED:
				class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
				break;
			case VDEV_AUX_CORRUPT_DATA:
				class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
				break;
			case VDEV_AUX_NO_REPLICAS:
				class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
				break;
			case VDEV_AUX_BAD_GUID_SUM:
				class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
				break;
			case VDEV_AUX_TOO_SMALL:
				class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
				break;
			case VDEV_AUX_BAD_LABEL:
				class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
				break;
			default:
				class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
			}

			zfs_ereport_post(class, vd->vdev_spa,
			    vd, NULL, prev_state, 0);
		}

		if (vd->vdev_spa->spa_load_state == SPA_LOAD_IMPORT &&
		    vd->vdev_ops->vdev_op_leaf)
			vd->vdev_not_present = 1;
	}

	if (isopen)
		return;

	if (vd->vdev_parent != NULL) {
		int c;
		int degraded = 0, faulted = 0;
		int corrupted = 0;
		vdev_t *parent, *child;

		parent = vd->vdev_parent;
		for (c = 0; c < parent->vdev_children; c++) {
			child = parent->vdev_child[c];
			if (child->vdev_state <= VDEV_STATE_CANT_OPEN)
				faulted++;
			else if (child->vdev_state == VDEV_STATE_DEGRADED)
				degraded++;

			if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
				corrupted++;
		}

		vd->vdev_parent->vdev_ops->vdev_op_state_change(
		    vd->vdev_parent, faulted, degraded);

		/*
		 * Root special: if this is a toplevel vdev that cannot be
		 * opened due to corrupted metadata, then propagate the root
		 * vdev's aux state as 'corrupt' rather than 'insufficient
		 * replicas'.
		 */
		if (corrupted && vd == vd->vdev_top)
			vdev_set_state(vd->vdev_spa->spa_root_vdev,
			    B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
	}
}