/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>

/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
    0,      /* ZIO_PRIORITY_NOW */
    0,      /* ZIO_PRIORITY_SYNC_READ */
    0,      /* ZIO_PRIORITY_SYNC_WRITE */
    6,      /* ZIO_PRIORITY_ASYNC_READ */
    4,      /* ZIO_PRIORITY_ASYNC_WRITE */
    4,      /* ZIO_PRIORITY_FREE */
    0,      /* ZIO_PRIORITY_CACHE_FILL */
    0,      /* ZIO_PRIORITY_LOG_WRITE */
    10,     /* ZIO_PRIORITY_RESILVER */
    20,     /* ZIO_PRIORITY_SCRUB */
};

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
    "null", "read", "write", "free", "claim", "ioctl"
};

#define SYNC_PASS_DEFERRED_FREE 1   /* defer frees after this pass */
#define SYNC_PASS_DONT_COMPRESS 4   /* don't compress after this pass */
#define SYNC_PASS_REWRITE       1   /* rewrite new bps after this pass */

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define IO_IS_ALLOCATING(zio) \
    ((zio)->io_orig_pipeline & (1U << ZIO_STAGE_DVA_ALLOCATE))
void
zio_init(void)
{
    size_t c;
    vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
    data_alloc_arena = zio_alloc_arena;
#endif
    zio_cache = kmem_cache_create("zio_cache",
        sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
    zio_link_cache = kmem_cache_create("zio_link_cache",
        sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

    /*
     * For small buffers, we want a cache for each multiple of
     * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
     * for each quarter-power of 2.  For large buffers, we want
     * a cache for each multiple of PAGESIZE.
     */
    for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
        size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
        size_t p2 = size;
        size_t align = 0;

        while (p2 & (p2 - 1))
            p2 &= p2 - 1;

        if (size <= 4 * SPA_MINBLOCKSIZE) {
            align = SPA_MINBLOCKSIZE;
        } else if (P2PHASE(size, PAGESIZE) == 0) {
            align = PAGESIZE;
        } else if (P2PHASE(size, p2 >> 2) == 0) {
            align = p2 >> 2;
        }

        if (align != 0) {
            char name[36];
            (void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
            zio_buf_cache[c] = kmem_cache_create(name, size,
                align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);

            (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
            zio_data_buf_cache[c] = kmem_cache_create(name, size,
                align, NULL, NULL, NULL, NULL, data_alloc_arena,
                KMC_NODEBUG);
        }
    }

    while (--c != 0) {
        ASSERT(zio_buf_cache[c] != NULL);
        if (zio_buf_cache[c - 1] == NULL)
            zio_buf_cache[c - 1] = zio_buf_cache[c];

        ASSERT(zio_data_buf_cache[c] != NULL);
        if (zio_data_buf_cache[c - 1] == NULL)
            zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
    }

    zio_inject_init();
}

void
zio_fini(void)
{
    size_t c;
    kmem_cache_t *last_cache = NULL;
    kmem_cache_t *last_data_cache = NULL;

    for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
        if (zio_buf_cache[c] != last_cache) {
            last_cache = zio_buf_cache[c];
            kmem_cache_destroy(zio_buf_cache[c]);
        }
        zio_buf_cache[c] = NULL;

        if (zio_data_buf_cache[c] != last_data_cache) {
            last_data_cache = zio_data_buf_cache[c];
            kmem_cache_destroy(zio_data_buf_cache[c]);
        }
        zio_data_buf_cache[c] = NULL;
    }

    kmem_cache_destroy(zio_link_cache);
    kmem_cache_destroy(zio_cache);

    zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
    size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

    ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

    return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists to limit the amount of ZFS
 * data that shows up in a kernel crashdump (thus reducing the amount of
 * kernel heap dumped to disk when the kernel panics).
 */
void *
zio_data_buf_alloc(size_t size)
{
    size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

    ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

    return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
    size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

    ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

    kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
    size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

    ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

    kmem_cache_free(zio_data_buf_cache[c], buf);
}
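/*
 * Illustrative sketch, not part of the original source: allocations and
 * frees must be symmetric in both size and cache family.  For example,
 * a caller that does
 *
 *     void *buf = zio_buf_alloc(SPA_MINBLOCKSIZE);
 *
 * must release the buffer with
 *
 *     zio_buf_free(buf, SPA_MINBLOCKSIZE);
 *
 * Freeing a zio_data_buf_alloc() buffer with zio_buf_free(), or passing
 * a different size, would return it to the wrong kmem cache.
 */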
/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
    zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

    zt->zt_orig_data = zio->io_data;
    zt->zt_orig_size = zio->io_size;
    zt->zt_bufsize = bufsize;
    zt->zt_transform = transform;

    zt->zt_next = zio->io_transform_stack;
    zio->io_transform_stack = zt;

    zio->io_data = data;
    zio->io_size = size;
}

static void
zio_pop_transforms(zio_t *zio)
{
    zio_transform_t *zt;

    while ((zt = zio->io_transform_stack) != NULL) {
        if (zt->zt_transform != NULL)
            zt->zt_transform(zio,
                zt->zt_orig_data, zt->zt_orig_size);

        zio_buf_free(zio->io_data, zt->zt_bufsize);

        zio->io_data = zt->zt_orig_data;
        zio->io_size = zt->zt_orig_size;
        zio->io_transform_stack = zt->zt_next;

        kmem_free(zt, sizeof (zio_transform_t));
    }
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
    ASSERT(zio->io_size > size);

    if (zio->io_type == ZIO_TYPE_READ)
        bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
    if (zio->io_error == 0 &&
        zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
        zio->io_data, zio->io_size, data, size) != 0)
        zio->io_error = EIO;
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children() must
 *        continue calling these functions until they return NULL.
 *        Otherwise, the next caller will pick up the list walk in
 *        some indeterminate state.  (Otherwise every caller would
 *        have to pass in a cookie to keep the state represented by
 *        io_walk_link, which gets annoying.)
 */
zio_t *
zio_walk_parents(zio_t *cio)
{
    zio_link_t *zl = cio->io_walk_link;
    list_t *pl = &cio->io_parent_list;

    zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
    cio->io_walk_link = zl;

    if (zl == NULL)
        return (NULL);

    ASSERT(zl->zl_child == cio);
    return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
    zio_link_t *zl = pio->io_walk_link;
    list_t *cl = &pio->io_child_list;

    zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
    pio->io_walk_link = zl;

    if (zl == NULL)
        return (NULL);

    ASSERT(zl->zl_parent == pio);
    return (zl->zl_child);
}
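/*
 * Illustrative sketch, not part of the original source: per the NOTE
 * above, every walk must run to completion so that io_walk_link is left
 * NULL for the next caller:
 *
 *     zio_t *pio;
 *     while ((pio = zio_walk_parents(cio)) != NULL)
 *         examine(pio);
 *
 * (examine() is a hypothetical callback.)  Breaking out of the loop
 * early would leave io_walk_link pointing into the list.
 */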
zio_t *
zio_unique_parent(zio_t *cio)
{
    zio_t *pio = zio_walk_parents(cio);

    VERIFY(zio_walk_parents(cio) == NULL);
    return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
    zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

    /*
     * Logical I/Os can have logical, gang, or vdev children.
     * Gang I/Os can have gang or vdev children.
     * Vdev I/Os can only have vdev children.
     * The following ASSERT captures all of these constraints.
     */
    ASSERT(cio->io_child_type <= pio->io_child_type);

    zl->zl_parent = pio;
    zl->zl_child = cio;

    mutex_enter(&cio->io_lock);
    mutex_enter(&pio->io_lock);

    ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

    for (int w = 0; w < ZIO_WAIT_TYPES; w++)
        pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

    list_insert_head(&pio->io_child_list, zl);
    list_insert_head(&cio->io_parent_list, zl);

    mutex_exit(&pio->io_lock);
    mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
    ASSERT(zl->zl_parent == pio);
    ASSERT(zl->zl_child == cio);

    mutex_enter(&cio->io_lock);
    mutex_enter(&pio->io_lock);

    list_remove(&pio->io_child_list, zl);
    list_remove(&cio->io_parent_list, zl);

    mutex_exit(&pio->io_lock);
    mutex_exit(&cio->io_lock);

    kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
    uint64_t *countp = &zio->io_children[child][wait];
    boolean_t waiting = B_FALSE;

    mutex_enter(&zio->io_lock);
    ASSERT(zio->io_stall == NULL);
    if (*countp != 0) {
        zio->io_stage--;
        zio->io_stall = countp;
        waiting = B_TRUE;
    }
    mutex_exit(&zio->io_lock);

    return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
    uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
    int *errorp = &pio->io_child_error[zio->io_child_type];

    mutex_enter(&pio->io_lock);
    if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
        *errorp = zio_worst_error(*errorp, zio->io_error);
    pio->io_reexecute |= zio->io_reexecute;
    ASSERT3U(*countp, >, 0);
    if (--*countp == 0 && pio->io_stall == countp) {
        pio->io_stall = NULL;
        mutex_exit(&pio->io_lock);
        zio_execute(pio);
    } else {
        mutex_exit(&pio->io_lock);
    }
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
    if (zio->io_child_error[c] != 0 && zio->io_error == 0)
        zio->io_error = zio->io_child_error[c];
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, int flags, vdev_t *vd, uint64_t offset,
    const zbookmark_t *zb, uint8_t stage, uint32_t pipeline)
{
    zio_t *zio;

    ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
    ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
    ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

    ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
    ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
    ASSERT(vd || stage == ZIO_STAGE_OPEN);

    zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
    bzero(zio, sizeof (zio_t));

    mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

    list_create(&zio->io_parent_list, sizeof (zio_link_t),
        offsetof(zio_link_t, zl_parent_node));
    list_create(&zio->io_child_list, sizeof (zio_link_t),
        offsetof(zio_link_t, zl_child_node));

    if (vd != NULL)
        zio->io_child_type = ZIO_CHILD_VDEV;
    else if (flags & ZIO_FLAG_GANG_CHILD)
        zio->io_child_type = ZIO_CHILD_GANG;
    else
        zio->io_child_type = ZIO_CHILD_LOGICAL;

    if (bp != NULL) {
        zio->io_bp = bp;
        zio->io_bp_copy = *bp;
        zio->io_bp_orig = *bp;
        if (type != ZIO_TYPE_WRITE)
            zio->io_bp = &zio->io_bp_copy;  /* so caller can free */
        if (zio->io_child_type == ZIO_CHILD_LOGICAL)
            zio->io_logical = zio;
        if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
            pipeline |= ZIO_GANG_STAGES;
    }

    zio->io_spa = spa;
    zio->io_txg = txg;
    zio->io_data = data;
    zio->io_size = size;
    zio->io_done = done;
    zio->io_private = private;
    zio->io_type = type;
    zio->io_priority = priority;
    zio->io_vd = vd;
    zio->io_offset = offset;
    zio->io_orig_flags = zio->io_flags = flags;
    zio->io_orig_stage = zio->io_stage = stage;
    zio->io_orig_pipeline = zio->io_pipeline = pipeline;

    zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
    zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

    if (zb != NULL)
        zio->io_bookmark = *zb;

    if (pio != NULL) {
        if (zio->io_logical == NULL)
            zio->io_logical = pio->io_logical;
        if (zio->io_child_type == ZIO_CHILD_GANG)
            zio->io_gang_leader = pio->io_gang_leader;
        zio_add_child(pio, zio);
    }

    return (zio);
}

static void
zio_destroy(zio_t *zio)
{
    list_destroy(&zio->io_parent_list);
    list_destroy(&zio->io_child_list);
    mutex_destroy(&zio->io_lock);
    cv_destroy(&zio->io_cv);
    kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, int flags)
{
    zio_t *zio;

    zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
        ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
        ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

    return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, int flags)
{
    return (zio_null(NULL, spa, NULL, done, private, flags));
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    int priority, int flags, const zbookmark_t *zb)
{
    zio_t *zio;

    zio = zio_create(pio, spa, bp->blk_birth, (blkptr_t *)bp,
        data, size, done, private,
        ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
        ZIO_STAGE_OPEN, ZIO_READ_PIPELINE);

    return (zio);
}
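/*
 * Illustrative sketch, not part of the original source: a typical
 * synchronous consumer pairs zio_read() with zio_wait():
 *
 *     zio_t *zio = zio_read(NULL, spa, bp, buf, BP_GET_LSIZE(bp),
 *         NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb);
 *     int error = zio_wait(zio);
 *
 * zio_wait() (defined below) executes the pipeline, sleeps until the
 * I/O completes, destroys the zio, and returns its io_error.
 */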
void
zio_skip_write(zio_t *zio)
{
    ASSERT(zio->io_type == ZIO_TYPE_WRITE);
    ASSERT(zio->io_stage == ZIO_STAGE_READY);
    ASSERT(!BP_IS_GANG(zio->io_bp));

    zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *done, void *private,
    int priority, int flags, const zbookmark_t *zb)
{
    zio_t *zio;

    ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
        zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
        zp->zp_compress >= ZIO_COMPRESS_OFF &&
        zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
        zp->zp_type < DMU_OT_NUMTYPES &&
        zp->zp_level < 32 &&
        zp->zp_ndvas > 0 &&
        zp->zp_ndvas <= spa_max_replication(spa));
    ASSERT(ready != NULL);

    zio = zio_create(pio, spa, txg, bp, data, size, done, private,
        ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
        ZIO_STAGE_OPEN, ZIO_WRITE_PIPELINE);

    zio->io_ready = ready;
    zio->io_prop = *zp;

    return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private, int priority,
    int flags, zbookmark_t *zb)
{
    zio_t *zio;

    zio = zio_create(pio, spa, txg, bp, data, size, done, private,
        ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
        ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

    return (zio);
}

zio_t *
zio_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, int flags)
{
    zio_t *zio;

    ASSERT(!BP_IS_HOLE(bp));

    if (bp->blk_fill == BLK_FILL_ALREADY_FREED)
        return (zio_null(pio, spa, NULL, NULL, NULL, flags));

    if (txg == spa->spa_syncing_txg &&
        spa_sync_pass(spa) > SYNC_PASS_DEFERRED_FREE) {
        bplist_enqueue_deferred(&spa->spa_sync_bplist, bp);
        return (zio_null(pio, spa, NULL, NULL, NULL, flags));
    }

    zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
        done, private, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags,
        NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE);

    return (zio);
}
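/*
 * Illustrative sketch, not part of the original source: a caller of
 * zio_write() above must populate a zio_prop_t that satisfies its
 * ASSERTs; the specific values here are arbitrary:
 *
 *     zio_prop_t zp;
 *     zp.zp_checksum = ZIO_CHECKSUM_FLETCHER_4;
 *     zp.zp_compress = ZIO_COMPRESS_LZJB;
 *     zp.zp_type = DMU_OT_PLAIN_FILE_CONTENTS;
 *     zp.zp_level = 0;
 *     zp.zp_ndvas = 2;
 *
 * zp_ndvas must be positive and must not exceed spa_max_replication().
 */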
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, int flags)
{
    zio_t *zio;

    /*
     * A claim is an allocation of a specific block.  Claims are needed
     * to support immediate writes in the intent log.  The issue is that
     * immediate writes contain committed data, but in a txg that was
     * *not* committed.  Upon opening the pool after an unclean shutdown,
     * the intent log claims all blocks that contain immediate write data
     * so that the SPA knows they're in use.
     *
     * All claims *must* be resolved in the first txg -- before the SPA
     * starts allocating blocks -- so that nothing is allocated twice.
     */
    ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
    ASSERT3U(spa_first_txg(spa), <=, txg);

    zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
        done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
        NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

    return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, int priority, int flags)
{
    zio_t *zio;
    int c;

    if (vd->vdev_children == 0) {
        zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
            ZIO_TYPE_IOCTL, priority, flags, vd, 0, NULL,
            ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

        zio->io_cmd = cmd;
    } else {
        zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

        for (c = 0; c < vd->vdev_children; c++)
            zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
                done, private, priority, flags));
    }

    return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags, boolean_t labels)
{
    zio_t *zio;

    ASSERT(vd->vdev_children == 0);
    ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
        offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
    ASSERT3U(offset + size, <=, vd->vdev_psize);

    zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
        ZIO_TYPE_READ, priority, flags, vd, offset, NULL,
        ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

    zio->io_prop.zp_checksum = checksum;

    return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags, boolean_t labels)
{
    zio_t *zio;

    ASSERT(vd->vdev_children == 0);
    ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
        offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
    ASSERT3U(offset + size, <=, vd->vdev_psize);

    zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
        ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL,
        ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

    zio->io_prop.zp_checksum = checksum;

    if (zio_checksum_table[checksum].ci_zbt) {
        /*
         * zbt checksums are necessarily destructive -- they modify
         * the end of the write buffer to hold the verifier/checksum.
         * Therefore, we must make a local copy in case the data is
         * being written to multiple places in parallel.
         */
        void *wbuf = zio_buf_alloc(size);
        bcopy(data, wbuf, size);
        zio_push_transform(zio, wbuf, size, size, NULL);
    }

    return (zio);
}
/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, int flags,
    zio_done_func_t *done, void *private)
{
    uint32_t pipeline = ZIO_VDEV_CHILD_PIPELINE;
    zio_t *zio;

    ASSERT(vd->vdev_parent ==
        (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

    if (type == ZIO_TYPE_READ && bp != NULL) {
        /*
         * If we have the bp, then the child should perform the
         * checksum and the parent need not.  This pushes error
         * detection as close to the leaves as possible and
         * eliminates redundant checksums in the interior nodes.
         */
        pipeline |= 1U << ZIO_STAGE_CHECKSUM_VERIFY;
        pio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
    }

    if (vd->vdev_children == 0)
        offset += VDEV_LABEL_START_SIZE;

    zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
        done, private, type, priority,
        (pio->io_flags & ZIO_FLAG_VDEV_INHERIT) |
        ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | flags,
        vd, offset, &pio->io_bookmark,
        ZIO_STAGE_VDEV_IO_START - 1, pipeline);

    return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, int priority, int flags, zio_done_func_t *done, void *private)
{
    zio_t *zio;

    ASSERT(vd->vdev_ops->vdev_op_leaf);

    zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
        data, size, done, private, type, priority,
        flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY,
        vd, offset, NULL,
        ZIO_STAGE_VDEV_IO_START - 1, ZIO_VDEV_CHILD_PIPELINE);

    return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
    zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
        NULL, NULL, ZIO_PRIORITY_NOW,
        ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
    blkptr_t *bp = zio->io_bp;

    if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
        zio->io_child_type == ZIO_CHILD_LOGICAL &&
        !(zio->io_flags & ZIO_FLAG_RAW)) {
        uint64_t csize = BP_GET_PSIZE(bp);
        void *cbuf = zio_buf_alloc(csize);

        zio_push_transform(zio, cbuf, csize, csize, zio_decompress);
    }

    if (!dmu_ot[BP_GET_TYPE(bp)].ot_metadata && BP_GET_LEVEL(bp) == 0)
        zio->io_flags |= ZIO_FLAG_DONT_CACHE;

    return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_write_bp_init(zio_t *zio)
{
    zio_prop_t *zp = &zio->io_prop;
    int compress = zp->zp_compress;
    blkptr_t *bp = zio->io_bp;
    void *cbuf;
    uint64_t lsize = zio->io_size;
    uint64_t csize = lsize;
    uint64_t cbufsize = 0;
    int pass = 1;

    /*
     * If our children haven't all reached the ready stage,
     * wait for them and then repeat this pipeline stage.
     */
    if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
        zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
        return (ZIO_PIPELINE_STOP);

    if (!IO_IS_ALLOCATING(zio))
        return (ZIO_PIPELINE_CONTINUE);

    ASSERT(compress != ZIO_COMPRESS_INHERIT);

    if (bp->blk_birth == zio->io_txg) {
        /*
         * We're rewriting an existing block, which means we're
         * working on behalf of spa_sync().  For spa_sync() to
         * converge, it must eventually be the case that we don't
         * have to allocate new blocks.  But compression changes
         * the blocksize, which forces a reallocate, and makes
         * convergence take longer.  Therefore, after the first
         * few passes, stop compressing to ensure convergence.
         */
        pass = spa_sync_pass(zio->io_spa);

        if (pass > SYNC_PASS_DONT_COMPRESS)
            compress = ZIO_COMPRESS_OFF;

        /* Make sure someone doesn't change their mind on overwrites */
        ASSERT(MIN(zp->zp_ndvas + BP_IS_GANG(bp),
            spa_max_replication(zio->io_spa)) == BP_GET_NDVAS(bp));
    }

    if (compress != ZIO_COMPRESS_OFF) {
        if (!zio_compress_data(compress, zio->io_data, zio->io_size,
            &cbuf, &csize, &cbufsize)) {
            compress = ZIO_COMPRESS_OFF;
        } else if (csize != 0) {
            zio_push_transform(zio, cbuf, csize, cbufsize, NULL);
        }
    }

    /*
     * The final pass of spa_sync() must be all rewrites, but the first
     * few passes offer a trade-off: allocating blocks defers convergence,
     * but newly allocated blocks are sequential, so they can be written
     * to disk faster.  Therefore, we allow the first few passes of
     * spa_sync() to allocate new blocks, but force rewrites after that.
     * There should only be a handful of blocks after pass 1 in any case.
     */
    if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == csize &&
        pass > SYNC_PASS_REWRITE) {
        ASSERT(csize != 0);
        uint32_t gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
        zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
        zio->io_flags |= ZIO_FLAG_IO_REWRITE;
    } else {
        BP_ZERO(bp);
        zio->io_pipeline = ZIO_WRITE_PIPELINE;
    }

    if (csize == 0) {
        zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
    } else {
        ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
        BP_SET_LSIZE(bp, lsize);
        BP_SET_PSIZE(bp, csize);
        BP_SET_COMPRESS(bp, compress);
        BP_SET_CHECKSUM(bp, zp->zp_checksum);
        BP_SET_TYPE(bp, zp->zp_type);
        BP_SET_LEVEL(bp, zp->zp_level);
        BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
    }

    if (zio_injection_enabled &&
        zio->io_spa->spa_syncing_txg == zio->io_txg)
        zio_handle_ignored_writes(zio);

    return (ZIO_PIPELINE_CONTINUE);
}
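/*
 * Illustrative note, not part of the original source: with the
 * thresholds above, a block dirtied in sync pass 1 may be compressed
 * and freshly allocated; a block still being dirtied in pass 5
 * (> SYNC_PASS_DONT_COMPRESS == 4) is written uncompressed; and in any
 * pass > SYNC_PASS_REWRITE == 1 where the physical size is unchanged,
 * the existing bp is rewritten in place rather than reallocated.
 */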
/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q)
{
    zio_type_t t = zio->io_type;

    /*
     * If we're a config writer or a probe, the normal issue and
     * interrupt threads may all be blocked waiting for the config lock.
     * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
     */
    if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
        t = ZIO_TYPE_NULL;

    /*
     * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
     */
    if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
        t = ZIO_TYPE_NULL;

    (void) taskq_dispatch(zio->io_spa->spa_zio_taskq[t][q],
        (task_func_t *)zio_execute, zio, TQ_SLEEP);
}

static boolean_t
zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
{
    kthread_t *executor = zio->io_executor;
    spa_t *spa = zio->io_spa;

    for (zio_type_t t = 0; t < ZIO_TYPES; t++)
        if (taskq_member(spa->spa_zio_taskq[t][q], executor))
            return (B_TRUE);

    return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
    zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE);

    return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
    zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 * (1) the I/O completes; (2) the pipeline stalls waiting for
 * dependent child I/Os; (3) the I/O issues, so we're waiting
 * for an I/O completion interrupt; (4) the I/O is delegated by
 * vdev-level caching or aggregation; (5) the I/O is deferred
 * due to vdev-level queueing; (6) the I/O is handed off to
 * another thread.  In all cases, the pipeline stops whenever
 * there's no CPU work; it never burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[ZIO_STAGES];

void
zio_execute(zio_t *zio)
{
    zio->io_executor = curthread;

    while (zio->io_stage < ZIO_STAGE_DONE) {
        uint32_t pipeline = zio->io_pipeline;
        zio_stage_t stage = zio->io_stage;
        int rv;

        ASSERT(!MUTEX_HELD(&zio->io_lock));

        while (((1U << ++stage) & pipeline) == 0)
            continue;

        ASSERT(stage <= ZIO_STAGE_DONE);
        ASSERT(zio->io_stall == NULL);

        /*
         * If we are in interrupt context and this pipeline stage
         * will grab a config lock that is held across I/O,
         * issue async to avoid deadlock.
         */
        if (((1U << stage) & ZIO_CONFIG_LOCK_BLOCKING_STAGES) &&
            zio->io_vd == NULL &&
            zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
            zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE);
            return;
        }

        zio->io_stage = stage;
        rv = zio_pipeline[stage](zio);

        if (rv == ZIO_PIPELINE_STOP)
            return;

        ASSERT(rv == ZIO_PIPELINE_CONTINUE);
    }
}
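/*
 * Illustrative note, not part of the original source: io_pipeline is a
 * bitmask indexed by stage number, so the advance loop in zio_execute()
 * skips disabled stages without dispatching them.  For example, with
 *
 *     pipeline = (1U << ZIO_STAGE_READY) | (1U << ZIO_STAGE_DONE);
 *     stage = ZIO_STAGE_OPEN;
 *
 * the loop increments 'stage' until (1U << stage) & pipeline is
 * nonzero, stepping directly from OPEN to the READY stage.
 */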
/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
    int error;

    ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
    ASSERT(zio->io_executor == NULL);

    zio->io_waiter = curthread;

    zio_execute(zio);

    mutex_enter(&zio->io_lock);
    while (zio->io_executor != NULL)
        cv_wait(&zio->io_cv, &zio->io_lock);
    mutex_exit(&zio->io_lock);

    error = zio->io_error;
    zio_destroy(zio);

    return (error);
}

void
zio_nowait(zio_t *zio)
{
    ASSERT(zio->io_executor == NULL);

    if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
        zio_unique_parent(zio) == NULL) {
        /*
         * This is a logical async I/O with no parent to wait for it.
         * We add it to the spa_async_root_zio "Godfather" I/O, which
         * will ensure it completes prior to unloading the pool.
         */
        spa_t *spa = zio->io_spa;

        zio_add_child(spa->spa_async_zio_root, zio);
    }

    zio_execute(zio);
}
/*
 * ==========================================================================
 * Reexecute or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
    zio_t *cio, *cio_next;

    ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
    ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
    ASSERT(pio->io_gang_leader == NULL);
    ASSERT(pio->io_gang_tree == NULL);

    pio->io_flags = pio->io_orig_flags;
    pio->io_stage = pio->io_orig_stage;
    pio->io_pipeline = pio->io_orig_pipeline;
    pio->io_reexecute = 0;
    pio->io_error = 0;
    for (int w = 0; w < ZIO_WAIT_TYPES; w++)
        pio->io_state[w] = 0;
    for (int c = 0; c < ZIO_CHILD_TYPES; c++)
        pio->io_child_error[c] = 0;

    if (IO_IS_ALLOCATING(pio)) {
        /*
         * Remember the failed bp so that the io_ready() callback
         * can update its accounting upon reexecution.  The block
         * was already freed in zio_done(); we indicate this with
         * a fill count of -1 so that zio_free() knows to skip it.
         */
        blkptr_t *bp = pio->io_bp;
        ASSERT(bp->blk_birth == 0 || bp->blk_birth == pio->io_txg);
        bp->blk_fill = BLK_FILL_ALREADY_FREED;
        pio->io_bp_orig = *bp;
        BP_ZERO(bp);
    }

    /*
     * As we reexecute pio's children, new children could be created.
     * New children go to the head of pio's io_child_list, however,
     * so we will (correctly) not reexecute them.  The key is that
     * the remainder of pio's io_child_list, from 'cio_next' onward,
     * cannot be affected by any side effects of reexecuting 'cio'.
     */
    for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
        cio_next = zio_walk_children(pio);
        mutex_enter(&pio->io_lock);
        for (int w = 0; w < ZIO_WAIT_TYPES; w++)
            pio->io_children[cio->io_child_type][w]++;
        mutex_exit(&pio->io_lock);
        zio_reexecute(cio);
    }

    /*
     * Now that all children have been reexecuted, execute the parent.
     * We don't reexecute "The Godfather" I/O here as it's the
     * responsibility of the caller to wait on him.
     */
    if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
        zio_execute(pio);
}

void
zio_suspend(spa_t *spa, zio_t *zio)
{
    if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
        fm_panic("Pool '%s' has encountered an uncorrectable I/O "
            "failure and the failure mode property for this pool "
            "is set to panic.", spa_name(spa));

    zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

    mutex_enter(&spa->spa_suspend_lock);

    if (spa->spa_suspend_zio_root == NULL)
        spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
            ZIO_FLAG_GODFATHER);

    spa->spa_suspended = B_TRUE;

    if (zio != NULL) {
        ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
        ASSERT(zio != spa->spa_suspend_zio_root);
        ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
        ASSERT(zio_unique_parent(zio) == NULL);
        ASSERT(zio->io_stage == ZIO_STAGE_DONE);
        zio_add_child(spa->spa_suspend_zio_root, zio);
    }

    mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
    zio_t *pio;

    /*
     * Reexecute all previously suspended i/o.
     */
    mutex_enter(&spa->spa_suspend_lock);
    spa->spa_suspended = B_FALSE;
    cv_broadcast(&spa->spa_suspend_cv);
    pio = spa->spa_suspend_zio_root;
    spa->spa_suspend_zio_root = NULL;
    mutex_exit(&spa->spa_suspend_lock);

    if (pio == NULL)
        return (0);

    zio_reexecute(pio);
    return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
    mutex_enter(&spa->spa_suspend_lock);
    while (spa_suspended(spa))
        cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
    mutex_exit(&spa->spa_suspend_lock);
}
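/*
 * Illustrative note, not part of the original source: the intended
 * sequence is that a failed I/O calls zio_suspend(), new consumers
 * block in zio_resume_wait(), and an administrative action (e.g.
 * "zpool clear") eventually calls zio_resume(), which reexecutes
 * everything parked under spa_suspend_zio_root and returns the
 * aggregated error from zio_wait().
 */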
/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block.  When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 * an indirect block: it's an array of block pointers.  It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header.  This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it.  This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part?  Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write.  Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated.  This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact).  If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 * each constituent bp and we can allocate a new block on the next sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */
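/*
 * Illustrative arithmetic, not part of the original source: a gang
 * header is one SPA_GANGBLOCKSIZE (512-byte) sector whose blkptr array
 * holds SPA_GBH_NBLKPTRS == 3 entries, so one level of ganging splits,
 * e.g., a 128K write into at most three members of roughly 43K each;
 * any member that still cannot be allocated gangs again, one level
 * deeper in the tree.
 */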
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
    if (gn != NULL)
        return (pio);

    return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
        NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
        &pio->io_bookmark));
}

zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
    zio_t *zio;

    if (gn != NULL) {
        zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
            gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
            ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
        /*
         * As we rewrite each gang header, the pipeline will compute
         * a new gang block header checksum for it; but no one will
         * compute a new data checksum, so we do that here.  The one
         * exception is the gang leader: the pipeline already computed
         * its data checksum because that stage precedes gang assembly.
         * (Presently, nothing actually uses interior data checksums;
         * this is just good hygiene.)
         */
        if (gn != pio->io_gang_leader->io_gang_tree) {
            zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
                data, BP_GET_PSIZE(bp));
        }
    } else {
        zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
            data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
            ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
    }

    return (zio);
}

/* ARGSUSED */
zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
    return (zio_free(pio, pio->io_spa, pio->io_txg, bp,
        NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

/* ARGSUSED */
zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
    return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
        NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
    NULL,
    zio_read_gang,
    zio_rewrite_gang,
    zio_free_gang,
    zio_claim_gang,
    NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
    zio_gang_node_t *gn;

    ASSERT(*gnpp == NULL);

    gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
    gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
    *gnpp = gn;

    return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
    zio_gang_node_t *gn = *gnpp;

    for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
        ASSERT(gn->gn_child[g] == NULL);

    zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
    kmem_free(gn, sizeof (*gn));
    *gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
    zio_gang_node_t *gn = *gnpp;

    if (gn == NULL)
        return;

    for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
        zio_gang_tree_free(&gn->gn_child[g]);

    zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
    zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);

    ASSERT(gio->io_gang_leader == gio);
    ASSERT(BP_IS_GANG(bp));

    zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
        SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
        gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
    zio_t *gio = zio->io_gang_leader;
    zio_gang_node_t *gn = zio->io_private;
    blkptr_t *bp = zio->io_bp;

    ASSERT(gio == zio_unique_parent(zio));
    ASSERT(zio_walk_children(zio) == NULL);

    if (zio->io_error)
        return;

    if (BP_SHOULD_BYTESWAP(bp))
        byteswap_uint64_array(zio->io_data, zio->io_size);

    ASSERT(zio->io_data == gn->gn_gbh);
    ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
    ASSERT(gn->gn_gbh->zg_tail.zbt_magic == ZBT_MAGIC);

    for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
        blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
        if (!BP_IS_GANG(gbp))
            continue;
        zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
    }
}
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
    zio_t *gio = pio->io_gang_leader;
    zio_t *zio;

    ASSERT(BP_IS_GANG(bp) == !!gn);
    ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
    ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

    /*
     * If you're a gang header, your data is in gn->gn_gbh.
     * If you're a gang member, your data is in 'data' and gn == NULL.
     */
    zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);

    if (gn != NULL) {
        ASSERT(gn->gn_gbh->zg_tail.zbt_magic == ZBT_MAGIC);

        for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
            blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
            if (BP_IS_HOLE(gbp))
                continue;
            zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
            data = (char *)data + BP_GET_PSIZE(gbp);
        }
    }

    if (gn == gio->io_gang_tree)
        ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);

    if (zio != pio)
        zio_nowait(zio);
}

static int
zio_gang_assemble(zio_t *zio)
{
    blkptr_t *bp = zio->io_bp;

    ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
    ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

    zio->io_gang_leader = zio;

    zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

    return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_gang_issue(zio_t *zio)
{
    blkptr_t *bp = zio->io_bp;

    if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
        return (ZIO_PIPELINE_STOP);

    ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
    ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

    if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
        zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
    else
        zio_gang_tree_free(&zio->io_gang_tree);

    zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

    return (ZIO_PIPELINE_CONTINUE);
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
    zio_t *pio = zio_unique_parent(zio);
    zio_t *gio = zio->io_gang_leader;
    dva_t *cdva = zio->io_bp->blk_dva;
    dva_t *pdva = pio->io_bp->blk_dva;
    uint64_t asize;

    if (BP_IS_HOLE(zio->io_bp))
        return;

    ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

    ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
    ASSERT3U(zio->io_prop.zp_ndvas, ==, gio->io_prop.zp_ndvas);
    ASSERT3U(zio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(zio->io_bp));
    ASSERT3U(pio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(pio->io_bp));
    ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

    mutex_enter(&pio->io_lock);
    for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
        ASSERT(DVA_GET_GANG(&pdva[d]));
        asize = DVA_GET_ASIZE(&pdva[d]);
        asize += DVA_GET_ASIZE(&cdva[d]);
        DVA_SET_ASIZE(&pdva[d], asize);
    }
    mutex_exit(&pio->io_lock);
}
static int
zio_write_gang_block(zio_t *pio)
{
    spa_t *spa = pio->io_spa;
    blkptr_t *bp = pio->io_bp;
    zio_t *gio = pio->io_gang_leader;
    zio_t *zio;
    zio_gang_node_t *gn, **gnpp;
    zio_gbh_phys_t *gbh;
    uint64_t txg = pio->io_txg;
    uint64_t resid = pio->io_size;
    uint64_t lsize;
    int ndvas = gio->io_prop.zp_ndvas;
    int gbh_ndvas = MIN(ndvas + 1, spa_max_replication(spa));
    zio_prop_t zp;
    int error;

    error = metaslab_alloc(spa, spa->spa_normal_class, SPA_GANGBLOCKSIZE,
        bp, gbh_ndvas, txg, pio == gio ? NULL : gio->io_bp,
        METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER);
    if (error) {
        pio->io_error = error;
        return (ZIO_PIPELINE_CONTINUE);
    }

    if (pio == gio) {
        gnpp = &gio->io_gang_tree;
    } else {
        gnpp = pio->io_private;
        ASSERT(pio->io_ready == zio_write_gang_member_ready);
    }

    gn = zio_gang_node_alloc(gnpp);
    gbh = gn->gn_gbh;
    bzero(gbh, SPA_GANGBLOCKSIZE);

    /*
     * Create the gang header.
     */
    zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
        pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

    /*
     * Create and nowait the gang children.
     */
    for (int g = 0; resid != 0; resid -= lsize, g++) {
        lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
            SPA_MINBLOCKSIZE);
        ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

        zp.zp_checksum = gio->io_prop.zp_checksum;
        zp.zp_compress = ZIO_COMPRESS_OFF;
        zp.zp_type = DMU_OT_NONE;
        zp.zp_level = 0;
        zp.zp_ndvas = gio->io_prop.zp_ndvas;

        zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
            (char *)pio->io_data + (pio->io_size - resid), lsize, &zp,
            zio_write_gang_member_ready, NULL, &gn->gn_child[g],
            pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
            &pio->io_bookmark));
    }

    /*
     * Set pio's pipeline to just wait for zio to finish.
     */
    pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

    zio_nowait(zio);

    return (ZIO_PIPELINE_CONTINUE);
}
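/*
 * Illustrative arithmetic, not part of the original source: the
 * even-split loop above, for a 100K (102400-byte) residual and
 * SPA_GBH_NBLKPTRS == 3, computes
 *
 *     g=0: lsize = P2ROUNDUP(102400 / 3, 512) = 34304, resid = 68096
 *     g=1: lsize = P2ROUNDUP(68096 / 2, 512) = 34304, resid = 33792
 *     g=2: lsize = P2ROUNDUP(33792 / 1, 512) = 33792, resid = 0
 *
 * so the members differ by at most one SPA_MINBLOCKSIZE unit.
 */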
/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */

static int
zio_dva_allocate(zio_t *zio)
{
    spa_t *spa = zio->io_spa;
    metaslab_class_t *mc = spa->spa_normal_class;
    blkptr_t *bp = zio->io_bp;
    int error;

    if (zio->io_gang_leader == NULL) {
        ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
        zio->io_gang_leader = zio;
    }

    ASSERT(BP_IS_HOLE(bp));
    ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
    ASSERT3U(zio->io_prop.zp_ndvas, >, 0);
    ASSERT3U(zio->io_prop.zp_ndvas, <=, spa_max_replication(spa));
    ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

    error = metaslab_alloc(spa, mc, zio->io_size, bp,
        zio->io_prop.zp_ndvas, zio->io_txg, NULL, 0);

    if (error) {
        if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
            return (zio_write_gang_block(zio));
        zio->io_error = error;
    }

    return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_free(zio_t *zio)
{
    metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);

    return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_dva_claim(zio_t *zio)
{
    int error;

    error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
    if (error)
        zio->io_error = error;

    return (ZIO_PIPELINE_CONTINUE);
}

/*
 * Undo an allocation.  This is used by zio_done() when an I/O fails
 * and we want to give back the block we just allocated.
 * This handles both normal blocks and gang blocks.
 */
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
    spa_t *spa = zio->io_spa;
    boolean_t now = !(zio->io_flags & ZIO_FLAG_IO_REWRITE);

    ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));

    if (zio->io_bp == bp && !now) {
        /*
         * This is a rewrite for sync-to-convergence.
         * We can't do a metaslab_free(NOW) because bp wasn't allocated
         * during this sync pass, which means that metaslab_sync()
         * already committed the allocation.
         */
        ASSERT(DVA_EQUAL(BP_IDENTITY(bp),
            BP_IDENTITY(&zio->io_bp_orig)));
        ASSERT(spa_sync_pass(spa) > 1);

        if (BP_IS_GANG(bp) && gn == NULL) {
            /*
             * This is a gang leader whose gang header(s) we
             * couldn't read now, so defer the free until later.
             * The block should still be intact because without
             * the headers, we'd never even start the rewrite.
             */
            bplist_enqueue_deferred(&spa->spa_sync_bplist, bp);
            return;
        }
    }

    if (!BP_IS_HOLE(bp))
        metaslab_free(spa, bp, bp->blk_birth, now);

    if (gn != NULL) {
        for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
            zio_dva_unallocate(zio, gn->gn_child[g],
                &gn->gn_gbh->zg_blkptr[g]);
        }
    }
}

/*
 * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 */
int
zio_alloc_blk(spa_t *spa, uint64_t size, blkptr_t *new_bp, blkptr_t *old_bp,
    uint64_t txg, boolean_t use_slog)
{
    int error = 1;

    if (use_slog)
        error = metaslab_alloc(spa, spa->spa_log_class, size,
            new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);

    if (error)
        error = metaslab_alloc(spa, spa->spa_normal_class, size,
            new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);

    if (error == 0) {
        BP_SET_LSIZE(new_bp, size);
        BP_SET_PSIZE(new_bp, size);
        BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
        BP_SET_CHECKSUM(new_bp, ZIO_CHECKSUM_ZILOG);
        BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
        BP_SET_LEVEL(new_bp, 0);
        BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
    }

    return (error);
}

/*
 * Free an intent log block.  We know it can't be a gang block, so there's
 * nothing to do except metaslab_free() it.
 */
void
zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg)
{
    ASSERT(!BP_IS_GANG(bp));

    metaslab_free(spa, bp, txg, B_FALSE);
}
/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */
static int
zio_vdev_io_start(zio_t *zio)
{
    vdev_t *vd = zio->io_vd;
    uint64_t align;
    spa_t *spa = zio->io_spa;

    ASSERT(zio->io_error == 0);
    ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);

    if (vd == NULL) {
        if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
            spa_config_enter(spa, SCL_ZIO, zio, RW_READER);

        /*
         * The mirror_ops handle multiple DVAs in a single BP.
         */
        return (vdev_mirror_ops.vdev_op_io_start(zio));
    }

    align = 1ULL << vd->vdev_top->vdev_ashift;

    if (P2PHASE(zio->io_size, align) != 0) {
        uint64_t asize = P2ROUNDUP(zio->io_size, align);
        char *abuf = zio_buf_alloc(asize);
        ASSERT(vd == vd->vdev_top);
        if (zio->io_type == ZIO_TYPE_WRITE) {
            bcopy(zio->io_data, abuf, zio->io_size);
            bzero(abuf + zio->io_size, asize - zio->io_size);
        }
        zio_push_transform(zio, abuf, asize, asize, zio_subblock);
    }

    ASSERT(P2PHASE(zio->io_offset, align) == 0);
    ASSERT(P2PHASE(zio->io_size, align) == 0);
    ASSERT(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));

    /*
     * If this is a repair I/O, and there's no self-healing involved --
     * that is, we're just resilvering what we expect to resilver --
     * then don't do the I/O unless zio's txg is actually in vd's DTL.
     * This prevents spurious resilvering with nested replication.
     * For example, given a mirror of mirrors, (A+B)+(C+D), if only
     * A is out of date, we'll read from C+D, then use the data to
     * resilver A+B -- but we don't actually want to resilver B, just A.
     * The top-level mirror has no way to know this, so instead we just
     * discard unnecessary repairs as we work our way down the vdev tree.
     * The same logic applies to any form of nested replication:
     * ditto + mirror, RAID-Z + replacing, etc.  This covers them all.
     */
    if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
        !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
        zio->io_txg != 0 && /* not a delegated i/o */
        !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
        ASSERT(zio->io_type == ZIO_TYPE_WRITE);
        zio_vdev_io_bypass(zio);
        return (ZIO_PIPELINE_CONTINUE);
    }

    if (vd->vdev_ops->vdev_op_leaf &&
        (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {

        if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
            return (ZIO_PIPELINE_CONTINUE);

        if ((zio = vdev_queue_io(zio)) == NULL)
            return (ZIO_PIPELINE_STOP);

        if (!vdev_accessible(vd, zio)) {
            zio->io_error = ENXIO;
            zio_interrupt(zio);
            return (ZIO_PIPELINE_STOP);
        }
    }

    return (vd->vdev_ops->vdev_op_io_start(zio));
}
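/*
 * Illustrative arithmetic, not part of the original source: the
 * alignment transform above, on a top-level vdev with ashift 12
 * (4K sectors), turns a 2560-byte I/O into
 *
 *     asize = P2ROUNDUP(2560, 4096) = 4096
 *
 * so a 4K bounce buffer is allocated; for writes the payload is copied
 * in and the tail zeroed, and for reads zio_subblock() copies the
 * payload back out when the transform is popped.
 */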
vd->vdev_ops : &vdev_mirror_ops; 1848 boolean_t unexpected_error = B_FALSE; 1849 1850 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 1851 return (ZIO_PIPELINE_STOP); 1852 1853 ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 1854 1855 if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 1856 1857 vdev_queue_io_done(zio); 1858 1859 if (zio->io_type == ZIO_TYPE_WRITE) 1860 vdev_cache_write(zio); 1861 1862 if (zio_injection_enabled && zio->io_error == 0) 1863 zio->io_error = zio_handle_device_injection(vd, 1864 zio, EIO); 1865 1866 if (zio_injection_enabled && zio->io_error == 0) 1867 zio->io_error = zio_handle_label_injection(zio, EIO); 1868 1869 if (zio->io_error) { 1870 if (!vdev_accessible(vd, zio)) { 1871 zio->io_error = ENXIO; 1872 } else { 1873 unexpected_error = B_TRUE; 1874 } 1875 } 1876 } 1877 1878 ops->vdev_op_io_done(zio); 1879 1880 if (unexpected_error) 1881 VERIFY(vdev_probe(vd, zio) == NULL); 1882 1883 return (ZIO_PIPELINE_CONTINUE); 1884 } 1885 1886 /* 1887 * For non-raidz ZIOs, we can just copy aside the bad data read from the 1888 * disk, and use that to finish the checksum ereport later. 1889 */ 1890 static void 1891 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 1892 const void *good_buf) 1893 { 1894 /* no processing needed */ 1895 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 1896 } 1897 1898 /*ARGSUSED*/ 1899 void 1900 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 1901 { 1902 void *buf = zio_buf_alloc(zio->io_size); 1903 1904 bcopy(zio->io_data, buf, zio->io_size); 1905 1906 zcr->zcr_cbinfo = zio->io_size; 1907 zcr->zcr_cbdata = buf; 1908 zcr->zcr_finish = zio_vsd_default_cksum_finish; 1909 zcr->zcr_free = zio_buf_free; 1910 } 1911 1912 static int 1913 zio_vdev_io_assess(zio_t *zio) 1914 { 1915 vdev_t *vd = zio->io_vd; 1916 1917 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 1918 return (ZIO_PIPELINE_STOP); 1919 1920 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 1921 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 1922 1923 if (zio->io_vsd != NULL) { 1924 zio->io_vsd_ops->vsd_free(zio); 1925 zio->io_vsd = NULL; 1926 } 1927 1928 if (zio_injection_enabled && zio->io_error == 0) 1929 zio->io_error = zio_handle_fault_injection(zio, EIO); 1930 1931 /* 1932 * If the I/O failed, determine whether we should attempt to retry it. 1933 */ 1934 if (zio->io_error && vd == NULL && 1935 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 1936 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 1937 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 1938 zio->io_error = 0; 1939 zio->io_flags |= ZIO_FLAG_IO_RETRY | 1940 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 1941 zio->io_stage = ZIO_STAGE_VDEV_IO_START - 1; 1942 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE); 1943 return (ZIO_PIPELINE_STOP); 1944 } 1945 1946 /* 1947 * If we got an error on a leaf device, convert it to ENXIO 1948 * if the device is not accessible at all. 1949 */ 1950 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 1951 !vdev_accessible(vd, zio)) 1952 zio->io_error = ENXIO; 1953 1954 /* 1955 * If we can't write to an interior vdev (mirror or RAID-Z), 1956 * set vdev_cant_write so that we stop trying to allocate from it. 
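 * (An interior vdev has no leaf device of its own to probe or fault,
 * so this flag is how the allocator learns to avoid it.)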
1957 */ 1958 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 1959 vd != NULL && !vd->vdev_ops->vdev_op_leaf) 1960 vd->vdev_cant_write = B_TRUE; 1961 1962 if (zio->io_error) 1963 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1964 1965 return (ZIO_PIPELINE_CONTINUE); 1966 } 1967 1968 void 1969 zio_vdev_io_reissue(zio_t *zio) 1970 { 1971 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 1972 ASSERT(zio->io_error == 0); 1973 1974 zio->io_stage--; 1975 } 1976 1977 void 1978 zio_vdev_io_redone(zio_t *zio) 1979 { 1980 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 1981 1982 zio->io_stage--; 1983 } 1984 1985 void 1986 zio_vdev_io_bypass(zio_t *zio) 1987 { 1988 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 1989 ASSERT(zio->io_error == 0); 1990 1991 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 1992 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS - 1; 1993 } 1994 1995 /* 1996 * ========================================================================== 1997 * Generate and verify checksums 1998 * ========================================================================== 1999 */ 2000 static int 2001 zio_checksum_generate(zio_t *zio) 2002 { 2003 blkptr_t *bp = zio->io_bp; 2004 enum zio_checksum checksum; 2005 2006 if (bp == NULL) { 2007 /* 2008 * This is zio_write_phys(). 2009 * We're either generating a label checksum, or none at all. 2010 */ 2011 checksum = zio->io_prop.zp_checksum; 2012 2013 if (checksum == ZIO_CHECKSUM_OFF) 2014 return (ZIO_PIPELINE_CONTINUE); 2015 2016 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 2017 } else { 2018 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 2019 ASSERT(!IO_IS_ALLOCATING(zio)); 2020 checksum = ZIO_CHECKSUM_GANG_HEADER; 2021 } else { 2022 checksum = BP_GET_CHECKSUM(bp); 2023 } 2024 } 2025 2026 zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size); 2027 2028 return (ZIO_PIPELINE_CONTINUE); 2029 } 2030 2031 static int 2032 zio_checksum_verify(zio_t *zio) 2033 { 2034 zio_bad_cksum_t info; 2035 2036 blkptr_t *bp = zio->io_bp; 2037 int error; 2038 2039 if (bp == NULL) { 2040 /* 2041 * This is zio_read_phys(). 2042 * We're either verifying a label checksum, or nothing at all. 2043 */ 2044 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 2045 return (ZIO_PIPELINE_CONTINUE); 2046 2047 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 2048 } 2049 2050 if ((error = zio_checksum_error(zio, &info)) != 0) { 2051 zio->io_error = error; 2052 if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 2053 zfs_ereport_start_checksum(zio->io_spa, 2054 zio->io_vd, zio, zio->io_offset, 2055 zio->io_size, NULL, &info); 2056 } 2057 } 2058 2059 return (ZIO_PIPELINE_CONTINUE); 2060 } 2061 2062 /* 2063 * Called by RAID-Z to ensure we don't verify the checksum twice. 2064 */ 2065 void 2066 zio_checksum_verified(zio_t *zio) 2067 { 2068 zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY); 2069 } 2070 2071 /* 2072 * ========================================================================== 2073 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 2074 * An error of 0 indicates success. ENXIO indicates whole-device failure, 2075 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 2076 * indicate errors that are specific to one I/O, and most likely permanent. 2077 * Any other error is presumed to be worse because we weren't expecting it.
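 * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and
 * zio_worst_error(EIO, EINVAL) returns EINVAL, since an errno that is
 * missing from the rank table below is presumed worst of all.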
2078 * ========================================================================== 2079 */ 2080 int 2081 zio_worst_error(int e1, int e2) 2082 { 2083 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 2084 int r1, r2; 2085 2086 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 2087 if (e1 == zio_error_rank[r1]) 2088 break; 2089 2090 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 2091 if (e2 == zio_error_rank[r2]) 2092 break; 2093 2094 return (r1 > r2 ? e1 : e2); 2095 } 2096 2097 /* 2098 * ========================================================================== 2099 * I/O completion 2100 * ========================================================================== 2101 */ 2102 static int 2103 zio_ready(zio_t *zio) 2104 { 2105 blkptr_t *bp = zio->io_bp; 2106 zio_t *pio, *pio_next; 2107 2108 if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY)) 2109 return (ZIO_PIPELINE_STOP); 2110 2111 if (zio->io_ready) { 2112 ASSERT(IO_IS_ALLOCATING(zio)); 2113 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2114 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 2115 2116 zio->io_ready(zio); 2117 } 2118 2119 if (bp != NULL && bp != &zio->io_bp_copy) 2120 zio->io_bp_copy = *bp; 2121 2122 if (zio->io_error) 2123 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2124 2125 mutex_enter(&zio->io_lock); 2126 zio->io_state[ZIO_WAIT_READY] = 1; 2127 pio = zio_walk_parents(zio); 2128 mutex_exit(&zio->io_lock); 2129 2130 /* 2131 * As we notify zio's parents, new parents could be added. 2132 * New parents go to the head of zio's io_parent_list, however, 2133 * so we will (correctly) not notify them. The remainder of zio's 2134 * io_parent_list, from 'pio_next' onward, cannot change because 2135 * all parents must wait for us to be done before they can be done. 2136 */ 2137 for (; pio != NULL; pio = pio_next) { 2138 pio_next = zio_walk_parents(zio); 2139 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 2140 } 2141 2142 return (ZIO_PIPELINE_CONTINUE); 2143 } 2144 2145 static int 2146 zio_done(zio_t *zio) 2147 { 2148 spa_t *spa = zio->io_spa; 2149 zio_t *lio = zio->io_logical; 2150 blkptr_t *bp = zio->io_bp; 2151 vdev_t *vd = zio->io_vd; 2152 uint64_t psize = zio->io_size; 2153 zio_t *pio, *pio_next; 2154 2155 /* 2156 * If our children haven't all completed, 2157 * wait for them and then repeat this pipeline stage. 2158 */ 2159 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || 2160 zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || 2161 zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) 2162 return (ZIO_PIPELINE_STOP); 2163 2164 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 2165 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 2166 ASSERT(zio->io_children[c][w] == 0); 2167 2168 if (bp != NULL) { 2169 ASSERT(bp->blk_pad[0] == 0); 2170 ASSERT(bp->blk_pad[1] == 0); 2171 ASSERT(bp->blk_pad[2] == 0); 2172 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 2173 (bp == zio_unique_parent(zio)->io_bp)); 2174 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 2175 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 2176 ASSERT(!BP_SHOULD_BYTESWAP(bp)); 2177 ASSERT3U(zio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(bp)); 2178 ASSERT(BP_COUNT_GANG(bp) == 0 || 2179 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 2180 } 2181 } 2182 2183 /* 2184 * If there were child vdev or gang errors, they apply to us now. 
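 * (The worst of those child errors, per zio_worst_error() above,
 * becomes this zio's io_error if it doesn't already have one.)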
2185 */ 2186 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 2187 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 2188 2189 zio_pop_transforms(zio); /* note: may set zio->io_error */ 2190 2191 vdev_stat_update(zio, psize); 2192 2193 if (zio->io_error) { 2194 /* 2195 * If this I/O is attached to a particular vdev, 2196 * generate an error message describing the I/O failure 2197 * at the block level. We ignore these errors if the 2198 * device is currently unavailable. 2199 */ 2200 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 2201 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0); 2202 2203 if ((zio->io_error == EIO || !(zio->io_flags & 2204 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 2205 zio == lio) { 2206 /* 2207 * For logical I/O requests, tell the SPA to log the 2208 * error and generate a logical data ereport. 2209 */ 2210 spa_log_error(spa, zio); 2211 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio, 2212 0, 0); 2213 } 2214 } 2215 2216 if (zio->io_error && zio == lio) { 2217 /* 2218 * Determine whether zio should be reexecuted. This will 2219 * propagate all the way to the root via zio_notify_parent(). 2220 */ 2221 ASSERT(vd == NULL && bp != NULL); 2222 2223 if (IO_IS_ALLOCATING(zio)) { 2224 if (zio->io_error != ENOSPC) 2225 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 2226 else 2227 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2228 } 2229 if ((zio->io_type == ZIO_TYPE_READ || 2230 zio->io_type == ZIO_TYPE_FREE) && 2231 zio->io_error == ENXIO && 2232 spa->spa_load_state == SPA_LOAD_NONE && 2233 spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 2234 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2235 2236 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 2237 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 2238 2239 /* 2240 * This would be a good place to attempt combinatorial 2241 * reconstruction, or error correction based on checksums. 2242 * It also might be a good place to send out preliminary 2243 * ereports before we suspend processing. 2244 */ 2245 2246 } 2247 2248 /* 2249 * If there were logical child errors, they apply to us now. 2250 * We defer this until now to avoid conflating logical child 2251 * errors with errors that happened to the zio itself when 2252 * updating vdev stats and reporting FMA events above. 2253 */ 2254 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 2255 2256 if ((zio->io_error || zio->io_reexecute) && IO_IS_ALLOCATING(zio) && 2257 zio->io_child_type == ZIO_CHILD_LOGICAL) { 2258 ASSERT(zio->io_child_type != ZIO_CHILD_GANG); 2259 zio_dva_unallocate(zio, zio->io_gang_tree, bp); 2260 } 2261 2262 zio_gang_tree_free(&zio->io_gang_tree); 2263 2264 /* 2265 * Godfather I/Os should never suspend. 2266 */ 2267 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 2268 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 2269 zio->io_reexecute = 0; 2270 2271 if (zio->io_reexecute) { 2272 /* 2273 * This is a logical I/O that wants to reexecute. 2274 * 2275 * Reexecute is top-down. When an i/o fails, if it's not 2276 * the root, it simply notifies its parent and sticks around. 2277 * The parent, seeing that it still has children in zio_done(), 2278 * does the same. This percolates all the way up to the root. 2279 * The root i/o will reexecute or suspend the entire tree. 2280 * 2281 * This approach ensures that zio_reexecute() honors 2282 * all the original i/o dependency relationships, e.g. 2283 * parents not executing until children are ready.
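 * For example, if one leaf write beneath a gang block fails, the gang
 * zio and the logical zio above it each notify upward and wait; only
 * the root decides whether to reexecute the whole tree or to suspend.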
2284 */ 2285 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2286 2287 zio->io_gang_leader = NULL; 2288 2289 mutex_enter(&zio->io_lock); 2290 zio->io_state[ZIO_WAIT_DONE] = 1; 2291 mutex_exit(&zio->io_lock); 2292 2293 /* 2294 * "The Godfather" I/O monitors its children but is 2295 * not a true parent to them. It will track them through 2296 * the pipeline but severs its ties whenever they get into 2297 * trouble (e.g. suspended). This allows "The Godfather" 2298 * I/O to return status without blocking. 2299 */ 2300 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 2301 zio_link_t *zl = zio->io_walk_link; 2302 pio_next = zio_walk_parents(zio); 2303 2304 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 2305 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 2306 zio_remove_child(pio, zio, zl); 2307 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 2308 } 2309 } 2310 2311 if ((pio = zio_unique_parent(zio)) != NULL) { 2312 /* 2313 * We're not a root i/o, so there's nothing to do 2314 * but notify our parent. Don't propagate errors 2315 * upward since we haven't permanently failed yet. 2316 */ 2317 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 2318 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 2319 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 2320 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 2321 /* 2322 * We'd fail again if we reexecuted now, so suspend 2323 * until conditions improve (e.g. device comes online). 2324 */ 2325 zio_suspend(spa, zio); 2326 } else { 2327 /* 2328 * Reexecution is potentially a huge amount of work. 2329 * Hand it off to the otherwise-unused claim taskq. 2330 */ 2331 (void) taskq_dispatch( 2332 spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE], 2333 (task_func_t *)zio_reexecute, zio, TQ_SLEEP); 2334 } 2335 return (ZIO_PIPELINE_STOP); 2336 } 2337 2338 ASSERT(zio_walk_children(zio) == NULL); 2339 ASSERT(zio->io_reexecute == 0); 2340 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 2341 2342 /* Report any checksum errors, since the IO is complete */ 2343 while (zio->io_cksum_report != NULL) { 2344 zio_cksum_report_t *rpt = zio->io_cksum_report; 2345 2346 zio->io_cksum_report = rpt->zcr_next; 2347 rpt->zcr_next = NULL; 2348 2349 /* only pass in our data buffer if we've succeeded. */ 2350 rpt->zcr_finish(rpt, 2351 (zio->io_error == 0) ? zio->io_data : NULL); 2352 2353 zfs_ereport_free_checksum(rpt); 2354 } 2355 2356 /* 2357 * It is the responsibility of the done callback to ensure that this 2358 * particular zio is no longer discoverable for adoption, and as 2359 * such, cannot acquire any new parents. 
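 * (A parent adopted after this point would never be notified, because
 * the loop below severs each remaining parent link as it fires the
 * ZIO_WAIT_DONE notification.)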
2360 */ 2361 if (zio->io_done) 2362 zio->io_done(zio); 2363 2364 mutex_enter(&zio->io_lock); 2365 zio->io_state[ZIO_WAIT_DONE] = 1; 2366 mutex_exit(&zio->io_lock); 2367 2368 for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { 2369 zio_link_t *zl = zio->io_walk_link; 2370 pio_next = zio_walk_parents(zio); 2371 zio_remove_child(pio, zio, zl); 2372 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 2373 } 2374 2375 if (zio->io_waiter != NULL) { 2376 mutex_enter(&zio->io_lock); 2377 zio->io_executor = NULL; 2378 cv_broadcast(&zio->io_cv); 2379 mutex_exit(&zio->io_lock); 2380 } else { 2381 zio_destroy(zio); 2382 } 2383 2384 return (ZIO_PIPELINE_STOP); 2385 } 2386 2387 /* 2388 * ========================================================================== 2389 * I/O pipeline definition 2390 * ========================================================================== 2391 */ 2392 static zio_pipe_stage_t *zio_pipeline[ZIO_STAGES] = { 2393 NULL, 2394 zio_issue_async, 2395 zio_read_bp_init, 2396 zio_write_bp_init, 2397 zio_checksum_generate, 2398 zio_gang_assemble, 2399 zio_gang_issue, 2400 zio_dva_allocate, 2401 zio_dva_free, 2402 zio_dva_claim, 2403 zio_ready, 2404 zio_vdev_io_start, 2405 zio_vdev_io_done, 2406 zio_vdev_io_assess, 2407 zio_checksum_verify, 2408 zio_done 2409 }; 2410
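
/*
 * A minimal sketch of driving the pipeline above through the public
 * entry points declared earlier in this file (zio_read(), zio_wait(),
 * zio_buf_alloc(), zio_buf_free()).  The function below is hypothetical
 * and kept under #if 0: it illustrates the flow but is not part of the
 * pipeline itself.
 */
#if 0
static int
zio_example_sync_read(spa_t *spa, const blkptr_t *bp)
{
	uint64_t size = BP_GET_LSIZE(bp);
	void *buf = zio_buf_alloc(size);	/* metadata-style buffer */
	int error;

	/*
	 * zio_wait() executes this zio through the stages listed in
	 * zio_pipeline[] and sleeps on io_cv until zio_done() signals
	 * completion; its return value is the zio's final io_error.
	 * No done callback, private data, or bookmark is needed here.
	 */
	error = zio_wait(zio_read(NULL, spa, bp, buf, size,
	    NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, NULL));

	zio_buf_free(buf, size);
	return (error);
}
#endif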