/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>

/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
	0,	/* ZIO_PRIORITY_NOW */
	0,	/* ZIO_PRIORITY_SYNC_READ */
	0,	/* ZIO_PRIORITY_SYNC_WRITE */
	6,	/* ZIO_PRIORITY_ASYNC_READ */
	4,	/* ZIO_PRIORITY_ASYNC_WRITE */
	4,	/* ZIO_PRIORITY_FREE */
	0,	/* ZIO_PRIORITY_CACHE_FILL */
	0,	/* ZIO_PRIORITY_LOG_WRITE */
	10,	/* ZIO_PRIORITY_RESILVER */
	20,	/* ZIO_PRIORITY_SCRUB */
};

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
	"null", "read", "write", "free", "claim", "ioctl" };

/* At or above this size, force gang blocking - for testing */
uint64_t zio_gang_bang = SPA_MAXBLOCKSIZE + 1;

typedef struct zio_sync_pass {
	int	zp_defer_free;		/* defer frees after this pass */
	int	zp_dontcompress;	/* don't compress after this pass */
	int	zp_rewrite;		/* rewrite new bps after this pass */
} zio_sync_pass_t;

zio_sync_pass_t zio_sync_pass = {
	1,	/* zp_defer_free */
	4,	/* zp_dontcompress */
	1,	/* zp_rewrite */
};
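
/*
 * Illustrative reading of the defaults above (a summary, not new policy):
 * during spa_sync(), pass 1 issues frees immediately, but any free that
 * arrives in pass 2 or later (> zp_defer_free) is pushed onto the deferred
 * bplist and processed in the next txg -- see zio_free() below.  Likewise,
 * zio_write_compress() stops compressing after pass 4 (> zp_dontcompress)
 * and forces rewrites of same-size blocks after pass 1 (> zp_rewrite) so
 * that spa_sync() converges instead of reallocating forever.
 */
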
/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
	 * for each quarter-power of 2.  For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (P2PHASE(size, PAGESIZE) == 0) {
			align = PAGESIZE;
		} else if (P2PHASE(size, p2 >> 2) == 0) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);

			(void) sprintf(name, "zio_data_buf_%lu",
			    (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    KMC_NODEBUG);

			dprintf("creating cache for size %5lx align %5lx\n",
			    size, align);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}
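
/*
 * Worked example of the sizing policy above (illustrative; assumes
 * SPA_MINBLOCKSIZE == 512, so the "p2" loop leaves the largest power of
 * two <= size, and PAGESIZE == 4K as on x86):
 *
 *	size = 2048 (4 * 512):	align = 512 (small-buffer rule)
 *	size = 12288 (3 pages):	align = PAGESIZE
 *	size = 5120:	p2 = 4096, 5120 % 1024 == 0, so align = 1024
 *	size = 4608:	p2 = 4096, 4608 % 1024 != 0, so no cache is made
 *
 * Sizes like 4608 that get no cache of their own are handled by the
 * backfill loop at the end of zio_init(): zio_buf_cache[8] (4608) is set
 * to point at zio_buf_cache[9] (5120), so a 4608-byte allocation simply
 * comes from the next larger cache.
 */
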
void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_SLEEP));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists to limit the amount of ZFS
 * data that shows up in a kernel crashdump, thus reducing the amount of
 * kernel heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_SLEEP));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_data = data;
	zt->zt_size = size;
	zt->zt_bufsize = bufsize;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transform(zio_t *zio, void **data, uint64_t *size, uint64_t *bufsize)
{
	zio_transform_t *zt = zio->io_transform_stack;

	*data = zt->zt_data;
	*size = zt->zt_size;
	*bufsize = zt->zt_bufsize;

	zio->io_transform_stack = zt->zt_next;
	kmem_free(zt, sizeof (zio_transform_t));

	if ((zt = zio->io_transform_stack) != NULL) {
		zio->io_data = zt->zt_data;
		zio->io_size = zt->zt_size;
	}
}

static void
zio_clear_transform_stack(zio_t *zio)
{
	void *data;
	uint64_t size, bufsize;

	ASSERT(zio->io_transform_stack != NULL);

	zio_pop_transform(zio, &data, &size, &bufsize);
	while (zio->io_transform_stack != NULL) {
		zio_buf_free(data, bufsize);
		zio_pop_transform(zio, &data, &size, &bufsize);
	}
}
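
/*
 * A note on ownership (informal, inferred from the code above): the bottom
 * entry of the transform stack is always the caller's original buffer,
 * pushed by zio_create(), and io_data/io_size always track the top of the
 * stack.  zio_clear_transform_stack() frees every buffer it pops *except*
 * the last one, so the caller's buffer is never freed by the pipeline --
 * only the intermediate buffers the pipeline itself allocated (e.g. for
 * decompression or gang headers).
 */
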
/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, int flags, uint8_t stage, uint32_t pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);

	zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	zio->io_parent = pio;
	zio->io_spa = spa;
	zio->io_txg = txg;
	if (bp != NULL) {
		zio->io_bp = bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
	}
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_stage = stage;
	zio->io_pipeline = pipeline;
	zio->io_async_stages = ZIO_ASYNC_PIPELINE_STAGES;
	zio->io_timestamp = lbolt64;
	zio->io_flags = flags;
	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	zio_push_transform(zio, data, size, size);

	if (pio == NULL) {
		if (!(flags & ZIO_FLAG_CONFIG_HELD))
			spa_config_enter(zio->io_spa, RW_READER, zio);
		zio->io_root = zio;
	} else {
		zio->io_root = pio->io_root;
		if (!(flags & ZIO_FLAG_NOBOOKMARK))
			zio->io_logical = pio->io_logical;
		mutex_enter(&pio->io_lock);
		if (stage < ZIO_STAGE_READY)
			pio->io_children_notready++;
		pio->io_children_notdone++;
		zio->io_sibling_next = pio->io_child;
		zio->io_sibling_prev = NULL;
		if (pio->io_child != NULL)
			pio->io_child->io_sibling_prev = zio;
		pio->io_child = zio;
		zio->io_ndvas = pio->io_ndvas;
		mutex_exit(&pio->io_lock);
	}

	return (zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, zio_done_func_t *done, void *private,
    int flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, ZIO_STAGE_OPEN,
	    ZIO_WAIT_FOR_CHILDREN_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, int flags)
{
	return (zio_null(NULL, spa, done, private, flags));
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    int priority, int flags, zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT3U(size, ==, BP_GET_LSIZE(bp));

	zio = zio_create(pio, spa, bp->blk_birth, bp, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_READ_PIPELINE);
	zio->io_bookmark = *zb;

	zio->io_logical = zio;

	/*
	 * Work off our copy of the bp so the caller can free it.
	 */
	zio->io_bp = &zio->io_bp_copy;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
		uint64_t csize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(csize);

		zio_push_transform(zio, cbuf, csize, csize);
		zio->io_pipeline |= 1U << ZIO_STAGE_READ_DECOMPRESS;
	}

	if (BP_IS_GANG(bp)) {
		uint64_t gsize = SPA_GANGBLOCKSIZE;
		void *gbuf = zio_buf_alloc(gsize);

		zio_push_transform(zio, gbuf, gsize, gsize);
		zio->io_pipeline |= 1U << ZIO_STAGE_READ_GANG_MEMBERS;
	}

	return (zio);
}
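
/*
 * Transform stacking for reads, sketched from the code above: for a
 * compressed gang block, the stack from bottom to top is
 *
 *	caller's buffer (lsize) -> cbuf (psize) -> gbuf (SPA_GANGBLOCKSIZE)
 *
 * and io_data points at the top, so the device I/O lands in gbuf.  The
 * read pipeline then unwinds it: ZIO_STAGE_READ_GANG_MEMBERS pops gbuf
 * and re-reads the members into cbuf, and ZIO_STAGE_READ_DECOMPRESS pops
 * cbuf and decompresses into the caller's buffer.
 */
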
zio_t *
zio_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags,
    zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(checksum >= ZIO_CHECKSUM_OFF &&
	    checksum < ZIO_CHECKSUM_FUNCTIONS);

	ASSERT(compress >= ZIO_COMPRESS_OFF &&
	    compress < ZIO_COMPRESS_FUNCTIONS);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PIPELINE);

	zio->io_bookmark = *zb;

	zio->io_logical = zio;

	zio->io_checksum = checksum;
	zio->io_compress = compress;
	zio->io_ndvas = ncopies;

	if (compress != ZIO_COMPRESS_OFF)
		zio->io_async_stages |= 1U << ZIO_STAGE_WRITE_COMPRESS;

	if (bp->blk_birth != txg) {
		/* XXX the bp usually (always?) gets re-zeroed later */
		BP_ZERO(bp);
		BP_SET_LSIZE(bp, size);
		BP_SET_PSIZE(bp, size);
	} else {
		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(MIN(zio->io_ndvas + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, int checksum,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags,
    zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	zio->io_bookmark = *zb;
	zio->io_checksum = checksum;
	zio->io_compress = ZIO_COMPRESS_OFF;

	if (pio != NULL)
		ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(bp));

	return (zio);
}

static zio_t *
zio_write_allocate(zio_t *pio, spa_t *spa, int checksum,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;

	BP_ZERO(bp);
	BP_SET_LSIZE(bp, size);
	BP_SET_PSIZE(bp, size);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags,
	    ZIO_STAGE_OPEN, ZIO_WRITE_ALLOCATE_PIPELINE);

	zio->io_checksum = checksum;
	zio->io_compress = ZIO_COMPRESS_OFF;

	return (zio);
}

zio_t *
zio_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg == spa->spa_syncing_txg &&
	    spa->spa_sync_pass > zio_sync_pass.zp_defer_free) {
		bplist_enqueue_deferred(&spa->spa_sync_bplist, bp);
		return (zio_null(pio, spa, NULL, NULL, 0));
	}

	zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
	    ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE);

	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT3U(spa_first_txg(spa), <=, txg);

	zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
	    ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, 0,
	    ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, priority, flags,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_vd = vd;
		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, priority, flags));
	}

	return (zio);
}

static void
zio_phys_bp_init(vdev_t *vd, blkptr_t *bp, uint64_t offset, uint64_t size,
    int checksum)
{
	ASSERT(vd->vdev_children == 0);

	ASSERT(size <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	BP_ZERO(bp);

	BP_SET_LSIZE(bp, size);
	BP_SET_PSIZE(bp, size);

	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	if (checksum != ZIO_CHECKSUM_OFF)
		ZIO_SET_CHECKSUM(&bp->blk_cksum, offset, 0, 0, 0);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags)
{
	zio_t *zio;
	blkptr_t blk;

	zio_phys_bp_init(vd, &blk, offset, size, checksum);

	zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL,
	    ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_vd = vd;
	zio->io_offset = offset;

	/*
	 * Work off our copy of the bp so the caller can free it.
	 */
	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}
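
/*
 * Note (inferred from zio_phys_bp_init() above): physical label I/O has no
 * real block pointer, so one is fabricated on the stack.  When a checksum
 * is requested, the verifier placed in blk_cksum is just the device offset
 * (ZIO_SET_CHECKSUM(&bp->blk_cksum, offset, 0, 0, 0)), which makes each
 * label block self-describing: a block read back from the wrong offset
 * fails verification even if its contents are internally consistent.
 */
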
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags)
{
	zio_block_tail_t *zbt;
	void *wbuf;
	zio_t *zio;
	blkptr_t blk;

	zio_phys_bp_init(vd, &blk, offset, size, checksum);

	zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_vd = vd;
	zio->io_offset = offset;

	zio->io_bp = &zio->io_bp_copy;
	zio->io_checksum = checksum;

	if (zio_checksum_table[checksum].ci_zbt) {
		/*
		 * zbt checksums are necessarily destructive -- they modify
		 * one word of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places.
		 */
		wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size);

		zbt = (zio_block_tail_t *)((char *)wbuf + size) - 1;
		zbt->zbt_cksum = blk.blk_cksum;
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.  It has no associated bp.
 */
zio_t *
zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, int flags,
    zio_done_func_t *done, void *private)
{
	uint32_t pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *cio;

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= 1U << ZIO_STAGE_CHECKSUM_VERIFY;
		zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
	}

	cio = zio_create(zio, zio->io_spa, zio->io_txg, bp, data, size,
	    done, private, type, priority,
	    (zio->io_flags & ZIO_FLAG_VDEV_INHERIT) | ZIO_FLAG_CANFAIL | flags,
	    ZIO_STAGE_VDEV_IO_START - 1, pipeline);

	cio->io_vd = vd;
	cio->io_offset = offset;

	return (cio);
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);

	zio->io_waiter = curthread;

	zio_next_stage_async(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_stalled != ZIO_STAGE_DONE)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	mutex_destroy(&zio->io_lock);
	kmem_free(zio, sizeof (zio_t));

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	zio_next_stage_async(zio);
}

/*
 * ==========================================================================
 * I/O pipeline interlocks: parent/child dependency scoreboarding
 * ==========================================================================
 */
static void
zio_wait_for_children(zio_t *zio, uint32_t stage, uint64_t *countp)
{
	mutex_enter(&zio->io_lock);
	if (*countp == 0) {
		ASSERT(zio->io_stalled == 0);
		mutex_exit(&zio->io_lock);
		zio_next_stage(zio);
	} else {
		zio->io_stalled = stage;
		mutex_exit(&zio->io_lock);
	}
}

static void
zio_notify_parent(zio_t *zio, uint32_t stage, uint64_t *countp)
{
	zio_t *pio = zio->io_parent;

	mutex_enter(&pio->io_lock);
	if (pio->io_error == 0 && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		pio->io_error = zio->io_error;
	if (--*countp == 0 && pio->io_stalled == stage) {
		pio->io_stalled = 0;
		mutex_exit(&pio->io_lock);
		zio_next_stage_async(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_wait_children_ready(zio_t *zio)
{
	zio_wait_for_children(zio, ZIO_STAGE_WAIT_CHILDREN_READY,
	    &zio->io_children_notready);
}

void
zio_wait_children_done(zio_t *zio)
{
	zio_wait_for_children(zio, ZIO_STAGE_WAIT_CHILDREN_DONE,
	    &zio->io_children_notdone);
}
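
/*
 * How the scoreboard works, in brief (a summary of the helpers above):
 * zio_create() bumps the parent's io_children_notready (for children
 * created before their READY stage) and io_children_notdone counters.
 * When a parent reaches a WAIT_CHILDREN_* stage with a nonzero count, it
 * parks by recording the stage in io_stalled.  Each child decrements the
 * counter in zio_notify_parent() (called from zio_ready()/zio_done()),
 * and whichever child drops it to zero while the parent is stalled at
 * that stage restarts the parent via zio_next_stage_async().
 */
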
static void
zio_ready(zio_t *zio)
{
	zio_t *pio = zio->io_parent;

	if (pio != NULL)
		zio_notify_parent(zio, ZIO_STAGE_WAIT_CHILDREN_READY,
		    &pio->io_children_notready);

	if (zio->io_bp)
		zio->io_bp_copy = *zio->io_bp;

	zio_next_stage(zio);
}

static void
zio_done(zio_t *zio)
{
	zio_t *pio = zio->io_parent;
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;

	ASSERT(zio->io_children_notready == 0);
	ASSERT(zio->io_children_notdone == 0);

	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bp->blk_pad[2] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0);
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			if (zio->io_ndvas != 0)
				ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
	}

	if (vd != NULL)
		vdev_stat_update(zio);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO,
			    zio->io_spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO ||
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) &&
		    zio->io_logical == zio) {
			/*
			 * For root I/O requests, tell the SPA to log the error
			 * appropriately.  Also, generate a logical data
			 * ereport.
			 */
			spa_log_error(zio->io_spa, zio);

			zfs_ereport_post(FM_EREPORT_ZFS_DATA,
			    zio->io_spa, NULL, zio, 0, 0);
		}

		/*
		 * For I/O requests that cannot fail, panic appropriately.
		 */
		if (!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			char *blkbuf;

			blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_NOSLEEP);
			if (blkbuf) {
				sprintf_blkptr(blkbuf, BP_SPRINTF_LEN,
				    bp ? bp : &zio->io_bp_copy);
			}
			panic("ZFS: %s (%s on %s off %llx: zio %p %s): error "
			    "%d", zio->io_error == ECKSUM ?
			    "bad checksum" : "I/O failure",
			    zio_type_name[zio->io_type],
			    vdev_description(vd),
			    (u_longlong_t)zio->io_offset,
			    zio, blkbuf ? blkbuf : "", zio->io_error);
		}
	}
	zio_clear_transform_stack(zio);

	if (zio->io_done)
		zio->io_done(zio);

	ASSERT(zio->io_delegate_list == NULL);
	ASSERT(zio->io_delegate_next == NULL);

	if (pio != NULL) {
		zio_t *next, *prev;

		mutex_enter(&pio->io_lock);
		next = zio->io_sibling_next;
		prev = zio->io_sibling_prev;
		if (next != NULL)
			next->io_sibling_prev = prev;
		if (prev != NULL)
			prev->io_sibling_next = next;
		if (pio->io_child == zio)
			pio->io_child = next;
		mutex_exit(&pio->io_lock);

		zio_notify_parent(zio, ZIO_STAGE_WAIT_CHILDREN_DONE,
		    &pio->io_children_notdone);
	}

	if (pio == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_HELD))
		spa_config_exit(spa, zio);

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio->io_stalled = zio->io_stage;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		kmem_free(zio, sizeof (zio_t));
	}
}

/*
 * ==========================================================================
 * Compression support
 * ==========================================================================
 */
static void
zio_write_compress(zio_t *zio)
{
	int compress = zio->io_compress;
	blkptr_t *bp = zio->io_bp;
	void *cbuf;
	uint64_t lsize = zio->io_size;
	uint64_t csize = lsize;
	uint64_t cbufsize = 0;
	int pass;

	if (bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(zio->io_spa);
		if (pass > zio_sync_pass.zp_dontcompress)
			compress = ZIO_COMPRESS_OFF;
	} else {
		ASSERT(BP_IS_HOLE(bp));
		pass = 1;
	}

	if (compress != ZIO_COMPRESS_OFF)
		if (!zio_compress_data(compress, zio->io_data, zio->io_size,
		    &cbuf, &csize, &cbufsize))
			compress = ZIO_COMPRESS_OFF;

	if (compress != ZIO_COMPRESS_OFF && csize != 0)
		zio_push_transform(zio, cbuf, csize, cbufsize);

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to reallocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == csize &&
	    pass > zio_sync_pass.zp_rewrite) {
		ASSERT(csize != 0);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_COMPRESS(bp, compress);
		zio->io_pipeline = ZIO_REWRITE_PIPELINE;
	} else {
		if (bp->blk_birth == zio->io_txg) {
			ASSERT3U(BP_GET_LSIZE(bp), ==, lsize);
			bzero(bp, sizeof (blkptr_t));
		}
		if (csize == 0) {
			BP_ZERO(bp);
			zio->io_pipeline = ZIO_WAIT_FOR_CHILDREN_PIPELINE;
		} else {
			ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
			BP_SET_LSIZE(bp, lsize);
			BP_SET_PSIZE(bp, csize);
			BP_SET_COMPRESS(bp, compress);
			zio->io_pipeline = ZIO_WRITE_ALLOCATE_PIPELINE;
		}
	}

	zio_next_stage(zio);
}

static void
zio_read_decompress(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	void *data;
	uint64_t size;
	uint64_t bufsize;
	int compress = BP_GET_COMPRESS(bp);

	ASSERT(compress != ZIO_COMPRESS_OFF);

	zio_pop_transform(zio, &data, &size, &bufsize);

	if (zio_decompress_data(compress, data, size,
	    zio->io_data, zio->io_size))
		zio->io_error = EIO;

	zio_buf_free(data, bufsize);

	zio_next_stage(zio);
}

/*
 * ==========================================================================
 * Gang block support
 * ==========================================================================
 */
static void
zio_gang_pipeline(zio_t *zio)
{
	/*
	 * By default, the pipeline assumes that we're dealing with a gang
	 * block.  If we're not, strip out any gang-specific stages.
	 */
	if (!BP_IS_GANG(zio->io_bp))
		zio->io_pipeline &= ~ZIO_GANG_STAGES;

	zio_next_stage(zio);
}

static void
zio_gang_byteswap(zio_t *zio)
{
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);

	if (BP_SHOULD_BYTESWAP(zio->io_bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);
}

static void
zio_get_gang_header(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t gsize = SPA_GANGBLOCKSIZE;
	void *gbuf = zio_buf_alloc(gsize);

	ASSERT(BP_IS_GANG(bp));

	zio_push_transform(zio, gbuf, gsize, gsize);

	zio_nowait(zio_create(zio, zio->io_spa, bp->blk_birth, bp, gbuf, gsize,
	    NULL, NULL, ZIO_TYPE_READ, zio->io_priority,
	    zio->io_flags & ZIO_FLAG_GANG_INHERIT,
	    ZIO_STAGE_OPEN, ZIO_READ_PIPELINE));

	zio_wait_children_done(zio);
}

static void
zio_read_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize, loff, lsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		lsize = BP_GET_PSIZE(gbp);

		ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
		ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
		ASSERT3U(loff + lsize, <=, zio->io_size);
		ASSERT(i < SPA_GBH_NBLKPTRS);
		ASSERT(!BP_IS_HOLE(gbp));

		zio_nowait(zio_read(zio, zio->io_spa, gbp,
		    (char *)zio->io_data + loff, lsize, NULL, NULL,
		    zio->io_priority, zio->io_flags & ZIO_FLAG_GANG_INHERIT,
		    &zio->io_bookmark));
	}

	zio_buf_free(gbh, gbufsize);
	zio_wait_children_done(zio);
}
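
/*
 * Gang geometry, for orientation (illustrative; the exact constants live
 * in the spa headers): a gang header occupies one SPA_GANGBLOCKSIZE
 * (i.e. SPA_MINBLOCKSIZE) block, and zg_blkptr[] holds the few member
 * blkptrs that fit alongside the zio_block_tail -- SPA_GBH_NBLKPTRS of
 * them.  The loops above and below walk the members in order, using each
 * member's psize to advance loff through the parent's logical range, so
 * members may be of unequal sizes but must tile that range exactly.
 */
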
static void
zio_rewrite_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize, loff, lsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));
	ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	ASSERT(gsize == gbufsize);

	for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		lsize = BP_GET_PSIZE(gbp);

		ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
		ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
		ASSERT3U(loff + lsize, <=, zio->io_size);
		ASSERT(i < SPA_GBH_NBLKPTRS);
		ASSERT(!BP_IS_HOLE(gbp));

		zio_nowait(zio_rewrite(zio, zio->io_spa, zio->io_checksum,
		    zio->io_txg, gbp, (char *)zio->io_data + loff, lsize,
		    NULL, NULL, zio->io_priority, zio->io_flags,
		    &zio->io_bookmark));
	}

	zio_push_transform(zio, gbh, gsize, gbufsize);
	zio_wait_children_ready(zio);
}

static void
zio_free_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];

		if (BP_IS_HOLE(gbp))
			continue;
		zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
		    gbp, NULL, NULL));
	}

	zio_buf_free(gbh, gbufsize);
	zio_next_stage(zio);
}

static void
zio_claim_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		if (BP_IS_HOLE(gbp))
			continue;
		zio_nowait(zio_claim(zio, zio->io_spa, zio->io_txg,
		    gbp, NULL, NULL));
	}

	zio_buf_free(gbh, gbufsize);
	zio_next_stage(zio);
}

static void
zio_write_allocate_gang_member_done(zio_t *zio)
{
	zio_t *pio = zio->io_parent;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;
	int d;

	ASSERT3U(pio->io_ndvas, ==, zio->io_ndvas);
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_ndvas, <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (d = 0; d < BP_GET_NDVAS(pio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}
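
/*
 * Sketch of the allocation strategy in zio_write_allocate_gang_members()
 * below (illustrative numbers only): maxalloc starts at half the I/O
 * size, rounded up to SPA_MINBLOCKSIZE.  For a 128K write that could not
 * be allocated whole, we first try two 64K members; each time
 * metaslab_alloc() returns ENOSPC, maxalloc is halved again, and any
 * remainder that no longer fits (resid > maxalloc * gbps_left) recurses
 * via zio_write_allocate(), producing a multi-level gang block.  The
 * ASIZE of each member is folded back into the parent's DVAs by the done
 * callback above, which is why the parent must wait for its children to
 * be *done*, not merely ready.
 */
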
static void
zio_write_allocate_gang_members(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dva_t *dva = bp->blk_dva;
	spa_t *spa = zio->io_spa;
	zio_gbh_phys_t *gbh;
	uint64_t txg = zio->io_txg;
	uint64_t resid = zio->io_size;
	uint64_t maxalloc = P2ROUNDUP(zio->io_size >> 1, SPA_MINBLOCKSIZE);
	uint64_t gsize, loff, lsize;
	uint32_t gbps_left;
	int ndvas = zio->io_ndvas;
	int gbh_ndvas = MIN(ndvas + 1, spa_max_replication(spa));
	int error;
	int i, d;

	gsize = SPA_GANGBLOCKSIZE;
	gbps_left = SPA_GBH_NBLKPTRS;

	error = metaslab_alloc(spa, gsize, bp, gbh_ndvas, txg, NULL, B_FALSE);
	if (error == ENOSPC)
		panic("can't allocate gang block header");
	ASSERT(error == 0);

	for (d = 0; d < gbh_ndvas; d++)
		DVA_SET_GANG(&dva[d], 1);

	bp->blk_birth = txg;

	gbh = zio_buf_alloc(gsize);
	bzero(gbh, gsize);

	/* We need to test multi-level gang blocks */
	if (maxalloc >= zio_gang_bang && (lbolt & 0x1) == 0)
		maxalloc = MAX(maxalloc >> 2, SPA_MINBLOCKSIZE);

	for (loff = 0, i = 0; loff != zio->io_size;
	    loff += lsize, resid -= lsize, gbps_left--, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		dva = gbp->blk_dva;

		ASSERT(gbps_left != 0);
		maxalloc = MIN(maxalloc, resid);

		while (resid <= maxalloc * gbps_left) {
			error = metaslab_alloc(spa, maxalloc, gbp, ndvas,
			    txg, bp, B_FALSE);
			if (error == 0)
				break;
			ASSERT3U(error, ==, ENOSPC);
			if (maxalloc == SPA_MINBLOCKSIZE)
				panic("really out of space");
			maxalloc = P2ROUNDUP(maxalloc >> 1, SPA_MINBLOCKSIZE);
		}

		if (resid <= maxalloc * gbps_left) {
			lsize = maxalloc;
			BP_SET_LSIZE(gbp, lsize);
			BP_SET_PSIZE(gbp, lsize);
			BP_SET_COMPRESS(gbp, ZIO_COMPRESS_OFF);
			gbp->blk_birth = txg;
			zio_nowait(zio_rewrite(zio, spa,
			    zio->io_checksum, txg, gbp,
			    (char *)zio->io_data + loff, lsize,
			    zio_write_allocate_gang_member_done, NULL,
			    zio->io_priority, zio->io_flags,
			    &zio->io_bookmark));
		} else {
			lsize = P2ROUNDUP(resid / gbps_left, SPA_MINBLOCKSIZE);
			ASSERT(lsize != SPA_MINBLOCKSIZE);
			zio_nowait(zio_write_allocate(zio, spa,
			    zio->io_checksum, txg, gbp,
			    (char *)zio->io_data + loff, lsize,
			    zio_write_allocate_gang_member_done, NULL,
			    zio->io_priority, zio->io_flags));
		}
	}

	ASSERT(resid == 0 && loff == zio->io_size);

	zio->io_pipeline |= 1U << ZIO_STAGE_GANG_CHECKSUM_GENERATE;

	zio_push_transform(zio, gbh, gsize, gsize);
	/*
	 * As much as we'd like this to be zio_wait_children_ready(),
	 * updating our ASIZE doesn't happen until the io_done callback,
	 * so we have to wait for that to finish in order for our BP
	 * to be stable.
	 */
	zio_wait_children_done(zio);
}

/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static void
zio_dva_allocate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(BP_IS_HOLE(bp));
	ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
	ASSERT3U(zio->io_ndvas, >, 0);
	ASSERT3U(zio->io_ndvas, <=, spa_max_replication(zio->io_spa));

	/* For testing, make some blocks above a certain size be gang blocks */
	if (zio->io_size >= zio_gang_bang && (lbolt & 0x3) == 0) {
		zio_write_allocate_gang_members(zio);
		return;
	}

	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	error = metaslab_alloc(zio->io_spa, zio->io_size, bp, zio->io_ndvas,
	    zio->io_txg, NULL, B_FALSE);

	if (error == 0) {
		bp->blk_birth = zio->io_txg;
	} else if (error == ENOSPC) {
		if (zio->io_size == SPA_MINBLOCKSIZE)
			panic("really, truly out of space");
		zio_write_allocate_gang_members(zio);
		return;
	} else {
		zio->io_error = error;
	}
	zio_next_stage(zio);
}

static void
zio_dva_free(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	metaslab_free(zio->io_spa, bp, zio->io_txg, B_FALSE);

	BP_ZERO(bp);

	zio_next_stage(zio);
}

static void
zio_dva_claim(zio_t *zio)
{
	zio->io_error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);

	zio_next_stage(zio);
}
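
/*
 * In other words (a summary, not new policy): ENOSPC from a full-size
 * allocation is not fatal -- zio_dva_allocate() falls back to gang
 * allocation, which splits the write into smaller members that can fit
 * into whatever fragments remain.  Only when even a single
 * SPA_MINBLOCKSIZE block cannot be allocated does the pool panic, since
 * at that point there is genuinely no space left to write anything.
 */
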
/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */

static void
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd ? vd->vdev_top : NULL;
	blkptr_t *bp = zio->io_bp;
	uint64_t align;

	if (vd == NULL) {
		/* The mirror_ops handle multiple DVAs in a single BP */
		vdev_mirror_ops.vdev_op_io_start(zio);
		return;
	}

	align = 1ULL << tvd->vdev_ashift;

	if (zio->io_retries == 0 && vd == tvd)
		zio->io_flags |= ZIO_FLAG_FAILFAST;

	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
	    vd->vdev_children == 0) {
		zio->io_flags |= ZIO_FLAG_PHYSICAL;
		zio->io_offset += VDEV_LABEL_START_SIZE;
	}

	if (P2PHASE(zio->io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = zio_buf_alloc(asize);
		ASSERT(vd == tvd);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize);
		ASSERT(!(zio->io_flags & ZIO_FLAG_SUBBLOCK));
		zio->io_flags |= ZIO_FLAG_SUBBLOCK;
	}

	ASSERT(P2PHASE(zio->io_offset, align) == 0);
	ASSERT(P2PHASE(zio->io_size, align) == 0);
	ASSERT(bp == NULL ||
	    P2ROUNDUP(ZIO_GET_IOSIZE(zio), align) == zio->io_size);
	ASSERT(zio->io_type != ZIO_TYPE_WRITE || (spa_mode & FWRITE));

	vdev_io_start(zio);

	/* zio_next_stage_async() gets called from io completion interrupt */
}

static void
zio_vdev_io_done(zio_t *zio)
{
	if (zio->io_vd == NULL)
		/* The mirror_ops handle multiple DVAs in a single BP */
		vdev_mirror_ops.vdev_op_io_done(zio);
	else
		vdev_io_done(zio);
}
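
/*
 * Alignment example for the SUBBLOCK path above (illustrative; assumes a
 * leaf with vdev_ashift == 12, i.e. 4K sectors): a 1K read has
 * P2PHASE(1024, 4096) != 0, so a 4K bounce buffer is pushed, the device
 * reads 4K, and zio_vdev_io_assess() pops the transform and copies the
 * first 1K back into the caller's buffer.  For writes, the tail of the
 * bounce buffer is zero-filled before it goes to disk.
 */
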
/* XXPOLICY */
boolean_t
zio_should_retry(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio->io_error == 0)
		return (B_FALSE);
	if (zio->io_delegate_list != NULL)
		return (B_FALSE);
	if (vd && vd != vd->vdev_top)
		return (B_FALSE);
	if (zio->io_flags & ZIO_FLAG_DONT_RETRY)
		return (B_FALSE);
	if (zio->io_retries > 0)
		return (B_FALSE);

	return (B_TRUE);
}

static void
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd ? vd->vdev_top : NULL;

	ASSERT(zio->io_vsd == NULL);

	if (zio->io_flags & ZIO_FLAG_SUBBLOCK) {
		void *abuf;
		uint64_t asize;
		ASSERT(vd == tvd);
		zio_pop_transform(zio, &abuf, &asize, &asize);
		if (zio->io_type == ZIO_TYPE_READ)
			bcopy(abuf, zio->io_data, zio->io_size);
		zio_buf_free(abuf, asize);
		zio->io_flags &= ~ZIO_FLAG_SUBBLOCK;
	}

	if (zio_injection_enabled && !zio->io_error)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 */
	/* XXPOLICY */
	if (zio_should_retry(zio)) {
		ASSERT(tvd == vd);

		zio->io_retries++;
		zio->io_error = 0;
		zio->io_flags &= ZIO_FLAG_VDEV_INHERIT;
		/* XXPOLICY */
		zio->io_flags &= ~ZIO_FLAG_FAILFAST;
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START - 1;

		dprintf("retry #%d for %s to %s offset %llx\n",
		    zio->io_retries, zio_type_name[zio->io_type],
		    vdev_description(vd), zio->io_offset);

		zio_next_stage_async(zio);
		return;
	}

	if (zio->io_error != 0 && zio->io_error != ECKSUM &&
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE) && vd) {
		/*
		 * Poor man's hotplug support.  Even if we're done retrying
		 * this I/O, try to reopen the vdev to see if it's still
		 * attached.  To avoid excessive thrashing, we only try it
		 * once a minute.  This also has the effect of detecting when
		 * missing devices have come back, by polling the device once
		 * a minute.
		 *
		 * We need to do this asynchronously because we can't grab
		 * all the necessary locks way down here.
		 */
		if (gethrtime() - vd->vdev_last_try > 60ULL * NANOSEC) {
			vd->vdev_last_try = gethrtime();
			tvd->vdev_reopen_wanted = 1;
			spa_async_request(vd->vdev_spa, SPA_ASYNC_REOPEN);
		}
	}

	zio_next_stage(zio);
}

void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage--;
}

void
zio_vdev_io_redone(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);

	zio->io_stage--;
}

void
zio_vdev_io_bypass(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS - 1;
}

/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static void
zio_checksum_generate(zio_t *zio)
{
	int checksum = zio->io_checksum;
	blkptr_t *bp = zio->io_bp;

	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	zio_checksum(checksum, &bp->blk_cksum, zio->io_data, zio->io_size);

	zio_next_stage(zio);
}

static void
zio_gang_checksum_generate(zio_t *zio)
{
	zio_cksum_t zc;
	zio_gbh_phys_t *gbh = zio->io_data;

	ASSERT(BP_IS_GANG(zio->io_bp));
	ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);

	zio_set_gang_verifier(zio, &gbh->zg_tail.zbt_cksum);

	zio_checksum(ZIO_CHECKSUM_GANG_HEADER, &zc, zio->io_data, zio->io_size);

	zio_next_stage(zio);
}

static void
zio_checksum_verify(zio_t *zio)
{
	if (zio->io_bp != NULL) {
		zio->io_error = zio_checksum_error(zio);
		if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE))
			zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
			    zio->io_spa, zio->io_vd, zio, 0, 0);
	}

	zio_next_stage(zio);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
}

/*
 * Set the external verifier for a gang block based on stuff in the bp
 */
void
zio_set_gang_verifier(zio_t *zio, zio_cksum_t *zcp)
{
	blkptr_t *bp = zio->io_bp;

	zcp->zc_word[0] = DVA_GET_VDEV(BP_IDENTITY(bp));
	zcp->zc_word[1] = DVA_GET_OFFSET(BP_IDENTITY(bp));
	zcp->zc_word[2] = bp->blk_birth;
	zcp->zc_word[3] = 0;
}

/*
 * ==========================================================================
 * Define the pipeline
 * ==========================================================================
 */
typedef void zio_pipe_stage_t(zio_t *zio);

static void
zio_badop(zio_t *zio)
{
	panic("Invalid I/O pipeline stage %u for zio %p", zio->io_stage, zio);
}

zio_pipe_stage_t *zio_pipeline[ZIO_STAGE_DONE + 2] = {
	zio_badop,
	zio_wait_children_ready,
	zio_write_compress,
	zio_checksum_generate,
	zio_gang_pipeline,
	zio_get_gang_header,
	zio_rewrite_gang_members,
	zio_free_gang_members,
	zio_claim_gang_members,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_gang_checksum_generate,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_wait_children_done,
	zio_checksum_verify,
	zio_read_gang_members,
	zio_read_decompress,
	zio_done,
	zio_badop
};

/*
 * Move an I/O to the next stage of the pipeline and execute that stage.
 * There's no locking on io_stage because there's no legitimate way for
 * multiple threads to be attempting to process the same I/O.
 */
void
zio_next_stage(zio_t *zio)
{
	uint32_t pipeline = zio->io_pipeline;

	ASSERT(!MUTEX_HELD(&zio->io_lock));

	if (zio->io_error) {
		dprintf("zio %p vdev %s offset %llx stage %d error %d\n",
		    zio, vdev_description(zio->io_vd),
		    zio->io_offset, zio->io_stage, zio->io_error);
		if (((1U << zio->io_stage) & ZIO_VDEV_IO_PIPELINE) == 0)
			pipeline &= ZIO_ERROR_PIPELINE_MASK;
	}

	while (((1U << ++zio->io_stage) & pipeline) == 0)
		continue;

	ASSERT(zio->io_stage <= ZIO_STAGE_DONE);
	ASSERT(zio->io_stalled == 0);

	zio_pipeline[zio->io_stage](zio);
}
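
/*
 * A small worked example of the advance loop above: io_pipeline is a
 * bitmask with bit n set if stage n is active for this zio.  If the
 * current stage is 3 and the pipeline mask has bits {1, 3, 13, 21} set,
 * then ++io_stage walks 4, 5, ... until (1U << 13) & pipeline is nonzero,
 * and stage 13 runs next.  (The stage numbers here are illustrative, not
 * the actual ZIO_STAGE_* values.)
 */
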
void
zio_next_stage_async(zio_t *zio)
{
	taskq_t *tq;
	uint32_t pipeline = zio->io_pipeline;

	ASSERT(!MUTEX_HELD(&zio->io_lock));

	if (zio->io_error) {
		dprintf("zio %p vdev %s offset %llx stage %d error %d\n",
		    zio, vdev_description(zio->io_vd),
		    zio->io_offset, zio->io_stage, zio->io_error);
		if (((1U << zio->io_stage) & ZIO_VDEV_IO_PIPELINE) == 0)
			pipeline &= ZIO_ERROR_PIPELINE_MASK;
	}

	while (((1U << ++zio->io_stage) & pipeline) == 0)
		continue;

	ASSERT(zio->io_stage <= ZIO_STAGE_DONE);
	ASSERT(zio->io_stalled == 0);

	/*
	 * For performance, we'll probably want two sets of task queues:
	 * per-CPU issue taskqs and per-CPU completion taskqs.  The per-CPU
	 * part is for read performance: since we have to make a pass over
	 * the data to checksum it anyway, we want to do this on the same CPU
	 * that issued the read, because (assuming CPU scheduling affinity)
	 * that thread is probably still there.  Getting this optimization
	 * right avoids performance-hostile cache-to-cache transfers.
	 *
	 * Note that having two sets of task queues is also necessary for
	 * correctness: if all of the issue threads get bogged down waiting
	 * for dependent reads (e.g. metaslab freelist) to complete, then
	 * there won't be any threads available to service I/O completion
	 * interrupts.
	 */
	if ((1U << zio->io_stage) & zio->io_async_stages) {
		if (zio->io_stage < ZIO_STAGE_VDEV_IO_DONE)
			tq = zio->io_spa->spa_zio_issue_taskq[zio->io_type];
		else
			tq = zio->io_spa->spa_zio_intr_taskq[zio->io_type];
		(void) taskq_dispatch(tq,
		    (task_func_t *)zio_pipeline[zio->io_stage], zio, TQ_SLEEP);
	} else {
		zio_pipeline[zio->io_stage](zio);
	}
}

/*
 * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 */
int
zio_alloc_blk(spa_t *spa, uint64_t size, blkptr_t *new_bp, blkptr_t *old_bp,
    uint64_t txg)
{
	int error;

	spa_config_enter(spa, RW_READER, FTAG);

	/*
	 * We were passed the previous log block's dva_t in bp->blk_dva[0].
	 */
	error = metaslab_alloc(spa, size, new_bp, 1, txg, old_bp, B_TRUE);

	if (error == 0) {
		BP_SET_LSIZE(new_bp, size);
		BP_SET_PSIZE(new_bp, size);
		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
		BP_SET_CHECKSUM(new_bp, ZIO_CHECKSUM_ZILOG);
		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(new_bp, 0);
		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
		new_bp->blk_birth = txg;
	}

	spa_config_exit(spa, FTAG);

	return (error);
}

/*
 * Free an intent log block.  We know it can't be a gang block, so there's
 * nothing to do except metaslab_free() it.
 */
void
zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg)
{
	ASSERT(!BP_IS_GANG(bp));

	spa_config_enter(spa, RW_READER, FTAG);

	metaslab_free(spa, bp, txg, B_FALSE);

	spa_config_exit(spa, FTAG);
}