/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>
#include <sys/cityhash.h>
#include <sys/dsl_crypt.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
        "zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
        "zio_ioctl", "zio_trim"
};

boolean_t zio_dva_throttle_enabled = B_TRUE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

#define ZIO_PIPELINE_CONTINUE           0x100
#define ZIO_PIPELINE_STOP               0x101

#define BP_SPANB(indblkshift, level) \
        (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL      0x80000000ul
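/*
 * For reference, BP_SPANB() gives the number of level-0 blocks spanned by
 * one block pointer at a given indirection level.  A worked example,
 * assuming 128K indirect blocks (indblkshift = 17) and the standard
 * 128-byte block pointer (SPA_BLKPTRSHIFT = 7):
 *
 *      BP_SPANB(17, 1) == 1ULL << (1 * (17 - 7)) == 1024
 *      BP_SPANB(17, 2) == 1ULL << (2 * (17 - 7)) == 1048576
 *
 * i.e. each L1 bp covers 1024 L0 blocks, each L2 bp covers 1024^2.
 */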
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
        size_t c;
        vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
        data_alloc_arena = zio_alloc_arena;
#endif
        zio_cache = kmem_cache_create("zio_cache",
            sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
        zio_link_cache = kmem_cache_create("zio_link_cache",
            sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

        /*
         * For small buffers, we want a cache for each multiple of
         * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
         * for each quarter-power of 2.
         */
        for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
                size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
                size_t p2 = size;
                size_t align = 0;
                size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

                while (!ISP2(p2))
                        p2 &= p2 - 1;

#ifndef _KERNEL
                /*
                 * If we are using watchpoints, put each buffer on its own page,
                 * to eliminate the performance overhead of trapping to the
                 * kernel when modifying a non-watched buffer that shares the
                 * page with a watched buffer.
                 */
                if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
                        continue;
#endif
                if (size <= 4 * SPA_MINBLOCKSIZE) {
                        align = SPA_MINBLOCKSIZE;
                } else if (IS_P2ALIGNED(size, p2 >> 2)) {
                        align = MIN(p2 >> 2, PAGESIZE);
                }

                if (align != 0) {
                        char name[36];
                        (void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
                        zio_buf_cache[c] = kmem_cache_create(name, size,
                            align, NULL, NULL, NULL, NULL, NULL, cflags);

                        /*
                         * Since zio_data bufs do not appear in crash dumps, we
                         * pass KMC_NOTOUCH so that no allocator metadata is
                         * stored with the buffers.
                         */
                        (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
                        zio_data_buf_cache[c] = kmem_cache_create(name, size,
                            align, NULL, NULL, NULL, NULL, data_alloc_arena,
                            cflags | KMC_NOTOUCH);
                }
        }

        while (--c != 0) {
                ASSERT(zio_buf_cache[c] != NULL);
                if (zio_buf_cache[c - 1] == NULL)
                        zio_buf_cache[c - 1] = zio_buf_cache[c];

                ASSERT(zio_data_buf_cache[c] != NULL);
                if (zio_data_buf_cache[c - 1] == NULL)
                        zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
        }

        zio_inject_init();
}
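/*
 * To make the cache-sizing loop in zio_init() concrete, consider a few
 * sizes (SPA_MINBLOCKSIZE = 512; PAGESIZE assumed 4K here):
 *
 *      size = 1536  (3 * 512, <= 4 * 512 * ... no, <= 2048)
 *          first branch applies          -> align = 512
 *      size = 12288 (12K): p2 = 8192, p2 >> 2 = 2048,
 *          12288 is 2048-aligned         -> align = MIN(2048, 4096) = 2048
 *      size = 12800 (25 * 512): p2 = 8192, p2 >> 2 = 2048,
 *          12800 is not 2048-aligned     -> align = 0, no dedicated cache
 *
 * Sizes that get no dedicated cache are backed by the next-larger cache
 * via the backfill loop at the end of zio_init().
 */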
void
zio_fini(void)
{
        size_t c;
        kmem_cache_t *last_cache = NULL;
        kmem_cache_t *last_data_cache = NULL;

        for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
                if (zio_buf_cache[c] != last_cache) {
                        last_cache = zio_buf_cache[c];
                        kmem_cache_destroy(zio_buf_cache[c]);
                }
                zio_buf_cache[c] = NULL;

                if (zio_data_buf_cache[c] != last_data_cache) {
                        last_data_cache = zio_data_buf_cache[c];
                        kmem_cache_destroy(zio_data_buf_cache[c]);
                }
                zio_data_buf_cache[c] = NULL;
        }

        kmem_cache_destroy(zio_link_cache);
        kmem_cache_destroy(zio_cache);

        zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

        VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

        return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
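/*
 * The index computation above maps a byte count to its cache slot by
 * rounding up to the next multiple of SPA_MINBLOCKSIZE.  For example:
 *
 *      size = 4096  ->  c = (4096 - 1) >> 9 = 7   (the 4K cache)
 *      size = 4097  ->  c = (4097 - 1) >> 9 = 8   (the 4.5K slot, which
 *          may be backfilled by a larger cache; see zio_init())
 *
 * Callers must free with the same size they allocated, since the size is
 * the only way to find the owning cache again in zio_buf_free().
 */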
/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump.  (Thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics)
 */
void *
zio_data_buf_alloc(size_t size)
{
        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

        VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

        return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

        VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

        kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

        VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

        kmem_cache_free(zio_data_buf_cache[c], buf);
}

/* ARGSUSED */
static void
zio_abd_free(void *abd, size_t size)
{
        abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
        zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

        /*
         * Ensure that anyone expecting this zio to contain a linear ABD isn't
         * going to get a nasty surprise when they try to access the data.
         */
        IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));

        zt->zt_orig_abd = zio->io_abd;
        zt->zt_orig_size = zio->io_size;
        zt->zt_bufsize = bufsize;
        zt->zt_transform = transform;

        zt->zt_next = zio->io_transform_stack;
        zio->io_transform_stack = zt;

        zio->io_abd = data;
        zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
        zio_transform_t *zt;

        while ((zt = zio->io_transform_stack) != NULL) {
                if (zt->zt_transform != NULL)
                        zt->zt_transform(zio,
                            zt->zt_orig_abd, zt->zt_orig_size);

                if (zt->zt_bufsize != 0)
                        abd_free(zio->io_abd);

                zio->io_abd = zt->zt_orig_abd;
                zio->io_size = zt->zt_orig_size;
                zio->io_transform_stack = zt->zt_next;

                kmem_free(zt, sizeof (zio_transform_t));
        }
}
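/*
 * The transform stack is LIFO: each push redirects io_abd/io_size to a
 * staging buffer, and zio_pop_transforms() unwinds in reverse order,
 * invoking each transform callback with the original (outer) buffer and
 * size.  As a sketch of how a compressed read uses this (this mirrors
 * zio_read_bp_init() below):
 *
 *      // Read psize bytes of compressed data into a staging ABD; on
 *      // completion, zio_decompress() inflates it back into the original
 *      // lsize-sized buffer the caller handed us.
 *      zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 *          psize, psize, zio_decompress);
 *
 * A nonzero bufsize tells the pop path to free the staging ABD.
 */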
/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
        ASSERT(zio->io_size > size);

        if (zio->io_type == ZIO_TYPE_READ)
                abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
        if (zio->io_error == 0) {
                void *tmp = abd_borrow_buf(data, size);
                int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
                    zio->io_abd, tmp, zio->io_size, size);
                abd_return_buf_copy(data, tmp, size);

                if (ret != 0)
                        zio->io_error = SET_ERROR(EIO);
        }
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
        int ret;
        void *tmp;
        blkptr_t *bp = zio->io_bp;
        spa_t *spa = zio->io_spa;
        uint64_t dsobj = zio->io_bookmark.zb_objset;
        uint64_t lsize = BP_GET_LSIZE(bp);
        dmu_object_type_t ot = BP_GET_TYPE(bp);
        uint8_t salt[ZIO_DATA_SALT_LEN];
        uint8_t iv[ZIO_DATA_IV_LEN];
        uint8_t mac[ZIO_DATA_MAC_LEN];
        boolean_t no_crypt = B_FALSE;

        ASSERT(BP_USES_CRYPT(bp));
        ASSERT3U(size, !=, 0);

        if (zio->io_error != 0)
                return;

        /*
         * Verify the cksum of MACs stored in an indirect bp. It will always
         * be possible to verify this since it does not require an encryption
         * key.
         */
        if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
                zio_crypt_decode_mac_bp(bp, mac);

                if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
                        /*
                         * We haven't decompressed the data yet, but
                         * zio_crypt_do_indirect_mac_checksum() requires
                         * decompressed data to be able to parse out the MACs
                         * from the indirect block. We decompress it now and
                         * throw away the result after we are finished.
                         */
                        tmp = zio_buf_alloc(lsize);
                        ret = zio_decompress_data(BP_GET_COMPRESS(bp),
                            zio->io_abd, tmp, zio->io_size, lsize);
                        if (ret != 0) {
                                ret = SET_ERROR(EIO);
                                goto error;
                        }
                        ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
                            tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
                        zio_buf_free(tmp, lsize);
                } else {
                        ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
                            zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
                }
                abd_copy(data, zio->io_abd, size);

                if (ret != 0)
                        goto error;

                return;
        }

        /*
         * If this is an authenticated block, just check the MAC. It would be
         * nice to separate this out into its own flag, but for the moment
         * enum zio_flag is out of bits.
         */
        if (BP_IS_AUTHENTICATED(bp)) {
                if (ot == DMU_OT_OBJSET) {
                        ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
                            dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
                } else {
                        zio_crypt_decode_mac_bp(bp, mac);
                        ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
                            zio->io_abd, size, mac);
                }
                abd_copy(data, zio->io_abd, size);

                if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
                        ret = zio_handle_decrypt_injection(spa,
                            &zio->io_bookmark, ot, ECKSUM);
                }
                if (ret != 0)
                        goto error;

                return;
        }

        zio_crypt_decode_params_bp(bp, salt, iv);

        if (ot == DMU_OT_INTENT_LOG) {
                tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
                zio_crypt_decode_mac_zil(tmp, mac);
                abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
        } else {
                zio_crypt_decode_mac_bp(bp, mac);
        }

        ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
            BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
            zio->io_abd, &no_crypt);
        if (no_crypt)
                abd_copy(data, zio->io_abd, size);

        if (ret != 0)
                goto error;

        return;

error:
        /* assert that the key was found unless this was speculative */
        ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

        /*
         * If there was a decryption / authentication error return EIO as
         * the io_error. If this was not a speculative zio, create an ereport.
         */
        if (ret == ECKSUM) {
                zio->io_error = SET_ERROR(EIO);
                if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
                        spa_log_error(spa, &zio->io_bookmark);
                        zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
                            spa, NULL, &zio->io_bookmark, zio, 0, 0);
                }
        } else {
                zio->io_error = ret;
        }
}
/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
        list_t *pl = &cio->io_parent_list;

        *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
        if (*zl == NULL)
                return (NULL);

        ASSERT((*zl)->zl_child == cio);
        return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
        list_t *cl = &pio->io_child_list;

        ASSERT(MUTEX_HELD(&pio->io_lock));

        *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
        if (*zl == NULL)
                return (NULL);

        ASSERT((*zl)->zl_parent == pio);
        return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
        zio_link_t *zl = NULL;
        zio_t *pio = zio_walk_parents(cio, &zl);

        VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
        return (pio);
}
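/*
 * The zio_link_t cursor makes parent/child walks restartable without
 * holding any iterator state in the zio itself.  The canonical pattern
 * (used throughout this file, e.g. in zio_reexecute() below) is:
 *
 *      zio_link_t *zl = NULL;
 *      zio_t *cio;
 *
 *      mutex_enter(&pio->io_lock);
 *      while ((cio = zio_walk_children(pio, &zl)) != NULL) {
 *              // ... inspect cio; new children are inserted at the head
 *              // of the list, so they are not visited by this walk ...
 *      }
 *      mutex_exit(&pio->io_lock);
 *
 * zio_walk_parents() is symmetric; note that, unlike zio_walk_children(),
 * it does not assert that any lock is held.
 */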
void
zio_add_child(zio_t *pio, zio_t *cio)
{
        zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

        /*
         * Logical I/Os can have logical, gang, or vdev children.
         * Gang I/Os can have gang or vdev children.
         * Vdev I/Os can only have vdev children.
         * The following ASSERT captures all of these constraints.
         */
        ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

        zl->zl_parent = pio;
        zl->zl_child = cio;

        mutex_enter(&pio->io_lock);
        mutex_enter(&cio->io_lock);

        ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

        for (int w = 0; w < ZIO_WAIT_TYPES; w++)
                pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

        list_insert_head(&pio->io_child_list, zl);
        list_insert_head(&cio->io_parent_list, zl);

        pio->io_child_count++;
        cio->io_parent_count++;

        mutex_exit(&cio->io_lock);
        mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
        ASSERT(zl->zl_parent == pio);
        ASSERT(zl->zl_child == cio);

        mutex_enter(&pio->io_lock);
        mutex_enter(&cio->io_lock);

        list_remove(&pio->io_child_list, zl);
        list_remove(&cio->io_parent_list, zl);

        pio->io_child_count--;
        cio->io_parent_count--;

        mutex_exit(&cio->io_lock);
        mutex_exit(&pio->io_lock);

        kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
        boolean_t waiting = B_FALSE;

        mutex_enter(&zio->io_lock);
        ASSERT(zio->io_stall == NULL);
        for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
                if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
                        continue;

                uint64_t *countp = &zio->io_children[c][wait];
                if (*countp != 0) {
                        zio->io_stage >>= 1;
                        ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
                        zio->io_stall = countp;
                        waiting = B_TRUE;
                        break;
                }
        }
        mutex_exit(&zio->io_lock);
        return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
        uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
        int *errorp = &pio->io_child_error[zio->io_child_type];

        mutex_enter(&pio->io_lock);
        if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
                *errorp = zio_worst_error(*errorp, zio->io_error);
        pio->io_reexecute |= zio->io_reexecute;
        ASSERT3U(*countp, >, 0);

        (*countp)--;

        if (*countp == 0 && pio->io_stall == countp) {
                zio_taskq_type_t type =
                    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
                    ZIO_TASKQ_INTERRUPT;
                pio->io_stall = NULL;
                mutex_exit(&pio->io_lock);
                /*
                 * Dispatch the parent zio in its own taskq so that
                 * the child can continue to make progress. This also
                 * prevents overflowing the stack when we have deeply nested
                 * parent-child relationships.
                 */
                zio_taskq_dispatch(pio, type, B_FALSE);
        } else {
                mutex_exit(&pio->io_lock);
        }
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
        if (zio->io_child_error[c] != 0 && zio->io_error == 0)
                zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
                return (-1);
        if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
                return (1);

        if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
                return (-1);
        if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
                return (1);

        if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
                return (-1);
        if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
                return (1);

        if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
                return (-1);
        if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}
/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    enum zio_flag flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
{
        zio_t *zio;

        IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
        ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
        ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

        ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
        ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
        ASSERT(vd || stage == ZIO_STAGE_OPEN);

        IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

        zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
        bzero(zio, sizeof (zio_t));

        mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

        list_create(&zio->io_parent_list, sizeof (zio_link_t),
            offsetof(zio_link_t, zl_parent_node));
        list_create(&zio->io_child_list, sizeof (zio_link_t),
            offsetof(zio_link_t, zl_child_node));
        metaslab_trace_init(&zio->io_alloc_list);

        if (vd != NULL)
                zio->io_child_type = ZIO_CHILD_VDEV;
        else if (flags & ZIO_FLAG_GANG_CHILD)
                zio->io_child_type = ZIO_CHILD_GANG;
        else if (flags & ZIO_FLAG_DDT_CHILD)
                zio->io_child_type = ZIO_CHILD_DDT;
        else
                zio->io_child_type = ZIO_CHILD_LOGICAL;

        if (bp != NULL) {
                zio->io_bp = (blkptr_t *)bp;
                zio->io_bp_copy = *bp;
                zio->io_bp_orig = *bp;
                if (type != ZIO_TYPE_WRITE ||
                    zio->io_child_type == ZIO_CHILD_DDT)
                        zio->io_bp = &zio->io_bp_copy;  /* so caller can free */
                if (zio->io_child_type == ZIO_CHILD_LOGICAL)
                        zio->io_logical = zio;
                if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
                        pipeline |= ZIO_GANG_STAGES;
        }

        zio->io_spa = spa;
        zio->io_txg = txg;
        zio->io_done = done;
        zio->io_private = private;
        zio->io_type = type;
        zio->io_priority = priority;
        zio->io_vd = vd;
        zio->io_offset = offset;
        zio->io_orig_abd = zio->io_abd = data;
        zio->io_orig_size = zio->io_size = psize;
        zio->io_lsize = lsize;
        zio->io_orig_flags = zio->io_flags = flags;
        zio->io_orig_stage = zio->io_stage = stage;
        zio->io_orig_pipeline = zio->io_pipeline = pipeline;
        zio->io_pipeline_trace = ZIO_STAGE_OPEN;

        zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
        zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

        if (zb != NULL)
                zio->io_bookmark = *zb;

        if (pio != NULL) {
                if (zio->io_metaslab_class == NULL)
                        zio->io_metaslab_class = pio->io_metaslab_class;
                if (zio->io_logical == NULL)
                        zio->io_logical = pio->io_logical;
                if (zio->io_child_type == ZIO_CHILD_GANG)
                        zio->io_gang_leader = pio->io_gang_leader;
                zio_add_child(pio, zio);
        }

        return (zio);
}

static void
zio_destroy(zio_t *zio)
{
        metaslab_trace_fini(&zio->io_alloc_list);
        list_destroy(&zio->io_parent_list);
        list_destroy(&zio->io_child_list);
        mutex_destroy(&zio->io_lock);
        cv_destroy(&zio->io_cv);
        kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
        zio_t *zio;

        zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
            ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
            ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

        return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
        return (zio_null(NULL, spa, NULL, done, private, flags));
}
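/*
 * A root (null) zio is the usual way to fan out and collect a batch of
 * dependent I/Os.  A minimal sketch of the pattern, assuming the caller
 * already holds a spa_t, a bookmark, and arrays of block pointers and
 * ABDs to read into:
 *
 *      zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *
 *      for (int i = 0; i < nbps; i++) {
 *              zio_nowait(zio_read(rio, spa, &bps[i], abds[i],
 *                  BP_GET_PSIZE(&bps[i]), NULL, NULL,
 *                  ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *      }
 *
 *      int error = zio_wait(rio);      // waits for all children
 *
 * zio_read(), zio_wait(), and zio_nowait() are defined later in this file.
 */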
void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
        if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
                zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
                    bp, (longlong_t)BP_GET_TYPE(bp));
        }
        if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
            BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
                zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
                    bp, (longlong_t)BP_GET_CHECKSUM(bp));
        }
        if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
            BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
                zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
                    bp, (longlong_t)BP_GET_COMPRESS(bp));
        }
        if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
                zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
                    bp, (longlong_t)BP_GET_LSIZE(bp));
        }
        if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
                zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
                    bp, (longlong_t)BP_GET_PSIZE(bp));
        }

        if (BP_IS_EMBEDDED(bp)) {
                if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
                        zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
                            bp, (longlong_t)BPE_GET_ETYPE(bp));
                }
        }

        /*
         * Do not verify individual DVAs if the config is not trusted. This
         * will be done once the zio is executed in vdev_mirror_map_alloc.
         */
        if (!spa->spa_trust_config)
                return;

        /*
         * Pool-specific checks.
         *
         * Note: it would be nice to verify that the blk_birth and
         * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
         * allows the birth time of log blocks (and dmu_sync()-ed blocks
         * that are in the log) to be arbitrarily large.
         */
        for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
                uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
                if (vdevid >= spa->spa_root_vdev->vdev_children) {
                        zfs_panic_recover("blkptr at %p DVA %u has invalid "
                            "VDEV %llu",
                            bp, i, (longlong_t)vdevid);
                        continue;
                }
                vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
                if (vd == NULL) {
                        zfs_panic_recover("blkptr at %p DVA %u has invalid "
                            "VDEV %llu",
                            bp, i, (longlong_t)vdevid);
                        continue;
                }
                if (vd->vdev_ops == &vdev_hole_ops) {
                        zfs_panic_recover("blkptr at %p DVA %u has hole "
                            "VDEV %llu",
                            bp, i, (longlong_t)vdevid);
                        continue;
                }
                if (vd->vdev_ops == &vdev_missing_ops) {
                        /*
                         * "missing" vdevs are valid during import, but we
                         * don't have their detailed info (e.g. asize), so
                         * we can't perform any more checks on them.
                         */
                        continue;
                }
                uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
                uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
                if (BP_IS_GANG(bp))
                        asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
                if (offset + asize > vd->vdev_asize) {
                        zfs_panic_recover("blkptr at %p DVA %u has invalid "
                            "OFFSET %llu",
                            bp, i, (longlong_t)offset);
                }
        }
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
        uint64_t vdevid = DVA_GET_VDEV(dva);

        if (vdevid >= spa->spa_root_vdev->vdev_children)
                return (B_FALSE);

        vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
        if (vd == NULL)
                return (B_FALSE);

        if (vd->vdev_ops == &vdev_hole_ops)
                return (B_FALSE);

        if (vd->vdev_ops == &vdev_missing_ops) {
                return (B_FALSE);
        }

        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t asize = DVA_GET_ASIZE(dva);

        if (BP_IS_GANG(bp))
                asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
        if (offset + asize > vd->vdev_asize)
                return (B_FALSE);

        return (B_TRUE);
}
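/*
 * For reference, a DVA packs <vdev, offset, asize> into its two 64-bit
 * words, and the checks above simply decode and range-check each field.
 * A sketch of pulling the first DVA out of a block pointer:
 *
 *      const dva_t *dva = &bp->blk_dva[0];
 *      uint64_t vdevid = DVA_GET_VDEV(dva);    // index under the root vdev
 *      uint64_t offset = DVA_GET_OFFSET(dva);  // byte offset on that vdev
 *      uint64_t asize = DVA_GET_ASIZE(dva);    // allocated size, in bytes
 *
 * Gang headers are the exception: their on-disk allocation is always
 * SPA_GANGBLOCKSIZE, which is why both functions above override asize
 * for BP_IS_GANG() blocks.
 */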
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
        zio_t *zio;

        zfs_blkptr_verify(spa, bp);

        zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
            data, size, size, done, private,
            ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
            ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
            ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

        return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
        zio_t *zio;

        ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
            zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
            zp->zp_compress >= ZIO_COMPRESS_OFF &&
            zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
            DMU_OT_IS_VALID(zp->zp_type) &&
            zp->zp_level < 32 &&
            zp->zp_copies > 0 &&
            zp->zp_copies <= spa_max_replication(spa));

        zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
            ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
            ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
            ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

        zio->io_ready = ready;
        zio->io_children_ready = children_ready;
        zio->io_physdone = physdone;
        zio->io_prop = *zp;

        /*
         * Data can be NULL if we are going to call zio_write_override() to
         * provide the already-allocated BP.  But we may need the data to
         * verify a dedup hit (if requested).  In this case, don't try to
         * dedup (just take the already-allocated BP verbatim).  Encrypted
         * dedup blocks need data as well so we also disable dedup in this
         * case.
         */
        if (data == NULL &&
            (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
                zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
        }

        return (zio);
}
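/*
 * The write-policy knobs all travel in the zio_prop_t.  A hedged sketch
 * of issuing a simple logical write (the field names are real; the
 * concrete values below are only an example):
 *
 *      zio_prop_t zp = { 0 };
 *
 *      zp.zp_checksum = ZIO_CHECKSUM_FLETCHER_4;
 *      zp.zp_compress = ZIO_COMPRESS_LZ4;
 *      zp.zp_type = DMU_OT_PLAIN_FILE_CONTENTS;
 *      zp.zp_level = 0;
 *      zp.zp_copies = 1;
 *
 *      zio_nowait(zio_write(pio, spa, txg, bp, abd, lsize, psize, &zp,
 *          NULL, NULL, NULL, done_cb, cb_arg,
 *          ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
 *
 * In practice these properties come from dmu_write_policy(), which
 * derives them from dataset settings.
 */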
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
        zio_t *zio;

        zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
            ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
            ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

        return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
        ASSERT(zio->io_type == ZIO_TYPE_WRITE);
        ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
        ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
        ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

        /*
         * We must reset the io_prop to match the values that existed
         * when the bp was first written by dmu_sync() keeping in mind
         * that nopwrite and dedup are mutually exclusive.
         */
        zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
        zio->io_prop.zp_nopwrite = nopwrite;
        zio->io_prop.zp_copies = copies;
        zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

        zfs_blkptr_verify(spa, bp);

        /*
         * The check for EMBEDDED is a performance optimization.  We
         * process the free here (by ignoring it) rather than
         * putting it on the list and then processing it in zio_free_sync().
         */
        if (BP_IS_EMBEDDED(bp))
                return;
        metaslab_check_free(spa, bp);

        /*
         * Frees that are for the currently-syncing txg, are not going to be
         * deferred, and which will not need to do a read (i.e. not GANG or
         * DEDUP), can be processed immediately.  Otherwise, put them on the
         * in-memory list for later processing.
         */
        if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
            txg != spa->spa_syncing_txg ||
            spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
                bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
        } else {
                VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
        }
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
        zio_t *zio;
        enum zio_stage stage = ZIO_FREE_PIPELINE;

        ASSERT(!BP_IS_HOLE(bp));
        ASSERT(spa_syncing_txg(spa) == txg);
        ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

        if (BP_IS_EMBEDDED(bp))
                return (zio_null(pio, spa, NULL, NULL, NULL, 0));

        metaslab_check_free(spa, bp);
        arc_freed(spa, bp);
        dsl_scan_freed(spa, bp);

        /*
         * GANG and DEDUP blocks can induce a read (for the gang block header,
         * or the DDT), so issue them asynchronously so that this thread is
         * not tied up.
         */
        if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
                stage |= ZIO_STAGE_ISSUE_ASYNC;

        zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
            BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
            flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

        return (zio);
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
        zio_t *zio;

        zfs_blkptr_verify(spa, bp);

        if (BP_IS_EMBEDDED(bp))
                return (zio_null(pio, spa, NULL, NULL, NULL, 0));

        /*
         * A claim is an allocation of a specific block.  Claims are needed
         * to support immediate writes in the intent log.  The issue is that
         * immediate writes contain committed data, but in a txg that was
         * *not* committed.  Upon opening the pool after an unclean shutdown,
         * the intent log claims all blocks that contain immediate write data
         * so that the SPA knows they're in use.
         *
         * All claims *must* be resolved in the first txg -- before the SPA
         * starts allocating blocks -- so that nothing is allocated twice.
         * If txg == 0 we just verify that the block is claimable.
         */
        ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
            spa_min_claim_txg(spa));
        ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
        ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));       /* zdb(1M) */

        zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
            BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
            flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
        ASSERT0(zio->io_queued_timestamp);

        return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
        zio_t *zio;
        int c;

        if (vd->vdev_children == 0) {
                zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
                    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
                    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

                zio->io_cmd = cmd;
        } else {
                zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

                for (c = 0; c < vd->vdev_children; c++)
                        zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
                            done, private, flags));
        }

        return (zio);
}

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    enum zio_flag flags, enum trim_flag trim_flags)
{
        zio_t *zio;

        ASSERT0(vd->vdev_children);
        ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
        ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
        ASSERT3U(size, !=, 0);

        zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
            private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
            vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
        zio->io_trim_flags = trim_flags;

        return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
        zio_t *zio;

        ASSERT(vd->vdev_children == 0);
        ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
            offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
        ASSERT3U(offset + size, <=, vd->vdev_psize);

        zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
            private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
            offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

        zio->io_prop.zp_checksum = checksum;

        return (zio);
}
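/*
 * Physical reads bypass block pointers entirely and address a single leaf
 * vdev by byte offset.  A hedged sketch of how the label code might read
 * one vdev label (compare vdev_label.c; "l", "rio", and "vp_abd" here are
 * illustrative names):
 *
 *      uint64_t offset = vdev_label_offset(vd->vdev_psize, l,
 *          offsetof(vdev_label_t, vl_vdev_phys));
 *
 *      zio_nowait(zio_read_phys(rio, vd, offset,
 *          sizeof (vdev_phys_t), vp_abd, ZIO_CHECKSUM_LABEL,
 *          done, private, ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
 *
 * Passing labels == B_TRUE enables the assertion above that the access
 * stays inside the front or back label region of the device.
 */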
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
        zio_t *zio;

        ASSERT(vd->vdev_children == 0);
        ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
            offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
        ASSERT3U(offset + size, <=, vd->vdev_psize);

        zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
            private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
            offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

        zio->io_prop.zp_checksum = checksum;

        if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
                /*
                 * zec checksums are necessarily destructive -- they modify
                 * the end of the write buffer to hold the verifier/checksum.
                 * Therefore, we must make a local copy in case the data is
                 * being written to multiple places in parallel.
                 */
                abd_t *wbuf = abd_alloc_sametype(data, size);
                abd_copy(wbuf, data, size);

                zio_push_transform(zio, wbuf, size, size, NULL);
        }

        return (zio);
}
/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
        enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
        zio_t *zio;

        /*
         * vdev child I/Os do not propagate their error to the parent.
         * Therefore, for correct operation the caller *must* check for
         * and handle the error in the child i/o's done callback.
         * The only exceptions are i/os that we don't care about
         * (OPTIONAL or REPAIR).
         */
        ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
            done != NULL);

        if (type == ZIO_TYPE_READ && bp != NULL) {
                /*
                 * If we have the bp, then the child should perform the
                 * checksum and the parent need not.  This pushes error
                 * detection as close to the leaves as possible and
                 * eliminates redundant checksums in the interior nodes.
                 */
                pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
                pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
        }

        if (vd->vdev_ops->vdev_op_leaf) {
                ASSERT0(vd->vdev_children);
                offset += VDEV_LABEL_START_SIZE;
        }

        flags |= ZIO_VDEV_CHILD_FLAGS(pio);

        /*
         * If we've decided to do a repair, the write is not speculative --
         * even if the original read was.
         */
        if (flags & ZIO_FLAG_IO_REPAIR)
                flags &= ~ZIO_FLAG_SPECULATIVE;

        /*
         * If we're creating a child I/O that is not associated with a
         * top-level vdev, then the child zio is not an allocating I/O.
         * If this is a retried I/O then we ignore it since we will
         * have already processed the original allocating I/O.
         */
        if (flags & ZIO_FLAG_IO_ALLOCATING &&
            (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
                ASSERT(pio->io_metaslab_class != NULL);
                ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
                ASSERT(type == ZIO_TYPE_WRITE);
                ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
                ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
                ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
                    pio->io_child_type == ZIO_CHILD_GANG);

                flags &= ~ZIO_FLAG_IO_ALLOCATING;
        }

        zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
            done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
            ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
        ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

        zio->io_physdone = pio->io_physdone;
        if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
                zio->io_logical->io_phys_children++;

        return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
        zio_t *zio;

        ASSERT(vd->vdev_ops->vdev_op_leaf);

        zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
            data, size, size, done, private, type, priority,
            flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
            vd, offset, NULL,
            ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

        return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
        zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
            NULL, NULL,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
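/*
 * zio_flush() is fire-and-forget by design: the DONT_PROPAGATE flag means
 * a failed or unsupported cache-flush ioctl never fails the parent.  A
 * sketch of the typical barrier pattern, as the ZIL uses it (assuming the
 * caller has collected the vdevs it wrote to):
 *
 *      zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *
 *      for (int i = 0; i < nvds; i++)
 *              zio_flush(rio, vds[i]);
 *
 *      (void) zio_wait(rio);   // returns once all caches are flushed
 */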
void
zio_shrink(zio_t *zio, uint64_t size)
{
        ASSERT3P(zio->io_executor, ==, NULL);
        ASSERT3P(zio->io_orig_size, ==, zio->io_size);
        ASSERT3U(size, <=, zio->io_size);

        /*
         * We don't shrink for raidz because of problems with the
         * reconstruction when reading back less than the block size.
         * Note, BP_IS_RAIDZ() assumes no compression.
         */
        ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
        if (!BP_IS_RAIDZ(zio->io_bp)) {
                /* we are not doing a raw write */
                ASSERT3U(zio->io_size, ==, zio->io_lsize);
                zio->io_orig_size = zio->io_size = zio->io_lsize = size;
        }
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
        blkptr_t *bp = zio->io_bp;
        uint64_t psize =
            BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);

        ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

        if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
            zio->io_child_type == ZIO_CHILD_LOGICAL &&
            !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
                zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
                    psize, psize, zio_decompress);
        }

        if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
            BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
            zio->io_child_type == ZIO_CHILD_LOGICAL) {
                zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
                    psize, psize, zio_decrypt);
        }

        if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
                int psize = BPE_GET_PSIZE(bp);
                void *data = abd_borrow_buf(zio->io_abd, psize);

                zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
                decode_embedded_bp_compressed(bp, data);
                abd_return_buf_copy(zio->io_abd, data, psize);
        } else {
                ASSERT(!BP_IS_EMBEDDED(bp));
                ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
        }

        if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
                zio->io_flags |= ZIO_FLAG_DONT_CACHE;

        if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
                zio->io_flags |= ZIO_FLAG_DONT_CACHE;

        if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
                zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

        return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
        if (!IO_IS_ALLOCATING(zio))
                return (ZIO_PIPELINE_CONTINUE);

        ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

        if (zio->io_bp_override) {
                blkptr_t *bp = zio->io_bp;
                zio_prop_t *zp = &zio->io_prop;

                ASSERT(bp->blk_birth != zio->io_txg);
                ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

                *bp = *zio->io_bp_override;
                zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

                if (BP_IS_EMBEDDED(bp))
                        return (ZIO_PIPELINE_CONTINUE);

                /*
                 * If we've been overridden and nopwrite is set then
                 * set the flag accordingly to indicate that a nopwrite
                 * has already occurred.
                 */
                if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
                        ASSERT(!zp->zp_dedup);
                        ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
                        zio->io_flags |= ZIO_FLAG_NOPWRITE;
                        return (ZIO_PIPELINE_CONTINUE);
                }

                ASSERT(!zp->zp_nopwrite);

                if (BP_IS_HOLE(bp) || !zp->zp_dedup)
                        return (ZIO_PIPELINE_CONTINUE);

                ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
                    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

                if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
                    !zp->zp_encrypt) {
                        BP_SET_DEDUP(bp, 1);
                        zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
                        return (ZIO_PIPELINE_CONTINUE);
                }

                /*
                 * We were unable to handle this as an override bp, treat
                 * it as a regular write I/O.
                 */
                zio->io_bp_override = NULL;
                *bp = zio->io_bp_orig;
                zio->io_pipeline = zio->io_orig_pipeline;
        }

        return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_write_compress(zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        zio_prop_t *zp = &zio->io_prop;
        enum zio_compress compress = zp->zp_compress;
        blkptr_t *bp = zio->io_bp;
        uint64_t lsize = zio->io_lsize;
        uint64_t psize = zio->io_size;
        int pass = 1;

        /*
         * If our children haven't all reached the ready stage,
         * wait for them and then repeat this pipeline stage.
         */
        if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
            ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
                return (ZIO_PIPELINE_STOP);
        }

        if (!IO_IS_ALLOCATING(zio))
                return (ZIO_PIPELINE_CONTINUE);

        if (zio->io_children_ready != NULL) {
                /*
                 * Now that all our children are ready, run the callback
                 * associated with this zio in case it wants to modify the
                 * data to be written.
                 */
                ASSERT3U(zp->zp_level, >, 0);
                zio->io_children_ready(zio);
        }

        ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
        ASSERT(zio->io_bp_override == NULL);

        if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
                /*
                 * We're rewriting an existing block, which means we're
                 * working on behalf of spa_sync().  For spa_sync() to
                 * converge, it must eventually be the case that we don't
                 * have to allocate new blocks.  But compression changes
                 * the blocksize, which forces a reallocate, and makes
                 * convergence take longer.  Therefore, after the first
                 * few passes, stop compressing to ensure convergence.
                 */
                pass = spa_sync_pass(spa);

                ASSERT(zio->io_txg == spa_syncing_txg(spa));
                ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
                ASSERT(!BP_GET_DEDUP(bp));

                if (pass >= zfs_sync_pass_dont_compress)
                        compress = ZIO_COMPRESS_OFF;

                /* Make sure someone doesn't change their mind on overwrites */
                ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
                    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
        }

        /* If it's a compressed write that is not raw, compress the buffer. */
        if (compress != ZIO_COMPRESS_OFF &&
            !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
                void *cbuf = zio_buf_alloc(lsize);
                psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
                if (psize == 0 || psize == lsize) {
                        compress = ZIO_COMPRESS_OFF;
                        zio_buf_free(cbuf, lsize);
                } else if (!zp->zp_dedup && !zp->zp_encrypt &&
                    psize <= BPE_PAYLOAD_SIZE &&
                    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
                    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
                        encode_embedded_bp_compressed(bp,
                            cbuf, compress, lsize, psize);
                        BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
                        BP_SET_TYPE(bp, zio->io_prop.zp_type);
                        BP_SET_LEVEL(bp, zio->io_prop.zp_level);
                        zio_buf_free(cbuf, lsize);
                        bp->blk_birth = zio->io_txg;
                        zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
                        ASSERT(spa_feature_is_active(spa,
                            SPA_FEATURE_EMBEDDED_DATA));
                        return (ZIO_PIPELINE_CONTINUE);
                } else {
                        /*
                         * Round up the compressed size to the ashift
                         * of the smallest-ashift device, and zero the tail.
                         * This ensures that the compressed size of the BP
                         * (and thus compressratio property) are correct,
                         * in that we charge for the padding used to fill out
                         * the last sector.
                         */
                        ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
                        size_t rounded = (size_t)P2ROUNDUP(psize,
                            1ULL << spa->spa_min_ashift);
                        if (rounded >= lsize) {
                                compress = ZIO_COMPRESS_OFF;
                                zio_buf_free(cbuf, lsize);
                                psize = lsize;
                        } else {
                                abd_t *cdata = abd_get_from_buf(cbuf, lsize);
                                abd_take_ownership_of_buf(cdata, B_TRUE);
                                abd_zero_off(cdata, psize, rounded - psize);
                                psize = rounded;
                                zio_push_transform(zio, cdata,
                                    psize, lsize, NULL);
                        }
                }

                /*
                 * We were unable to handle this as an override bp, treat
                 * it as a regular write I/O.
                 */
                zio->io_bp_override = NULL;
                *bp = zio->io_bp_orig;
                zio->io_pipeline = zio->io_orig_pipeline;

        } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
            zp->zp_type == DMU_OT_DNODE) {
                /*
                 * The DMU actually relies on the zio layer's compression
                 * to free metadnode blocks that have had all contained
                 * dnodes freed. As a result, even when doing a raw
                 * receive, we must check whether the block can be compressed
                 * to a hole.
                 */
                psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
                    zio->io_abd, NULL, lsize);
                if (psize == 0)
                        compress = ZIO_COMPRESS_OFF;
        } else {
                ASSERT3U(psize, !=, 0);
        }

        /*
         * The final pass of spa_sync() must be all rewrites, but the first
         * few passes offer a trade-off: allocating blocks defers convergence,
         * but newly allocated blocks are sequential, so they can be written
         * to disk faster.  Therefore, we allow the first few passes of
         * spa_sync() to allocate new blocks, but force rewrites after that.
         * There should only be a handful of blocks after pass 1 in any case.
         */
        if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
            BP_GET_PSIZE(bp) == psize &&
            pass >= zfs_sync_pass_rewrite) {
                VERIFY3U(psize, !=, 0);
                enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
                zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
                zio->io_flags |= ZIO_FLAG_IO_REWRITE;
        } else {
                BP_ZERO(bp);
                zio->io_pipeline = ZIO_WRITE_PIPELINE;
        }

        if (psize == 0) {
                if (zio->io_bp_orig.blk_birth != 0 &&
                    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
                        BP_SET_LSIZE(bp, lsize);
                        BP_SET_TYPE(bp, zp->zp_type);
                        BP_SET_LEVEL(bp, zp->zp_level);
                        BP_SET_BIRTH(bp, zio->io_txg, 0);
                }
                zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
        } else {
                ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
                BP_SET_LSIZE(bp, lsize);
                BP_SET_TYPE(bp, zp->zp_type);
                BP_SET_LEVEL(bp, zp->zp_level);
                BP_SET_PSIZE(bp, psize);
                BP_SET_COMPRESS(bp, compress);
                BP_SET_CHECKSUM(bp, zp->zp_checksum);
                BP_SET_DEDUP(bp, zp->zp_dedup);
                BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
                if (zp->zp_dedup) {
                        ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
                        ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
                        ASSERT(!zp->zp_encrypt ||
                            DMU_OT_IS_ENCRYPTED(zp->zp_type));
                        zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
                }
                if (zp->zp_nopwrite) {
                        ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
                        ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
                        zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
                }
        }
        return (ZIO_PIPELINE_CONTINUE);
}
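/*
 * To make the psize rounding in zio_write_compress() concrete: suppose a
 * 16K logical block (lsize = 16384) compresses to 3100 bytes on a pool
 * whose smallest ashift is 12 (4K sectors).  Then:
 *
 *      rounded = P2ROUNDUP(3100, 1ULL << 12) = 4096
 *
 * Since 4096 < 16384, the write proceeds with psize = 4096 and bytes
 * 3100..4095 zeroed.  Had the block compressed to, say, 15000 bytes,
 * rounded would be 16384 (== lsize), compression would be abandoned, and
 * the block written uncompressed.
 */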
static int
zio_free_bp_init(zio_t *zio)
{
        blkptr_t *bp = zio->io_bp;

        if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
                if (BP_GET_DEDUP(bp))
                        zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
        }

        ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

        return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
        spa_t *spa = zio->io_spa;
        zio_type_t t = zio->io_type;
        int flags = (cutinline ? TQ_FRONT : 0);

        /*
         * If we're a config writer or a probe, the normal issue and
         * interrupt threads may all be blocked waiting for the config lock.
         * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
         */
        if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
                t = ZIO_TYPE_NULL;

        /*
         * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
         */
        if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
                t = ZIO_TYPE_NULL;

        /*
         * If this is a high priority I/O, then use the high priority taskq if
         * available.
         */
        if ((zio->io_priority == ZIO_PRIORITY_NOW ||
            zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
            spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
                q++;

        ASSERT3U(q, <, ZIO_TASKQ_TYPES);

        /*
         * NB: We are assuming that the zio can only be dispatched
         * to a single taskq at a time.  It would be a grievous error
         * to dispatch the zio to another taskq at the same time.
         */
        ASSERT(zio->io_tqent.tqent_next == NULL);
        spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
            flags, &zio->io_tqent);
}
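/*
 * Putting zio_taskq_dispatch()'s selection logic together with an example:
 * a ZIO_PRIORITY_SYNC_WRITE write dispatched with q = ZIO_TASKQ_ISSUE is
 * bumped one slot, so it runs on
 *
 *      spa->spa_zio_taskq[ZIO_TYPE_WRITE][ZIO_TASKQ_ISSUE_HIGH]
 *
 * provided that high-priority taskq was created with a nonzero
 * stqs_count.  The q + 1 trick works because each _HIGH taskq type
 * immediately follows its normal counterpart in zio_taskq_type_t.
 */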
static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
        kthread_t *executor = zio->io_executor;
        spa_t *spa = zio->io_spa;

        for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
                spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
                uint_t i;
                for (i = 0; i < tqs->stqs_count; i++) {
                        if (taskq_member(tqs->stqs_taskq[i], executor))
                                return (B_TRUE);
                }
        }

        return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
        zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

        return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
        zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

void
zio_delay_interrupt(zio_t *zio)
{
        /*
         * The timeout_generic() function isn't defined in userspace, so
         * rather than trying to implement the function, the zio delay
         * functionality has been disabled for userspace builds.
         */

#ifdef _KERNEL
        /*
         * If io_target_timestamp is zero, then no delay has been registered
         * for this IO, thus jump to the end of this function and "skip" the
         * delay; issuing it directly to the zio layer.
         */
        if (zio->io_target_timestamp != 0) {
                hrtime_t now = gethrtime();

                if (now >= zio->io_target_timestamp) {
                        /*
                         * This IO has already taken longer than the target
                         * delay to complete, so we don't want to delay it
                         * any longer; we "miss" the delay and issue it
                         * directly to the zio layer.  This is likely due to
                         * the target latency being set to a value less than
                         * the underlying hardware can satisfy (e.g. delay
                         * set to 1ms, but the disks take 10ms to complete an
                         * IO request).
                         */

                        DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
                            hrtime_t, now);

                        zio_interrupt(zio);
                } else {
                        hrtime_t diff = zio->io_target_timestamp - now;

                        DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
                            hrtime_t, now, hrtime_t, diff);

                        (void) timeout_generic(CALLOUT_NORMAL,
                            (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
                }

                return;
        }
#endif

        DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
        zio_interrupt(zio);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *      (1) the I/O completes
 *      (2) the pipeline stalls waiting for dependent child I/Os
 *      (3) the I/O issues, so we're waiting for an I/O completion interrupt
 *      (4) the I/O is delegated by vdev-level caching or aggregation
 *      (5) the I/O is deferred due to vdev-level queueing
 *      (6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
        zio->io_executor = curthread;

        ASSERT3U(zio->io_queued_timestamp, >, 0);

        while (zio->io_stage < ZIO_STAGE_DONE) {
                enum zio_stage pipeline = zio->io_pipeline;
                enum zio_stage stage = zio->io_stage;
                int rv;

                ASSERT(!MUTEX_HELD(&zio->io_lock));
                ASSERT(ISP2(stage));
                ASSERT(zio->io_stall == NULL);

                do {
                        stage <<= 1;
                } while ((stage & pipeline) == 0);

                ASSERT(stage <= ZIO_STAGE_DONE);

                /*
                 * If we are in interrupt context and this pipeline stage
                 * will grab a config lock that is held across I/O,
                 * or may wait for an I/O that needs an interrupt thread
                 * to complete, issue async to avoid deadlock.
                 *
                 * For VDEV_IO_START, we cut in line so that the io will
                 * be sent to disk promptly.
                 */
                if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
                    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
                        boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
                            zio_requeue_io_start_cut_in_line : B_FALSE;
                        zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
                        return;
                }

                zio->io_stage = stage;
                zio->io_pipeline_trace |= zio->io_stage;
                rv = zio_pipeline[highbit64(stage) - 1](zio);

                if (rv == ZIO_PIPELINE_STOP)
                        return;

                ASSERT(rv == ZIO_PIPELINE_CONTINUE);
        }
}
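/*
 * Because io_stage and io_pipeline are bitmasks with one bit per stage,
 * the stage-advance loop in zio_execute() is just "shift left until we
 * hit a bit that is in the pipeline".  Schematically, with OPEN as the
 * lowest stage bit (1 << 0) and DONE the highest: if the pipeline mask
 * contained only OPEN, DVA_ALLOCATE, and DONE, then starting from
 * io_stage == ZIO_STAGE_OPEN the loop shifts past every unused bit and
 * stops at DVA_ALLOCATE, and the handler invoked is
 * zio_pipeline[highbit64(stage) - 1], i.e. the slot whose index matches
 * the stage's bit position.  (Stage bit assignments live in zio_impl.h;
 * the particular mask here is illustrative.)
 */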
1841 zio_requeue_io_start_cut_in_line : B_FALSE; 1842 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 1843 return; 1844 } 1845 1846 zio->io_stage = stage; 1847 zio->io_pipeline_trace |= zio->io_stage; 1848 rv = zio_pipeline[highbit64(stage) - 1](zio); 1849 1850 if (rv == ZIO_PIPELINE_STOP) 1851 return; 1852 1853 ASSERT(rv == ZIO_PIPELINE_CONTINUE); 1854 } 1855 } 1856 1857 /* 1858 * ========================================================================== 1859 * Initiate I/O, either sync or async 1860 * ========================================================================== 1861 */ 1862 int 1863 zio_wait(zio_t *zio) 1864 { 1865 int error; 1866 1867 ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN); 1868 ASSERT3P(zio->io_executor, ==, NULL); 1869 1870 zio->io_waiter = curthread; 1871 ASSERT0(zio->io_queued_timestamp); 1872 zio->io_queued_timestamp = gethrtime(); 1873 1874 zio_execute(zio); 1875 1876 mutex_enter(&zio->io_lock); 1877 while (zio->io_executor != NULL) 1878 cv_wait(&zio->io_cv, &zio->io_lock); 1879 mutex_exit(&zio->io_lock); 1880 1881 error = zio->io_error; 1882 zio_destroy(zio); 1883 1884 return (error); 1885 } 1886 1887 void 1888 zio_nowait(zio_t *zio) 1889 { 1890 ASSERT3P(zio->io_executor, ==, NULL); 1891 1892 if (zio->io_child_type == ZIO_CHILD_LOGICAL && 1893 zio_unique_parent(zio) == NULL) { 1894 /* 1895 * This is a logical async I/O with no parent to wait for it. 1896 * We add it to the spa_async_root_zio "Godfather" I/O which 1897 * will ensure they complete prior to unloading the pool. 1898 */ 1899 spa_t *spa = zio->io_spa; 1900 1901 zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio); 1902 } 1903 1904 ASSERT0(zio->io_queued_timestamp); 1905 zio->io_queued_timestamp = gethrtime(); 1906 zio_execute(zio); 1907 } 1908 1909 /* 1910 * ========================================================================== 1911 * Reexecute, cancel, or suspend/resume failed I/O 1912 * ========================================================================== 1913 */ 1914 1915 static void 1916 zio_reexecute(zio_t *pio) 1917 { 1918 zio_t *cio, *cio_next; 1919 1920 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 1921 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 1922 ASSERT(pio->io_gang_leader == NULL); 1923 ASSERT(pio->io_gang_tree == NULL); 1924 1925 pio->io_flags = pio->io_orig_flags; 1926 pio->io_stage = pio->io_orig_stage; 1927 pio->io_pipeline = pio->io_orig_pipeline; 1928 pio->io_reexecute = 0; 1929 pio->io_flags |= ZIO_FLAG_REEXECUTED; 1930 pio->io_pipeline_trace = 0; 1931 pio->io_error = 0; 1932 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1933 pio->io_state[w] = 0; 1934 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 1935 pio->io_child_error[c] = 0; 1936 1937 if (IO_IS_ALLOCATING(pio)) 1938 BP_ZERO(pio->io_bp); 1939 1940 /* 1941 * As we reexecute pio's children, new children could be created. 1942 * New children go to the head of pio's io_child_list, however, 1943 * so we will (correctly) not reexecute them. The key is that 1944 * the remainder of pio's io_child_list, from 'cio_next' onward, 1945 * cannot be affected by any side effects of reexecuting 'cio'. 
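 *
 * Note also that we bump pio's io_children counts (under io_lock)
 * before reexecuting each child, so pio cannot be notified to
 * completion until every reexecuted child has finished and called
 * back into it.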
1946 */ 1947 zio_link_t *zl = NULL; 1948 mutex_enter(&pio->io_lock); 1949 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 1950 cio_next = zio_walk_children(pio, &zl); 1951 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1952 pio->io_children[cio->io_child_type][w]++; 1953 mutex_exit(&pio->io_lock); 1954 zio_reexecute(cio); 1955 mutex_enter(&pio->io_lock); 1956 } 1957 mutex_exit(&pio->io_lock); 1958 1959 /* 1960 * Now that all children have been reexecuted, execute the parent. 1961 * We don't reexecute "The Godfather" I/O here as it's the 1962 * responsibility of the caller to wait on it. 1963 */ 1964 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 1965 pio->io_queued_timestamp = gethrtime(); 1966 zio_execute(pio); 1967 } 1968 } 1969 1970 void 1971 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason) 1972 { 1973 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 1974 fm_panic("Pool '%s' has encountered an uncorrectable I/O " 1975 "failure and the failure mode property for this pool " 1976 "is set to panic.", spa_name(spa)); 1977 1978 zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, 1979 NULL, NULL, 0, 0); 1980 1981 mutex_enter(&spa->spa_suspend_lock); 1982 1983 if (spa->spa_suspend_zio_root == NULL) 1984 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 1985 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 1986 ZIO_FLAG_GODFATHER); 1987 1988 spa->spa_suspended = reason; 1989 1990 if (zio != NULL) { 1991 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 1992 ASSERT(zio != spa->spa_suspend_zio_root); 1993 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1994 ASSERT(zio_unique_parent(zio) == NULL); 1995 ASSERT(zio->io_stage == ZIO_STAGE_DONE); 1996 zio_add_child(spa->spa_suspend_zio_root, zio); 1997 } 1998 1999 mutex_exit(&spa->spa_suspend_lock); 2000 } 2001 2002 int 2003 zio_resume(spa_t *spa) 2004 { 2005 zio_t *pio; 2006 2007 /* 2008 * Reexecute all previously suspended i/o. 2009 */ 2010 mutex_enter(&spa->spa_suspend_lock); 2011 spa->spa_suspended = ZIO_SUSPEND_NONE; 2012 cv_broadcast(&spa->spa_suspend_cv); 2013 pio = spa->spa_suspend_zio_root; 2014 spa->spa_suspend_zio_root = NULL; 2015 mutex_exit(&spa->spa_suspend_lock); 2016 2017 if (pio == NULL) 2018 return (0); 2019 2020 zio_reexecute(pio); 2021 return (zio_wait(pio)); 2022 } 2023 2024 void 2025 zio_resume_wait(spa_t *spa) 2026 { 2027 mutex_enter(&spa->spa_suspend_lock); 2028 while (spa_suspended(spa)) 2029 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 2030 mutex_exit(&spa->spa_suspend_lock); 2031 } 2032 2033 /* 2034 * ========================================================================== 2035 * Gang blocks. 2036 * 2037 * A gang block is a collection of small blocks that looks to the DMU 2038 * like one large block. When zio_dva_allocate() cannot find a block 2039 * of the requested size, due to either severe fragmentation or the pool 2040 * being nearly full, it calls zio_write_gang_block() to construct the 2041 * block from smaller fragments. 2042 * 2043 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 2044 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 2045 * an indirect block: it's an array of block pointers. It consumes 2046 * only one sector and hence is allocatable regardless of fragmentation. 2047 * The gang header's bps point to its gang members, which hold the data. 2048 * 2049 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 2050 * as the verifier to ensure uniqueness of the SHA256 checksum. 
2051 * Critically, the gang block bp's blk_cksum is the checksum of the data, 2052 * not the gang header. This ensures that data block signatures (needed for 2053 * deduplication) are independent of how the block is physically stored. 2054 * 2055 * Gang blocks can be nested: a gang member may itself be a gang block. 2056 * Thus every gang block is a tree in which root and all interior nodes are 2057 * gang headers, and the leaves are normal blocks that contain user data. 2058 * The root of the gang tree is called the gang leader. 2059 * 2060 * To perform any operation (read, rewrite, free, claim) on a gang block, 2061 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 2062 * in the io_gang_tree field of the original logical i/o by recursively 2063 * reading the gang leader and all gang headers below it. This yields 2064 * an in-core tree containing the contents of every gang header and the 2065 * bps for every constituent of the gang block. 2066 * 2067 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 2068 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 2069 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 2070 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 2071 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 2072 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 2073 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 2074 * of the gang header plus zio_checksum_compute() of the data to update the 2075 * gang header's blk_cksum as described above. 2076 * 2077 * The two-phase assemble/issue model solves the problem of partial failure -- 2078 * what if you'd freed part of a gang block but then couldn't read the 2079 * gang header for another part? Assembling the entire gang tree first 2080 * ensures that all the necessary gang header I/O has succeeded before 2081 * starting the actual work of free, claim, or write. Once the gang tree 2082 * is assembled, free and claim are in-memory operations that cannot fail. 2083 * 2084 * In the event that a gang write fails, zio_dva_unallocate() walks the 2085 * gang tree to immediately free (i.e. insert back into the space map) 2086 * everything we've allocated. This ensures that we don't get ENOSPC 2087 * errors during repeated suspend/resume cycles due to a flaky device. 2088 * 2089 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 2090 * the gang tree, we won't modify the block, so we can safely defer the free 2091 * (knowing that the block is still intact). If we *can* assemble the gang 2092 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 2093 * each constituent bp and we can allocate a new block on the next sync pass. 2094 * 2095 * In all cases, the gang tree allows complete recovery from partial failure. 
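 *
 * As an illustrative sketch (sizes hypothetical), a 96K write that
 * cannot be satisfied contiguously might become:
 *
 *	gang header (one sector)
 *	    bp[0] -> 32K data member
 *	    bp[1] -> 32K data member
 *	    bp[2] -> 32K data member
 *
 * and if even 32K is unavailable, any member may itself be another
 * gang block, one level deeper in the tree.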
2096 * ========================================================================== 2097 */ 2098 2099 static void 2100 zio_gang_issue_func_done(zio_t *zio) 2101 { 2102 abd_put(zio->io_abd); 2103 } 2104 2105 static zio_t * 2106 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2107 uint64_t offset) 2108 { 2109 if (gn != NULL) 2110 return (pio); 2111 2112 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 2113 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 2114 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2115 &pio->io_bookmark)); 2116 } 2117 2118 static zio_t * 2119 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2120 uint64_t offset) 2121 { 2122 zio_t *zio; 2123 2124 if (gn != NULL) { 2125 abd_t *gbh_abd = 2126 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2127 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2128 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 2129 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2130 &pio->io_bookmark); 2131 /* 2132 * As we rewrite each gang header, the pipeline will compute 2133 * a new gang block header checksum for it; but no one will 2134 * compute a new data checksum, so we do that here. The one 2135 * exception is the gang leader: the pipeline already computed 2136 * its data checksum because that stage precedes gang assembly. 2137 * (Presently, nothing actually uses interior data checksums; 2138 * this is just good hygiene.) 2139 */ 2140 if (gn != pio->io_gang_leader->io_gang_tree) { 2141 abd_t *buf = abd_get_offset(data, offset); 2142 2143 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 2144 buf, BP_GET_PSIZE(bp)); 2145 2146 abd_put(buf); 2147 } 2148 /* 2149 * If we are here to damage data for testing purposes, 2150 * leave the GBH alone so that we can detect the damage. 
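 * (Clearing the vdev I/O stages below keeps this gang header rewrite
 * from reaching the device, so the on-disk header still describes
 * the original data and the induced damage remains detectable.)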
2151 */ 2152 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 2153 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2154 } else { 2155 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2156 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 2157 zio_gang_issue_func_done, NULL, pio->io_priority, 2158 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2159 } 2160 2161 return (zio); 2162 } 2163 2164 /* ARGSUSED */ 2165 static zio_t * 2166 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2167 uint64_t offset) 2168 { 2169 return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 2170 ZIO_GANG_CHILD_FLAGS(pio))); 2171 } 2172 2173 /* ARGSUSED */ 2174 static zio_t * 2175 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2176 uint64_t offset) 2177 { 2178 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 2179 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 2180 } 2181 2182 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2183 NULL, 2184 zio_read_gang, 2185 zio_rewrite_gang, 2186 zio_free_gang, 2187 zio_claim_gang, 2188 NULL 2189 }; 2190 2191 static void zio_gang_tree_assemble_done(zio_t *zio); 2192 2193 static zio_gang_node_t * 2194 zio_gang_node_alloc(zio_gang_node_t **gnpp) 2195 { 2196 zio_gang_node_t *gn; 2197 2198 ASSERT(*gnpp == NULL); 2199 2200 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2201 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2202 *gnpp = gn; 2203 2204 return (gn); 2205 } 2206 2207 static void 2208 zio_gang_node_free(zio_gang_node_t **gnpp) 2209 { 2210 zio_gang_node_t *gn = *gnpp; 2211 2212 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2213 ASSERT(gn->gn_child[g] == NULL); 2214 2215 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2216 kmem_free(gn, sizeof (*gn)); 2217 *gnpp = NULL; 2218 } 2219 2220 static void 2221 zio_gang_tree_free(zio_gang_node_t **gnpp) 2222 { 2223 zio_gang_node_t *gn = *gnpp; 2224 2225 if (gn == NULL) 2226 return; 2227 2228 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2229 zio_gang_tree_free(&gn->gn_child[g]); 2230 2231 zio_gang_node_free(gnpp); 2232 } 2233 2234 static void 2235 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2236 { 2237 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2238 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2239 2240 ASSERT(gio->io_gang_leader == gio); 2241 ASSERT(BP_IS_GANG(bp)); 2242 2243 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2244 zio_gang_tree_assemble_done, gn, gio->io_priority, 2245 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2246 } 2247 2248 static void 2249 zio_gang_tree_assemble_done(zio_t *zio) 2250 { 2251 zio_t *gio = zio->io_gang_leader; 2252 zio_gang_node_t *gn = zio->io_private; 2253 blkptr_t *bp = zio->io_bp; 2254 2255 ASSERT(gio == zio_unique_parent(zio)); 2256 ASSERT(zio->io_child_count == 0); 2257 2258 if (zio->io_error) 2259 return; 2260 2261 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2262 if (BP_SHOULD_BYTESWAP(bp)) 2263 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2264 2265 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2266 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2267 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2268 2269 abd_put(zio->io_abd); 2270 2271 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2272 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2273 if (!BP_IS_GANG(gbp)) 2274 continue; 2275 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2276 } 2277 } 2278 2279 static void 2280 zio_gang_tree_issue(zio_t *pio, 
zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2281 uint64_t offset) 2282 { 2283 zio_t *gio = pio->io_gang_leader; 2284 zio_t *zio; 2285 2286 ASSERT(BP_IS_GANG(bp) == !!gn); 2287 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2288 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2289 2290 /* 2291 * If you're a gang header, your data is in gn->gn_gbh. 2292 * If you're a gang member, your data is in 'data' and gn == NULL. 2293 */ 2294 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2295 2296 if (gn != NULL) { 2297 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2298 2299 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2300 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2301 if (BP_IS_HOLE(gbp)) 2302 continue; 2303 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2304 offset); 2305 offset += BP_GET_PSIZE(gbp); 2306 } 2307 } 2308 2309 if (gn == gio->io_gang_tree) 2310 ASSERT3U(gio->io_size, ==, offset); 2311 2312 if (zio != pio) 2313 zio_nowait(zio); 2314 } 2315 2316 static int 2317 zio_gang_assemble(zio_t *zio) 2318 { 2319 blkptr_t *bp = zio->io_bp; 2320 2321 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2322 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2323 2324 zio->io_gang_leader = zio; 2325 2326 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2327 2328 return (ZIO_PIPELINE_CONTINUE); 2329 } 2330 2331 static int 2332 zio_gang_issue(zio_t *zio) 2333 { 2334 blkptr_t *bp = zio->io_bp; 2335 2336 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2337 return (ZIO_PIPELINE_STOP); 2338 } 2339 2340 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2341 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2342 2343 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2344 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2345 0); 2346 else 2347 zio_gang_tree_free(&zio->io_gang_tree); 2348 2349 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2350 2351 return (ZIO_PIPELINE_CONTINUE); 2352 } 2353 2354 static void 2355 zio_write_gang_member_ready(zio_t *zio) 2356 { 2357 zio_t *pio = zio_unique_parent(zio); 2358 zio_t *gio = zio->io_gang_leader; 2359 dva_t *cdva = zio->io_bp->blk_dva; 2360 dva_t *pdva = pio->io_bp->blk_dva; 2361 uint64_t asize; 2362 2363 if (BP_IS_HOLE(zio->io_bp)) 2364 return; 2365 2366 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2367 2368 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2369 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2370 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2371 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2372 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2373 2374 mutex_enter(&pio->io_lock); 2375 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2376 ASSERT(DVA_GET_GANG(&pdva[d])); 2377 asize = DVA_GET_ASIZE(&pdva[d]); 2378 asize += DVA_GET_ASIZE(&cdva[d]); 2379 DVA_SET_ASIZE(&pdva[d], asize); 2380 } 2381 mutex_exit(&pio->io_lock); 2382 } 2383 2384 static void 2385 zio_write_gang_done(zio_t *zio) 2386 { 2387 /* 2388 * The io_abd field will be NULL for a zio with no data. The io_flags 2389 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 2390 * check for it here as it is cleared in zio_ready. 
2391 */ 2392 if (zio->io_abd != NULL) 2393 abd_put(zio->io_abd); 2394 } 2395 2396 static int 2397 zio_write_gang_block(zio_t *pio) 2398 { 2399 spa_t *spa = pio->io_spa; 2400 metaslab_class_t *mc = spa_normal_class(spa); 2401 blkptr_t *bp = pio->io_bp; 2402 zio_t *gio = pio->io_gang_leader; 2403 zio_t *zio; 2404 zio_gang_node_t *gn, **gnpp; 2405 zio_gbh_phys_t *gbh; 2406 abd_t *gbh_abd; 2407 uint64_t txg = pio->io_txg; 2408 uint64_t resid = pio->io_size; 2409 uint64_t lsize; 2410 int copies = gio->io_prop.zp_copies; 2411 int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 2412 zio_prop_t zp; 2413 int error; 2414 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 2415 2416 /* 2417 * encrypted blocks need DVA[2] free so encrypted gang headers can't 2418 * have a third copy. 2419 */ 2420 if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP) 2421 gbh_copies = SPA_DVAS_PER_BP - 1; 2422 2423 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 2424 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2425 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2426 ASSERT(has_data); 2427 2428 flags |= METASLAB_ASYNC_ALLOC; 2429 VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator], 2430 pio)); 2431 2432 /* 2433 * The logical zio has already placed a reservation for 2434 * 'copies' allocation slots but gang blocks may require 2435 * additional copies. These additional copies 2436 * (i.e. gbh_copies - copies) are guaranteed to succeed 2437 * since metaslab_class_throttle_reserve() always allows 2438 * additional reservations for gang blocks. 2439 */ 2440 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2441 pio->io_allocator, pio, flags)); 2442 } 2443 2444 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 2445 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 2446 &pio->io_alloc_list, pio, pio->io_allocator); 2447 if (error) { 2448 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2449 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2450 ASSERT(has_data); 2451 2452 /* 2453 * If we failed to allocate the gang block header then 2454 * we remove any additional allocation reservations that 2455 * we placed here. The original reservation will 2456 * be removed when the logical I/O goes to the ready 2457 * stage. 2458 */ 2459 metaslab_class_throttle_unreserve(mc, 2460 gbh_copies - copies, pio->io_allocator, pio); 2461 } 2462 pio->io_error = error; 2463 return (ZIO_PIPELINE_CONTINUE); 2464 } 2465 2466 if (pio == gio) { 2467 gnpp = &gio->io_gang_tree; 2468 } else { 2469 gnpp = pio->io_private; 2470 ASSERT(pio->io_ready == zio_write_gang_member_ready); 2471 } 2472 2473 gn = zio_gang_node_alloc(gnpp); 2474 gbh = gn->gn_gbh; 2475 bzero(gbh, SPA_GANGBLOCKSIZE); 2476 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2477 2478 /* 2479 * Create the gang header. 2480 */ 2481 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2482 zio_write_gang_done, NULL, pio->io_priority, 2483 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2484 2485 /* 2486 * Create and nowait the gang children. 
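 * Each child receives an even share of the remaining size, rounded
 * up to SPA_MINBLOCKSIZE; e.g. (an illustrative case) a 96K remainder
 * split across all three gang members yields 32K + 32K + 32K, with
 * any rounding slack absorbed by the earlier members.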
2487 */ 2488 for (int g = 0; resid != 0; resid -= lsize, g++) { 2489 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2490 SPA_MINBLOCKSIZE); 2491 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2492 2493 zp.zp_checksum = gio->io_prop.zp_checksum; 2494 zp.zp_compress = ZIO_COMPRESS_OFF; 2495 zp.zp_type = DMU_OT_NONE; 2496 zp.zp_level = 0; 2497 zp.zp_copies = gio->io_prop.zp_copies; 2498 zp.zp_dedup = B_FALSE; 2499 zp.zp_dedup_verify = B_FALSE; 2500 zp.zp_nopwrite = B_FALSE; 2501 zp.zp_encrypt = gio->io_prop.zp_encrypt; 2502 zp.zp_byteorder = gio->io_prop.zp_byteorder; 2503 bzero(zp.zp_salt, ZIO_DATA_SALT_LEN); 2504 bzero(zp.zp_iv, ZIO_DATA_IV_LEN); 2505 bzero(zp.zp_mac, ZIO_DATA_MAC_LEN); 2506 2507 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 2508 has_data ? abd_get_offset(pio->io_abd, pio->io_size - 2509 resid) : NULL, lsize, lsize, &zp, 2510 zio_write_gang_member_ready, NULL, NULL, 2511 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 2512 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2513 2514 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2515 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2516 ASSERT(has_data); 2517 2518 /* 2519 * Gang children won't throttle but we should 2520 * account for their work, so reserve an allocation 2521 * slot for them here. 2522 */ 2523 VERIFY(metaslab_class_throttle_reserve(mc, 2524 zp.zp_copies, cio->io_allocator, cio, flags)); 2525 } 2526 zio_nowait(cio); 2527 } 2528 2529 /* 2530 * Set pio's pipeline to just wait for zio to finish. 2531 */ 2532 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2533 2534 zio_nowait(zio); 2535 2536 return (ZIO_PIPELINE_CONTINUE); 2537 } 2538 2539 /* 2540 * The zio_nop_write stage in the pipeline determines if allocating a 2541 * new bp is necessary. The nopwrite feature can handle writes in 2542 * either syncing or open context (i.e. zil writes) and as a result is 2543 * mutually exclusive with dedup. 2544 * 2545 * By leveraging a cryptographically secure checksum, such as SHA256, we 2546 * can compare the checksums of the new data and the old to determine if 2547 * allocating a new block is required. Note that our requirements for 2548 * cryptographic strength are fairly weak: there can't be any accidental 2549 * hash collisions, but we don't need to be secure against intentional 2550 * (malicious) collisions. To trigger a nopwrite, you have to be able 2551 * to write the file to begin with, and triggering an incorrect (hash 2552 * collision) nopwrite is no worse than simply writing to the file. 2553 * That said, there are no known attacks against the checksum algorithms 2554 * used for nopwrite, assuming that the salt and the checksums 2555 * themselves remain secret. 2556 */ 2557 static int 2558 zio_nop_write(zio_t *zio) 2559 { 2560 blkptr_t *bp = zio->io_bp; 2561 blkptr_t *bp_orig = &zio->io_bp_orig; 2562 zio_prop_t *zp = &zio->io_prop; 2563 2564 ASSERT(BP_GET_LEVEL(bp) == 0); 2565 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2566 ASSERT(zp->zp_nopwrite); 2567 ASSERT(!zp->zp_dedup); 2568 ASSERT(zio->io_bp_override == NULL); 2569 ASSERT(IO_IS_ALLOCATING(zio)); 2570 2571 /* 2572 * Check to see if the original bp and the new bp have matching 2573 * characteristics (i.e. same checksum, compression algorithms, etc). 2574 * If they don't then just continue with the pipeline which will 2575 * allocate a new bp. 
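 * (Illustrative example: rewriting logically identical data under a
 * nopwrite-capable checksum such as sha256, with compression enabled,
 * passes these checks and is then short-circuited below once the
 * checksums compare equal.)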
2576 */ 2577 if (BP_IS_HOLE(bp_orig) || 2578 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 2579 ZCHECKSUM_FLAG_NOPWRITE) || 2580 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) || 2581 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 2582 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 2583 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 2584 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 2585 return (ZIO_PIPELINE_CONTINUE); 2586 2587 /* 2588 * If the checksums match then reset the pipeline so that we 2589 * avoid allocating a new bp and issuing any I/O. 2590 */ 2591 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 2592 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 2593 ZCHECKSUM_FLAG_NOPWRITE); 2594 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 2595 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 2596 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 2597 ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 2598 sizeof (uint64_t)) == 0); 2599 2600 *bp = *bp_orig; 2601 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2602 zio->io_flags |= ZIO_FLAG_NOPWRITE; 2603 } 2604 2605 return (ZIO_PIPELINE_CONTINUE); 2606 } 2607 2608 /* 2609 * ========================================================================== 2610 * Dedup 2611 * ========================================================================== 2612 */ 2613 static void 2614 zio_ddt_child_read_done(zio_t *zio) 2615 { 2616 blkptr_t *bp = zio->io_bp; 2617 ddt_entry_t *dde = zio->io_private; 2618 ddt_phys_t *ddp; 2619 zio_t *pio = zio_unique_parent(zio); 2620 2621 mutex_enter(&pio->io_lock); 2622 ddp = ddt_phys_select(dde, bp); 2623 if (zio->io_error == 0) 2624 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2625 2626 if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 2627 dde->dde_repair_abd = zio->io_abd; 2628 else 2629 abd_free(zio->io_abd); 2630 mutex_exit(&pio->io_lock); 2631 } 2632 2633 static int 2634 zio_ddt_read_start(zio_t *zio) 2635 { 2636 blkptr_t *bp = zio->io_bp; 2637 2638 ASSERT(BP_GET_DEDUP(bp)); 2639 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2640 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2641 2642 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2643 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2644 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2645 ddt_phys_t *ddp = dde->dde_phys; 2646 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2647 blkptr_t blk; 2648 2649 ASSERT(zio->io_vsd == NULL); 2650 zio->io_vsd = dde; 2651 2652 if (ddp_self == NULL) 2653 return (ZIO_PIPELINE_CONTINUE); 2654 2655 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2656 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2657 continue; 2658 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2659 &blk); 2660 zio_nowait(zio_read(zio, zio->io_spa, &blk, 2661 abd_alloc_for_io(zio->io_size, B_TRUE), 2662 zio->io_size, zio_ddt_child_read_done, dde, 2663 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 2664 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 2665 } 2666 return (ZIO_PIPELINE_CONTINUE); 2667 } 2668 2669 zio_nowait(zio_read(zio, zio->io_spa, bp, 2670 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 2671 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2672 2673 return (ZIO_PIPELINE_CONTINUE); 2674 } 2675 2676 static int 2677 zio_ddt_read_done(zio_t *zio) 2678 { 2679 blkptr_t *bp = zio->io_bp; 2680 2681 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 2682 return (ZIO_PIPELINE_STOP); 2683 } 2684 2685 ASSERT(BP_GET_DEDUP(bp)); 2686 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2687 
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2688 2689 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2690 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2691 ddt_entry_t *dde = zio->io_vsd; 2692 if (ddt == NULL) { 2693 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2694 return (ZIO_PIPELINE_CONTINUE); 2695 } 2696 if (dde == NULL) { 2697 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 2698 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2699 return (ZIO_PIPELINE_STOP); 2700 } 2701 if (dde->dde_repair_abd != NULL) { 2702 abd_copy(zio->io_abd, dde->dde_repair_abd, 2703 zio->io_size); 2704 zio->io_child_error[ZIO_CHILD_DDT] = 0; 2705 } 2706 ddt_repair_done(ddt, dde); 2707 zio->io_vsd = NULL; 2708 } 2709 2710 ASSERT(zio->io_vsd == NULL); 2711 2712 return (ZIO_PIPELINE_CONTINUE); 2713 } 2714 2715 static boolean_t 2716 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2717 { 2718 spa_t *spa = zio->io_spa; 2719 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); 2720 2721 /* We should never get a raw, override zio */ 2722 ASSERT(!(zio->io_bp_override && do_raw)); 2723 2724 /* 2725 * Note: we compare the original data, not the transformed data, 2726 * because when zio->io_bp is an override bp, we will not have 2727 * pushed the I/O transforms. That's an important optimization 2728 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2729 * However, we should never get a raw, override zio so in these 2730 * cases we can compare the io_data directly. This is useful because 2731 * it allows us to do dedup verification even if we don't have access 2732 * to the original data (for instance, if the encryption keys aren't 2733 * loaded). 2734 */ 2735 2736 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2737 zio_t *lio = dde->dde_lead_zio[p]; 2738 2739 if (lio != NULL && do_raw) { 2740 return (lio->io_size != zio->io_size || 2741 abd_cmp(zio->io_abd, lio->io_abd, 2742 zio->io_size) != 0); 2743 } else if (lio != NULL) { 2744 return (lio->io_orig_size != zio->io_orig_size || 2745 abd_cmp(zio->io_orig_abd, lio->io_orig_abd, 2746 zio->io_orig_size) != 0); 2747 } 2748 } 2749 2750 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2751 ddt_phys_t *ddp = &dde->dde_phys[p]; 2752 2753 if (ddp->ddp_phys_birth != 0 && do_raw) { 2754 blkptr_t blk = *zio->io_bp; 2755 uint64_t psize; 2756 abd_t *tmpabd; 2757 int error; 2758 2759 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2760 psize = BP_GET_PSIZE(&blk); 2761 2762 if (psize != zio->io_size) 2763 return (B_TRUE); 2764 2765 ddt_exit(ddt); 2766 2767 tmpabd = abd_alloc_for_io(psize, B_TRUE); 2768 2769 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd, 2770 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, 2771 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2772 ZIO_FLAG_RAW, &zio->io_bookmark)); 2773 2774 if (error == 0) { 2775 if (abd_cmp(tmpabd, zio->io_abd, psize) != 0) 2776 error = SET_ERROR(ENOENT); 2777 } 2778 2779 abd_free(tmpabd); 2780 ddt_enter(ddt); 2781 return (error != 0); 2782 } else if (ddp->ddp_phys_birth != 0) { 2783 arc_buf_t *abuf = NULL; 2784 arc_flags_t aflags = ARC_FLAG_WAIT; 2785 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2786 blkptr_t blk = *zio->io_bp; 2787 int error; 2788 2789 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2790 2791 if (BP_GET_LSIZE(&blk) != zio->io_orig_size) 2792 return (B_TRUE); 2793 2794 ddt_exit(ddt); 2795 2796 /* 2797 * Intuitively, it would make more sense to compare 2798 * io_abd than io_orig_abd in the raw case since you 2799 * don't want to look at any transformations that 
have 2800 * happened to the data. However, for raw I/Os the 2801 * data will actually be the same in io_abd and 2802 * io_orig_abd, so all we have to do is issue this as 2803 * a raw ARC read. 2804 */ 2805 if (do_raw) { 2806 zio_flags |= ZIO_FLAG_RAW; 2807 ASSERT3U(zio->io_size, ==, zio->io_orig_size); 2808 ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd, 2809 zio->io_size)); 2810 ASSERT3P(zio->io_transform_stack, ==, NULL); 2811 } 2812 2813 error = arc_read(NULL, spa, &blk, 2814 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2815 zio_flags, &aflags, &zio->io_bookmark); 2816 2817 if (error == 0) { 2818 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 2819 zio->io_orig_size) != 0) 2820 error = SET_ERROR(ENOENT); 2821 arc_buf_destroy(abuf, &abuf); 2822 } 2823 2824 ddt_enter(ddt); 2825 return (error != 0); 2826 } 2827 } 2828 2829 return (B_FALSE); 2830 } 2831 2832 static void 2833 zio_ddt_child_write_ready(zio_t *zio) 2834 { 2835 int p = zio->io_prop.zp_copies; 2836 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2837 ddt_entry_t *dde = zio->io_private; 2838 ddt_phys_t *ddp = &dde->dde_phys[p]; 2839 zio_t *pio; 2840 2841 if (zio->io_error) 2842 return; 2843 2844 ddt_enter(ddt); 2845 2846 ASSERT(dde->dde_lead_zio[p] == zio); 2847 2848 ddt_phys_fill(ddp, zio->io_bp); 2849 2850 zio_link_t *zl = NULL; 2851 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2852 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2853 2854 ddt_exit(ddt); 2855 } 2856 2857 static void 2858 zio_ddt_child_write_done(zio_t *zio) 2859 { 2860 int p = zio->io_prop.zp_copies; 2861 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2862 ddt_entry_t *dde = zio->io_private; 2863 ddt_phys_t *ddp = &dde->dde_phys[p]; 2864 2865 ddt_enter(ddt); 2866 2867 ASSERT(ddp->ddp_refcnt == 0); 2868 ASSERT(dde->dde_lead_zio[p] == zio); 2869 dde->dde_lead_zio[p] = NULL; 2870 2871 if (zio->io_error == 0) { 2872 zio_link_t *zl = NULL; 2873 while (zio_walk_parents(zio, &zl) != NULL) 2874 ddt_phys_addref(ddp); 2875 } else { 2876 ddt_phys_clear(ddp); 2877 } 2878 2879 ddt_exit(ddt); 2880 } 2881 2882 static void 2883 zio_ddt_ditto_write_done(zio_t *zio) 2884 { 2885 int p = DDT_PHYS_DITTO; 2886 zio_prop_t *zp = &zio->io_prop; 2887 blkptr_t *bp = zio->io_bp; 2888 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2889 ddt_entry_t *dde = zio->io_private; 2890 ddt_phys_t *ddp = &dde->dde_phys[p]; 2891 ddt_key_t *ddk = &dde->dde_key; 2892 2893 ddt_enter(ddt); 2894 2895 ASSERT(ddp->ddp_refcnt == 0); 2896 ASSERT(dde->dde_lead_zio[p] == zio); 2897 dde->dde_lead_zio[p] = NULL; 2898 2899 if (zio->io_error == 0) { 2900 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2901 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2902 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2903 if (ddp->ddp_phys_birth != 0) 2904 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2905 ddt_phys_fill(ddp, bp); 2906 } 2907 2908 ddt_exit(ddt); 2909 } 2910 2911 static int 2912 zio_ddt_write(zio_t *zio) 2913 { 2914 spa_t *spa = zio->io_spa; 2915 blkptr_t *bp = zio->io_bp; 2916 uint64_t txg = zio->io_txg; 2917 zio_prop_t *zp = &zio->io_prop; 2918 int p = zp->zp_copies; 2919 int ditto_copies; 2920 zio_t *cio = NULL; 2921 zio_t *dio = NULL; 2922 ddt_t *ddt = ddt_select(spa, bp); 2923 ddt_entry_t *dde; 2924 ddt_phys_t *ddp; 2925 2926 ASSERT(BP_GET_DEDUP(bp)); 2927 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2928 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2929 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 2930 2931 ddt_enter(ddt); 2932 dde = ddt_lookup(ddt, bp, B_TRUE); 
2933 ddp = &dde->dde_phys[p]; 2934 2935 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2936 /* 2937 * If we're using a weak checksum, upgrade to a strong checksum 2938 * and try again. If we're already using a strong checksum, 2939 * we can't resolve it, so just convert to an ordinary write. 2940 * (And automatically e-mail a paper to Nature?) 2941 */ 2942 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 2943 ZCHECKSUM_FLAG_DEDUP)) { 2944 zp->zp_checksum = spa_dedup_checksum(spa); 2945 zio_pop_transforms(zio); 2946 zio->io_stage = ZIO_STAGE_OPEN; 2947 BP_ZERO(bp); 2948 } else { 2949 zp->zp_dedup = B_FALSE; 2950 BP_SET_DEDUP(bp, B_FALSE); 2951 } 2952 ASSERT(!BP_GET_DEDUP(bp)); 2953 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2954 ddt_exit(ddt); 2955 return (ZIO_PIPELINE_CONTINUE); 2956 } 2957 2958 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2959 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2960 2961 if (ditto_copies > ddt_ditto_copies_present(dde) && 2962 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2963 zio_prop_t czp = *zp; 2964 2965 czp.zp_copies = ditto_copies; 2966 2967 /* 2968 * If we arrived here with an override bp, we won't have run 2969 * the transform stack, so we won't have the data we need to 2970 * generate a child i/o. So, toss the override bp and restart. 2971 * This is safe, because using the override bp is just an 2972 * optimization; and it's rare, so the cost doesn't matter. 2973 */ 2974 if (zio->io_bp_override) { 2975 zio_pop_transforms(zio); 2976 zio->io_stage = ZIO_STAGE_OPEN; 2977 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2978 zio->io_bp_override = NULL; 2979 BP_ZERO(bp); 2980 ddt_exit(ddt); 2981 return (ZIO_PIPELINE_CONTINUE); 2982 } 2983 2984 dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2985 zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL, 2986 NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 2987 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2988 2989 zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL); 2990 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2991 } 2992 2993 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2994 if (ddp->ddp_phys_birth != 0) 2995 ddt_bp_fill(ddp, bp, txg); 2996 if (dde->dde_lead_zio[p] != NULL) 2997 zio_add_child(zio, dde->dde_lead_zio[p]); 2998 else 2999 ddt_phys_addref(ddp); 3000 } else if (zio->io_bp_override) { 3001 ASSERT(bp->blk_birth == txg); 3002 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 3003 ddt_phys_fill(ddp, bp); 3004 ddt_phys_addref(ddp); 3005 } else { 3006 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 3007 zio->io_orig_size, zio->io_orig_size, zp, 3008 zio_ddt_child_write_ready, NULL, NULL, 3009 zio_ddt_child_write_done, dde, zio->io_priority, 3010 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 3011 3012 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 3013 dde->dde_lead_zio[p] = cio; 3014 } 3015 3016 ddt_exit(ddt); 3017 3018 if (cio) 3019 zio_nowait(cio); 3020 if (dio) 3021 zio_nowait(dio); 3022 3023 return (ZIO_PIPELINE_CONTINUE); 3024 } 3025 3026 ddt_entry_t *freedde; /* for debugging */ 3027 3028 static int 3029 zio_ddt_free(zio_t *zio) 3030 { 3031 spa_t *spa = zio->io_spa; 3032 blkptr_t *bp = zio->io_bp; 3033 ddt_t *ddt = ddt_select(spa, bp); 3034 ddt_entry_t *dde; 3035 ddt_phys_t *ddp; 3036 3037 ASSERT(BP_GET_DEDUP(bp)); 3038 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3039 3040 ddt_enter(ddt); 3041 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 3042 ddp = ddt_phys_select(dde, bp); 3043 ddt_phys_decref(ddp); 3044 ddt_exit(ddt); 3045 3046 
return (ZIO_PIPELINE_CONTINUE); 3047 } 3048 3049 /* 3050 * ========================================================================== 3051 * Allocate and free blocks 3052 * ========================================================================== 3053 */ 3054 3055 static zio_t * 3056 zio_io_to_allocate(spa_t *spa, int allocator) 3057 { 3058 zio_t *zio; 3059 3060 ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator])); 3061 3062 zio = avl_first(&spa->spa_alloc_trees[allocator]); 3063 if (zio == NULL) 3064 return (NULL); 3065 3066 ASSERT(IO_IS_ALLOCATING(zio)); 3067 3068 /* 3069 * Try to place a reservation for this zio. If we're unable to 3070 * reserve then we throttle. 3071 */ 3072 ASSERT3U(zio->io_allocator, ==, allocator); 3073 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, 3074 zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) { 3075 return (NULL); 3076 } 3077 3078 avl_remove(&spa->spa_alloc_trees[allocator], zio); 3079 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 3080 3081 return (zio); 3082 } 3083 3084 static int 3085 zio_dva_throttle(zio_t *zio) 3086 { 3087 spa_t *spa = zio->io_spa; 3088 zio_t *nio; 3089 metaslab_class_t *mc; 3090 3091 /* locate an appropriate allocation class */ 3092 mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type, 3093 zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk); 3094 3095 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 3096 !mc->mc_alloc_throttle_enabled || 3097 zio->io_child_type == ZIO_CHILD_GANG || 3098 zio->io_flags & ZIO_FLAG_NODATA) { 3099 return (ZIO_PIPELINE_CONTINUE); 3100 } 3101 3102 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3103 3104 ASSERT3U(zio->io_queued_timestamp, >, 0); 3105 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 3106 3107 zbookmark_phys_t *bm = &zio->io_bookmark; 3108 /* 3109 * We want to try to use as many allocators as possible to help improve 3110 * performance, but we also want logically adjacent IOs to be physically 3111 * adjacent to improve sequential read performance. We chunk each object 3112 * into 2^20 block regions, and then hash based on the objset, object, 3113 * level, and region to accomplish both of these goals. 3114 */ 3115 zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object, 3116 bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; 3117 mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]); 3118 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3119 zio->io_metaslab_class = mc; 3120 avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio); 3121 nio = zio_io_to_allocate(spa, zio->io_allocator); 3122 mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]); 3123 3124 if (nio == zio) 3125 return (ZIO_PIPELINE_CONTINUE); 3126 3127 if (nio != NULL) { 3128 ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 3129 /* 3130 * We are passing control to a new zio so make sure that 3131 * it is processed by a different thread. We do this to 3132 * avoid stack overflows that can occur when parents are 3133 * throttled and children are making progress. We allow 3134 * it to go to the head of the taskq since it's already 3135 * been waiting. 
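 * Dispatching nio restarts zio_execute() on a fresh taskq stack
 * rather than nesting it inside the current call, which bounds
 * recursion depth no matter how long the chain of throttled zios
 * grows.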
3136 */ 3137 zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 3138 } 3139 return (ZIO_PIPELINE_STOP); 3140 } 3141 3142 static void 3143 zio_allocate_dispatch(spa_t *spa, int allocator) 3144 { 3145 zio_t *zio; 3146 3147 mutex_enter(&spa->spa_alloc_locks[allocator]); 3148 zio = zio_io_to_allocate(spa, allocator); 3149 mutex_exit(&spa->spa_alloc_locks[allocator]); 3150 if (zio == NULL) 3151 return; 3152 3153 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 3154 ASSERT0(zio->io_error); 3155 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 3156 } 3157 3158 static int 3159 zio_dva_allocate(zio_t *zio) 3160 { 3161 spa_t *spa = zio->io_spa; 3162 metaslab_class_t *mc; 3163 blkptr_t *bp = zio->io_bp; 3164 int error; 3165 int flags = 0; 3166 3167 if (zio->io_gang_leader == NULL) { 3168 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3169 zio->io_gang_leader = zio; 3170 } 3171 3172 ASSERT(BP_IS_HOLE(bp)); 3173 ASSERT0(BP_GET_NDVAS(bp)); 3174 ASSERT3U(zio->io_prop.zp_copies, >, 0); 3175 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 3176 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 3177 3178 if (zio->io_flags & ZIO_FLAG_NODATA) 3179 flags |= METASLAB_DONT_THROTTLE; 3180 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 3181 flags |= METASLAB_GANG_CHILD; 3182 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 3183 flags |= METASLAB_ASYNC_ALLOC; 3184 3185 /* 3186 * if not already chosen, locate an appropriate allocation class 3187 */ 3188 mc = zio->io_metaslab_class; 3189 if (mc == NULL) { 3190 mc = spa_preferred_class(spa, zio->io_size, 3191 zio->io_prop.zp_type, zio->io_prop.zp_level, 3192 zio->io_prop.zp_zpl_smallblk); 3193 zio->io_metaslab_class = mc; 3194 } 3195 3196 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3197 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3198 &zio->io_alloc_list, zio, zio->io_allocator); 3199 3200 /* 3201 * Fallback to normal class when an alloc class is full 3202 */ 3203 if (error == ENOSPC && mc != spa_normal_class(spa)) { 3204 /* 3205 * If throttling, transfer reservation over to normal class. 3206 * The io_allocator slot can remain the same even though we 3207 * are switching classes. 
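 * The normal-class reservation below is taken with
 * METASLAB_MUST_RESERVE since this zio was already admitted under
 * the throttle; failing the transfer would leave an in-flight I/O
 * with no reservation to release at completion.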
3208 */ 3209 if (mc->mc_alloc_throttle_enabled && 3210 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { 3211 metaslab_class_throttle_unreserve(mc, 3212 zio->io_prop.zp_copies, zio->io_allocator, zio); 3213 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; 3214 3215 mc = spa_normal_class(spa); 3216 VERIFY(metaslab_class_throttle_reserve(mc, 3217 zio->io_prop.zp_copies, zio->io_allocator, zio, 3218 flags | METASLAB_MUST_RESERVE)); 3219 } else { 3220 mc = spa_normal_class(spa); 3221 } 3222 zio->io_metaslab_class = mc; 3223 3224 error = metaslab_alloc(spa, mc, zio->io_size, bp, 3225 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3226 &zio->io_alloc_list, zio, zio->io_allocator); 3227 } 3228 3229 if (error != 0) { 3230 zfs_dbgmsg("%s: metaslab allocation failure: zio %p, " 3231 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 3232 error); 3233 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 3234 return (zio_write_gang_block(zio)); 3235 zio->io_error = error; 3236 } 3237 3238 return (ZIO_PIPELINE_CONTINUE); 3239 } 3240 3241 static int 3242 zio_dva_free(zio_t *zio) 3243 { 3244 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3245 3246 return (ZIO_PIPELINE_CONTINUE); 3247 } 3248 3249 static int 3250 zio_dva_claim(zio_t *zio) 3251 { 3252 int error; 3253 3254 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3255 if (error) 3256 zio->io_error = error; 3257 3258 return (ZIO_PIPELINE_CONTINUE); 3259 } 3260 3261 /* 3262 * Undo an allocation. This is used by zio_done() when an I/O fails 3263 * and we want to give back the block we just allocated. 3264 * This handles both normal blocks and gang blocks. 3265 */ 3266 static void 3267 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3268 { 3269 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 3270 ASSERT(zio->io_bp_override == NULL); 3271 3272 if (!BP_IS_HOLE(bp)) 3273 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 3274 3275 if (gn != NULL) { 3276 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3277 zio_dva_unallocate(zio, gn->gn_child[g], 3278 &gn->gn_gbh->zg_blkptr[g]); 3279 } 3280 } 3281 } 3282 3283 /* 3284 * Try to allocate an intent log block. Return 0 on success, errno on failure. 3285 */ 3286 int 3287 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp, 3288 blkptr_t *old_bp, uint64_t size, boolean_t *slog) 3289 { 3290 int error = 1; 3291 zio_alloc_list_t io_alloc_list; 3292 3293 ASSERT(txg > spa_syncing_txg(spa)); 3294 3295 metaslab_trace_init(&io_alloc_list); 3296 3297 /* 3298 * Block pointer fields are useful to metaslabs for stats and debugging. 3299 * Fill in the obvious ones before calling into metaslab_alloc(). 3300 */ 3301 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3302 BP_SET_PSIZE(new_bp, size); 3303 BP_SET_LEVEL(new_bp, 0); 3304 3305 /* 3306 * When allocating a zil block, we don't have information about 3307 * the final destination of the block except the objset it's part 3308 * of, so we just hash the objset ID to pick the allocator to get 3309 * some parallelism. 
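 * (One consequence worth noting: every ZIL block for a given objset
 * maps to the same allocator, since the hash input is just the
 * dataset object number.)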
3310 */ 3311 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3312 txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL, 3313 cityhash4(0, 0, 0, 3314 os->os_dsl_dataset->ds_object) % spa->spa_alloc_count); 3315 if (error == 0) { 3316 *slog = TRUE; 3317 } else { 3318 error = metaslab_alloc(spa, spa_normal_class(spa), size, 3319 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, 3320 &io_alloc_list, NULL, cityhash4(0, 0, 0, 3321 os->os_dsl_dataset->ds_object) % spa->spa_alloc_count); 3322 if (error == 0) 3323 *slog = FALSE; 3324 } 3325 metaslab_trace_fini(&io_alloc_list); 3326 3327 if (error == 0) { 3328 BP_SET_LSIZE(new_bp, size); 3329 BP_SET_PSIZE(new_bp, size); 3330 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 3331 BP_SET_CHECKSUM(new_bp, 3332 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 3333 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3334 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3335 BP_SET_LEVEL(new_bp, 0); 3336 BP_SET_DEDUP(new_bp, 0); 3337 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3338 3339 /* 3340 * encrypted blocks will require an IV and salt. We generate 3341 * these now since we will not be rewriting the bp at 3342 * rewrite time. 3343 */ 3344 if (os->os_encrypted) { 3345 uint8_t iv[ZIO_DATA_IV_LEN]; 3346 uint8_t salt[ZIO_DATA_SALT_LEN]; 3347 3348 BP_SET_CRYPT(new_bp, B_TRUE); 3349 VERIFY0(spa_crypt_get_salt(spa, 3350 dmu_objset_id(os), salt)); 3351 VERIFY0(zio_crypt_generate_iv(iv)); 3352 3353 zio_crypt_encode_params_bp(new_bp, salt, iv); 3354 } 3355 } else { 3356 zfs_dbgmsg("%s: zil block allocation failure: " 3357 "size %llu, error %d", spa_name(spa), size, error); 3358 } 3359 3360 return (error); 3361 } 3362 3363 /* 3364 * ========================================================================== 3365 * Read and write to physical devices 3366 * ========================================================================== 3367 */ 3368 3369 /* 3370 * Issue an I/O to the underlying vdev. Typically the issue pipeline 3371 * stops after this stage and will resume upon I/O completion. 3372 * However, there are instances where the vdev layer may need to 3373 * continue the pipeline when an I/O was not issued. Since the I/O 3374 * that was sent to the vdev layer might be different than the one 3375 * currently active in the pipeline (see vdev_queue_io()), we explicitly 3376 * force the underlying vdev layers to call either zio_execute() or 3377 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3378 */ 3379 static int 3380 zio_vdev_io_start(zio_t *zio) 3381 { 3382 vdev_t *vd = zio->io_vd; 3383 uint64_t align; 3384 spa_t *spa = zio->io_spa; 3385 3386 ASSERT(zio->io_error == 0); 3387 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3388 3389 if (vd == NULL) { 3390 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3391 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3392 3393 /* 3394 * The mirror_ops handle multiple DVAs in a single BP. 3395 */ 3396 vdev_mirror_ops.vdev_op_io_start(zio); 3397 return (ZIO_PIPELINE_STOP); 3398 } 3399 3400 ASSERT3P(zio->io_logical, !=, zio); 3401 if (zio->io_type == ZIO_TYPE_WRITE) { 3402 ASSERT(spa->spa_trust_config); 3403 3404 /* 3405 * Note: the code can handle other kinds of writes, 3406 * but we don't expect them. 
3407 */ 3408 if (zio->io_vd->vdev_removing) { 3409 ASSERT(zio->io_flags & 3410 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 3411 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 3412 } 3413 } 3414 3415 align = 1ULL << vd->vdev_top->vdev_ashift; 3416 3417 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 3418 P2PHASE(zio->io_size, align) != 0) { 3419 /* Transform logical writes to be a full physical block size. */ 3420 uint64_t asize = P2ROUNDUP(zio->io_size, align); 3421 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3422 ASSERT(vd == vd->vdev_top); 3423 if (zio->io_type == ZIO_TYPE_WRITE) { 3424 abd_copy(abuf, zio->io_abd, zio->io_size); 3425 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3426 } 3427 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3428 } 3429 3430 /* 3431 * If this is not a physical io, make sure that it is properly aligned 3432 * before proceeding. 3433 */ 3434 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 3435 ASSERT0(P2PHASE(zio->io_offset, align)); 3436 ASSERT0(P2PHASE(zio->io_size, align)); 3437 } else { 3438 /* 3439 * For physical writes, we allow 512b aligned writes and assume 3440 * the device will perform a read-modify-write as necessary. 3441 */ 3442 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 3443 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 3444 } 3445 3446 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 3447 3448 /* 3449 * If this is a repair I/O, and there's no self-healing involved -- 3450 * that is, we're just resilvering what we expect to resilver -- 3451 * then don't do the I/O unless zio's txg is actually in vd's DTL. 3452 * This prevents spurious resilvering. 3453 * 3454 * There are a few ways that we can end up creating these spurious 3455 * resilver i/os: 3456 * 3457 * 1. A resilver i/o will be issued if any DVA in the BP has a 3458 * dirty DTL. The mirror code will issue resilver writes to 3459 * each DVA, including the one(s) that are not on vdevs with dirty 3460 * DTLs. 3461 * 3462 * 2. With nested replication, which happens when we have a 3463 * "replacing" or "spare" vdev that's a child of a mirror or raidz. 3464 * For example, given mirror(replacing(A+B), C), it's likely that 3465 * only A is out of date (it's the new device). In this case, we'll 3466 * read from C, then use the data to resilver A+B -- but we don't 3467 * actually want to resilver B, just A. The top-level mirror has no 3468 * way to know this, so instead we just discard unnecessary repairs 3469 * as we work our way down the vdev tree. 3470 * 3471 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 3472 * The same logic applies to any form of nested replication: ditto 3473 * + mirror, RAID-Z + replacing, etc. 3474 * 3475 * However, indirect vdevs point off to other vdevs which may have 3476 * DTL's, so we never bypass them. The child i/os on concrete vdevs 3477 * will be properly bypassed instead. 
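 *
 * Putting the above together, the test below bypasses a repair write
 * only when all of the following hold: it is not self-healing, it
 * carries a real (non-delegated) txg, the vdev is concrete, and that
 * txg is not dirty in the vdev's DTL_PARTIAL.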
3478 */ 3479 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 3480 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 3481 zio->io_txg != 0 && /* not a delegated i/o */ 3482 vd->vdev_ops != &vdev_indirect_ops && 3483 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 3484 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3485 zio_vdev_io_bypass(zio); 3486 return (ZIO_PIPELINE_CONTINUE); 3487 } 3488 3489 if (vd->vdev_ops->vdev_op_leaf && (zio->io_type == ZIO_TYPE_READ || 3490 zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM)) { 3491 3492 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) 3493 return (ZIO_PIPELINE_CONTINUE); 3494 3495 if ((zio = vdev_queue_io(zio)) == NULL) 3496 return (ZIO_PIPELINE_STOP); 3497 3498 if (!vdev_accessible(vd, zio)) { 3499 zio->io_error = SET_ERROR(ENXIO); 3500 zio_interrupt(zio); 3501 return (ZIO_PIPELINE_STOP); 3502 } 3503 } 3504 3505 vd->vdev_ops->vdev_op_io_start(zio); 3506 return (ZIO_PIPELINE_STOP); 3507 } 3508 3509 static int 3510 zio_vdev_io_done(zio_t *zio) 3511 { 3512 vdev_t *vd = zio->io_vd; 3513 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 3514 boolean_t unexpected_error = B_FALSE; 3515 3516 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3517 return (ZIO_PIPELINE_STOP); 3518 } 3519 3520 ASSERT(zio->io_type == ZIO_TYPE_READ || 3521 zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM); 3522 3523 if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 3524 3525 vdev_queue_io_done(zio); 3526 3527 if (zio->io_type == ZIO_TYPE_WRITE) 3528 vdev_cache_write(zio); 3529 3530 if (zio_injection_enabled && zio->io_error == 0) 3531 zio->io_error = zio_handle_device_injection(vd, 3532 zio, EIO); 3533 3534 if (zio_injection_enabled && zio->io_error == 0) 3535 zio->io_error = zio_handle_label_injection(zio, EIO); 3536 3537 if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) { 3538 if (!vdev_accessible(vd, zio)) { 3539 zio->io_error = SET_ERROR(ENXIO); 3540 } else { 3541 unexpected_error = B_TRUE; 3542 } 3543 } 3544 } 3545 3546 ops->vdev_op_io_done(zio); 3547 3548 if (unexpected_error) 3549 VERIFY(vdev_probe(vd, zio) == NULL); 3550 3551 return (ZIO_PIPELINE_CONTINUE); 3552 } 3553 3554 /* 3555 * This function is used to change the priority of an existing zio that is 3556 * currently in-flight. This is used by the arc to upgrade priority in the 3557 * event that a demand read is made for a block that is currently queued 3558 * as a scrub or async read IO. Otherwise, the high priority read request 3559 * would end up having to wait for the lower priority IO. 3560 */ 3561 void 3562 zio_change_priority(zio_t *pio, zio_priority_t priority) 3563 { 3564 zio_t *cio, *cio_next; 3565 zio_link_t *zl = NULL; 3566 3567 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 3568 3569 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 3570 vdev_queue_change_io_priority(pio, priority); 3571 } else { 3572 pio->io_priority = priority; 3573 } 3574 3575 mutex_enter(&pio->io_lock); 3576 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 3577 cio_next = zio_walk_children(pio, &zl); 3578 zio_change_priority(cio, priority); 3579 } 3580 mutex_exit(&pio->io_lock); 3581 } 3582 3583 /* 3584 * For non-raidz ZIOs, we can just copy aside the bad data read from the 3585 * disk, and use that to finish the checksum ereport later. 
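 * We copy the data rather than referencing the zio's own abd because
 * that abd may be reused or freed before the deferred ereport is
 * finished.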
3586 */ 3587 static void 3588 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 3589 const abd_t *good_buf) 3590 { 3591 /* no processing needed */ 3592 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 3593 } 3594 3595 /*ARGSUSED*/ 3596 void 3597 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 3598 { 3599 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size); 3600 3601 abd_copy(abd, zio->io_abd, zio->io_size); 3602 3603 zcr->zcr_cbinfo = zio->io_size; 3604 zcr->zcr_cbdata = abd; 3605 zcr->zcr_finish = zio_vsd_default_cksum_finish; 3606 zcr->zcr_free = zio_abd_free; 3607 } 3608 3609 static int 3610 zio_vdev_io_assess(zio_t *zio) 3611 { 3612 vdev_t *vd = zio->io_vd; 3613 3614 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3615 return (ZIO_PIPELINE_STOP); 3616 } 3617 3618 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3619 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3620 3621 if (zio->io_vsd != NULL) { 3622 zio->io_vsd_ops->vsd_free(zio); 3623 zio->io_vsd = NULL; 3624 } 3625 3626 if (zio_injection_enabled && zio->io_error == 0) 3627 zio->io_error = zio_handle_fault_injection(zio, EIO); 3628 3629 /* 3630 * If the I/O failed, determine whether we should attempt to retry it. 3631 * 3632 * On retry, we cut in line in the issue queue, since we don't want 3633 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 3634 */ 3635 if (zio->io_error && vd == NULL && 3636 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3637 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3638 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3639 zio->io_error = 0; 3640 zio->io_flags |= ZIO_FLAG_IO_RETRY | 3641 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3642 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 3643 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 3644 zio_requeue_io_start_cut_in_line); 3645 return (ZIO_PIPELINE_STOP); 3646 } 3647 3648 /* 3649 * If we got an error on a leaf device, convert it to ENXIO 3650 * if the device is not accessible at all. 3651 */ 3652 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3653 !vdev_accessible(vd, zio)) 3654 zio->io_error = SET_ERROR(ENXIO); 3655 3656 /* 3657 * If we can't write to an interior vdev (mirror or RAID-Z), 3658 * set vdev_cant_write so that we stop trying to allocate from it. 3659 */ 3660 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 3661 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3662 vd->vdev_cant_write = B_TRUE; 3663 } 3664 3665 /* 3666 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3667 * attempts will ever succeed. In this case we set a persistent 3668 * boolean flag so that we don't bother with it in the future. 
3669 */ 3670 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 3671 zio->io_type == ZIO_TYPE_IOCTL && 3672 zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 3673 vd->vdev_nowritecache = B_TRUE; 3674 3675 if (zio->io_error) 3676 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3677 3678 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 3679 zio->io_physdone != NULL) { 3680 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 3681 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 3682 zio->io_physdone(zio->io_logical); 3683 } 3684 3685 return (ZIO_PIPELINE_CONTINUE); 3686 } 3687 3688 void 3689 zio_vdev_io_reissue(zio_t *zio) 3690 { 3691 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3692 ASSERT(zio->io_error == 0); 3693 3694 zio->io_stage >>= 1; 3695 } 3696 3697 void 3698 zio_vdev_io_redone(zio_t *zio) 3699 { 3700 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 3701 3702 zio->io_stage >>= 1; 3703 } 3704 3705 void 3706 zio_vdev_io_bypass(zio_t *zio) 3707 { 3708 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3709 ASSERT(zio->io_error == 0); 3710 3711 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 3712 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 3713 } 3714 3715 /* 3716 * ========================================================================== 3717 * Encrypt and store encryption parameters 3718 * ========================================================================== 3719 */ 3720 3721 3722 /* 3723 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for 3724 * managing the storage of encryption parameters and passing them to the 3725 * lower-level encryption functions. 3726 */ 3727 static int 3728 zio_encrypt(zio_t *zio) 3729 { 3730 zio_prop_t *zp = &zio->io_prop; 3731 spa_t *spa = zio->io_spa; 3732 blkptr_t *bp = zio->io_bp; 3733 uint64_t psize = BP_GET_PSIZE(bp); 3734 uint64_t dsobj = zio->io_bookmark.zb_objset; 3735 dmu_object_type_t ot = BP_GET_TYPE(bp); 3736 void *enc_buf = NULL; 3737 abd_t *eabd = NULL; 3738 uint8_t salt[ZIO_DATA_SALT_LEN]; 3739 uint8_t iv[ZIO_DATA_IV_LEN]; 3740 uint8_t mac[ZIO_DATA_MAC_LEN]; 3741 boolean_t no_crypt = B_FALSE; 3742 3743 /* the root zio already encrypted the data */ 3744 if (zio->io_child_type == ZIO_CHILD_GANG) 3745 return (ZIO_PIPELINE_CONTINUE); 3746 3747 /* only ZIL blocks are re-encrypted on rewrite */ 3748 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG) 3749 return (ZIO_PIPELINE_CONTINUE); 3750 3751 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) { 3752 BP_SET_CRYPT(bp, B_FALSE); 3753 return (ZIO_PIPELINE_CONTINUE); 3754 } 3755 3756 /* if we are doing raw encryption set the provided encryption params */ 3757 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) { 3758 ASSERT0(BP_GET_LEVEL(bp)); 3759 BP_SET_CRYPT(bp, B_TRUE); 3760 BP_SET_BYTEORDER(bp, zp->zp_byteorder); 3761 if (ot != DMU_OT_OBJSET) 3762 zio_crypt_encode_mac_bp(bp, zp->zp_mac); 3763 3764 /* dnode blocks must be written out in the provided byteorder */ 3765 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER && 3766 ot == DMU_OT_DNODE) { 3767 void *bswap_buf = zio_buf_alloc(psize); 3768 abd_t *babd = abd_get_from_buf(bswap_buf, psize); 3769 3770 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 3771 abd_copy_to_buf(bswap_buf, zio->io_abd, psize); 3772 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf, 3773 psize); 3774 3775 abd_take_ownership_of_buf(babd, B_TRUE); 3776 zio_push_transform(zio, babd, psize, psize, NULL); 3777 } 3778 3779 if (DMU_OT_IS_ENCRYPTED(ot)) 3780 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv); 3781 return (ZIO_PIPELINE_CONTINUE); 3782 } 3783 3784 /* 
indirect blocks only maintain a cksum of the lower level MACs */
3785	if (BP_GET_LEVEL(bp) > 0) {
3786		BP_SET_CRYPT(bp, B_TRUE);
3787		VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
3788		    zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
3789		    mac));
3790		zio_crypt_encode_mac_bp(bp, mac);
3791		return (ZIO_PIPELINE_CONTINUE);
3792	}
3793
3794	/*
3795	 * Objset blocks are a special case since they have two 256-bit MACs
3796	 * embedded within them.
3797	 */
3798	if (ot == DMU_OT_OBJSET) {
3799		ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
3800		ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
3801		BP_SET_CRYPT(bp, B_TRUE);
3802		VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
3803		    zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
3804		return (ZIO_PIPELINE_CONTINUE);
3805	}
3806
3807	/* unencrypted object types are only authenticated with a MAC */
3808	if (!DMU_OT_IS_ENCRYPTED(ot)) {
3809		BP_SET_CRYPT(bp, B_TRUE);
3810		VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
3811		    zio->io_abd, psize, mac));
3812		zio_crypt_encode_mac_bp(bp, mac);
3813		return (ZIO_PIPELINE_CONTINUE);
3814	}
3815
3816	/*
3817	 * Later passes of sync-to-convergence may decide to rewrite data
3818	 * in place to avoid more disk reallocations. This presents a problem
3819	 * for encryption because it constitutes rewriting the new data with
3820	 * the same encryption key and IV. However, this only applies to blocks
3821	 * in the MOS (particularly the spacemaps) and we do not encrypt the
3822	 * MOS. We assert that the zio is allocating or an intent log write
3823	 * to enforce this.
3824	 */
3825	ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
3826	ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
3827	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
3828	ASSERT3U(psize, !=, 0);
3829
3830	enc_buf = zio_buf_alloc(psize);
3831	eabd = abd_get_from_buf(enc_buf, psize);
3832	abd_take_ownership_of_buf(eabd, B_TRUE);
3833
3834	/*
3835	 * For an explanation of what encryption parameters are stored
3836	 * where, see the block comment in zio_crypt.c.
3837	 */
3838	if (ot == DMU_OT_INTENT_LOG) {
3839		zio_crypt_decode_params_bp(bp, salt, iv);
3840	} else {
3841		BP_SET_CRYPT(bp, B_TRUE);
3842	}
3843
3844	/* Perform the encryption. This should not fail. */
3845	VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
3846	    BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
3847	    salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
3848
3849	/* encode encryption metadata into the bp */
3850	if (ot == DMU_OT_INTENT_LOG) {
3851		/*
3852		 * ZIL blocks store the MAC in the embedded checksum, so the
3853		 * transform must always be applied.
3854		 */
3855		zio_crypt_encode_mac_zil(enc_buf, mac);
3856		zio_push_transform(zio, eabd, psize, psize, NULL);
3857	} else {
3858		BP_SET_CRYPT(bp, B_TRUE);
3859		zio_crypt_encode_params_bp(bp, salt, iv);
3860		zio_crypt_encode_mac_bp(bp, mac);
3861
3862		if (no_crypt) {
3863			ASSERT3U(ot, ==, DMU_OT_DNODE);
3864			abd_free(eabd);
3865		} else {
3866			zio_push_transform(zio, eabd, psize, psize, NULL);
3867		}
3868	}
3869
3870	return (ZIO_PIPELINE_CONTINUE);
3871 }
3872
3873 /*
3874  * ==========================================================================
3875  * Generate and verify checksums
3876  * ==========================================================================
3877  */
3878 static int
3879 zio_checksum_generate(zio_t *zio)
3880 {
3881	blkptr_t *bp = zio->io_bp;
3882	enum zio_checksum checksum;
3883
3884	if (bp == NULL) {
3885		/*
3886		 * This is zio_write_phys().
3887		 * We're either generating a label checksum, or none at all.
3888		 */
3889		checksum = zio->io_prop.zp_checksum;
3890
3891		if (checksum == ZIO_CHECKSUM_OFF)
3892			return (ZIO_PIPELINE_CONTINUE);
3893
3894		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
3895	} else {
3896		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
3897			ASSERT(!IO_IS_ALLOCATING(zio));
3898			checksum = ZIO_CHECKSUM_GANG_HEADER;
3899		} else {
3900			checksum = BP_GET_CHECKSUM(bp);
3901		}
3902	}
3903
3904	zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
3905
3906	return (ZIO_PIPELINE_CONTINUE);
3907 }
3908
3909 static int
3910 zio_checksum_verify(zio_t *zio)
3911 {
3912	zio_bad_cksum_t info;
3913	blkptr_t *bp = zio->io_bp;
3914	int error;
3915
3916	ASSERT(zio->io_vd != NULL);
3917
3918	if (bp == NULL) {
3919		/*
3920		 * This is zio_read_phys().
3921		 * We're either verifying a label checksum, or nothing at all.
3922		 */
3923		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
3924			return (ZIO_PIPELINE_CONTINUE);
3925
3926		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
3927	}
3928
3929	if ((error = zio_checksum_error(zio, &info)) != 0) {
3930		zio->io_error = error;
3931		if (error == ECKSUM &&
3932		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
3933			zfs_ereport_start_checksum(zio->io_spa,
3934			    zio->io_vd, &zio->io_bookmark, zio,
3935			    zio->io_offset, zio->io_size, NULL, &info);
3936		}
3937	}
3938
3939	return (ZIO_PIPELINE_CONTINUE);
3940 }
3941
3942 /*
3943  * Called by RAID-Z to ensure we don't compute the checksum twice.
3944  */
3945 void
3946 zio_checksum_verified(zio_t *zio)
3947 {
3948	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
3949 }
3950
3951 /*
3952  * ==========================================================================
3953  * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
3954  * An error of 0 indicates success. ENXIO indicates whole-device failure,
3955  * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
3956  * indicate errors that are specific to one I/O, and most likely permanent.
3957  * Any other error is presumed to be worse because we weren't expecting it.
3958  * ==========================================================================
3959  */
3960 int
3961 zio_worst_error(int e1, int e2)
3962 {
3963	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
3964	int r1, r2;
3965
3966	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
3967		if (e1 == zio_error_rank[r1])
3968			break;
3969
3970	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
3971		if (e2 == zio_error_rank[r2])
3972			break;
3973
3974	return (r1 > r2 ?
e1 : e2); 3975 } 3976 3977 /* 3978 * ========================================================================== 3979 * I/O completion 3980 * ========================================================================== 3981 */ 3982 static int 3983 zio_ready(zio_t *zio) 3984 { 3985 blkptr_t *bp = zio->io_bp; 3986 zio_t *pio, *pio_next; 3987 zio_link_t *zl = NULL; 3988 3989 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, 3990 ZIO_WAIT_READY)) { 3991 return (ZIO_PIPELINE_STOP); 3992 } 3993 3994 if (zio->io_ready) { 3995 ASSERT(IO_IS_ALLOCATING(zio)); 3996 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 3997 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3998 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3999 4000 zio->io_ready(zio); 4001 } 4002 4003 if (bp != NULL && bp != &zio->io_bp_copy) 4004 zio->io_bp_copy = *bp; 4005 4006 if (zio->io_error != 0) { 4007 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4008 4009 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 4010 ASSERT(IO_IS_ALLOCATING(zio)); 4011 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4012 ASSERT(zio->io_metaslab_class != NULL); 4013 4014 /* 4015 * We were unable to allocate anything, unreserve and 4016 * issue the next I/O to allocate. 4017 */ 4018 metaslab_class_throttle_unreserve( 4019 zio->io_metaslab_class, zio->io_prop.zp_copies, 4020 zio->io_allocator, zio); 4021 zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 4022 } 4023 } 4024 4025 mutex_enter(&zio->io_lock); 4026 zio->io_state[ZIO_WAIT_READY] = 1; 4027 pio = zio_walk_parents(zio, &zl); 4028 mutex_exit(&zio->io_lock); 4029 4030 /* 4031 * As we notify zio's parents, new parents could be added. 4032 * New parents go to the head of zio's io_parent_list, however, 4033 * so we will (correctly) not notify them. The remainder of zio's 4034 * io_parent_list, from 'pio_next' onward, cannot change because 4035 * all parents must wait for us to be done before they can be done. 4036 */ 4037 for (; pio != NULL; pio = pio_next) { 4038 pio_next = zio_walk_parents(zio, &zl); 4039 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 4040 } 4041 4042 if (zio->io_flags & ZIO_FLAG_NODATA) { 4043 if (BP_IS_GANG(bp)) { 4044 zio->io_flags &= ~ZIO_FLAG_NODATA; 4045 } else { 4046 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 4047 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 4048 } 4049 } 4050 4051 if (zio_injection_enabled && 4052 zio->io_spa->spa_syncing_txg == zio->io_txg) 4053 zio_handle_ignored_writes(zio); 4054 4055 return (ZIO_PIPELINE_CONTINUE); 4056 } 4057 4058 /* 4059 * Update the allocation throttle accounting. 4060 */ 4061 static void 4062 zio_dva_throttle_done(zio_t *zio) 4063 { 4064 zio_t *lio = zio->io_logical; 4065 zio_t *pio = zio_unique_parent(zio); 4066 vdev_t *vd = zio->io_vd; 4067 int flags = METASLAB_ASYNC_ALLOC; 4068 4069 ASSERT3P(zio->io_bp, !=, NULL); 4070 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 4071 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 4072 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 4073 ASSERT(vd != NULL); 4074 ASSERT3P(vd, ==, vd->vdev_top); 4075 ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY))); 4076 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 4077 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 4078 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 4079 4080 /* 4081 * Parents of gang children can have two flavors -- ones that 4082 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 4083 * and ones that allocated the constituent blocks. 
The allocation 4084 * throttle needs to know the allocating parent zio so we must find 4085 * it here. 4086 */ 4087 if (pio->io_child_type == ZIO_CHILD_GANG) { 4088 /* 4089 * If our parent is a rewrite gang child then our grandparent 4090 * would have been the one that performed the allocation. 4091 */ 4092 if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 4093 pio = zio_unique_parent(pio); 4094 flags |= METASLAB_GANG_CHILD; 4095 } 4096 4097 ASSERT(IO_IS_ALLOCATING(pio)); 4098 ASSERT3P(zio, !=, zio->io_logical); 4099 ASSERT(zio->io_logical != NULL); 4100 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 4101 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 4102 ASSERT(zio->io_metaslab_class != NULL); 4103 4104 mutex_enter(&pio->io_lock); 4105 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags, 4106 pio->io_allocator, B_TRUE); 4107 mutex_exit(&pio->io_lock); 4108 4109 metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, 4110 pio->io_allocator, pio); 4111 4112 /* 4113 * Call into the pipeline to see if there is more work that 4114 * needs to be done. If there is work to be done it will be 4115 * dispatched to another taskq thread. 4116 */ 4117 zio_allocate_dispatch(zio->io_spa, pio->io_allocator); 4118 } 4119 4120 static int 4121 zio_done(zio_t *zio) 4122 { 4123 spa_t *spa = zio->io_spa; 4124 zio_t *lio = zio->io_logical; 4125 blkptr_t *bp = zio->io_bp; 4126 vdev_t *vd = zio->io_vd; 4127 uint64_t psize = zio->io_size; 4128 zio_t *pio, *pio_next; 4129 zio_link_t *zl = NULL; 4130 4131 /* 4132 * If our children haven't all completed, 4133 * wait for them and then repeat this pipeline stage. 4134 */ 4135 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) { 4136 return (ZIO_PIPELINE_STOP); 4137 } 4138 4139 /* 4140 * If the allocation throttle is enabled, then update the accounting. 4141 * We only track child I/Os that are part of an allocating async 4142 * write. We must do this since the allocation is performed 4143 * by the logical I/O but the actual write is done by child I/Os. 4144 */ 4145 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 4146 zio->io_child_type == ZIO_CHILD_VDEV) { 4147 ASSERT(zio->io_metaslab_class != NULL); 4148 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled); 4149 zio_dva_throttle_done(zio); 4150 } 4151 4152 /* 4153 * If the allocation throttle is enabled, verify that 4154 * we have decremented the refcounts for every I/O that was throttled. 
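 *
 * Conceptually, the invariant verified below is reserve/unreserve
 * pairing (a shorthand restatement; the real calls take more
 * arguments):
 *
 *	metaslab_class_throttle_reserve(mc, ...);	at allocation time
 *	...
 *	metaslab_class_throttle_unreserve(mc, ...);	zio_dva_throttle_done()
 *	VERIFY(zfs_refcount_not_held(&mc->mc_alloc_slots[allocator], zio));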
4155 */ 4156 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 4157 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 4158 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4159 ASSERT(bp != NULL); 4160 4161 metaslab_group_alloc_verify(spa, zio->io_bp, zio, 4162 zio->io_allocator); 4163 VERIFY(zfs_refcount_not_held( 4164 &zio->io_metaslab_class->mc_alloc_slots[zio->io_allocator], 4165 zio)); 4166 } 4167 4168 for (int c = 0; c < ZIO_CHILD_TYPES; c++) 4169 for (int w = 0; w < ZIO_WAIT_TYPES; w++) 4170 ASSERT(zio->io_children[c][w] == 0); 4171 4172 if (bp != NULL && !BP_IS_EMBEDDED(bp)) { 4173 ASSERT(bp->blk_pad[0] == 0); 4174 ASSERT(bp->blk_pad[1] == 0); 4175 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 4176 (bp == zio_unique_parent(zio)->io_bp)); 4177 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 4178 zio->io_bp_override == NULL && 4179 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 4180 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 4181 ASSERT(BP_COUNT_GANG(bp) == 0 || 4182 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 4183 } 4184 if (zio->io_flags & ZIO_FLAG_NOPWRITE) 4185 VERIFY(BP_EQUAL(bp, &zio->io_bp_orig)); 4186 } 4187 4188 /* 4189 * If there were child vdev/gang/ddt errors, they apply to us now. 4190 */ 4191 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 4192 zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 4193 zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 4194 4195 /* 4196 * If the I/O on the transformed data was successful, generate any 4197 * checksum reports now while we still have the transformed data. 4198 */ 4199 if (zio->io_error == 0) { 4200 while (zio->io_cksum_report != NULL) { 4201 zio_cksum_report_t *zcr = zio->io_cksum_report; 4202 uint64_t align = zcr->zcr_align; 4203 uint64_t asize = P2ROUNDUP(psize, align); 4204 abd_t *adata = zio->io_abd; 4205 4206 if (asize != psize) { 4207 adata = abd_alloc(asize, B_TRUE); 4208 abd_copy(adata, zio->io_abd, psize); 4209 abd_zero_off(adata, psize, asize - psize); 4210 } 4211 4212 zio->io_cksum_report = zcr->zcr_next; 4213 zcr->zcr_next = NULL; 4214 zcr->zcr_finish(zcr, adata); 4215 zfs_ereport_free_checksum(zcr); 4216 4217 if (asize != psize) 4218 abd_free(adata); 4219 } 4220 } 4221 4222 zio_pop_transforms(zio); /* note: may set zio->io_error */ 4223 4224 vdev_stat_update(zio, psize); 4225 4226 if (zio->io_error) { 4227 /* 4228 * If this I/O is attached to a particular vdev, 4229 * generate an error message describing the I/O failure 4230 * at the block level. We ignore these errors if the 4231 * device is currently unavailable. 4232 */ 4233 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 4234 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, 4235 &zio->io_bookmark, zio, 0, 0); 4236 4237 if ((zio->io_error == EIO || !(zio->io_flags & 4238 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 4239 zio == lio) { 4240 /* 4241 * For logical I/O requests, tell the SPA to log the 4242 * error and generate a logical data ereport. 4243 */ 4244 spa_log_error(spa, &zio->io_bookmark); 4245 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, 4246 &zio->io_bookmark, zio, 0, 0); 4247 } 4248 } 4249 4250 if (zio->io_error && zio == lio) { 4251 /* 4252 * Determine whether zio should be reexecuted. This will 4253 * propagate all the way to the root via zio_notify_parent(). 
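 *
 * In shorthand, the policy applied below is (an illustrative
 * restatement of the checks that follow, not additional logic):
 *
 *	must-succeed i/o, error other than ENOSPC	-> ZIO_REEXECUTE_NOW
 *	must-succeed i/o, ENOSPC			-> ZIO_REEXECUTE_SUSPEND
 *	read/free ENXIO during normal operation		-> ZIO_REEXECUTE_SUSPEND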
4254 */ 4255 ASSERT(vd == NULL && bp != NULL); 4256 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4257 4258 if (IO_IS_ALLOCATING(zio) && 4259 !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 4260 if (zio->io_error != ENOSPC) 4261 zio->io_reexecute |= ZIO_REEXECUTE_NOW; 4262 else 4263 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4264 } 4265 4266 if ((zio->io_type == ZIO_TYPE_READ || 4267 zio->io_type == ZIO_TYPE_FREE) && 4268 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 4269 zio->io_error == ENXIO && 4270 spa_load_state(spa) == SPA_LOAD_NONE && 4271 spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 4272 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4273 4274 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 4275 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4276 4277 /* 4278 * Here is a possibly good place to attempt to do 4279 * either combinatorial reconstruction or error correction 4280 * based on checksums. It also might be a good place 4281 * to send out preliminary ereports before we suspend 4282 * processing. 4283 */ 4284 } 4285 4286 /* 4287 * If there were logical child errors, they apply to us now. 4288 * We defer this until now to avoid conflating logical child 4289 * errors with errors that happened to the zio itself when 4290 * updating vdev stats and reporting FMA events above. 4291 */ 4292 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 4293 4294 if ((zio->io_error || zio->io_reexecute) && 4295 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 4296 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 4297 zio_dva_unallocate(zio, zio->io_gang_tree, bp); 4298 4299 zio_gang_tree_free(&zio->io_gang_tree); 4300 4301 /* 4302 * Godfather I/Os should never suspend. 4303 */ 4304 if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 4305 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 4306 zio->io_reexecute = 0; 4307 4308 if (zio->io_reexecute) { 4309 /* 4310 * This is a logical I/O that wants to reexecute. 4311 * 4312 * Reexecute is top-down. When an i/o fails, if it's not 4313 * the root, it simply notifies its parent and sticks around. 4314 * The parent, seeing that it still has children in zio_done(), 4315 * does the same. This percolates all the way up to the root. 4316 * The root i/o will reexecute or suspend the entire tree. 4317 * 4318 * This approach ensures that zio_reexecute() honors 4319 * all the original i/o dependency relationships, e.g. 4320 * parents not executing until children are ready. 4321 */ 4322 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4323 4324 zio->io_gang_leader = NULL; 4325 4326 mutex_enter(&zio->io_lock); 4327 zio->io_state[ZIO_WAIT_DONE] = 1; 4328 mutex_exit(&zio->io_lock); 4329 4330 /* 4331 * "The Godfather" I/O monitors its children but is 4332 * not a true parent to them. It will track them through 4333 * the pipeline but severs its ties whenever they get into 4334 * trouble (e.g. suspended). This allows "The Godfather" 4335 * I/O to return status without blocking. 4336 */ 4337 zl = NULL; 4338 for (pio = zio_walk_parents(zio, &zl); pio != NULL; 4339 pio = pio_next) { 4340 zio_link_t *remove_zl = zl; 4341 pio_next = zio_walk_parents(zio, &zl); 4342 4343 if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 4344 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 4345 zio_remove_child(pio, zio, remove_zl); 4346 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 4347 } 4348 } 4349 4350 if ((pio = zio_unique_parent(zio)) != NULL) { 4351 /* 4352 * We're not a root i/o, so there's nothing to do 4353 * but notify our parent. 
Don't propagate errors 4354 * upward since we haven't permanently failed yet. 4355 */ 4356 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 4357 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 4358 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 4359 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 4360 /* 4361 * We'd fail again if we reexecuted now, so suspend 4362 * until conditions improve (e.g. device comes online). 4363 */ 4364 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR); 4365 } else { 4366 /* 4367 * Reexecution is potentially a huge amount of work. 4368 * Hand it off to the otherwise-unused claim taskq. 4369 */ 4370 ASSERT(zio->io_tqent.tqent_next == NULL); 4371 spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM, 4372 ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio, 4373 0, &zio->io_tqent); 4374 } 4375 return (ZIO_PIPELINE_STOP); 4376 } 4377 4378 ASSERT(zio->io_child_count == 0); 4379 ASSERT(zio->io_reexecute == 0); 4380 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 4381 4382 /* 4383 * Report any checksum errors, since the I/O is complete. 4384 */ 4385 while (zio->io_cksum_report != NULL) { 4386 zio_cksum_report_t *zcr = zio->io_cksum_report; 4387 zio->io_cksum_report = zcr->zcr_next; 4388 zcr->zcr_next = NULL; 4389 zcr->zcr_finish(zcr, NULL); 4390 zfs_ereport_free_checksum(zcr); 4391 } 4392 4393 /* 4394 * It is the responsibility of the done callback to ensure that this 4395 * particular zio is no longer discoverable for adoption, and as 4396 * such, cannot acquire any new parents. 4397 */ 4398 if (zio->io_done) 4399 zio->io_done(zio); 4400 4401 mutex_enter(&zio->io_lock); 4402 zio->io_state[ZIO_WAIT_DONE] = 1; 4403 mutex_exit(&zio->io_lock); 4404 4405 zl = NULL; 4406 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 4407 zio_link_t *remove_zl = zl; 4408 pio_next = zio_walk_parents(zio, &zl); 4409 zio_remove_child(pio, zio, remove_zl); 4410 zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 4411 } 4412 4413 if (zio->io_waiter != NULL) { 4414 mutex_enter(&zio->io_lock); 4415 zio->io_executor = NULL; 4416 cv_broadcast(&zio->io_cv); 4417 mutex_exit(&zio->io_lock); 4418 } else { 4419 zio_destroy(zio); 4420 } 4421 4422 return (ZIO_PIPELINE_STOP); 4423 } 4424 4425 /* 4426 * ========================================================================== 4427 * I/O pipeline definition 4428 * ========================================================================== 4429 */ 4430 static zio_pipe_stage_t *zio_pipeline[] = { 4431 NULL, 4432 zio_read_bp_init, 4433 zio_write_bp_init, 4434 zio_free_bp_init, 4435 zio_issue_async, 4436 zio_write_compress, 4437 zio_encrypt, 4438 zio_checksum_generate, 4439 zio_nop_write, 4440 zio_ddt_read_start, 4441 zio_ddt_read_done, 4442 zio_ddt_write, 4443 zio_ddt_free, 4444 zio_gang_assemble, 4445 zio_gang_issue, 4446 zio_dva_throttle, 4447 zio_dva_allocate, 4448 zio_dva_free, 4449 zio_dva_claim, 4450 zio_ready, 4451 zio_vdev_io_start, 4452 zio_vdev_io_done, 4453 zio_vdev_io_assess, 4454 zio_checksum_verify, 4455 zio_done 4456 }; 4457 4458 4459 4460 4461 /* 4462 * Compare two zbookmark_phys_t's to see which we would reach first in a 4463 * pre-order traversal of the object tree. 4464 * 4465 * This is simple in every case aside from the meta-dnode object. For all other 4466 * objects, we traverse them in order (object 1 before object 2, and so on). 4467 * However, all of these objects are traversed while traversing object 0, since 4468 * the data it points to is the list of objects. 
Thus, we need to convert to a
4469  * canonical representation so we can compare meta-dnode bookmarks to
4470  * non-meta-dnode bookmarks.
4471  *
4472  * We do this by calculating "equivalents" for each field of the zbookmark.
4473  * zbookmarks outside of the meta-dnode use their own object and level, and
4474  * calculate the level 0 equivalent (the first L0 blkid that is contained in the
4475  * blocks this bookmark refers to) by multiplying their blkid by their span
4476  * (the number of L0 blocks contained within one block at their level).
4477  * zbookmarks inside the meta-dnode calculate their object equivalent
4478  * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
4479  * level + 1<<31 (any value larger than a level could ever be) for their level.
4480  * This causes them to always compare before a bookmark in their object
4481  * equivalent, compare appropriately to bookmarks in other objects, and to
4482  * compare appropriately to other bookmarks in the meta-dnode.
4483  */
4484 int
4485 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
4486     const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
4487 {
4488	/*
4489	 * These variables represent the "equivalent" values for the zbookmark,
4490	 * after converting zbookmarks inside the meta dnode to their
4491	 * normal-object equivalents.
4492	 */
4493	uint64_t zb1obj, zb2obj;
4494	uint64_t zb1L0, zb2L0;
4495	uint64_t zb1level, zb2level;
4496
4497	if (zb1->zb_object == zb2->zb_object &&
4498	    zb1->zb_level == zb2->zb_level &&
4499	    zb1->zb_blkid == zb2->zb_blkid)
4500		return (0);
4501
4502	/*
4503	 * BP_SPANB calculates the span in blocks.
4504	 */
4505	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
4506	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
4507
4508	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
4509		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
4510		zb1L0 = 0;
4511		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
4512	} else {
4513		zb1obj = zb1->zb_object;
4514		zb1level = zb1->zb_level;
4515	}
4516
4517	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
4518		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
4519		zb2L0 = 0;
4520		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
4521	} else {
4522		zb2obj = zb2->zb_object;
4523		zb2level = zb2->zb_level;
4524	}
4525
4526	/* Now that we have a canonical representation, do the comparison. */
4527	if (zb1obj != zb2obj)
4528		return (zb1obj < zb2obj ? -1 : 1);
4529	else if (zb1L0 != zb2L0)
4530		return (zb1L0 < zb2L0 ? -1 : 1);
4531	else if (zb1level != zb2level)
4532		return (zb1level > zb2level ? -1 : 1);
4533	/*
4534	 * This can (theoretically) happen if the bookmarks have the same object
4535	 * and level but different blkids, if the block sizes are not the same.
4536	 * There is presently no way to change the indirect block sizes.
4537	 */
4538	return (0);
4539 }
4540
4541 /*
4542  * This function checks the following: given that last_block is the place that
4543  * our traversal stopped last time, does that guarantee that we've visited
4544  * every node under subtree_root? To answer that, we can't just use the raw
4545  * output of zbookmark_compare. We have to pass in a modified version of
4546  * subtree_root: by incrementing the block id and then checking whether
4547  * last_block is before or equal to that, we can tell whether having
4548  * visited last_block implies that all of subtree_root's children have been
4549  * visited.
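 *
 * A worked example (illustrative values): with 16K indirect blocks, a
 * level-1 block spans 128 L0 blocks, so subtree_root = <level 1, blkid 3>
 * covers L0 blkids 384-511. Incrementing the blkid gives <level 1,
 * blkid 4>, whose first L0 equivalent is 512; the subtree has been fully
 * visited iff last_block sits at L0 blkid 512 or beyond (or in a later
 * object).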
4550  */
4551 boolean_t
4552 zbookmark_subtree_completed(const dnode_phys_t *dnp,
4553     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
4554 {
4555	zbookmark_phys_t mod_zb = *subtree_root;
4556	mod_zb.zb_blkid++;
4557	ASSERT(last_block->zb_level == 0);
4558
4559	/* The objset_phys_t isn't before anything. */
4560	if (dnp == NULL)
4561		return (B_FALSE);
4562
4563	/*
4564	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
4565	 * data block size in sectors, because that variable is only used if
4566	 * the bookmark refers to a block in the meta-dnode. Since we don't
4567	 * know without examining it what object it refers to, and there's no
4568	 * harm in passing in this value in other cases, we always pass it in.
4569	 *
4570	 * We pass in 0 for the indirect block size shift because zb2 must be
4571	 * level 0. The indirect block size is only used to calculate the span
4572	 * of the bookmark, but since the bookmark must be level 0, the span is
4573	 * always 1, so the math works out.
4574	 *
4575	 * If you make changes to how the zbookmark_compare code works, be sure
4576	 * to check that this code still works afterwards.
4577	 */
4578	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
4579	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
4580	    last_block) <= 0);
4581 }
4582
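/*
 * Example call (hypothetical values; dnp describes the block sizes of the
 * object being traversed, with 16K indirect blocks as in the worked example
 * above): a traversal that last stopped at L0 blkid 640 of object 5 has
 * fully visited the level-1 subtree rooted at blkid 3 of that object.
 *
 *	zbookmark_phys_t root = { .zb_objset = 1, .zb_object = 5,
 *	    .zb_level = 1, .zb_blkid = 3 };
 *	zbookmark_phys_t last = { .zb_objset = 1, .zb_object = 5,
 *	    .zb_level = 0, .zb_blkid = 640 };
 *	ASSERT(zbookmark_subtree_completed(dnp, &root, &last));
 */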