/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>
#include <sys/cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

boolean_t zio_dva_throttle_enabled = B_TRUE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

#define	ZIO_PIPELINE_CONTINUE		0x100
#define	ZIO_PIPELINE_STOP		0x101

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
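/*
 * Illustrative only: these tunables are consumed by comparing them against
 * spa_sync_pass().  For example, zio_write_compress() below disables
 * compression once the current sync pass reaches
 * zfs_sync_pass_dont_compress:
 *
 *	pass = spa_sync_pass(spa);
 *	...
 *	if (pass >= zfs_sync_pass_dont_compress)
 *		compress = ZIO_COMPRESS_OFF;
 */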
/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own
		 * page, to eliminate the performance overhead of trapping
		 * to the kernel when modifying a non-watched buffer that
		 * shares the page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    cflags | KMC_NOTOUCH);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}
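/*
 * Illustrative example (not part of the build): with SPA_MINBLOCKSIZE = 512,
 * a 4608-byte (9-sector) buffer gets no dedicated cache because 4608 is not
 * a multiple of its quarter-power of 2 (4096 / 4 = 1024).  The backfill loop
 * at the end of zio_init() therefore points that slot at the next larger
 * cache, zio_buf_5120:
 *
 *	size_t c = (4608 - 1) >> SPA_MINBLOCKSHIFT;	// c == 8
 *	// zio_buf_cache[8] == zio_buf_cache[9], the 5120-byte cache
 */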
void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump, thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}
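/*
 * Illustrative only: the size passed to the free routines selects the cache,
 * so a buffer must be freed with the same size it was allocated with:
 *
 *	void *buf = zio_buf_alloc(size);
 *	...
 *	zio_buf_free(buf, size);
 */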
/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	/*
	 * Ensure that anyone expecting this zio to contain a linear ABD isn't
	 * going to get a nasty surprise when they try to access the data.
	 */
	IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
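/*
 * Illustrative only: zio_read_bp_init() below uses this mechanism to stage
 * decompression.  It pushes a buffer sized for the physical (compressed)
 * data along with a transform callback; zio_pop_transforms() later invokes
 * the callback to fill the caller's original buffer:
 *
 *	zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 *	    psize, psize, zio_decompress);
 */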
/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size);
		abd_return_buf_copy(data, tmp, size);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}
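/*
 * Illustrative only: both walkers take a zio_link_t cursor that must start
 * as NULL; each call advances it and returns the next zio, or NULL at the
 * end of the list:
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *cio;
 *	while ((cio = zio_walk_children(pio, &zl)) != NULL)
 *		...;
 */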
zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		/*
		 * Dispatch the parent zio in its own taskq so that
		 * the child can continue to make progress.  This also
		 * prevents overflowing the stack when we have deeply nested
		 * parent-child relationships.
		 */
		zio_taskq_dispatch(pio, type, B_FALSE);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}
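/*
 * Illustrative only: pipeline stages use zio_wait_for_children() as a guard
 * at the top of the stage.  If any children of the given types are still
 * outstanding, the stage backs up and stalls; zio_notify_parent()
 * redispatches it when the last such child completes:
 *
 *	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
 *	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY))
 *		return (ZIO_PIPELINE_STOP);
 */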
int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    enum zio_flag flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}
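/*
 * Illustrative only: a root zio is the usual way consumers fan out and wait
 * for a batch of logical I/Os (a sketch of the common caller pattern, not
 * code from this file):
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (each blkptr bp of interest)
 *		zio_nowait(zio_read(rio, spa, bp, abd, size, done, arg,
 *		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *	error = zio_wait(rio);
 */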
void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted.  This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return;

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (BP_IS_GANG(bp))
		asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync(), keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

	zfs_blkptr_verify(spa, bp);

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);

	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	/*
	 * In the common case, where the parent zio was to a normal vdev,
	 * the child zio must be to a child vdev of that vdev.  Otherwise,
	 * the child zio must be to a top-level vdev.
	 */
	if (pio->io_vd != NULL && pio->io_vd->vdev_ops != &vdev_indirect_ops) {
		ASSERT3P(vd->vdev_parent, ==, pio->io_vd);
	} else {
		ASSERT3P(vd, ==, vd->vdev_top);
	}

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		metaslab_class_t *mc = spa_normal_class(pio->io_spa);

		ASSERT(mc->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    int type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
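/*
 * Illustrative only: callers typically hang a batch of flushes off a root
 * zio and wait for the whole set; per-vdev errors are ignored here because
 * of ZIO_FLAG_DONT_PROPAGATE above (a sketch of the common caller pattern,
 * not code from this file):
 *
 *	zio_t *fio = zio_root(spa, NULL, NULL, 0);
 *	for (each vdev vd that was written)
 *		zio_flush(fio, vd);
 *	(void) zio_wait(fio);
 */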
void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3P(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		int psize = BPE_GET_PSIZE(bp);
		void *data = abd_borrow_buf(zio->io_abd, psize);
		decode_embedded_bp_compressed(bp, data);
		abd_return_buf_copy(zio->io_abd, data, psize);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		blkptr_t *bp = zio->io_bp;
		zio_prop_t *zp = &zio->io_prop;

		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (ZIO_PIPELINE_CONTINUE);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_compress(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_lsize;
	uint64_t psize = zio->io_size;
	int pass = 1;

	EQUIV(lsize != psize, (zio->io_flags & ZIO_FLAG_RAW) != 0);

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
		return (ZIO_PIPELINE_STOP);
	}

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	if (zio->io_children_ready != NULL) {
		/*
		 * Now that all our children are ready, run the callback
		 * associated with this zio in case it wants to modify the
		 * data to be written.
		 */
		ASSERT3U(zp->zp_level, >, 0);
		zio->io_children_ready(zio);
	}

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	/* If it's a compressed write that is not raw, compress the buffer. */
	if (compress != ZIO_COMPRESS_OFF && psize == lsize) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (ZIO_PIPELINE_CONTINUE);
		} else {
			/*
			 * Round the compressed size up to the ashift
			 * of the smallest-ashift device, and zero the tail.
			 * This ensures that the compressed size of the BP
			 * (and thus compressratio property) are correct,
			 * in that we charge for the padding used to fill out
			 * the last sector.
			 */
			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
			size_t rounded = (size_t)P2ROUNDUP(psize,
			    1ULL << spa->spa_min_ashift);
			if (rounded >= lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
				psize = lsize;
			} else {
				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
				abd_take_ownership_of_buf(cdata, B_TRUE);
				abd_zero_off(cdata, psize, rounded - psize);
				psize = rounded;
				zio_push_transform(zio, cdata,
				    psize, lsize, NULL);
			}
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	} else {
		ASSERT3U(psize, !=, 0);
	}
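
	/*
	 * Illustrative arithmetic for the round-up above: if a 128K block
	 * compresses to 33000 bytes and the smallest-ashift device uses
	 * ashift 12 (4K sectors), then P2ROUNDUP(33000, 1ULL << 12) == 36864,
	 * so psize becomes 36864 and bytes 33000..36863 of the compressed
	 * buffer are zeroed.
	 */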
	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}
	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
	ASSERT(zio->io_tqent.tqent_next == NULL);
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

void
zio_delay_interrupt(zio_t *zio)
{
	/*
	 * The timeout_generic() function isn't defined in userspace, so
	 * rather than trying to implement the function, the zio delay
	 * functionality has been disabled for userspace builds.
	 */

#ifdef _KERNEL
	/*
	 * If io_target_timestamp is zero, then no delay has been registered
	 * for this IO, thus jump to the end of this function and "skip" the
	 * delay, issuing it directly to the zio layer.
	 */
	if (zio->io_target_timestamp != 0) {
		hrtime_t now = gethrtime();

		if (now >= zio->io_target_timestamp) {
			/*
			 * This IO has already taken longer than the target
			 * delay to complete, so we don't want to delay it
			 * any longer; we "miss" the delay and issue it
			 * directly to the zio layer.  This is likely due to
			 * the target latency being set to a value less than
			 * the underlying hardware can satisfy (e.g. delay
			 * set to 1ms, but the disks take 10ms to complete an
			 * IO request).
			 */

			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
			    hrtime_t, now);

			zio_interrupt(zio);
		} else {
			hrtime_t diff = zio->io_target_timestamp - now;

			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
			    hrtime_t, now, hrtime_t, diff);

			(void) timeout_generic(CALLOUT_NORMAL,
			    (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
		}

		return;
	}
#endif

	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
	zio_interrupt(zio);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *	(1) the I/O completes
 *	(2) the pipeline stalls waiting for dependent child I/Os
 *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
 *	(4) the I/O is delegated by vdev-level caching or aggregation
 *	(5) the I/O is deferred due to vdev-level queueing
 *	(6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	ASSERT3U(zio->io_queued_timestamp, >, 0);

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		zio->io_pipeline_trace |= zio->io_stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN);
	ASSERT3P(zio->io_executor, ==, NULL);

	zio->io_waiter = curthread;
	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT3P(zio->io_executor, ==, NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O which
		 * will ensure it completes prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio);
	}

	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();
	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute, cancel, or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_flags |= ZIO_FLAG_REEXECUTED;
	pio->io_pipeline_trace = 0;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	zio_link_t *zl = NULL;
	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio, &zl);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on it.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
		pio->io_queued_timestamp = gethrtime();
		zio_execute(pio);
	}
}

void
zio_suspend(spa_t *spa, zio_t *zio)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = B_TRUE;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
1828          */
1829         mutex_enter(&spa->spa_suspend_lock);
1830         spa->spa_suspended = B_FALSE;
1831         cv_broadcast(&spa->spa_suspend_cv);
1832         pio = spa->spa_suspend_zio_root;
1833         spa->spa_suspend_zio_root = NULL;
1834         mutex_exit(&spa->spa_suspend_lock);
1835 
1836         if (pio == NULL)
1837                 return (0);
1838 
1839         zio_reexecute(pio);
1840         return (zio_wait(pio));
1841 }
1842 
1843 void
1844 zio_resume_wait(spa_t *spa)
1845 {
1846         mutex_enter(&spa->spa_suspend_lock);
1847         while (spa_suspended(spa))
1848                 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
1849         mutex_exit(&spa->spa_suspend_lock);
1850 }
1851 
1852 /*
1853  * ==========================================================================
1854  * Gang blocks.
1855  *
1856  * A gang block is a collection of small blocks that looks to the DMU
1857  * like one large block.  When zio_dva_allocate() cannot find a block
1858  * of the requested size, due to either severe fragmentation or the pool
1859  * being nearly full, it calls zio_write_gang_block() to construct the
1860  * block from smaller fragments.
1861  *
1862  * A gang block consists of a gang header (zio_gbh_phys_t) and up to
1863  * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
1864  * an indirect block: it's an array of block pointers.  It consumes
1865  * only one sector and hence is allocatable regardless of fragmentation.
1866  * The gang header's bps point to its gang members, which hold the data.
1867  *
1868  * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
1869  * as the verifier to ensure uniqueness of the SHA256 checksum.
1870  * Critically, the gang block bp's blk_cksum is the checksum of the data,
1871  * not the gang header.  This ensures that data block signatures (needed for
1872  * deduplication) are independent of how the block is physically stored.
1873  *
1874  * Gang blocks can be nested: a gang member may itself be a gang block.
1875  * Thus every gang block is a tree in which the root and all interior
1876  * nodes are gang headers, and the leaves are normal blocks that contain
1877  * user data.  The root of the gang tree is called the gang leader.
1878  *
1879  * To perform any operation (read, rewrite, free, claim) on a gang block,
1880  * zio_gang_assemble() first assembles the gang tree (minus data leaves)
1881  * in the io_gang_tree field of the original logical i/o by recursively
1882  * reading the gang leader and all gang headers below it.  This yields
1883  * an in-core tree containing the contents of every gang header and the
1884  * bps for every constituent of the gang block.
1885  *
1886  * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
1887  * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
1888  * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
1889  * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
1890  * zio_read_gang() is a wrapper around zio_read() that omits reading gang
1891  * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
1892  * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
1893  * of the gang header plus zio_checksum_compute() of the data to update the
1894  * gang header's blk_cksum as described above.
1895  *
1896  * The two-phase assemble/issue model solves the problem of partial failure --
1897  * what if you'd freed part of a gang block but then couldn't read the
1898  * gang header for another part?
Assembling the entire gang tree first 1899 * ensures that all the necessary gang header I/O has succeeded before 1900 * starting the actual work of free, claim, or write. Once the gang tree 1901 * is assembled, free and claim are in-memory operations that cannot fail. 1902 * 1903 * In the event that a gang write fails, zio_dva_unallocate() walks the 1904 * gang tree to immediately free (i.e. insert back into the space map) 1905 * everything we've allocated. This ensures that we don't get ENOSPC 1906 * errors during repeated suspend/resume cycles due to a flaky device. 1907 * 1908 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1909 * the gang tree, we won't modify the block, so we can safely defer the free 1910 * (knowing that the block is still intact). If we *can* assemble the gang 1911 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1912 * each constituent bp and we can allocate a new block on the next sync pass. 1913 * 1914 * In all cases, the gang tree allows complete recovery from partial failure. 1915 * ========================================================================== 1916 */ 1917 1918 static void 1919 zio_gang_issue_func_done(zio_t *zio) 1920 { 1921 abd_put(zio->io_abd); 1922 } 1923 1924 static zio_t * 1925 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1926 uint64_t offset) 1927 { 1928 if (gn != NULL) 1929 return (pio); 1930 1931 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 1932 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 1933 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1934 &pio->io_bookmark)); 1935 } 1936 1937 static zio_t * 1938 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1939 uint64_t offset) 1940 { 1941 zio_t *zio; 1942 1943 if (gn != NULL) { 1944 abd_t *gbh_abd = 1945 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1946 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1947 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 1948 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1949 &pio->io_bookmark); 1950 /* 1951 * As we rewrite each gang header, the pipeline will compute 1952 * a new gang block header checksum for it; but no one will 1953 * compute a new data checksum, so we do that here. The one 1954 * exception is the gang leader: the pipeline already computed 1955 * its data checksum because that stage precedes gang assembly. 1956 * (Presently, nothing actually uses interior data checksums; 1957 * this is just good hygiene.) 1958 */ 1959 if (gn != pio->io_gang_leader->io_gang_tree) { 1960 abd_t *buf = abd_get_offset(data, offset); 1961 1962 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 1963 buf, BP_GET_PSIZE(bp)); 1964 1965 abd_put(buf); 1966 } 1967 /* 1968 * If we are here to damage data for testing purposes, 1969 * leave the GBH alone so that we can detect the damage. 
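                 *
                 * (Editor's note: clearing the ZIO_VDEV_IO_STAGES bits below
                 * keeps this rewrite out of the vdev I/O stages entirely, so
                 * nothing is written to disk and the injected damage stays
                 * in place for the test to detect.)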
1970 */ 1971 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 1972 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 1973 } else { 1974 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1975 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 1976 zio_gang_issue_func_done, NULL, pio->io_priority, 1977 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1978 } 1979 1980 return (zio); 1981 } 1982 1983 /* ARGSUSED */ 1984 static zio_t * 1985 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1986 uint64_t offset) 1987 { 1988 return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 1989 ZIO_GANG_CHILD_FLAGS(pio))); 1990 } 1991 1992 /* ARGSUSED */ 1993 static zio_t * 1994 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1995 uint64_t offset) 1996 { 1997 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 1998 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1999 } 2000 2001 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2002 NULL, 2003 zio_read_gang, 2004 zio_rewrite_gang, 2005 zio_free_gang, 2006 zio_claim_gang, 2007 NULL 2008 }; 2009 2010 static void zio_gang_tree_assemble_done(zio_t *zio); 2011 2012 static zio_gang_node_t * 2013 zio_gang_node_alloc(zio_gang_node_t **gnpp) 2014 { 2015 zio_gang_node_t *gn; 2016 2017 ASSERT(*gnpp == NULL); 2018 2019 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2020 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2021 *gnpp = gn; 2022 2023 return (gn); 2024 } 2025 2026 static void 2027 zio_gang_node_free(zio_gang_node_t **gnpp) 2028 { 2029 zio_gang_node_t *gn = *gnpp; 2030 2031 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2032 ASSERT(gn->gn_child[g] == NULL); 2033 2034 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2035 kmem_free(gn, sizeof (*gn)); 2036 *gnpp = NULL; 2037 } 2038 2039 static void 2040 zio_gang_tree_free(zio_gang_node_t **gnpp) 2041 { 2042 zio_gang_node_t *gn = *gnpp; 2043 2044 if (gn == NULL) 2045 return; 2046 2047 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2048 zio_gang_tree_free(&gn->gn_child[g]); 2049 2050 zio_gang_node_free(gnpp); 2051 } 2052 2053 static void 2054 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2055 { 2056 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2057 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2058 2059 ASSERT(gio->io_gang_leader == gio); 2060 ASSERT(BP_IS_GANG(bp)); 2061 2062 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2063 zio_gang_tree_assemble_done, gn, gio->io_priority, 2064 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2065 } 2066 2067 static void 2068 zio_gang_tree_assemble_done(zio_t *zio) 2069 { 2070 zio_t *gio = zio->io_gang_leader; 2071 zio_gang_node_t *gn = zio->io_private; 2072 blkptr_t *bp = zio->io_bp; 2073 2074 ASSERT(gio == zio_unique_parent(zio)); 2075 ASSERT(zio->io_child_count == 0); 2076 2077 if (zio->io_error) 2078 return; 2079 2080 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2081 if (BP_SHOULD_BYTESWAP(bp)) 2082 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2083 2084 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2085 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2086 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2087 2088 abd_put(zio->io_abd); 2089 2090 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2091 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2092 if (!BP_IS_GANG(gbp)) 2093 continue; 2094 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2095 } 2096 } 2097 2098 static void 2099 zio_gang_tree_issue(zio_t *pio, 
zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2100 uint64_t offset) 2101 { 2102 zio_t *gio = pio->io_gang_leader; 2103 zio_t *zio; 2104 2105 ASSERT(BP_IS_GANG(bp) == !!gn); 2106 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2107 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2108 2109 /* 2110 * If you're a gang header, your data is in gn->gn_gbh. 2111 * If you're a gang member, your data is in 'data' and gn == NULL. 2112 */ 2113 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2114 2115 if (gn != NULL) { 2116 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2117 2118 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2119 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2120 if (BP_IS_HOLE(gbp)) 2121 continue; 2122 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2123 offset); 2124 offset += BP_GET_PSIZE(gbp); 2125 } 2126 } 2127 2128 if (gn == gio->io_gang_tree) 2129 ASSERT3U(gio->io_size, ==, offset); 2130 2131 if (zio != pio) 2132 zio_nowait(zio); 2133 } 2134 2135 static int 2136 zio_gang_assemble(zio_t *zio) 2137 { 2138 blkptr_t *bp = zio->io_bp; 2139 2140 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2141 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2142 2143 zio->io_gang_leader = zio; 2144 2145 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2146 2147 return (ZIO_PIPELINE_CONTINUE); 2148 } 2149 2150 static int 2151 zio_gang_issue(zio_t *zio) 2152 { 2153 blkptr_t *bp = zio->io_bp; 2154 2155 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2156 return (ZIO_PIPELINE_STOP); 2157 } 2158 2159 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2160 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2161 2162 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2163 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2164 0); 2165 else 2166 zio_gang_tree_free(&zio->io_gang_tree); 2167 2168 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2169 2170 return (ZIO_PIPELINE_CONTINUE); 2171 } 2172 2173 static void 2174 zio_write_gang_member_ready(zio_t *zio) 2175 { 2176 zio_t *pio = zio_unique_parent(zio); 2177 zio_t *gio = zio->io_gang_leader; 2178 dva_t *cdva = zio->io_bp->blk_dva; 2179 dva_t *pdva = pio->io_bp->blk_dva; 2180 uint64_t asize; 2181 2182 if (BP_IS_HOLE(zio->io_bp)) 2183 return; 2184 2185 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2186 2187 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2188 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2189 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2190 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2191 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2192 2193 mutex_enter(&pio->io_lock); 2194 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2195 ASSERT(DVA_GET_GANG(&pdva[d])); 2196 asize = DVA_GET_ASIZE(&pdva[d]); 2197 asize += DVA_GET_ASIZE(&cdva[d]); 2198 DVA_SET_ASIZE(&pdva[d], asize); 2199 } 2200 mutex_exit(&pio->io_lock); 2201 } 2202 2203 static void 2204 zio_write_gang_done(zio_t *zio) 2205 { 2206 abd_put(zio->io_abd); 2207 } 2208 2209 static int 2210 zio_write_gang_block(zio_t *pio) 2211 { 2212 spa_t *spa = pio->io_spa; 2213 metaslab_class_t *mc = spa_normal_class(spa); 2214 blkptr_t *bp = pio->io_bp; 2215 zio_t *gio = pio->io_gang_leader; 2216 zio_t *zio; 2217 zio_gang_node_t *gn, **gnpp; 2218 zio_gbh_phys_t *gbh; 2219 abd_t *gbh_abd; 2220 uint64_t txg = pio->io_txg; 2221 uint64_t resid = pio->io_size; 2222 uint64_t lsize; 2223 int copies = gio->io_prop.zp_copies; 2224 int gbh_copies = 
MIN(copies + 1, spa_max_replication(spa)); 2225 zio_prop_t zp; 2226 int error; 2227 2228 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 2229 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2230 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2231 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2232 2233 flags |= METASLAB_ASYNC_ALLOC; 2234 VERIFY(refcount_held(&mc->mc_alloc_slots[pio->io_allocator], 2235 pio)); 2236 2237 /* 2238 * The logical zio has already placed a reservation for 2239 * 'copies' allocation slots but gang blocks may require 2240 * additional copies. These additional copies 2241 * (i.e. gbh_copies - copies) are guaranteed to succeed 2242 * since metaslab_class_throttle_reserve() always allows 2243 * additional reservations for gang blocks. 2244 */ 2245 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2246 pio->io_allocator, pio, flags)); 2247 } 2248 2249 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 2250 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 2251 &pio->io_alloc_list, pio, pio->io_allocator); 2252 if (error) { 2253 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2254 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2255 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2256 2257 /* 2258 * If we failed to allocate the gang block header then 2259 * we remove any additional allocation reservations that 2260 * we placed here. The original reservation will 2261 * be removed when the logical I/O goes to the ready 2262 * stage. 2263 */ 2264 metaslab_class_throttle_unreserve(mc, 2265 gbh_copies - copies, pio->io_allocator, pio); 2266 } 2267 pio->io_error = error; 2268 return (ZIO_PIPELINE_CONTINUE); 2269 } 2270 2271 if (pio == gio) { 2272 gnpp = &gio->io_gang_tree; 2273 } else { 2274 gnpp = pio->io_private; 2275 ASSERT(pio->io_ready == zio_write_gang_member_ready); 2276 } 2277 2278 gn = zio_gang_node_alloc(gnpp); 2279 gbh = gn->gn_gbh; 2280 bzero(gbh, SPA_GANGBLOCKSIZE); 2281 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2282 2283 /* 2284 * Create the gang header. 2285 */ 2286 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2287 zio_write_gang_done, NULL, pio->io_priority, 2288 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2289 2290 /* 2291 * Create and nowait the gang children. 2292 */ 2293 for (int g = 0; resid != 0; resid -= lsize, g++) { 2294 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2295 SPA_MINBLOCKSIZE); 2296 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2297 2298 zp.zp_checksum = gio->io_prop.zp_checksum; 2299 zp.zp_compress = ZIO_COMPRESS_OFF; 2300 zp.zp_type = DMU_OT_NONE; 2301 zp.zp_level = 0; 2302 zp.zp_copies = gio->io_prop.zp_copies; 2303 zp.zp_dedup = B_FALSE; 2304 zp.zp_dedup_verify = B_FALSE; 2305 zp.zp_nopwrite = B_FALSE; 2306 2307 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 2308 abd_get_offset(pio->io_abd, pio->io_size - resid), lsize, 2309 lsize, &zp, zio_write_gang_member_ready, NULL, NULL, 2310 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 2311 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2312 2313 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2314 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2315 ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 2316 2317 /* 2318 * Gang children won't throttle but we should 2319 * account for their work, so reserve an allocation 2320 * slot for them here. 
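                         *
                         * Editor's hedged arithmetic (illustration only):
                         * with zp_copies == 2 and spa_max_replication() >= 3,
                         * the earlier header reservation covered
                         * gbh_copies - copies == 1 extra slot, and each of
                         * the up to SPA_GBH_NBLKPTRS gang children reserves
                         * zp_copies == 2 slots of its own here.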
2321 */ 2322 VERIFY(metaslab_class_throttle_reserve(mc, 2323 zp.zp_copies, cio->io_allocator, cio, flags)); 2324 } 2325 zio_nowait(cio); 2326 } 2327 2328 /* 2329 * Set pio's pipeline to just wait for zio to finish. 2330 */ 2331 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2332 2333 zio_nowait(zio); 2334 2335 return (ZIO_PIPELINE_CONTINUE); 2336 } 2337 2338 /* 2339 * The zio_nop_write stage in the pipeline determines if allocating a 2340 * new bp is necessary. The nopwrite feature can handle writes in 2341 * either syncing or open context (i.e. zil writes) and as a result is 2342 * mutually exclusive with dedup. 2343 * 2344 * By leveraging a cryptographically secure checksum, such as SHA256, we 2345 * can compare the checksums of the new data and the old to determine if 2346 * allocating a new block is required. Note that our requirements for 2347 * cryptographic strength are fairly weak: there can't be any accidental 2348 * hash collisions, but we don't need to be secure against intentional 2349 * (malicious) collisions. To trigger a nopwrite, you have to be able 2350 * to write the file to begin with, and triggering an incorrect (hash 2351 * collision) nopwrite is no worse than simply writing to the file. 2352 * That said, there are no known attacks against the checksum algorithms 2353 * used for nopwrite, assuming that the salt and the checksums 2354 * themselves remain secret. 2355 */ 2356 static int 2357 zio_nop_write(zio_t *zio) 2358 { 2359 blkptr_t *bp = zio->io_bp; 2360 blkptr_t *bp_orig = &zio->io_bp_orig; 2361 zio_prop_t *zp = &zio->io_prop; 2362 2363 ASSERT(BP_GET_LEVEL(bp) == 0); 2364 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2365 ASSERT(zp->zp_nopwrite); 2366 ASSERT(!zp->zp_dedup); 2367 ASSERT(zio->io_bp_override == NULL); 2368 ASSERT(IO_IS_ALLOCATING(zio)); 2369 2370 /* 2371 * Check to see if the original bp and the new bp have matching 2372 * characteristics (i.e. same checksum, compression algorithms, etc). 2373 * If they don't then just continue with the pipeline which will 2374 * allocate a new bp. 2375 */ 2376 if (BP_IS_HOLE(bp_orig) || 2377 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 2378 ZCHECKSUM_FLAG_NOPWRITE) || 2379 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 2380 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 2381 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 2382 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 2383 return (ZIO_PIPELINE_CONTINUE); 2384 2385 /* 2386 * If the checksums match then reset the pipeline so that we 2387 * avoid allocating a new bp and issuing any I/O. 
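         *
         * Editor's hedged illustration: for an overwrite whose contents are
         * unchanged and whose checksum (e.g. SHA256, which carries
         * ZCHECKSUM_FLAG_NOPWRITE) matches bp_orig's blk_cksum, the code
         * below copies bp_orig back into bp, shrinks the pipeline to the
         * interlock stages, and sets ZIO_FLAG_NOPWRITE -- no new DVA is
         * allocated and no device write is issued.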
2388 */ 2389 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 2390 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 2391 ZCHECKSUM_FLAG_NOPWRITE); 2392 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 2393 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 2394 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 2395 ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 2396 sizeof (uint64_t)) == 0); 2397 2398 *bp = *bp_orig; 2399 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2400 zio->io_flags |= ZIO_FLAG_NOPWRITE; 2401 } 2402 2403 return (ZIO_PIPELINE_CONTINUE); 2404 } 2405 2406 /* 2407 * ========================================================================== 2408 * Dedup 2409 * ========================================================================== 2410 */ 2411 static void 2412 zio_ddt_child_read_done(zio_t *zio) 2413 { 2414 blkptr_t *bp = zio->io_bp; 2415 ddt_entry_t *dde = zio->io_private; 2416 ddt_phys_t *ddp; 2417 zio_t *pio = zio_unique_parent(zio); 2418 2419 mutex_enter(&pio->io_lock); 2420 ddp = ddt_phys_select(dde, bp); 2421 if (zio->io_error == 0) 2422 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2423 2424 if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 2425 dde->dde_repair_abd = zio->io_abd; 2426 else 2427 abd_free(zio->io_abd); 2428 mutex_exit(&pio->io_lock); 2429 } 2430 2431 static int 2432 zio_ddt_read_start(zio_t *zio) 2433 { 2434 blkptr_t *bp = zio->io_bp; 2435 2436 ASSERT(BP_GET_DEDUP(bp)); 2437 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2438 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2439 2440 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2441 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2442 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2443 ddt_phys_t *ddp = dde->dde_phys; 2444 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2445 blkptr_t blk; 2446 2447 ASSERT(zio->io_vsd == NULL); 2448 zio->io_vsd = dde; 2449 2450 if (ddp_self == NULL) 2451 return (ZIO_PIPELINE_CONTINUE); 2452 2453 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2454 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2455 continue; 2456 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2457 &blk); 2458 zio_nowait(zio_read(zio, zio->io_spa, &blk, 2459 abd_alloc_for_io(zio->io_size, B_TRUE), 2460 zio->io_size, zio_ddt_child_read_done, dde, 2461 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 2462 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 2463 } 2464 return (ZIO_PIPELINE_CONTINUE); 2465 } 2466 2467 zio_nowait(zio_read(zio, zio->io_spa, bp, 2468 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 2469 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2470 2471 return (ZIO_PIPELINE_CONTINUE); 2472 } 2473 2474 static int 2475 zio_ddt_read_done(zio_t *zio) 2476 { 2477 blkptr_t *bp = zio->io_bp; 2478 2479 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 2480 return (ZIO_PIPELINE_STOP); 2481 } 2482 2483 ASSERT(BP_GET_DEDUP(bp)); 2484 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2485 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2486 2487 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2488 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2489 ddt_entry_t *dde = zio->io_vsd; 2490 if (ddt == NULL) { 2491 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2492 return (ZIO_PIPELINE_CONTINUE); 2493 } 2494 if (dde == NULL) { 2495 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 2496 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2497 return (ZIO_PIPELINE_STOP); 2498 } 2499 if (dde->dde_repair_abd != NULL) { 2500 abd_copy(zio->io_abd, dde->dde_repair_abd, 2501 
zio->io_size); 2502 zio->io_child_error[ZIO_CHILD_DDT] = 0; 2503 } 2504 ddt_repair_done(ddt, dde); 2505 zio->io_vsd = NULL; 2506 } 2507 2508 ASSERT(zio->io_vsd == NULL); 2509 2510 return (ZIO_PIPELINE_CONTINUE); 2511 } 2512 2513 static boolean_t 2514 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2515 { 2516 spa_t *spa = zio->io_spa; 2517 boolean_t do_raw = (zio->io_flags & ZIO_FLAG_RAW); 2518 2519 /* We should never get a raw, override zio */ 2520 ASSERT(!(zio->io_bp_override && do_raw)); 2521 2522 /* 2523 * Note: we compare the original data, not the transformed data, 2524 * because when zio->io_bp is an override bp, we will not have 2525 * pushed the I/O transforms. That's an important optimization 2526 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2527 */ 2528 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2529 zio_t *lio = dde->dde_lead_zio[p]; 2530 2531 if (lio != NULL) { 2532 return (lio->io_orig_size != zio->io_orig_size || 2533 abd_cmp(zio->io_orig_abd, lio->io_orig_abd, 2534 zio->io_orig_size) != 0); 2535 } 2536 } 2537 2538 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2539 ddt_phys_t *ddp = &dde->dde_phys[p]; 2540 2541 if (ddp->ddp_phys_birth != 0) { 2542 arc_buf_t *abuf = NULL; 2543 arc_flags_t aflags = ARC_FLAG_WAIT; 2544 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2545 blkptr_t blk = *zio->io_bp; 2546 int error; 2547 2548 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2549 2550 ddt_exit(ddt); 2551 2552 /* 2553 * Intuitively, it would make more sense to compare 2554 * io_abd than io_orig_abd in the raw case since you 2555 * don't want to look at any transformations that have 2556 * happened to the data. However, for raw I/Os the 2557 * data will actually be the same in io_abd and 2558 * io_orig_abd, so all we have to do is issue this as 2559 * a raw ARC read. 
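                         *
                         * (Editor's note: for ZIO_FLAG_RAW the transform
                         * stack is empty -- the asserts just below spell
                         * this out -- so comparing io_orig_abd against the
                         * on-disk copy is a like-for-like comparison.)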
2560 */ 2561 if (do_raw) { 2562 zio_flags |= ZIO_FLAG_RAW; 2563 ASSERT3U(zio->io_size, ==, zio->io_orig_size); 2564 ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd, 2565 zio->io_size)); 2566 ASSERT3P(zio->io_transform_stack, ==, NULL); 2567 } 2568 2569 error = arc_read(NULL, spa, &blk, 2570 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2571 zio_flags, &aflags, &zio->io_bookmark); 2572 2573 if (error == 0) { 2574 if (arc_buf_size(abuf) != zio->io_orig_size || 2575 abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 2576 zio->io_orig_size) != 0) 2577 error = SET_ERROR(EEXIST); 2578 arc_buf_destroy(abuf, &abuf); 2579 } 2580 2581 ddt_enter(ddt); 2582 return (error != 0); 2583 } 2584 } 2585 2586 return (B_FALSE); 2587 } 2588 2589 static void 2590 zio_ddt_child_write_ready(zio_t *zio) 2591 { 2592 int p = zio->io_prop.zp_copies; 2593 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2594 ddt_entry_t *dde = zio->io_private; 2595 ddt_phys_t *ddp = &dde->dde_phys[p]; 2596 zio_t *pio; 2597 2598 if (zio->io_error) 2599 return; 2600 2601 ddt_enter(ddt); 2602 2603 ASSERT(dde->dde_lead_zio[p] == zio); 2604 2605 ddt_phys_fill(ddp, zio->io_bp); 2606 2607 zio_link_t *zl = NULL; 2608 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2609 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2610 2611 ddt_exit(ddt); 2612 } 2613 2614 static void 2615 zio_ddt_child_write_done(zio_t *zio) 2616 { 2617 int p = zio->io_prop.zp_copies; 2618 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2619 ddt_entry_t *dde = zio->io_private; 2620 ddt_phys_t *ddp = &dde->dde_phys[p]; 2621 2622 ddt_enter(ddt); 2623 2624 ASSERT(ddp->ddp_refcnt == 0); 2625 ASSERT(dde->dde_lead_zio[p] == zio); 2626 dde->dde_lead_zio[p] = NULL; 2627 2628 if (zio->io_error == 0) { 2629 zio_link_t *zl = NULL; 2630 while (zio_walk_parents(zio, &zl) != NULL) 2631 ddt_phys_addref(ddp); 2632 } else { 2633 ddt_phys_clear(ddp); 2634 } 2635 2636 ddt_exit(ddt); 2637 } 2638 2639 static void 2640 zio_ddt_ditto_write_done(zio_t *zio) 2641 { 2642 int p = DDT_PHYS_DITTO; 2643 zio_prop_t *zp = &zio->io_prop; 2644 blkptr_t *bp = zio->io_bp; 2645 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2646 ddt_entry_t *dde = zio->io_private; 2647 ddt_phys_t *ddp = &dde->dde_phys[p]; 2648 ddt_key_t *ddk = &dde->dde_key; 2649 2650 ddt_enter(ddt); 2651 2652 ASSERT(ddp->ddp_refcnt == 0); 2653 ASSERT(dde->dde_lead_zio[p] == zio); 2654 dde->dde_lead_zio[p] = NULL; 2655 2656 if (zio->io_error == 0) { 2657 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2658 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2659 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2660 if (ddp->ddp_phys_birth != 0) 2661 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2662 ddt_phys_fill(ddp, bp); 2663 } 2664 2665 ddt_exit(ddt); 2666 } 2667 2668 static int 2669 zio_ddt_write(zio_t *zio) 2670 { 2671 spa_t *spa = zio->io_spa; 2672 blkptr_t *bp = zio->io_bp; 2673 uint64_t txg = zio->io_txg; 2674 zio_prop_t *zp = &zio->io_prop; 2675 int p = zp->zp_copies; 2676 int ditto_copies; 2677 zio_t *cio = NULL; 2678 zio_t *dio = NULL; 2679 ddt_t *ddt = ddt_select(spa, bp); 2680 ddt_entry_t *dde; 2681 ddt_phys_t *ddp; 2682 2683 ASSERT(BP_GET_DEDUP(bp)); 2684 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2685 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2686 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 2687 2688 ddt_enter(ddt); 2689 dde = ddt_lookup(ddt, bp, B_TRUE); 2690 ddp = &dde->dde_phys[p]; 2691 2692 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2693 /* 2694 * If we're using a weak 
checksum, upgrade to a strong checksum 2695 * and try again. If we're already using a strong checksum, 2696 * we can't resolve it, so just convert to an ordinary write. 2697 * (And automatically e-mail a paper to Nature?) 2698 */ 2699 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 2700 ZCHECKSUM_FLAG_DEDUP)) { 2701 zp->zp_checksum = spa_dedup_checksum(spa); 2702 zio_pop_transforms(zio); 2703 zio->io_stage = ZIO_STAGE_OPEN; 2704 BP_ZERO(bp); 2705 } else { 2706 zp->zp_dedup = B_FALSE; 2707 BP_SET_DEDUP(bp, B_FALSE); 2708 } 2709 ASSERT(!BP_GET_DEDUP(bp)); 2710 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2711 ddt_exit(ddt); 2712 return (ZIO_PIPELINE_CONTINUE); 2713 } 2714 2715 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2716 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2717 2718 if (ditto_copies > ddt_ditto_copies_present(dde) && 2719 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2720 zio_prop_t czp = *zp; 2721 2722 czp.zp_copies = ditto_copies; 2723 2724 /* 2725 * If we arrived here with an override bp, we won't have run 2726 * the transform stack, so we won't have the data we need to 2727 * generate a child i/o. So, toss the override bp and restart. 2728 * This is safe, because using the override bp is just an 2729 * optimization; and it's rare, so the cost doesn't matter. 2730 */ 2731 if (zio->io_bp_override) { 2732 zio_pop_transforms(zio); 2733 zio->io_stage = ZIO_STAGE_OPEN; 2734 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2735 zio->io_bp_override = NULL; 2736 BP_ZERO(bp); 2737 ddt_exit(ddt); 2738 return (ZIO_PIPELINE_CONTINUE); 2739 } 2740 2741 dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2742 zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL, 2743 NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 2744 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2745 2746 zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL); 2747 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2748 } 2749 2750 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2751 if (ddp->ddp_phys_birth != 0) 2752 ddt_bp_fill(ddp, bp, txg); 2753 if (dde->dde_lead_zio[p] != NULL) 2754 zio_add_child(zio, dde->dde_lead_zio[p]); 2755 else 2756 ddt_phys_addref(ddp); 2757 } else if (zio->io_bp_override) { 2758 ASSERT(bp->blk_birth == txg); 2759 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2760 ddt_phys_fill(ddp, bp); 2761 ddt_phys_addref(ddp); 2762 } else { 2763 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2764 zio->io_orig_size, zio->io_orig_size, zp, 2765 zio_ddt_child_write_ready, NULL, NULL, 2766 zio_ddt_child_write_done, dde, zio->io_priority, 2767 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2768 2769 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 2770 dde->dde_lead_zio[p] = cio; 2771 } 2772 2773 ddt_exit(ddt); 2774 2775 if (cio) 2776 zio_nowait(cio); 2777 if (dio) 2778 zio_nowait(dio); 2779 2780 return (ZIO_PIPELINE_CONTINUE); 2781 } 2782 2783 ddt_entry_t *freedde; /* for debugging */ 2784 2785 static int 2786 zio_ddt_free(zio_t *zio) 2787 { 2788 spa_t *spa = zio->io_spa; 2789 blkptr_t *bp = zio->io_bp; 2790 ddt_t *ddt = ddt_select(spa, bp); 2791 ddt_entry_t *dde; 2792 ddt_phys_t *ddp; 2793 2794 ASSERT(BP_GET_DEDUP(bp)); 2795 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2796 2797 ddt_enter(ddt); 2798 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2799 ddp = ddt_phys_select(dde, bp); 2800 ddt_phys_decref(ddp); 2801 ddt_exit(ddt); 2802 2803 return (ZIO_PIPELINE_CONTINUE); 2804 } 2805 2806 /* 2807 * ========================================================================== 2808 * 
Allocate and free blocks 2809 * ========================================================================== 2810 */ 2811 2812 static zio_t * 2813 zio_io_to_allocate(spa_t *spa, int allocator) 2814 { 2815 zio_t *zio; 2816 2817 ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator])); 2818 2819 zio = avl_first(&spa->spa_alloc_trees[allocator]); 2820 if (zio == NULL) 2821 return (NULL); 2822 2823 ASSERT(IO_IS_ALLOCATING(zio)); 2824 2825 /* 2826 * Try to place a reservation for this zio. If we're unable to 2827 * reserve then we throttle. 2828 */ 2829 ASSERT3U(zio->io_allocator, ==, allocator); 2830 if (!metaslab_class_throttle_reserve(spa_normal_class(spa), 2831 zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) { 2832 return (NULL); 2833 } 2834 2835 avl_remove(&spa->spa_alloc_trees[allocator], zio); 2836 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 2837 2838 return (zio); 2839 } 2840 2841 static int 2842 zio_dva_throttle(zio_t *zio) 2843 { 2844 spa_t *spa = zio->io_spa; 2845 zio_t *nio; 2846 2847 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 2848 !spa_normal_class(zio->io_spa)->mc_alloc_throttle_enabled || 2849 zio->io_child_type == ZIO_CHILD_GANG || 2850 zio->io_flags & ZIO_FLAG_NODATA) { 2851 return (ZIO_PIPELINE_CONTINUE); 2852 } 2853 2854 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2855 2856 ASSERT3U(zio->io_queued_timestamp, >, 0); 2857 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2858 2859 zbookmark_phys_t *bm = &zio->io_bookmark; 2860 /* 2861 * We want to try to use as many allocators as possible to help improve 2862 * performance, but we also want logically adjacent IOs to be physically 2863 * adjacent to improve sequential read performance. We chunk each object 2864 * into 2^20 block regions, and then hash based on the objset, object, 2865 * level, and region to accomplish both of these goals. 2866 */ 2867 zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object, 2868 bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; 2869 mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]); 2870 2871 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2872 avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio); 2873 2874 nio = zio_io_to_allocate(zio->io_spa, zio->io_allocator); 2875 mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]); 2876 2877 if (nio == zio) 2878 return (ZIO_PIPELINE_CONTINUE); 2879 2880 if (nio != NULL) { 2881 ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2882 /* 2883 * We are passing control to a new zio so make sure that 2884 * it is processed by a different thread. We do this to 2885 * avoid stack overflows that can occur when parents are 2886 * throttled and children are making progress. We allow 2887 * it to go to the head of the taskq since it's already 2888 * been waiting. 
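                 *
                 * (Editor's note: the B_TRUE passed to zio_taskq_dispatch()
                 * below is the cut-in-line flag, implementing the "head of
                 * the taskq" behavior described above.)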
2889 */ 2890 zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 2891 } 2892 return (ZIO_PIPELINE_STOP); 2893 } 2894 2895 void 2896 zio_allocate_dispatch(spa_t *spa, int allocator) 2897 { 2898 zio_t *zio; 2899 2900 mutex_enter(&spa->spa_alloc_locks[allocator]); 2901 zio = zio_io_to_allocate(spa, allocator); 2902 mutex_exit(&spa->spa_alloc_locks[allocator]); 2903 if (zio == NULL) 2904 return; 2905 2906 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 2907 ASSERT0(zio->io_error); 2908 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 2909 } 2910 2911 static int 2912 zio_dva_allocate(zio_t *zio) 2913 { 2914 spa_t *spa = zio->io_spa; 2915 metaslab_class_t *mc = spa_normal_class(spa); 2916 blkptr_t *bp = zio->io_bp; 2917 int error; 2918 int flags = 0; 2919 2920 if (zio->io_gang_leader == NULL) { 2921 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2922 zio->io_gang_leader = zio; 2923 } 2924 2925 ASSERT(BP_IS_HOLE(bp)); 2926 ASSERT0(BP_GET_NDVAS(bp)); 2927 ASSERT3U(zio->io_prop.zp_copies, >, 0); 2928 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2929 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2930 2931 if (zio->io_flags & ZIO_FLAG_NODATA) { 2932 flags |= METASLAB_DONT_THROTTLE; 2933 } 2934 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) { 2935 flags |= METASLAB_GANG_CHILD; 2936 } 2937 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) { 2938 flags |= METASLAB_ASYNC_ALLOC; 2939 } 2940 2941 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2942 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 2943 &zio->io_alloc_list, zio, zio->io_allocator); 2944 2945 if (error != 0) { 2946 spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " 2947 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 2948 error); 2949 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2950 return (zio_write_gang_block(zio)); 2951 zio->io_error = error; 2952 } 2953 2954 return (ZIO_PIPELINE_CONTINUE); 2955 } 2956 2957 static int 2958 zio_dva_free(zio_t *zio) 2959 { 2960 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 2961 2962 return (ZIO_PIPELINE_CONTINUE); 2963 } 2964 2965 static int 2966 zio_dva_claim(zio_t *zio) 2967 { 2968 int error; 2969 2970 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 2971 if (error) 2972 zio->io_error = error; 2973 2974 return (ZIO_PIPELINE_CONTINUE); 2975 } 2976 2977 /* 2978 * Undo an allocation. This is used by zio_done() when an I/O fails 2979 * and we want to give back the block we just allocated. 2980 * This handles both normal blocks and gang blocks. 2981 */ 2982 static void 2983 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 2984 { 2985 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2986 ASSERT(zio->io_bp_override == NULL); 2987 2988 if (!BP_IS_HOLE(bp)) 2989 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 2990 2991 if (gn != NULL) { 2992 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2993 zio_dva_unallocate(zio, gn->gn_child[g], 2994 &gn->gn_gbh->zg_blkptr[g]); 2995 } 2996 } 2997 } 2998 2999 /* 3000 * Try to allocate an intent log block. Return 0 on success, errno on failure. 
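 *
 * Editor's hedged usage sketch (caller-side names are assumptions):
 *
 *	boolean_t slog;
 *	blkptr_t new_bp;
 *	int error = zio_alloc_zil(spa, objset, txg, &new_bp, &old_bp,
 *	    size, &slog);
 *
 * On success, 'slog' reports whether the block came from the log class
 * (TRUE) or fell back to the normal class (FALSE).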
3001 */ 3002 int 3003 zio_alloc_zil(spa_t *spa, uint64_t objset, uint64_t txg, blkptr_t *new_bp, 3004 blkptr_t *old_bp, uint64_t size, boolean_t *slog) 3005 { 3006 int error = 1; 3007 zio_alloc_list_t io_alloc_list; 3008 3009 ASSERT(txg > spa_syncing_txg(spa)); 3010 3011 metaslab_trace_init(&io_alloc_list); 3012 /* 3013 * When allocating a zil block, we don't have information about 3014 * the final destination of the block except the objset it's part 3015 * of, so we just hash the objset ID to pick the allocator to get 3016 * some parallelism. 3017 */ 3018 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3019 txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL, 3020 cityhash4(0, 0, 0, objset) % spa->spa_alloc_count); 3021 if (error == 0) { 3022 *slog = TRUE; 3023 } else { 3024 error = metaslab_alloc(spa, spa_normal_class(spa), size, 3025 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, 3026 &io_alloc_list, NULL, cityhash4(0, 0, 0, objset) % 3027 spa->spa_alloc_count); 3028 if (error == 0) 3029 *slog = FALSE; 3030 } 3031 metaslab_trace_fini(&io_alloc_list); 3032 3033 if (error == 0) { 3034 BP_SET_LSIZE(new_bp, size); 3035 BP_SET_PSIZE(new_bp, size); 3036 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 3037 BP_SET_CHECKSUM(new_bp, 3038 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 3039 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3040 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3041 BP_SET_LEVEL(new_bp, 0); 3042 BP_SET_DEDUP(new_bp, 0); 3043 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3044 } else { 3045 zfs_dbgmsg("%s: zil block allocation failure: " 3046 "size %llu, error %d", spa_name(spa), size, error); 3047 } 3048 3049 return (error); 3050 } 3051 3052 /* 3053 * ========================================================================== 3054 * Read and write to physical devices 3055 * ========================================================================== 3056 */ 3057 3058 3059 /* 3060 * Issue an I/O to the underlying vdev. Typically the issue pipeline 3061 * stops after this stage and will resume upon I/O completion. 3062 * However, there are instances where the vdev layer may need to 3063 * continue the pipeline when an I/O was not issued. Since the I/O 3064 * that was sent to the vdev layer might be different than the one 3065 * currently active in the pipeline (see vdev_queue_io()), we explicitly 3066 * force the underlying vdev layers to call either zio_execute() or 3067 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3068 */ 3069 static int 3070 zio_vdev_io_start(zio_t *zio) 3071 { 3072 vdev_t *vd = zio->io_vd; 3073 uint64_t align; 3074 spa_t *spa = zio->io_spa; 3075 3076 ASSERT(zio->io_error == 0); 3077 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3078 3079 if (vd == NULL) { 3080 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3081 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3082 3083 /* 3084 * The mirror_ops handle multiple DVAs in a single BP. 3085 */ 3086 vdev_mirror_ops.vdev_op_io_start(zio); 3087 return (ZIO_PIPELINE_STOP); 3088 } 3089 3090 ASSERT3P(zio->io_logical, !=, zio); 3091 if (zio->io_type == ZIO_TYPE_WRITE) { 3092 ASSERT(spa->spa_trust_config); 3093 3094 if (zio->io_vd->vdev_removing) { 3095 ASSERT(zio->io_flags & 3096 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 3097 ZIO_FLAG_INDUCE_DAMAGE)); 3098 } 3099 } 3100 3101 /* 3102 * We keep track of time-sensitive I/Os so that the scan thread 3103 * can quickly react to certain workloads. 
In particular, we care 3104 * about non-scrubbing, top-level reads and writes with the following 3105 * characteristics: 3106 * - synchronous writes of user data to non-slog devices 3107 * - any reads of user data 3108 * When these conditions are met, adjust the timestamp of spa_last_io 3109 * which allows the scan thread to adjust its workload accordingly. 3110 */ 3111 if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 3112 vd == vd->vdev_top && !vd->vdev_islog && 3113 zio->io_bookmark.zb_objset != DMU_META_OBJSET && 3114 zio->io_txg != spa_syncing_txg(spa)) { 3115 uint64_t old = spa->spa_last_io; 3116 uint64_t new = ddi_get_lbolt64(); 3117 if (old != new) 3118 (void) atomic_cas_64(&spa->spa_last_io, old, new); 3119 } 3120 3121 align = 1ULL << vd->vdev_top->vdev_ashift; 3122 3123 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 3124 P2PHASE(zio->io_size, align) != 0) { 3125 /* Transform logical writes to be a full physical block size. */ 3126 uint64_t asize = P2ROUNDUP(zio->io_size, align); 3127 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3128 ASSERT(vd == vd->vdev_top); 3129 if (zio->io_type == ZIO_TYPE_WRITE) { 3130 abd_copy(abuf, zio->io_abd, zio->io_size); 3131 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3132 } 3133 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3134 } 3135 3136 /* 3137 * If this is not a physical io, make sure that it is properly aligned 3138 * before proceeding. 3139 */ 3140 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 3141 ASSERT0(P2PHASE(zio->io_offset, align)); 3142 ASSERT0(P2PHASE(zio->io_size, align)); 3143 } else { 3144 /* 3145 * For physical writes, we allow 512b aligned writes and assume 3146 * the device will perform a read-modify-write as necessary. 3147 */ 3148 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 3149 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 3150 } 3151 3152 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 3153 3154 /* 3155 * If this is a repair I/O, and there's no self-healing involved -- 3156 * that is, we're just resilvering what we expect to resilver -- 3157 * then don't do the I/O unless zio's txg is actually in vd's DTL. 3158 * This prevents spurious resilvering with nested replication. 3159 * For example, given a mirror of mirrors, (A+B)+(C+D), if only 3160 * A is out of date, we'll read from C+D, then use the data to 3161 * resilver A+B -- but we don't actually want to resilver B, just A. 3162 * The top-level mirror has no way to know this, so instead we just 3163 * discard unnecessary repairs as we work our way down the vdev tree. 3164 * The same logic applies to any form of nested replication: 3165 * ditto + mirror, RAID-Z + replacing, etc. This covers them all. 
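         *
         * (Editor's note: a delegated i/o carries io_txg == 0, which is why
         * the test below insists on zio->io_txg != 0 before consulting the
         * DTL.)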
3166 */ 3167 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 3168 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 3169 zio->io_txg != 0 && /* not a delegated i/o */ 3170 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 3171 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3172 zio_vdev_io_bypass(zio); 3173 return (ZIO_PIPELINE_CONTINUE); 3174 } 3175 3176 if (vd->vdev_ops->vdev_op_leaf && 3177 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 3178 3179 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) 3180 return (ZIO_PIPELINE_CONTINUE); 3181 3182 if ((zio = vdev_queue_io(zio)) == NULL) 3183 return (ZIO_PIPELINE_STOP); 3184 3185 if (!vdev_accessible(vd, zio)) { 3186 zio->io_error = SET_ERROR(ENXIO); 3187 zio_interrupt(zio); 3188 return (ZIO_PIPELINE_STOP); 3189 } 3190 } 3191 3192 vd->vdev_ops->vdev_op_io_start(zio); 3193 return (ZIO_PIPELINE_STOP); 3194 } 3195 3196 static int 3197 zio_vdev_io_done(zio_t *zio) 3198 { 3199 vdev_t *vd = zio->io_vd; 3200 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 3201 boolean_t unexpected_error = B_FALSE; 3202 3203 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3204 return (ZIO_PIPELINE_STOP); 3205 } 3206 3207 ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 3208 3209 if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 3210 3211 vdev_queue_io_done(zio); 3212 3213 if (zio->io_type == ZIO_TYPE_WRITE) 3214 vdev_cache_write(zio); 3215 3216 if (zio_injection_enabled && zio->io_error == 0) 3217 zio->io_error = zio_handle_device_injection(vd, 3218 zio, EIO); 3219 3220 if (zio_injection_enabled && zio->io_error == 0) 3221 zio->io_error = zio_handle_label_injection(zio, EIO); 3222 3223 if (zio->io_error) { 3224 if (!vdev_accessible(vd, zio)) { 3225 zio->io_error = SET_ERROR(ENXIO); 3226 } else { 3227 unexpected_error = B_TRUE; 3228 } 3229 } 3230 } 3231 3232 ops->vdev_op_io_done(zio); 3233 3234 if (unexpected_error) 3235 VERIFY(vdev_probe(vd, zio) == NULL); 3236 3237 return (ZIO_PIPELINE_CONTINUE); 3238 } 3239 3240 /* 3241 * For non-raidz ZIOs, we can just copy aside the bad data read from the 3242 * disk, and use that to finish the checksum ereport later. 3243 */ 3244 static void 3245 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 3246 const void *good_buf) 3247 { 3248 /* no processing needed */ 3249 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 3250 } 3251 3252 /*ARGSUSED*/ 3253 void 3254 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 3255 { 3256 void *buf = zio_buf_alloc(zio->io_size); 3257 3258 abd_copy_to_buf(buf, zio->io_abd, zio->io_size); 3259 3260 zcr->zcr_cbinfo = zio->io_size; 3261 zcr->zcr_cbdata = buf; 3262 zcr->zcr_finish = zio_vsd_default_cksum_finish; 3263 zcr->zcr_free = zio_buf_free; 3264 } 3265 3266 static int 3267 zio_vdev_io_assess(zio_t *zio) 3268 { 3269 vdev_t *vd = zio->io_vd; 3270 3271 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3272 return (ZIO_PIPELINE_STOP); 3273 } 3274 3275 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3276 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3277 3278 if (zio->io_vsd != NULL) { 3279 zio->io_vsd_ops->vsd_free(zio); 3280 zio->io_vsd = NULL; 3281 } 3282 3283 if (zio_injection_enabled && zio->io_error == 0) 3284 zio->io_error = zio_handle_fault_injection(zio, EIO); 3285 3286 /* 3287 * If the I/O failed, determine whether we should attempt to retry it. 
3288 * 3289 * On retry, we cut in line in the issue queue, since we don't want 3290 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 3291 */ 3292 if (zio->io_error && vd == NULL && 3293 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3294 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3295 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3296 zio->io_error = 0; 3297 zio->io_flags |= ZIO_FLAG_IO_RETRY | 3298 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3299 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 3300 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 3301 zio_requeue_io_start_cut_in_line); 3302 return (ZIO_PIPELINE_STOP); 3303 } 3304 3305 /* 3306 * If we got an error on a leaf device, convert it to ENXIO 3307 * if the device is not accessible at all. 3308 */ 3309 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3310 !vdev_accessible(vd, zio)) 3311 zio->io_error = SET_ERROR(ENXIO); 3312 3313 /* 3314 * If we can't write to an interior vdev (mirror or RAID-Z), 3315 * set vdev_cant_write so that we stop trying to allocate from it. 3316 */ 3317 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 3318 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3319 vd->vdev_cant_write = B_TRUE; 3320 } 3321 3322 /* 3323 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3324 * attempts will ever succeed. In this case we set a persistent bit so 3325 * that we don't bother with it in the future. 3326 */ 3327 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 3328 zio->io_type == ZIO_TYPE_IOCTL && 3329 zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 3330 vd->vdev_nowritecache = B_TRUE; 3331 3332 if (zio->io_error) 3333 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3334 3335 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 3336 zio->io_physdone != NULL) { 3337 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 3338 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 3339 zio->io_physdone(zio->io_logical); 3340 } 3341 3342 return (ZIO_PIPELINE_CONTINUE); 3343 } 3344 3345 void 3346 zio_vdev_io_reissue(zio_t *zio) 3347 { 3348 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3349 ASSERT(zio->io_error == 0); 3350 3351 zio->io_stage >>= 1; 3352 } 3353 3354 void 3355 zio_vdev_io_redone(zio_t *zio) 3356 { 3357 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 3358 3359 zio->io_stage >>= 1; 3360 } 3361 3362 void 3363 zio_vdev_io_bypass(zio_t *zio) 3364 { 3365 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3366 ASSERT(zio->io_error == 0); 3367 3368 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 3369 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 3370 } 3371 3372 /* 3373 * ========================================================================== 3374 * Generate and verify checksums 3375 * ========================================================================== 3376 */ 3377 static int 3378 zio_checksum_generate(zio_t *zio) 3379 { 3380 blkptr_t *bp = zio->io_bp; 3381 enum zio_checksum checksum; 3382 3383 if (bp == NULL) { 3384 /* 3385 * This is zio_write_phys(). 3386 * We're either generating a label checksum, or none at all. 
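                 *
                 * (Editor's note: with no bp there is no block pointer to
                 * carry a checksum type, so zp_checksum must be either
                 * ZIO_CHECKSUM_OFF or ZIO_CHECKSUM_LABEL, as the assert
                 * below enforces.)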
3387                  */
3388                 checksum = zio->io_prop.zp_checksum;
3389 
3390                 if (checksum == ZIO_CHECKSUM_OFF)
3391                         return (ZIO_PIPELINE_CONTINUE);
3392 
3393                 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
3394         } else {
3395                 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
3396                         ASSERT(!IO_IS_ALLOCATING(zio));
3397                         checksum = ZIO_CHECKSUM_GANG_HEADER;
3398                 } else {
3399                         checksum = BP_GET_CHECKSUM(bp);
3400                 }
3401         }
3402 
3403         zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
3404 
3405         return (ZIO_PIPELINE_CONTINUE);
3406 }
3407 
3408 static int
3409 zio_checksum_verify(zio_t *zio)
3410 {
3411         zio_bad_cksum_t info;
3412         blkptr_t *bp = zio->io_bp;
3413         int error;
3414 
3415         ASSERT(zio->io_vd != NULL);
3416 
3417         if (bp == NULL) {
3418                 /*
3419                  * This is zio_read_phys().
3420                  * We're either verifying a label checksum, or nothing at all.
3421                  */
3422                 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
3423                         return (ZIO_PIPELINE_CONTINUE);
3424 
3425                 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
3426         }
3427 
3428         if ((error = zio_checksum_error(zio, &info)) != 0) {
3429                 zio->io_error = error;
3430                 if (error == ECKSUM &&
3431                     !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
3432                         zfs_ereport_start_checksum(zio->io_spa,
3433                             zio->io_vd, zio, zio->io_offset,
3434                             zio->io_size, NULL, &info);
3435                 }
3436         }
3437 
3438         return (ZIO_PIPELINE_CONTINUE);
3439 }
3440 
3441 /*
3442  * Called by RAID-Z to ensure we don't compute the checksum twice.
3443  */
3444 void
3445 zio_checksum_verified(zio_t *zio)
3446 {
3447         zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
3448 }
3449 
3450 /*
3451  * ==========================================================================
3452  * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
3453  * An error of 0 indicates success.  ENXIO indicates whole-device failure,
3454  * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
3455  * indicate errors that are specific to one I/O, and most likely permanent.
3456  * Any other error is presumed to be worse because we weren't expecting it.
3457  * ==========================================================================
3458  */
3459 int
3460 zio_worst_error(int e1, int e2)
3461 {
3462         static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
3463         int r1, r2;
3464 
3465         for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
3466                 if (e1 == zio_error_rank[r1])
3467                         break;
3468 
3469         for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
3470                 if (e2 == zio_error_rank[r2])
3471                         break;
3472 
3473         return (r1 > r2 ?
e1 : e2); 3474 } 3475 3476 /* 3477 * ========================================================================== 3478 * I/O completion 3479 * ========================================================================== 3480 */ 3481 static int 3482 zio_ready(zio_t *zio) 3483 { 3484 blkptr_t *bp = zio->io_bp; 3485 zio_t *pio, *pio_next; 3486 zio_link_t *zl = NULL; 3487 3488 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, 3489 ZIO_WAIT_READY)) { 3490 return (ZIO_PIPELINE_STOP); 3491 } 3492 3493 if (zio->io_ready) { 3494 ASSERT(IO_IS_ALLOCATING(zio)); 3495 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 3496 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3497 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3498 3499 zio->io_ready(zio); 3500 } 3501 3502 if (bp != NULL && bp != &zio->io_bp_copy) 3503 zio->io_bp_copy = *bp; 3504 3505 if (zio->io_error != 0) { 3506 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3507 3508 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3509 ASSERT(IO_IS_ALLOCATING(zio)); 3510 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3511 /* 3512 * We were unable to allocate anything, unreserve and 3513 * issue the next I/O to allocate. 3514 */ 3515 metaslab_class_throttle_unreserve( 3516 spa_normal_class(zio->io_spa), 3517 zio->io_prop.zp_copies, zio->io_allocator, zio); 3518 zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 3519 } 3520 } 3521 3522 mutex_enter(&zio->io_lock); 3523 zio->io_state[ZIO_WAIT_READY] = 1; 3524 pio = zio_walk_parents(zio, &zl); 3525 mutex_exit(&zio->io_lock); 3526 3527 /* 3528 * As we notify zio's parents, new parents could be added. 3529 * New parents go to the head of zio's io_parent_list, however, 3530 * so we will (correctly) not notify them. The remainder of zio's 3531 * io_parent_list, from 'pio_next' onward, cannot change because 3532 * all parents must wait for us to be done before they can be done. 3533 */ 3534 for (; pio != NULL; pio = pio_next) { 3535 pio_next = zio_walk_parents(zio, &zl); 3536 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 3537 } 3538 3539 if (zio->io_flags & ZIO_FLAG_NODATA) { 3540 if (BP_IS_GANG(bp)) { 3541 zio->io_flags &= ~ZIO_FLAG_NODATA; 3542 } else { 3543 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 3544 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 3545 } 3546 } 3547 3548 if (zio_injection_enabled && 3549 zio->io_spa->spa_syncing_txg == zio->io_txg) 3550 zio_handle_ignored_writes(zio); 3551 3552 return (ZIO_PIPELINE_CONTINUE); 3553 } 3554 3555 /* 3556 * Update the allocation throttle accounting. 3557 */ 3558 static void 3559 zio_dva_throttle_done(zio_t *zio) 3560 { 3561 zio_t *lio = zio->io_logical; 3562 zio_t *pio = zio_unique_parent(zio); 3563 vdev_t *vd = zio->io_vd; 3564 int flags = METASLAB_ASYNC_ALLOC; 3565 3566 ASSERT3P(zio->io_bp, !=, NULL); 3567 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 3568 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 3569 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 3570 ASSERT(vd != NULL); 3571 ASSERT3P(vd, ==, vd->vdev_top); 3572 ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY))); 3573 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 3574 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 3575 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 3576 3577 /* 3578 * Parents of gang children can have two flavors -- ones that 3579 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 3580 * and ones that allocated the constituent blocks. 
The allocation 3581 * throttle needs to know the allocating parent zio so we must find 3582 * it here. 3583 */ 3584 if (pio->io_child_type == ZIO_CHILD_GANG) { 3585 /* 3586 * If our parent is a rewrite gang child then our grandparent 3587 * would have been the one that performed the allocation. 3588 */ 3589 if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 3590 pio = zio_unique_parent(pio); 3591 flags |= METASLAB_GANG_CHILD; 3592 } 3593 3594 ASSERT(IO_IS_ALLOCATING(pio)); 3595 ASSERT3P(zio, !=, zio->io_logical); 3596 ASSERT(zio->io_logical != NULL); 3597 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 3598 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 3599 3600 mutex_enter(&pio->io_lock); 3601 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags, 3602 pio->io_allocator, B_TRUE); 3603 mutex_exit(&pio->io_lock); 3604 3605 metaslab_class_throttle_unreserve(spa_normal_class(zio->io_spa), 3606 1, pio->io_allocator, pio); 3607 3608 /* 3609 * Call into the pipeline to see if there is more work that 3610 * needs to be done. If there is work to be done it will be 3611 * dispatched to another taskq thread. 3612 */ 3613 zio_allocate_dispatch(zio->io_spa, pio->io_allocator); 3614 } 3615 3616 static int 3617 zio_done(zio_t *zio) 3618 { 3619 spa_t *spa = zio->io_spa; 3620 zio_t *lio = zio->io_logical; 3621 blkptr_t *bp = zio->io_bp; 3622 vdev_t *vd = zio->io_vd; 3623 uint64_t psize = zio->io_size; 3624 zio_t *pio, *pio_next; 3625 metaslab_class_t *mc = spa_normal_class(spa); 3626 zio_link_t *zl = NULL; 3627 3628 /* 3629 * If our children haven't all completed, 3630 * wait for them and then repeat this pipeline stage. 3631 */ 3632 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) { 3633 return (ZIO_PIPELINE_STOP); 3634 } 3635 3636 /* 3637 * If the allocation throttle is enabled, then update the accounting. 3638 * We only track child I/Os that are part of an allocating async 3639 * write. We must do this since the allocation is performed 3640 * by the logical I/O but the actual write is done by child I/Os. 3641 */ 3642 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 3643 zio->io_child_type == ZIO_CHILD_VDEV) { 3644 ASSERT(mc->mc_alloc_throttle_enabled); 3645 zio_dva_throttle_done(zio); 3646 } 3647 3648 /* 3649 * If the allocation throttle is enabled, verify that 3650 * we have decremented the refcounts for every I/O that was throttled. 
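         *
         * (Editor's note: by the time an allocating async write reaches
         * zio_done(), every slot it reserved must already have been returned
         * via metaslab_class_throttle_unreserve(); the VERIFY below checks
         * that no reference for this zio remains in mc_alloc_slots.)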
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(bp != NULL);
		metaslab_group_alloc_verify(spa, zio->io_bp, zio,
		    zio->io_allocator);
		VERIFY(refcount_not_held(&mc->mc_alloc_slots[zio->io_allocator],
		    zio));
	}

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL && !BP_IS_EMBEDDED(bp)) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			char *abuf = NULL;
			abd_t *adata = zio->io_abd;

			if (asize != psize) {
				adata = abd_alloc_linear(asize, B_TRUE);
				abd_copy(adata, zio->io_abd, psize);
				abd_zero_off(adata, psize, asize - psize);
			}

			if (adata != NULL)
				abuf = abd_borrow_buf_copy(adata, asize);

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, abuf);
			zfs_ereport_free_checksum(zcr);

			if (adata != NULL)
				abd_return_buf(adata, abuf, asize);

			if (asize != psize)
				abd_free(adata);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level. We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}

	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted. This will
		 * propagate all the way to the root via zio_notify_parent().
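		 * Reexecution comes in two flavors: ZIO_REEXECUTE_NOW retries
		 * the I/O immediately, while ZIO_REEXECUTE_SUSPEND holds it
		 * until the failure condition clears and the pool is resumed.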
		 */
		ASSERT(vd == NULL && bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * This is potentially a good place to attempt combinatorial
		 * reconstruction or error correction based on checksums. It
		 * might also be a good place to send out preliminary ereports
		 * before we suspend processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;

	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down. When an I/O fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same. This percolates all the way up to the root.
		 * The root I/O will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original I/O dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them. It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended). This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		zl = NULL;
		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
		    pio = pio_next) {
			zio_link_t *remove_zl = zl;
			pio_next = zio_walk_parents(zio, &zl);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, remove_zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root I/O, so there's nothing to do
			 * but notify our parent.
			 * Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(spa, zio);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			ASSERT(zio->io_tqent.tqent_next == NULL);
			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
			    0, &zio->io_tqent);
		}
		return (ZIO_PIPELINE_STOP);
	}

	ASSERT(zio->io_child_count == 0);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	zl = NULL;
	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
		zio_link_t *remove_zl = zl;
		pio_next = zio_walk_parents(zio, &zl);
		zio_remove_child(pio, zio, remove_zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_compress,
	zio_checksum_generate,
	zio_nop_write,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_throttle,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};

/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object. For all other
 * objects, we traverse them in order (object 1 before object 2, and so on).
 * However, all of these objects are traversed while traversing object 0, since
 * the data it points to is the list of objects.
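 * A bookmark in the meta-dnode at level L therefore covers an entire range of
 * object numbers, so the two kinds of bookmarks cannot be compared
 * field-by-field.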
 * Thus, we need to convert to a
 * canonical representation so we can compare meta-dnode bookmarks to
 * non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained in
 * the blocks this bookmark refers to) by multiplying their blkid by their span
 * (the number of L0 blocks contained within one block at their level).
 * zbookmarks inside the meta-dnode calculate their object equivalent
 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 * level + 1<<31 (a value larger than any level could ever be) for their level.
 * This causes them to always compare before a bookmark in their object
 * equivalent, compare appropriately to bookmarks in other objects, and to
 * compare appropriately to other bookmarks in the meta-dnode.
 */
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the zbookmark,
	 * after converting zbookmarks inside the meta dnode to their
	 * normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	/*
	 * BP_SPANB calculates the span in blocks. For example, with a 16K
	 * indirect block size (indblkshift = 14), each indirect block holds
	 * 2^(14 - SPA_BLKPTRSHIFT) = 128 block pointers, so a level-2 block
	 * spans 128 * 128 = 16384 L0 blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);

	/*
	 * This can (theoretically) happen if the bookmarks have the same
	 * object and level, but different blkids, if the block sizes are
	 * not the same. There is presently no way to change the indirect
	 * block sizes.
	 */
	return (0);
}

/*
 * This function answers the following question: given that last_block is the
 * place our traversal stopped last time, does that guarantee that we've
 * visited every node under subtree_root? The raw output of zbookmark_compare
 * cannot answer this directly. Instead, we pass in a modified version of
 * subtree_root: by incrementing its block id and checking whether last_block
 * is at or after that point, we can tell whether having visited last_block
 * implies that all of subtree_root's children have been visited.
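 *
 * For example, with 128 block pointers per indirect block, a level-1
 * subtree_root with blkid 0 covers L0 blkids 0 through 127. Incrementing its
 * blkid yields a bookmark whose L0 equivalent is 128; last_block compares at
 * or after that bookmark exactly when every L0 block under the original
 * subtree_root has already been visited.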
 */
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT(last_block->zb_level == 0);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode. Since we don't
	 * know without examining it what object it refers to, and there's no
	 * harm in passing in this value in other cases, we always pass it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must be
	 * level 0. The indirect block size is only used to calculate the span
	 * of the bookmark, but since the bookmark must be level 0, the span is
	 * always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be
	 * sure that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}
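
/*
 * Illustrative sketch (not compiled): one way a resumable traversal might use
 * zbookmark_subtree_completed() to prune subtrees that were fully visited
 * before a saved resume point. The names below ('resume_can_skip',
 * 'resume_zb') are hypothetical; see dmu_traverse.c for the actual consumer
 * of this interface.
 */
#if 0
static boolean_t
resume_can_skip(const dnode_phys_t *dnp, const zbookmark_phys_t *zb,
    const zbookmark_phys_t *resume_zb)
{
	/*
	 * If every block under 'zb' was visited at or before the saved
	 * bookmark, the resumed traversal does not need to descend into
	 * this subtree at all.
	 */
	return (zbookmark_subtree_completed(dnp, zb, resume_zb));
}
#endif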