/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>
#include <sys/cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

boolean_t zio_dva_throttle_enabled = B_TRUE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

#define	ZIO_PIPELINE_CONTINUE		0x100
#define	ZIO_PIPELINE_STOP		0x101

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
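	/*
	 * Illustrative note (added; not in the original source): with
	 * SPA_MINBLOCKSIZE of 512, this creates a cache at every 512
	 * bytes up to 4K, then only at quarter-power-of-2 sizes, e.g.
	 * 5K, 6K, 7K, 8K, then 10K, 12K, 14K, 16K, and so on.  The
	 * fixup loop after this one points each uncached size at the
	 * next larger cache.
	 */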
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own
		 * page, to eliminate the performance overhead of trapping
		 * to the kernel when modifying a non-watched buffer that
		 * shares the page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    cflags | KMC_NOTOUCH);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
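
/*
 * Illustrative usage (added note, not in the original source): a buffer
 * must be freed with the same size it was allocated with, since the size
 * is what selects the backing kmem cache:
 *
 *	void *buf = zio_buf_alloc(size);
 *	...
 *	zio_buf_free(buf, size);
 */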

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump, thus reducing the
 * amount of kernel heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	/*
	 * Ensure that anyone expecting this zio to contain a linear ABD isn't
	 * going to get a nasty surprise when they try to access the data.
	 */
	IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size);
		abd_return_buf_copy(data, tmp, size);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
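/*
 * Added descriptive note: zio_walk_parents() and zio_walk_children() are
 * cursor-style iterators.  Initialize a zio_link_t pointer to NULL and pass
 * its address on each call; NULL is returned once the list is exhausted.
 * For example (zio_reexecute() below uses this pattern):
 *
 *	zio_link_t *zl = NULL;
 *	for (cio = zio_walk_children(pio, &zl); cio != NULL;
 *	    cio = zio_walk_children(pio, &zl))
 *		...
 */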
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}
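
/*
 * Added descriptive note: zio_wait_for_children() and zio_notify_parent()
 * implement the pipeline interlock.  A zio with outstanding children backs
 * io_stage up by one, points io_stall at the relevant child count, and
 * stops executing; the last child to complete drops the count to zero and
 * redispatches the stalled parent on a taskq.
 */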

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		/*
		 * Dispatch the parent zio in its own taskq so that
		 * the child can continue to make progress.  This also
		 * prevents overflowing the stack when we have deeply nested
		 * parent-child relationships.
		 */
		zio_taskq_dispatch(pio, type, B_FALSE);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, claim, etc)
 * ==========================================================================
 */
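/*
 * Added descriptive note: zio_create() is the common constructor behind all
 * of the zio_* creation functions below.  lsize and psize may differ only
 * for raw (compressed) writes, as the IMPLY() below asserts, and the child
 * type is inferred from the vdev and flags.
 */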
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    enum zio_flag flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_metaslab_class == NULL)
			zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}
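
/*
 * Illustrative usage (added note, not in the original source): a root
 * zio is the usual way to wait on a collection of i/os issued with
 * zio_nowait():
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, ...));
 *	error = zio_wait(rio);
 */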

void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return;

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (BP_IS_GANG(bp))
		asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}
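
/*
 * Added descriptive note: zio_write() accepts several optional callbacks,
 * any of which may be NULL: 'ready' fires when the block pointer has been
 * filled in, 'children_ready' fires once all children reach the ready
 * stage (see zio_write_compress()), 'physdone' fires for each completed
 * physical (leaf vdev) write, and 'done' fires on completion.
 */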
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	zfs_blkptr_verify(spa, bp);

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);

	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}
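
/*
 * Added descriptive note: the _phys variants below operate on raw offsets
 * of a single leaf vdev with an explicitly chosen checksum and no block
 * pointer; the 'labels' flag adds an assertion that the access falls
 * entirely within a vdev label region.
 */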
zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		ASSERT(pio->io_metaslab_class != NULL);
		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3P(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */
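
/*
 * Added descriptive note: each pipeline stage function below returns
 * either ZIO_PIPELINE_CONTINUE, to advance to the next stage set in
 * io_pipeline, or ZIO_PIPELINE_STOP, when the zio has stalled on children
 * or been handed off to another thread (see zio_execute()).
 */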

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		int psize = BPE_GET_PSIZE(bp);
		void *data = abd_borrow_buf(zio->io_abd, psize);
		decode_embedded_bp_compressed(bp, data);
		abd_return_buf_copy(zio->io_abd, data, psize);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		blkptr_t *bp = zio->io_bp;
		zio_prop_t *zp = &zio->io_prop;

		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (ZIO_PIPELINE_CONTINUE);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	}

	return (ZIO_PIPELINE_CONTINUE);
}
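
/*
 * Added descriptive note: zio_write_compress() both compresses the data
 * (unless this is a raw write or a late sync pass) and fills in the block
 * pointer, converting sufficiently small results into embedded BPs and
 * routing dedup and nop-write candidates to their dedicated pipelines.
 */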

static int
zio_write_compress(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_lsize;
	uint64_t psize = zio->io_size;
	int pass = 1;

	EQUIV(lsize != psize, (zio->io_flags & ZIO_FLAG_RAW) != 0);

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
		return (ZIO_PIPELINE_STOP);
	}

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	if (zio->io_children_ready != NULL) {
		/*
		 * Now that all our children are ready, run the callback
		 * associated with this zio in case it wants to modify the
		 * data to be written.
		 */
		ASSERT3U(zp->zp_level, >, 0);
		zio->io_children_ready(zio);
	}

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	/* If it's a compressed write that is not raw, compress the buffer. */
	if (compress != ZIO_COMPRESS_OFF && psize == lsize) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (ZIO_PIPELINE_CONTINUE);
		} else {
			/*
			 * Round the compressed size up to the ashift
			 * of the smallest-ashift device, and zero the tail.
			 * This ensures that the compressed size of the BP
			 * (and thus compressratio property) are correct,
			 * in that we charge for the padding used to fill out
			 * the last sector.
			 */
			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
			size_t rounded = (size_t)P2ROUNDUP(psize,
			    1ULL << spa->spa_min_ashift);
			if (rounded >= lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
				psize = lsize;
			} else {
				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
				abd_take_ownership_of_buf(cdata, B_TRUE);
				abd_zero_off(cdata, psize, rounded - psize);
				psize = rounded;
				zio_push_transform(zio, cdata,
				    psize, lsize, NULL);
			}
		}

		/*
		 * We were unable to handle this as an override bp, treat
		 * it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	} else {
		ASSERT3U(psize, !=, 0);
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		VERIFY3U(psize, !=, 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;

		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}
	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if ((zio->io_priority == ZIO_PRIORITY_NOW ||
	    zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
	ASSERT(zio->io_tqent.tqent_next == NULL);
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

void
zio_delay_interrupt(zio_t *zio)
{
	/*
	 * The timeout_generic() function isn't defined in userspace, so
	 * rather than trying to implement the function, the zio delay
	 * functionality has been disabled for userspace builds.
	 */

#ifdef _KERNEL
	/*
	 * If io_target_timestamp is zero, then no delay has been registered
	 * for this IO, thus jump to the end of this function and "skip" the
	 * delay; issuing it directly to the zio layer.
	 */
	if (zio->io_target_timestamp != 0) {
		hrtime_t now = gethrtime();

		if (now >= zio->io_target_timestamp) {
			/*
			 * This IO has already taken longer than the target
			 * delay to complete, so we don't want to delay it
			 * any longer; we "miss" the delay and issue it
			 * directly to the zio layer.  This is likely due to
			 * the target latency being set to a value less than
			 * the underlying hardware can satisfy (e.g. delay
			 * set to 1ms, but the disks take 10ms to complete an
			 * IO request).
			 */

			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
			    hrtime_t, now);

			zio_interrupt(zio);
		} else {
			hrtime_t diff = zio->io_target_timestamp - now;

			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
			    hrtime_t, now, hrtime_t, diff);

			(void) timeout_generic(CALLOUT_NORMAL,
			    (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
		}

		return;
	}
#endif

	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
	zio_interrupt(zio);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *	(1) the I/O completes
 *	(2) the pipeline stalls waiting for dependent child I/Os
 *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
 *	(4) the I/O is delegated by vdev-level caching or aggregation
 *	(5) the I/O is deferred due to vdev-level queueing
 *	(6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	ASSERT3U(zio->io_queued_timestamp, >, 0);

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		zio->io_pipeline_trace |= zio->io_stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN);
	ASSERT3P(zio->io_executor, ==, NULL);

	zio->io_waiter = curthread;
	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT3P(zio->io_executor, ==, NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent waiting for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O, which
		 * ensures it completes prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio);
	}

	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();
	zio_execute(zio);
}

/*
 * ==========================================================================
 * Reexecute, cancel, or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_flags |= ZIO_FLAG_REEXECUTED;
	pio->io_pipeline_trace = 0;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	zio_link_t *zl = NULL;
	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio, &zl);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on it.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
		pio->io_queued_timestamp = gethrtime();
		zio_execute(pio);
	}
}

void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = reason;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = ZIO_SUSPEND_NONE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}
1821 */ 1822 mutex_enter(&spa->spa_suspend_lock); 1823 spa->spa_suspended = ZIO_SUSPEND_NONE; 1824 cv_broadcast(&spa->spa_suspend_cv); 1825 pio = spa->spa_suspend_zio_root; 1826 spa->spa_suspend_zio_root = NULL; 1827 mutex_exit(&spa->spa_suspend_lock); 1828 1829 if (pio == NULL) 1830 return (0); 1831 1832 zio_reexecute(pio); 1833 return (zio_wait(pio)); 1834 } 1835 1836 void 1837 zio_resume_wait(spa_t *spa) 1838 { 1839 mutex_enter(&spa->spa_suspend_lock); 1840 while (spa_suspended(spa)) 1841 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 1842 mutex_exit(&spa->spa_suspend_lock); 1843 } 1844 1845 /* 1846 * ========================================================================== 1847 * Gang blocks. 1848 * 1849 * A gang block is a collection of small blocks that looks to the DMU 1850 * like one large block. When zio_dva_allocate() cannot find a block 1851 * of the requested size, due to either severe fragmentation or the pool 1852 * being nearly full, it calls zio_write_gang_block() to construct the 1853 * block from smaller fragments. 1854 * 1855 * A gang block consists of a gang header (zio_gbh_phys_t) and up to 1856 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 1857 * an indirect block: it's an array of block pointers. It consumes 1858 * only one sector and hence is allocatable regardless of fragmentation. 1859 * The gang header's bps point to its gang members, which hold the data. 1860 * 1861 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 1862 * as the verifier to ensure uniqueness of the SHA256 checksum. 1863 * Critically, the gang block bp's blk_cksum is the checksum of the data, 1864 * not the gang header. This ensures that data block signatures (needed for 1865 * deduplication) are independent of how the block is physically stored. 1866 * 1867 * Gang blocks can be nested: a gang member may itself be a gang block. 1868 * Thus every gang block is a tree in which root and all interior nodes are 1869 * gang headers, and the leaves are normal blocks that contain user data. 1870 * The root of the gang tree is called the gang leader. 1871 * 1872 * To perform any operation (read, rewrite, free, claim) on a gang block, 1873 * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1874 * in the io_gang_tree field of the original logical i/o by recursively 1875 * reading the gang leader and all gang headers below it. This yields 1876 * an in-core tree containing the contents of every gang header and the 1877 * bps for every constituent of the gang block. 1878 * 1879 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1880 * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1881 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1882 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1883 * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1884 * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1885 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1886 * of the gang header plus zio_checksum_compute() of the data to update the 1887 * gang header's blk_cksum as described above. 1888 * 1889 * The two-phase assemble/issue model solves the problem of partial failure -- 1890 * what if you'd freed part of a gang block but then couldn't read the 1891 * gang header for another part? 
Assembling the entire gang tree first 1892 * ensures that all the necessary gang header I/O has succeeded before 1893 * starting the actual work of free, claim, or write. Once the gang tree 1894 * is assembled, free and claim are in-memory operations that cannot fail. 1895 * 1896 * In the event that a gang write fails, zio_dva_unallocate() walks the 1897 * gang tree to immediately free (i.e. insert back into the space map) 1898 * everything we've allocated. This ensures that we don't get ENOSPC 1899 * errors during repeated suspend/resume cycles due to a flaky device. 1900 * 1901 * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1902 * the gang tree, we won't modify the block, so we can safely defer the free 1903 * (knowing that the block is still intact). If we *can* assemble the gang 1904 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1905 * each constituent bp and we can allocate a new block on the next sync pass. 1906 * 1907 * In all cases, the gang tree allows complete recovery from partial failure. 1908 * ========================================================================== 1909 */ 1910 1911 static void 1912 zio_gang_issue_func_done(zio_t *zio) 1913 { 1914 abd_put(zio->io_abd); 1915 } 1916 1917 static zio_t * 1918 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1919 uint64_t offset) 1920 { 1921 if (gn != NULL) 1922 return (pio); 1923 1924 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 1925 BP_GET_PSIZE(bp), zio_gang_issue_func_done, 1926 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1927 &pio->io_bookmark)); 1928 } 1929 1930 static zio_t * 1931 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1932 uint64_t offset) 1933 { 1934 zio_t *zio; 1935 1936 if (gn != NULL) { 1937 abd_t *gbh_abd = 1938 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1939 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1940 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 1941 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1942 &pio->io_bookmark); 1943 /* 1944 * As we rewrite each gang header, the pipeline will compute 1945 * a new gang block header checksum for it; but no one will 1946 * compute a new data checksum, so we do that here. The one 1947 * exception is the gang leader: the pipeline already computed 1948 * its data checksum because that stage precedes gang assembly. 1949 * (Presently, nothing actually uses interior data checksums; 1950 * this is just good hygiene.) 1951 */ 1952 if (gn != pio->io_gang_leader->io_gang_tree) { 1953 abd_t *buf = abd_get_offset(data, offset); 1954 1955 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 1956 buf, BP_GET_PSIZE(bp)); 1957 1958 abd_put(buf); 1959 } 1960 /* 1961 * If we are here to damage data for testing purposes, 1962 * leave the GBH alone so that we can detect the damage. 
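* Stripping ZIO_VDEV_IO_STAGES from the child's pipeline below means the rewritten header is never actually issued to disk, so the induced damage survives on disk where it can later be detected.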
1963 */ 1964 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 1965 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 1966 } else { 1967 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1968 abd_get_offset(data, offset), BP_GET_PSIZE(bp), 1969 zio_gang_issue_func_done, NULL, pio->io_priority, 1970 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1971 } 1972 1973 return (zio); 1974 } 1975 1976 /* ARGSUSED */ 1977 static zio_t * 1978 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1979 uint64_t offset) 1980 { 1981 return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 1982 ZIO_GANG_CHILD_FLAGS(pio))); 1983 } 1984 1985 /* ARGSUSED */ 1986 static zio_t * 1987 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1988 uint64_t offset) 1989 { 1990 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 1991 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1992 } 1993 1994 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 1995 NULL, 1996 zio_read_gang, 1997 zio_rewrite_gang, 1998 zio_free_gang, 1999 zio_claim_gang, 2000 NULL 2001 }; 2002 2003 static void zio_gang_tree_assemble_done(zio_t *zio); 2004 2005 static zio_gang_node_t * 2006 zio_gang_node_alloc(zio_gang_node_t **gnpp) 2007 { 2008 zio_gang_node_t *gn; 2009 2010 ASSERT(*gnpp == NULL); 2011 2012 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2013 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2014 *gnpp = gn; 2015 2016 return (gn); 2017 } 2018 2019 static void 2020 zio_gang_node_free(zio_gang_node_t **gnpp) 2021 { 2022 zio_gang_node_t *gn = *gnpp; 2023 2024 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2025 ASSERT(gn->gn_child[g] == NULL); 2026 2027 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2028 kmem_free(gn, sizeof (*gn)); 2029 *gnpp = NULL; 2030 } 2031 2032 static void 2033 zio_gang_tree_free(zio_gang_node_t **gnpp) 2034 { 2035 zio_gang_node_t *gn = *gnpp; 2036 2037 if (gn == NULL) 2038 return; 2039 2040 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2041 zio_gang_tree_free(&gn->gn_child[g]); 2042 2043 zio_gang_node_free(gnpp); 2044 } 2045 2046 static void 2047 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2048 { 2049 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2050 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2051 2052 ASSERT(gio->io_gang_leader == gio); 2053 ASSERT(BP_IS_GANG(bp)); 2054 2055 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2056 zio_gang_tree_assemble_done, gn, gio->io_priority, 2057 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2058 } 2059 2060 static void 2061 zio_gang_tree_assemble_done(zio_t *zio) 2062 { 2063 zio_t *gio = zio->io_gang_leader; 2064 zio_gang_node_t *gn = zio->io_private; 2065 blkptr_t *bp = zio->io_bp; 2066 2067 ASSERT(gio == zio_unique_parent(zio)); 2068 ASSERT(zio->io_child_count == 0); 2069 2070 if (zio->io_error) 2071 return; 2072 2073 /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2074 if (BP_SHOULD_BYTESWAP(bp)) 2075 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2076 2077 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2078 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 2079 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2080 2081 abd_put(zio->io_abd); 2082 2083 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2084 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2085 if (!BP_IS_GANG(gbp)) 2086 continue; 2087 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2088 } 2089 } 2090 2091 static void 2092 zio_gang_tree_issue(zio_t *pio, 
zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2093 uint64_t offset) 2094 { 2095 zio_t *gio = pio->io_gang_leader; 2096 zio_t *zio; 2097 2098 ASSERT(BP_IS_GANG(bp) == !!gn); 2099 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2100 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2101 2102 /* 2103 * If you're a gang header, your data is in gn->gn_gbh. 2104 * If you're a gang member, your data is in 'data' and gn == NULL. 2105 */ 2106 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2107 2108 if (gn != NULL) { 2109 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2110 2111 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2112 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2113 if (BP_IS_HOLE(gbp)) 2114 continue; 2115 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2116 offset); 2117 offset += BP_GET_PSIZE(gbp); 2118 } 2119 } 2120 2121 if (gn == gio->io_gang_tree) 2122 ASSERT3U(gio->io_size, ==, offset); 2123 2124 if (zio != pio) 2125 zio_nowait(zio); 2126 } 2127 2128 static int 2129 zio_gang_assemble(zio_t *zio) 2130 { 2131 blkptr_t *bp = zio->io_bp; 2132 2133 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2134 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2135 2136 zio->io_gang_leader = zio; 2137 2138 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2139 2140 return (ZIO_PIPELINE_CONTINUE); 2141 } 2142 2143 static int 2144 zio_gang_issue(zio_t *zio) 2145 { 2146 blkptr_t *bp = zio->io_bp; 2147 2148 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2149 return (ZIO_PIPELINE_STOP); 2150 } 2151 2152 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2153 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2154 2155 if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2156 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2157 0); 2158 else 2159 zio_gang_tree_free(&zio->io_gang_tree); 2160 2161 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2162 2163 return (ZIO_PIPELINE_CONTINUE); 2164 } 2165 2166 static void 2167 zio_write_gang_member_ready(zio_t *zio) 2168 { 2169 zio_t *pio = zio_unique_parent(zio); 2170 zio_t *gio = zio->io_gang_leader; 2171 dva_t *cdva = zio->io_bp->blk_dva; 2172 dva_t *pdva = pio->io_bp->blk_dva; 2173 uint64_t asize; 2174 2175 if (BP_IS_HOLE(zio->io_bp)) 2176 return; 2177 2178 ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2179 2180 ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2181 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2182 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2183 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 2184 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2185 2186 mutex_enter(&pio->io_lock); 2187 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 2188 ASSERT(DVA_GET_GANG(&pdva[d])); 2189 asize = DVA_GET_ASIZE(&pdva[d]); 2190 asize += DVA_GET_ASIZE(&cdva[d]); 2191 DVA_SET_ASIZE(&pdva[d], asize); 2192 } 2193 mutex_exit(&pio->io_lock); 2194 } 2195 2196 static void 2197 zio_write_gang_done(zio_t *zio) 2198 { 2199 /* 2200 * The io_abd field will be NULL for a zio with no data. The io_flags 2201 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 2202 * check for it here as it is cleared in zio_ready. 
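* (zio_ready() clears the flag for gang blocks and strips the vdev stages otherwise, so by the time we get here io_abd is the only reliable indicator.)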
2203 */ 2204 if (zio->io_abd != NULL) 2205 abd_put(zio->io_abd); 2206 } 2207 2208 static int 2209 zio_write_gang_block(zio_t *pio) 2210 { 2211 spa_t *spa = pio->io_spa; 2212 metaslab_class_t *mc = spa_normal_class(spa); 2213 blkptr_t *bp = pio->io_bp; 2214 zio_t *gio = pio->io_gang_leader; 2215 zio_t *zio; 2216 zio_gang_node_t *gn, **gnpp; 2217 zio_gbh_phys_t *gbh; 2218 abd_t *gbh_abd; 2219 uint64_t txg = pio->io_txg; 2220 uint64_t resid = pio->io_size; 2221 uint64_t lsize; 2222 int copies = gio->io_prop.zp_copies; 2223 int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 2224 zio_prop_t zp; 2225 int error; 2226 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 2227 2228 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 2229 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2230 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2231 ASSERT(has_data); 2232 2233 flags |= METASLAB_ASYNC_ALLOC; 2234 VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator], 2235 pio)); 2236 2237 /* 2238 * The logical zio has already placed a reservation for 2239 * 'copies' allocation slots but gang blocks may require 2240 * additional copies. These additional copies 2241 * (i.e. gbh_copies - copies) are guaranteed to succeed 2242 * since metaslab_class_throttle_reserve() always allows 2243 * additional reservations for gang blocks. 2244 */ 2245 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2246 pio->io_allocator, pio, flags)); 2247 } 2248 2249 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 2250 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 2251 &pio->io_alloc_list, pio, pio->io_allocator); 2252 if (error) { 2253 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2254 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2255 ASSERT(has_data); 2256 2257 /* 2258 * If we failed to allocate the gang block header then 2259 * we remove any additional allocation reservations that 2260 * we placed here. The original reservation will 2261 * be removed when the logical I/O goes to the ready 2262 * stage. 2263 */ 2264 metaslab_class_throttle_unreserve(mc, 2265 gbh_copies - copies, pio->io_allocator, pio); 2266 } 2267 pio->io_error = error; 2268 return (ZIO_PIPELINE_CONTINUE); 2269 } 2270 2271 if (pio == gio) { 2272 gnpp = &gio->io_gang_tree; 2273 } else { 2274 gnpp = pio->io_private; 2275 ASSERT(pio->io_ready == zio_write_gang_member_ready); 2276 } 2277 2278 gn = zio_gang_node_alloc(gnpp); 2279 gbh = gn->gn_gbh; 2280 bzero(gbh, SPA_GANGBLOCKSIZE); 2281 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2282 2283 /* 2284 * Create the gang header. 2285 */ 2286 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2287 zio_write_gang_done, NULL, pio->io_priority, 2288 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2289 2290 /* 2291 * Create and nowait the gang children. 2292 */ 2293 for (int g = 0; resid != 0; resid -= lsize, g++) { 2294 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2295 SPA_MINBLOCKSIZE); 2296 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2297 2298 zp.zp_checksum = gio->io_prop.zp_checksum; 2299 zp.zp_compress = ZIO_COMPRESS_OFF; 2300 zp.zp_type = DMU_OT_NONE; 2301 zp.zp_level = 0; 2302 zp.zp_copies = gio->io_prop.zp_copies; 2303 zp.zp_dedup = B_FALSE; 2304 zp.zp_dedup_verify = B_FALSE; 2305 zp.zp_nopwrite = B_FALSE; 2306 2307 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 2308 has_data ? 
abd_get_offset(pio->io_abd, pio->io_size - 2309 resid) : NULL, lsize, lsize, &zp, 2310 zio_write_gang_member_ready, NULL, NULL, 2311 zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 2312 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2313 2314 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 2315 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 2316 ASSERT(has_data); 2317 2318 /* 2319 * Gang children won't throttle but we should 2320 * account for their work, so reserve an allocation 2321 * slot for them here. 2322 */ 2323 VERIFY(metaslab_class_throttle_reserve(mc, 2324 zp.zp_copies, cio->io_allocator, cio, flags)); 2325 } 2326 zio_nowait(cio); 2327 } 2328 2329 /* 2330 * Set pio's pipeline to just wait for zio to finish. 2331 */ 2332 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2333 2334 zio_nowait(zio); 2335 2336 return (ZIO_PIPELINE_CONTINUE); 2337 } 2338 2339 /* 2340 * The zio_nop_write stage in the pipeline determines if allocating a 2341 * new bp is necessary. The nopwrite feature can handle writes in 2342 * either syncing or open context (i.e. zil writes) and as a result is 2343 * mutually exclusive with dedup. 2344 * 2345 * By leveraging a cryptographically secure checksum, such as SHA256, we 2346 * can compare the checksums of the new data and the old to determine if 2347 * allocating a new block is required. Note that our requirements for 2348 * cryptographic strength are fairly weak: there can't be any accidental 2349 * hash collisions, but we don't need to be secure against intentional 2350 * (malicious) collisions. To trigger a nopwrite, you have to be able 2351 * to write the file to begin with, and triggering an incorrect (hash 2352 * collision) nopwrite is no worse than simply writing to the file. 2353 * That said, there are no known attacks against the checksum algorithms 2354 * used for nopwrite, assuming that the salt and the checksums 2355 * themselves remain secret. 2356 */ 2357 static int 2358 zio_nop_write(zio_t *zio) 2359 { 2360 blkptr_t *bp = zio->io_bp; 2361 blkptr_t *bp_orig = &zio->io_bp_orig; 2362 zio_prop_t *zp = &zio->io_prop; 2363 2364 ASSERT(BP_GET_LEVEL(bp) == 0); 2365 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 2366 ASSERT(zp->zp_nopwrite); 2367 ASSERT(!zp->zp_dedup); 2368 ASSERT(zio->io_bp_override == NULL); 2369 ASSERT(IO_IS_ALLOCATING(zio)); 2370 2371 /* 2372 * Check to see if the original bp and the new bp have matching 2373 * characteristics (i.e. same checksum, compression algorithms, etc). 2374 * If they don't then just continue with the pipeline which will 2375 * allocate a new bp. 2376 */ 2377 if (BP_IS_HOLE(bp_orig) || 2378 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 2379 ZCHECKSUM_FLAG_NOPWRITE) || 2380 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 2381 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 2382 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 2383 zp->zp_copies != BP_GET_NDVAS(bp_orig)) 2384 return (ZIO_PIPELINE_CONTINUE); 2385 2386 /* 2387 * If the checksums match then reset the pipeline so that we 2388 * avoid allocating a new bp and issuing any I/O. 
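* Copying bp_orig over bp below means the caller's write "succeeds" without any allocation or device I/O ever taking place; only the interlock stages of the pipeline still run.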
2389 */ 2390 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 2391 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 2392 ZCHECKSUM_FLAG_NOPWRITE); 2393 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 2394 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 2395 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 2396 ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 2397 sizeof (uint64_t)) == 0); 2398 2399 *bp = *bp_orig; 2400 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2401 zio->io_flags |= ZIO_FLAG_NOPWRITE; 2402 } 2403 2404 return (ZIO_PIPELINE_CONTINUE); 2405 } 2406 2407 /* 2408 * ========================================================================== 2409 * Dedup 2410 * ========================================================================== 2411 */ 2412 static void 2413 zio_ddt_child_read_done(zio_t *zio) 2414 { 2415 blkptr_t *bp = zio->io_bp; 2416 ddt_entry_t *dde = zio->io_private; 2417 ddt_phys_t *ddp; 2418 zio_t *pio = zio_unique_parent(zio); 2419 2420 mutex_enter(&pio->io_lock); 2421 ddp = ddt_phys_select(dde, bp); 2422 if (zio->io_error == 0) 2423 ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2424 2425 if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 2426 dde->dde_repair_abd = zio->io_abd; 2427 else 2428 abd_free(zio->io_abd); 2429 mutex_exit(&pio->io_lock); 2430 } 2431 2432 static int 2433 zio_ddt_read_start(zio_t *zio) 2434 { 2435 blkptr_t *bp = zio->io_bp; 2436 2437 ASSERT(BP_GET_DEDUP(bp)); 2438 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2439 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2440 2441 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2442 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2443 ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2444 ddt_phys_t *ddp = dde->dde_phys; 2445 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2446 blkptr_t blk; 2447 2448 ASSERT(zio->io_vsd == NULL); 2449 zio->io_vsd = dde; 2450 2451 if (ddp_self == NULL) 2452 return (ZIO_PIPELINE_CONTINUE); 2453 2454 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2455 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2456 continue; 2457 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2458 &blk); 2459 zio_nowait(zio_read(zio, zio->io_spa, &blk, 2460 abd_alloc_for_io(zio->io_size, B_TRUE), 2461 zio->io_size, zio_ddt_child_read_done, dde, 2462 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 2463 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 2464 } 2465 return (ZIO_PIPELINE_CONTINUE); 2466 } 2467 2468 zio_nowait(zio_read(zio, zio->io_spa, bp, 2469 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 2470 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2471 2472 return (ZIO_PIPELINE_CONTINUE); 2473 } 2474 2475 static int 2476 zio_ddt_read_done(zio_t *zio) 2477 { 2478 blkptr_t *bp = zio->io_bp; 2479 2480 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 2481 return (ZIO_PIPELINE_STOP); 2482 } 2483 2484 ASSERT(BP_GET_DEDUP(bp)); 2485 ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2486 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2487 2488 if (zio->io_child_error[ZIO_CHILD_DDT]) { 2489 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2490 ddt_entry_t *dde = zio->io_vsd; 2491 if (ddt == NULL) { 2492 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2493 return (ZIO_PIPELINE_CONTINUE); 2494 } 2495 if (dde == NULL) { 2496 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 2497 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2498 return (ZIO_PIPELINE_STOP); 2499 } 2500 if (dde->dde_repair_abd != NULL) { 2501 abd_copy(zio->io_abd, dde->dde_repair_abd, 2502 
zio->io_size); 2503 zio->io_child_error[ZIO_CHILD_DDT] = 0; 2504 } 2505 ddt_repair_done(ddt, dde); 2506 zio->io_vsd = NULL; 2507 } 2508 2509 ASSERT(zio->io_vsd == NULL); 2510 2511 return (ZIO_PIPELINE_CONTINUE); 2512 } 2513 2514 static boolean_t 2515 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2516 { 2517 spa_t *spa = zio->io_spa; 2518 boolean_t do_raw = (zio->io_flags & ZIO_FLAG_RAW); 2519 2520 /* We should never get a raw, override zio */ 2521 ASSERT(!(zio->io_bp_override && do_raw)); 2522 2523 /* 2524 * Note: we compare the original data, not the transformed data, 2525 * because when zio->io_bp is an override bp, we will not have 2526 * pushed the I/O transforms. That's an important optimization 2527 * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2528 */ 2529 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2530 zio_t *lio = dde->dde_lead_zio[p]; 2531 2532 if (lio != NULL) { 2533 return (lio->io_orig_size != zio->io_orig_size || 2534 abd_cmp(zio->io_orig_abd, lio->io_orig_abd, 2535 zio->io_orig_size) != 0); 2536 } 2537 } 2538 2539 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2540 ddt_phys_t *ddp = &dde->dde_phys[p]; 2541 2542 if (ddp->ddp_phys_birth != 0) { 2543 arc_buf_t *abuf = NULL; 2544 arc_flags_t aflags = ARC_FLAG_WAIT; 2545 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2546 blkptr_t blk = *zio->io_bp; 2547 int error; 2548 2549 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2550 2551 ddt_exit(ddt); 2552 2553 /* 2554 * Intuitively, it would make more sense to compare 2555 * io_abd than io_orig_abd in the raw case since you 2556 * don't want to look at any transformations that have 2557 * happened to the data. However, for raw I/Os the 2558 * data will actually be the same in io_abd and 2559 * io_orig_abd, so all we have to do is issue this as 2560 * a raw ARC read. 
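* (The ASSERT0(abd_cmp(...)) in the raw branch below verifies that io_abd and io_orig_abd really do match.)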
2561 */ 2562 if (do_raw) { 2563 zio_flags |= ZIO_FLAG_RAW; 2564 ASSERT3U(zio->io_size, ==, zio->io_orig_size); 2565 ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd, 2566 zio->io_size)); 2567 ASSERT3P(zio->io_transform_stack, ==, NULL); 2568 } 2569 2570 error = arc_read(NULL, spa, &blk, 2571 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 2572 zio_flags, &aflags, &zio->io_bookmark); 2573 2574 if (error == 0) { 2575 if (arc_buf_size(abuf) != zio->io_orig_size || 2576 abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 2577 zio->io_orig_size) != 0) 2578 error = SET_ERROR(EEXIST); 2579 arc_buf_destroy(abuf, &abuf); 2580 } 2581 2582 ddt_enter(ddt); 2583 return (error != 0); 2584 } 2585 } 2586 2587 return (B_FALSE); 2588 } 2589 2590 static void 2591 zio_ddt_child_write_ready(zio_t *zio) 2592 { 2593 int p = zio->io_prop.zp_copies; 2594 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2595 ddt_entry_t *dde = zio->io_private; 2596 ddt_phys_t *ddp = &dde->dde_phys[p]; 2597 zio_t *pio; 2598 2599 if (zio->io_error) 2600 return; 2601 2602 ddt_enter(ddt); 2603 2604 ASSERT(dde->dde_lead_zio[p] == zio); 2605 2606 ddt_phys_fill(ddp, zio->io_bp); 2607 2608 zio_link_t *zl = NULL; 2609 while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2610 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2611 2612 ddt_exit(ddt); 2613 } 2614 2615 static void 2616 zio_ddt_child_write_done(zio_t *zio) 2617 { 2618 int p = zio->io_prop.zp_copies; 2619 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2620 ddt_entry_t *dde = zio->io_private; 2621 ddt_phys_t *ddp = &dde->dde_phys[p]; 2622 2623 ddt_enter(ddt); 2624 2625 ASSERT(ddp->ddp_refcnt == 0); 2626 ASSERT(dde->dde_lead_zio[p] == zio); 2627 dde->dde_lead_zio[p] = NULL; 2628 2629 if (zio->io_error == 0) { 2630 zio_link_t *zl = NULL; 2631 while (zio_walk_parents(zio, &zl) != NULL) 2632 ddt_phys_addref(ddp); 2633 } else { 2634 ddt_phys_clear(ddp); 2635 } 2636 2637 ddt_exit(ddt); 2638 } 2639 2640 static void 2641 zio_ddt_ditto_write_done(zio_t *zio) 2642 { 2643 int p = DDT_PHYS_DITTO; 2644 zio_prop_t *zp = &zio->io_prop; 2645 blkptr_t *bp = zio->io_bp; 2646 ddt_t *ddt = ddt_select(zio->io_spa, bp); 2647 ddt_entry_t *dde = zio->io_private; 2648 ddt_phys_t *ddp = &dde->dde_phys[p]; 2649 ddt_key_t *ddk = &dde->dde_key; 2650 2651 ddt_enter(ddt); 2652 2653 ASSERT(ddp->ddp_refcnt == 0); 2654 ASSERT(dde->dde_lead_zio[p] == zio); 2655 dde->dde_lead_zio[p] = NULL; 2656 2657 if (zio->io_error == 0) { 2658 ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2659 ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2660 ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2661 if (ddp->ddp_phys_birth != 0) 2662 ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2663 ddt_phys_fill(ddp, bp); 2664 } 2665 2666 ddt_exit(ddt); 2667 } 2668 2669 static int 2670 zio_ddt_write(zio_t *zio) 2671 { 2672 spa_t *spa = zio->io_spa; 2673 blkptr_t *bp = zio->io_bp; 2674 uint64_t txg = zio->io_txg; 2675 zio_prop_t *zp = &zio->io_prop; 2676 int p = zp->zp_copies; 2677 int ditto_copies; 2678 zio_t *cio = NULL; 2679 zio_t *dio = NULL; 2680 ddt_t *ddt = ddt_select(spa, bp); 2681 ddt_entry_t *dde; 2682 ddt_phys_t *ddp; 2683 2684 ASSERT(BP_GET_DEDUP(bp)); 2685 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2686 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 2687 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 2688 2689 ddt_enter(ddt); 2690 dde = ddt_lookup(ddt, bp, B_TRUE); 2691 ddp = &dde->dde_phys[p]; 2692 2693 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2694 /* 2695 * If we're using a weak 
checksum, upgrade to a strong checksum 2696 * and try again. If we're already using a strong checksum, 2697 * we can't resolve it, so just convert to an ordinary write. 2698 * (And automatically e-mail a paper to Nature?) 2699 */ 2700 if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 2701 ZCHECKSUM_FLAG_DEDUP)) { 2702 zp->zp_checksum = spa_dedup_checksum(spa); 2703 zio_pop_transforms(zio); 2704 zio->io_stage = ZIO_STAGE_OPEN; 2705 BP_ZERO(bp); 2706 } else { 2707 zp->zp_dedup = B_FALSE; 2708 BP_SET_DEDUP(bp, B_FALSE); 2709 } 2710 ASSERT(!BP_GET_DEDUP(bp)); 2711 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2712 ddt_exit(ddt); 2713 return (ZIO_PIPELINE_CONTINUE); 2714 } 2715 2716 ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2717 ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2718 2719 if (ditto_copies > ddt_ditto_copies_present(dde) && 2720 dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2721 zio_prop_t czp = *zp; 2722 2723 czp.zp_copies = ditto_copies; 2724 2725 /* 2726 * If we arrived here with an override bp, we won't have run 2727 * the transform stack, so we won't have the data we need to 2728 * generate a child i/o. So, toss the override bp and restart. 2729 * This is safe, because using the override bp is just an 2730 * optimization; and it's rare, so the cost doesn't matter. 2731 */ 2732 if (zio->io_bp_override) { 2733 zio_pop_transforms(zio); 2734 zio->io_stage = ZIO_STAGE_OPEN; 2735 zio->io_pipeline = ZIO_WRITE_PIPELINE; 2736 zio->io_bp_override = NULL; 2737 BP_ZERO(bp); 2738 ddt_exit(ddt); 2739 return (ZIO_PIPELINE_CONTINUE); 2740 } 2741 2742 dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2743 zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL, 2744 NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 2745 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2746 2747 zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL); 2748 dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2749 } 2750 2751 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2752 if (ddp->ddp_phys_birth != 0) 2753 ddt_bp_fill(ddp, bp, txg); 2754 if (dde->dde_lead_zio[p] != NULL) 2755 zio_add_child(zio, dde->dde_lead_zio[p]); 2756 else 2757 ddt_phys_addref(ddp); 2758 } else if (zio->io_bp_override) { 2759 ASSERT(bp->blk_birth == txg); 2760 ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2761 ddt_phys_fill(ddp, bp); 2762 ddt_phys_addref(ddp); 2763 } else { 2764 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 2765 zio->io_orig_size, zio->io_orig_size, zp, 2766 zio_ddt_child_write_ready, NULL, NULL, 2767 zio_ddt_child_write_done, dde, zio->io_priority, 2768 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2769 2770 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 2771 dde->dde_lead_zio[p] = cio; 2772 } 2773 2774 ddt_exit(ddt); 2775 2776 if (cio) 2777 zio_nowait(cio); 2778 if (dio) 2779 zio_nowait(dio); 2780 2781 return (ZIO_PIPELINE_CONTINUE); 2782 } 2783 2784 ddt_entry_t *freedde; /* for debugging */ 2785 2786 static int 2787 zio_ddt_free(zio_t *zio) 2788 { 2789 spa_t *spa = zio->io_spa; 2790 blkptr_t *bp = zio->io_bp; 2791 ddt_t *ddt = ddt_select(spa, bp); 2792 ddt_entry_t *dde; 2793 ddt_phys_t *ddp; 2794 2795 ASSERT(BP_GET_DEDUP(bp)); 2796 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2797 2798 ddt_enter(ddt); 2799 freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2800 ddp = ddt_phys_select(dde, bp); 2801 ddt_phys_decref(ddp); 2802 ddt_exit(ddt); 2803 2804 return (ZIO_PIPELINE_CONTINUE); 2805 } 2806 2807 /* 2808 * ========================================================================== 2809 * 
Allocate and free blocks 2810 * ========================================================================== 2811 */ 2812 2813 static zio_t * 2814 zio_io_to_allocate(spa_t *spa, int allocator) 2815 { 2816 zio_t *zio; 2817 2818 ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator])); 2819 2820 zio = avl_first(&spa->spa_alloc_trees[allocator]); 2821 if (zio == NULL) 2822 return (NULL); 2823 2824 ASSERT(IO_IS_ALLOCATING(zio)); 2825 2826 /* 2827 * Try to place a reservation for this zio. If we're unable to 2828 * reserve then we throttle. 2829 */ 2830 ASSERT3U(zio->io_allocator, ==, allocator); 2831 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, 2832 zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) { 2833 return (NULL); 2834 } 2835 2836 avl_remove(&spa->spa_alloc_trees[allocator], zio); 2837 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 2838 2839 return (zio); 2840 } 2841 2842 static int 2843 zio_dva_throttle(zio_t *zio) 2844 { 2845 spa_t *spa = zio->io_spa; 2846 zio_t *nio; 2847 metaslab_class_t *mc; 2848 2849 /* locate an appropriate allocation class */ 2850 mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type, 2851 zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk); 2852 2853 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 2854 !mc->mc_alloc_throttle_enabled || 2855 zio->io_child_type == ZIO_CHILD_GANG || 2856 zio->io_flags & ZIO_FLAG_NODATA) { 2857 return (ZIO_PIPELINE_CONTINUE); 2858 } 2859 2860 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2861 2862 ASSERT3U(zio->io_queued_timestamp, >, 0); 2863 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2864 2865 zbookmark_phys_t *bm = &zio->io_bookmark; 2866 /* 2867 * We want to try to use as many allocators as possible to help improve 2868 * performance, but we also want logically adjacent IOs to be physically 2869 * adjacent to improve sequential read performance. We chunk each object 2870 * into 2^20 block regions, and then hash based on the objset, object, 2871 * level, and region to accomplish both of these goals. 2872 */ 2873 zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object, 2874 bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; 2875 mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]); 2876 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 2877 zio->io_metaslab_class = mc; 2878 avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio); 2879 nio = zio_io_to_allocate(spa, zio->io_allocator); 2880 mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]); 2881 2882 if (nio == zio) 2883 return (ZIO_PIPELINE_CONTINUE); 2884 2885 if (nio != NULL) { 2886 ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 2887 /* 2888 * We are passing control to a new zio so make sure that 2889 * it is processed by a different thread. We do this to 2890 * avoid stack overflows that can occur when parents are 2891 * throttled and children are making progress. We allow 2892 * it to go to the head of the taskq since it's already 2893 * been waiting. 
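* (Concretely: the B_TRUE argument to zio_taskq_dispatch() below is the cut-in-line flag, so nio is queued at the head of the issue taskq rather than at its tail.)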
2894 */ 2895 zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 2896 } 2897 return (ZIO_PIPELINE_STOP); 2898 } 2899 2900 static void 2901 zio_allocate_dispatch(spa_t *spa, int allocator) 2902 { 2903 zio_t *zio; 2904 2905 mutex_enter(&spa->spa_alloc_locks[allocator]); 2906 zio = zio_io_to_allocate(spa, allocator); 2907 mutex_exit(&spa->spa_alloc_locks[allocator]); 2908 if (zio == NULL) 2909 return; 2910 2911 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 2912 ASSERT0(zio->io_error); 2913 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 2914 } 2915 2916 static int 2917 zio_dva_allocate(zio_t *zio) 2918 { 2919 spa_t *spa = zio->io_spa; 2920 metaslab_class_t *mc; 2921 blkptr_t *bp = zio->io_bp; 2922 int error; 2923 int flags = 0; 2924 2925 if (zio->io_gang_leader == NULL) { 2926 ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2927 zio->io_gang_leader = zio; 2928 } 2929 2930 ASSERT(BP_IS_HOLE(bp)); 2931 ASSERT0(BP_GET_NDVAS(bp)); 2932 ASSERT3U(zio->io_prop.zp_copies, >, 0); 2933 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2934 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2935 2936 if (zio->io_flags & ZIO_FLAG_NODATA) 2937 flags |= METASLAB_DONT_THROTTLE; 2938 if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 2939 flags |= METASLAB_GANG_CHILD; 2940 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 2941 flags |= METASLAB_ASYNC_ALLOC; 2942 2943 /* 2944 * If not already chosen, locate an appropriate allocation class 2945 */ 2946 mc = zio->io_metaslab_class; 2947 if (mc == NULL) { 2948 mc = spa_preferred_class(spa, zio->io_size, 2949 zio->io_prop.zp_type, zio->io_prop.zp_level, 2950 zio->io_prop.zp_zpl_smallblk); 2951 zio->io_metaslab_class = mc; 2952 } 2953 2954 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2955 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 2956 &zio->io_alloc_list, zio, zio->io_allocator); 2957 2958 /* 2959 * Fall back to the normal class when an alloc class is full 2960 */ 2961 if (error == ENOSPC && mc != spa_normal_class(spa)) { 2962 /* 2963 * If throttling, transfer reservation over to normal class. 2964 * The io_allocator slot can remain the same even though we 2965 * are switching classes.
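* The normal-class reservation below is taken with METASLAB_MUST_RESERVE, so the transfer itself cannot fail.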
2966 */ 2967 if (mc->mc_alloc_throttle_enabled && 2968 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { 2969 metaslab_class_throttle_unreserve(mc, 2970 zio->io_prop.zp_copies, zio->io_allocator, zio); 2971 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; 2972 2973 mc = spa_normal_class(spa); 2974 VERIFY(metaslab_class_throttle_reserve(mc, 2975 zio->io_prop.zp_copies, zio->io_allocator, zio, 2976 flags | METASLAB_MUST_RESERVE)); 2977 } else { 2978 mc = spa_normal_class(spa); 2979 } 2980 zio->io_metaslab_class = mc; 2981 2982 error = metaslab_alloc(spa, mc, zio->io_size, bp, 2983 zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 2984 &zio->io_alloc_list, zio, zio->io_allocator); 2985 } 2986 2987 if (error != 0) { 2988 zfs_dbgmsg("%s: metaslab allocation failure: zio %p, " 2989 "size %llu, error %d", spa_name(spa), zio, zio->io_size, 2990 error); 2991 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2992 return (zio_write_gang_block(zio)); 2993 zio->io_error = error; 2994 } 2995 2996 return (ZIO_PIPELINE_CONTINUE); 2997 } 2998 2999 static int 3000 zio_dva_free(zio_t *zio) 3001 { 3002 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3003 3004 return (ZIO_PIPELINE_CONTINUE); 3005 } 3006 3007 static int 3008 zio_dva_claim(zio_t *zio) 3009 { 3010 int error; 3011 3012 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3013 if (error) 3014 zio->io_error = error; 3015 3016 return (ZIO_PIPELINE_CONTINUE); 3017 } 3018 3019 /* 3020 * Undo an allocation. This is used by zio_done() when an I/O fails 3021 * and we want to give back the block we just allocated. 3022 * This handles both normal blocks and gang blocks. 3023 */ 3024 static void 3025 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3026 { 3027 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 3028 ASSERT(zio->io_bp_override == NULL); 3029 3030 if (!BP_IS_HOLE(bp)) 3031 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 3032 3033 if (gn != NULL) { 3034 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3035 zio_dva_unallocate(zio, gn->gn_child[g], 3036 &gn->gn_gbh->zg_blkptr[g]); 3037 } 3038 } 3039 } 3040 3041 /* 3042 * Try to allocate an intent log block. Return 0 on success, errno on failure. 3043 */ 3044 int 3045 zio_alloc_zil(spa_t *spa, uint64_t objset, uint64_t txg, blkptr_t *new_bp, 3046 blkptr_t *old_bp, uint64_t size, boolean_t *slog) 3047 { 3048 int error = 1; 3049 zio_alloc_list_t io_alloc_list; 3050 3051 ASSERT(txg > spa_syncing_txg(spa)); 3052 3053 metaslab_trace_init(&io_alloc_list); 3054 3055 /* 3056 * Block pointer fields are useful to metaslabs for stats and debugging. 3057 * Fill in the obvious ones before calling into metaslab_alloc(). 3058 */ 3059 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3060 BP_SET_PSIZE(new_bp, size); 3061 BP_SET_LEVEL(new_bp, 0); 3062 3063 /* 3064 * When allocating a zil block, we don't have information about 3065 * the final destination of the block except the objset it's part 3066 * of, so we just hash the objset ID to pick the allocator to get 3067 * some parallelism. 
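* We try the log (slog) class first and fall back to the normal class on failure; *slog reports to the caller which class the block was ultimately allocated from.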
3068 */ 3069 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3070 txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL, 3071 cityhash4(0, 0, 0, objset) % spa->spa_alloc_count); 3072 if (error == 0) { 3073 *slog = TRUE; 3074 } else { 3075 error = metaslab_alloc(spa, spa_normal_class(spa), size, 3076 new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, 3077 &io_alloc_list, NULL, cityhash4(0, 0, 0, objset) % 3078 spa->spa_alloc_count); 3079 if (error == 0) 3080 *slog = FALSE; 3081 } 3082 metaslab_trace_fini(&io_alloc_list); 3083 3084 if (error == 0) { 3085 BP_SET_LSIZE(new_bp, size); 3086 BP_SET_PSIZE(new_bp, size); 3087 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 3088 BP_SET_CHECKSUM(new_bp, 3089 spa_version(spa) >= SPA_VERSION_SLIM_ZIL 3090 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3091 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3092 BP_SET_LEVEL(new_bp, 0); 3093 BP_SET_DEDUP(new_bp, 0); 3094 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3095 } else { 3096 zfs_dbgmsg("%s: zil block allocation failure: " 3097 "size %llu, error %d", spa_name(spa), size, error); 3098 } 3099 3100 return (error); 3101 } 3102 3103 /* 3104 * ========================================================================== 3105 * Read and write to physical devices 3106 * ========================================================================== 3107 */ 3108 3109 3110 /* 3111 * Issue an I/O to the underlying vdev. Typically the issue pipeline 3112 * stops after this stage and will resume upon I/O completion. 3113 * However, there are instances where the vdev layer may need to 3114 * continue the pipeline when an I/O was not issued. Since the I/O 3115 * that was sent to the vdev layer might be different than the one 3116 * currently active in the pipeline (see vdev_queue_io()), we explicitly 3117 * force the underlying vdev layers to call either zio_execute() or 3118 * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3119 */ 3120 static int 3121 zio_vdev_io_start(zio_t *zio) 3122 { 3123 vdev_t *vd = zio->io_vd; 3124 uint64_t align; 3125 spa_t *spa = zio->io_spa; 3126 3127 ASSERT(zio->io_error == 0); 3128 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3129 3130 if (vd == NULL) { 3131 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3132 spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3133 3134 /* 3135 * The mirror_ops handle multiple DVAs in a single BP. 3136 */ 3137 vdev_mirror_ops.vdev_op_io_start(zio); 3138 return (ZIO_PIPELINE_STOP); 3139 } 3140 3141 ASSERT3P(zio->io_logical, !=, zio); 3142 if (zio->io_type == ZIO_TYPE_WRITE) { 3143 ASSERT(spa->spa_trust_config); 3144 3145 if (zio->io_vd->vdev_removing) { 3146 /* 3147 * Note: the code can handle other kinds of writes, 3148 * but we don't expect them. 3149 */ 3150 ASSERT(zio->io_flags & 3151 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 3152 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 3153 } 3154 } 3155 3156 /* 3157 * We keep track of time-sensitive I/Os so that the scan thread 3158 * can quickly react to certain workloads. In particular, we care 3159 * about non-scrubbing, top-level reads and writes with the following 3160 * characteristics: 3161 * - synchronous writes of user data to non-slog devices 3162 * - any reads of user data 3163 * When these conditions are met, adjust the timestamp of spa_last_io 3164 * which allows the scan thread to adjust its workload accordingly. 
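* The update below is a single lockless atomic_cas_64() of the current lbolt value; if the cas loses a race, some other I/O has already advanced spa_last_io, which is just as good.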
3165 */ 3166 if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 3167 vd == vd->vdev_top && !vd->vdev_islog && 3168 zio->io_bookmark.zb_objset != DMU_META_OBJSET && 3169 zio->io_txg != spa_syncing_txg(spa)) { 3170 uint64_t old = spa->spa_last_io; 3171 uint64_t new = ddi_get_lbolt64(); 3172 if (old != new) 3173 (void) atomic_cas_64(&spa->spa_last_io, old, new); 3174 } 3175 3176 align = 1ULL << vd->vdev_top->vdev_ashift; 3177 3178 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 3179 P2PHASE(zio->io_size, align) != 0) { 3180 /* Transform logical writes to be a full physical block size. */ 3181 uint64_t asize = P2ROUNDUP(zio->io_size, align); 3182 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3183 ASSERT(vd == vd->vdev_top); 3184 if (zio->io_type == ZIO_TYPE_WRITE) { 3185 abd_copy(abuf, zio->io_abd, zio->io_size); 3186 abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3187 } 3188 zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3189 } 3190 3191 /* 3192 * If this is not a physical io, make sure that it is properly aligned 3193 * before proceeding. 3194 */ 3195 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 3196 ASSERT0(P2PHASE(zio->io_offset, align)); 3197 ASSERT0(P2PHASE(zio->io_size, align)); 3198 } else { 3199 /* 3200 * For physical writes, we allow 512b aligned writes and assume 3201 * the device will perform a read-modify-write as necessary. 3202 */ 3203 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 3204 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 3205 } 3206 3207 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 3208 3209 /* 3210 * If this is a repair I/O, and there's no self-healing involved -- 3211 * that is, we're just resilvering what we expect to resilver -- 3212 * then don't do the I/O unless zio's txg is actually in vd's DTL. 3213 * This prevents spurious resilvering. 3214 * 3215 * There are a few ways that we can end up creating these spurious 3216 * resilver i/os: 3217 * 3218 * 1. A resilver i/o will be issued if any DVA in the BP has a 3219 * dirty DTL. The mirror code will issue resilver writes to 3220 * each DVA, including the one(s) that are not on vdevs with dirty 3221 * DTLs. 3222 * 3223 * 2. With nested replication, which happens when we have a 3224 * "replacing" or "spare" vdev that's a child of a mirror or raidz. 3225 * For example, given mirror(replacing(A+B), C), it's likely that 3226 * only A is out of date (it's the new device). In this case, we'll 3227 * read from C, then use the data to resilver A+B -- but we don't 3228 * actually want to resilver B, just A. The top-level mirror has no 3229 * way to know this, so instead we just discard unnecessary repairs 3230 * as we work our way down the vdev tree. 3231 * 3232 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 3233 * The same logic applies to any form of nested replication: ditto 3234 * + mirror, RAID-Z + replacing, etc. 3235 * 3236 * However, indirect vdevs point off to other vdevs which may have 3237 * DTL's, so we never bypass them. The child i/os on concrete vdevs 3238 * will be properly bypassed instead. 
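* (This is why the bypass test below also requires vd->vdev_ops != &vdev_indirect_ops.)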
3239 */ 3240 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 3241 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 3242 zio->io_txg != 0 && /* not a delegated i/o */ 3243 vd->vdev_ops != &vdev_indirect_ops && 3244 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 3245 ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3246 zio_vdev_io_bypass(zio); 3247 return (ZIO_PIPELINE_CONTINUE); 3248 } 3249 3250 if (vd->vdev_ops->vdev_op_leaf && 3251 (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 3252 3253 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) 3254 return (ZIO_PIPELINE_CONTINUE); 3255 3256 if ((zio = vdev_queue_io(zio)) == NULL) 3257 return (ZIO_PIPELINE_STOP); 3258 3259 if (!vdev_accessible(vd, zio)) { 3260 zio->io_error = SET_ERROR(ENXIO); 3261 zio_interrupt(zio); 3262 return (ZIO_PIPELINE_STOP); 3263 } 3264 } 3265 3266 vd->vdev_ops->vdev_op_io_start(zio); 3267 return (ZIO_PIPELINE_STOP); 3268 } 3269 3270 static int 3271 zio_vdev_io_done(zio_t *zio) 3272 { 3273 vdev_t *vd = zio->io_vd; 3274 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 3275 boolean_t unexpected_error = B_FALSE; 3276 3277 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3278 return (ZIO_PIPELINE_STOP); 3279 } 3280 3281 ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 3282 3283 if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 3284 3285 vdev_queue_io_done(zio); 3286 3287 if (zio->io_type == ZIO_TYPE_WRITE) 3288 vdev_cache_write(zio); 3289 3290 if (zio_injection_enabled && zio->io_error == 0) 3291 zio->io_error = zio_handle_device_injection(vd, 3292 zio, EIO); 3293 3294 if (zio_injection_enabled && zio->io_error == 0) 3295 zio->io_error = zio_handle_label_injection(zio, EIO); 3296 3297 if (zio->io_error) { 3298 if (!vdev_accessible(vd, zio)) { 3299 zio->io_error = SET_ERROR(ENXIO); 3300 } else { 3301 unexpected_error = B_TRUE; 3302 } 3303 } 3304 } 3305 3306 ops->vdev_op_io_done(zio); 3307 3308 if (unexpected_error) 3309 VERIFY(vdev_probe(vd, zio) == NULL); 3310 3311 return (ZIO_PIPELINE_CONTINUE); 3312 } 3313 3314 /* 3315 * For non-raidz ZIOs, we can just copy aside the bad data read from the 3316 * disk, and use that to finish the checksum ereport later. 3317 */ 3318 static void 3319 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 3320 const void *good_buf) 3321 { 3322 /* no processing needed */ 3323 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 3324 } 3325 3326 /*ARGSUSED*/ 3327 void 3328 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 3329 { 3330 void *buf = zio_buf_alloc(zio->io_size); 3331 3332 abd_copy_to_buf(buf, zio->io_abd, zio->io_size); 3333 3334 zcr->zcr_cbinfo = zio->io_size; 3335 zcr->zcr_cbdata = buf; 3336 zcr->zcr_finish = zio_vsd_default_cksum_finish; 3337 zcr->zcr_free = zio_buf_free; 3338 } 3339 3340 static int 3341 zio_vdev_io_assess(zio_t *zio) 3342 { 3343 vdev_t *vd = zio->io_vd; 3344 3345 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3346 return (ZIO_PIPELINE_STOP); 3347 } 3348 3349 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3350 spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3351 3352 if (zio->io_vsd != NULL) { 3353 zio->io_vsd_ops->vsd_free(zio); 3354 zio->io_vsd = NULL; 3355 } 3356 3357 if (zio_injection_enabled && zio->io_error == 0) 3358 zio->io_error = zio_handle_fault_injection(zio, EIO); 3359 3360 /* 3361 * If the I/O failed, determine whether we should attempt to retry it. 
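* Only logical I/Os (vd == NULL) that are not already retries and are not flagged ZIO_FLAG_DONT_RETRY are eligible.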
3362 * 3363 * On retry, we cut in line in the issue queue, since we don't want 3364 * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 3365 */ 3366 if (zio->io_error && vd == NULL && 3367 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3368 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3369 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3370 zio->io_error = 0; 3371 zio->io_flags |= ZIO_FLAG_IO_RETRY | 3372 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3373 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 3374 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 3375 zio_requeue_io_start_cut_in_line); 3376 return (ZIO_PIPELINE_STOP); 3377 } 3378 3379 /* 3380 * If we got an error on a leaf device, convert it to ENXIO 3381 * if the device is not accessible at all. 3382 */ 3383 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3384 !vdev_accessible(vd, zio)) 3385 zio->io_error = SET_ERROR(ENXIO); 3386 3387 /* 3388 * If we can't write to an interior vdev (mirror or RAID-Z), 3389 * set vdev_cant_write so that we stop trying to allocate from it. 3390 */ 3391 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 3392 vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3393 vd->vdev_cant_write = B_TRUE; 3394 } 3395 3396 /* 3397 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3398 * attempts will ever succeed. In this case we set a persistent bit so 3399 * that we don't bother with it in the future. 3400 */ 3401 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 3402 zio->io_type == ZIO_TYPE_IOCTL && 3403 zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 3404 vd->vdev_nowritecache = B_TRUE; 3405 3406 if (zio->io_error) 3407 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3408 3409 if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 3410 zio->io_physdone != NULL) { 3411 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 3412 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 3413 zio->io_physdone(zio->io_logical); 3414 } 3415 3416 return (ZIO_PIPELINE_CONTINUE); 3417 } 3418 3419 void 3420 zio_vdev_io_reissue(zio_t *zio) 3421 { 3422 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3423 ASSERT(zio->io_error == 0); 3424 3425 zio->io_stage >>= 1; 3426 } 3427 3428 void 3429 zio_vdev_io_redone(zio_t *zio) 3430 { 3431 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 3432 3433 zio->io_stage >>= 1; 3434 } 3435 3436 void 3437 zio_vdev_io_bypass(zio_t *zio) 3438 { 3439 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3440 ASSERT(zio->io_error == 0); 3441 3442 zio->io_flags |= ZIO_FLAG_IO_BYPASS; 3443 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 3444 } 3445 3446 /* 3447 * ========================================================================== 3448 * Generate and verify checksums 3449 * ========================================================================== 3450 */ 3451 static int 3452 zio_checksum_generate(zio_t *zio) 3453 { 3454 blkptr_t *bp = zio->io_bp; 3455 enum zio_checksum checksum; 3456 3457 if (bp == NULL) { 3458 /* 3459 * This is zio_write_phys(). 3460 * We're either generating a label checksum, or none at all. 
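* With no bp to consult, the checksum type must come from io_prop.zp_checksum rather than from BP_GET_CHECKSUM().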
3461 */ 3462 checksum = zio->io_prop.zp_checksum; 3463 3464 if (checksum == ZIO_CHECKSUM_OFF) 3465 return (ZIO_PIPELINE_CONTINUE); 3466 3467 ASSERT(checksum == ZIO_CHECKSUM_LABEL); 3468 } else { 3469 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 3470 ASSERT(!IO_IS_ALLOCATING(zio)); 3471 checksum = ZIO_CHECKSUM_GANG_HEADER; 3472 } else { 3473 checksum = BP_GET_CHECKSUM(bp); 3474 } 3475 } 3476 3477 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size); 3478 3479 return (ZIO_PIPELINE_CONTINUE); 3480 } 3481 3482 static int 3483 zio_checksum_verify(zio_t *zio) 3484 { 3485 zio_bad_cksum_t info; 3486 blkptr_t *bp = zio->io_bp; 3487 int error; 3488 3489 ASSERT(zio->io_vd != NULL); 3490 3491 if (bp == NULL) { 3492 /* 3493 * This is zio_read_phys(). 3494 * We're either verifying a label checksum, or nothing at all. 3495 */ 3496 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 3497 return (ZIO_PIPELINE_CONTINUE); 3498 3499 ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 3500 } 3501 3502 if ((error = zio_checksum_error(zio, &info)) != 0) { 3503 zio->io_error = error; 3504 if (error == ECKSUM && 3505 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 3506 zfs_ereport_start_checksum(zio->io_spa, 3507 zio->io_vd, zio, zio->io_offset, 3508 zio->io_size, NULL, &info); 3509 } 3510 } 3511 3512 return (ZIO_PIPELINE_CONTINUE); 3513 } 3514 3515 /* 3516 * Called by RAID-Z to ensure we don't compute the checksum twice. 3517 */ 3518 void 3519 zio_checksum_verified(zio_t *zio) 3520 { 3521 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 3522 } 3523 3524 /* 3525 * ========================================================================== 3526 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 3527 * An error of 0 indicates success. ENXIO indicates whole-device failure, 3528 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 3529 * indicate errors that are specific to one I/O, and most likely permanent. 3530 * Any other error is presumed to be worse because we weren't expecting it. 3531 * ========================================================================== 3532 */ 3533 int 3534 zio_worst_error(int e1, int e2) 3535 { 3536 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 3537 int r1, r2; 3538 3539 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 3540 if (e1 == zio_error_rank[r1]) 3541 break; 3542 3543 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 3544 if (e2 == zio_error_rank[r2]) 3545 break; 3546 3547 return (r1 > r2 ?
e1 : e2); 3548 } 3549 3550 /* 3551 * ========================================================================== 3552 * I/O completion 3553 * ========================================================================== 3554 */ 3555 static int 3556 zio_ready(zio_t *zio) 3557 { 3558 blkptr_t *bp = zio->io_bp; 3559 zio_t *pio, *pio_next; 3560 zio_link_t *zl = NULL; 3561 3562 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, 3563 ZIO_WAIT_READY)) { 3564 return (ZIO_PIPELINE_STOP); 3565 } 3566 3567 if (zio->io_ready) { 3568 ASSERT(IO_IS_ALLOCATING(zio)); 3569 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 3570 (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3571 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3572 3573 zio->io_ready(zio); 3574 } 3575 3576 if (bp != NULL && bp != &zio->io_bp_copy) 3577 zio->io_bp_copy = *bp; 3578 3579 if (zio->io_error != 0) { 3580 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3581 3582 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 3583 ASSERT(IO_IS_ALLOCATING(zio)); 3584 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3585 ASSERT(zio->io_metaslab_class != NULL); 3586 3587 /* 3588 * We were unable to allocate anything, unreserve and 3589 * issue the next I/O to allocate. 3590 */ 3591 metaslab_class_throttle_unreserve( 3592 zio->io_metaslab_class, zio->io_prop.zp_copies, 3593 zio->io_allocator, zio); 3594 zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 3595 } 3596 } 3597 3598 mutex_enter(&zio->io_lock); 3599 zio->io_state[ZIO_WAIT_READY] = 1; 3600 pio = zio_walk_parents(zio, &zl); 3601 mutex_exit(&zio->io_lock); 3602 3603 /* 3604 * As we notify zio's parents, new parents could be added. 3605 * New parents go to the head of zio's io_parent_list, however, 3606 * so we will (correctly) not notify them. The remainder of zio's 3607 * io_parent_list, from 'pio_next' onward, cannot change because 3608 * all parents must wait for us to be done before they can be done. 3609 */ 3610 for (; pio != NULL; pio = pio_next) { 3611 pio_next = zio_walk_parents(zio, &zl); 3612 zio_notify_parent(pio, zio, ZIO_WAIT_READY); 3613 } 3614 3615 if (zio->io_flags & ZIO_FLAG_NODATA) { 3616 if (BP_IS_GANG(bp)) { 3617 zio->io_flags &= ~ZIO_FLAG_NODATA; 3618 } else { 3619 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 3620 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 3621 } 3622 } 3623 3624 if (zio_injection_enabled && 3625 zio->io_spa->spa_syncing_txg == zio->io_txg) 3626 zio_handle_ignored_writes(zio); 3627 3628 return (ZIO_PIPELINE_CONTINUE); 3629 } 3630 3631 /* 3632 * Update the allocation throttle accounting. 3633 */ 3634 static void 3635 zio_dva_throttle_done(zio_t *zio) 3636 { 3637 zio_t *lio = zio->io_logical; 3638 zio_t *pio = zio_unique_parent(zio); 3639 vdev_t *vd = zio->io_vd; 3640 int flags = METASLAB_ASYNC_ALLOC; 3641 3642 ASSERT3P(zio->io_bp, !=, NULL); 3643 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 3644 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 3645 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 3646 ASSERT(vd != NULL); 3647 ASSERT3P(vd, ==, vd->vdev_top); 3648 ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY))); 3649 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 3650 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 3651 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 3652 3653 /* 3654 * Parents of gang children can have two flavors -- ones that 3655 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 3656 * and ones that allocated the constituent blocks. 
/*
 * Update the allocation throttle accounting.
 */
static void
zio_dva_throttle_done(zio_t *zio)
{
	zio_t *lio = zio->io_logical;
	zio_t *pio = zio_unique_parent(zio);
	vdev_t *vd = zio->io_vd;
	int flags = METASLAB_ASYNC_ALLOC;

	ASSERT3P(zio->io_bp, !=, NULL);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
	ASSERT(vd != NULL);
	ASSERT3P(vd, ==, vd->vdev_top);
	ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY)));
	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));

	/*
	 * Parents of gang children can have two flavors -- ones that
	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
	 * and ones that allocated the constituent blocks.  The allocation
	 * throttle needs to know the allocating parent zio, so we must
	 * find it here.
	 */
	if (pio->io_child_type == ZIO_CHILD_GANG) {
		/*
		 * If our parent is a rewrite gang child then our grandparent
		 * would have been the one that performed the allocation.
		 */
		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
			pio = zio_unique_parent(pio);
		flags |= METASLAB_GANG_CHILD;
	}

	ASSERT(IO_IS_ALLOCATING(pio));
	ASSERT3P(zio, !=, zio->io_logical);
	ASSERT(zio->io_logical != NULL);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
	ASSERT(zio->io_metaslab_class != NULL);

	mutex_enter(&pio->io_lock);
	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
	    pio->io_allocator, B_TRUE);
	mutex_exit(&pio->io_lock);

	metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
	    pio->io_allocator, pio);

	/*
	 * Call into the pipeline to see if there is more work that
	 * needs to be done.  If there is work to be done it will be
	 * dispatched to another taskq thread.
	 */
	zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
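/*
 * Editorial sketch (not in the original source): the unreserve/dispatch
 * pairing that zio_dva_throttle_done() performs.  Every slot reserved when
 * the allocating I/O was issued must be released exactly once -- here on
 * the successful path, or in zio_ready() when the allocation failed --
 * and each release kicks the next queued allocation.  The helper name is
 * hypothetical.
 */
#if 0
static void
zio_throttle_release_example(spa_t *spa, zio_t *pio, int slots)
{
	metaslab_class_throttle_unreserve(pio->io_metaslab_class, slots,
	    pio->io_allocator, pio);
	zio_allocate_dispatch(spa, pio->io_allocator);
}
#endif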
static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
		return (ZIO_PIPELINE_STOP);
	}

	/*
	 * If the allocation throttle is enabled, then update the accounting.
	 * We only track child I/Os that are part of an allocating async
	 * write.  We must do this since the allocation is performed
	 * by the logical I/O but the actual write is done by child I/Os.
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		ASSERT(zio->io_metaslab_class != NULL);
		ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
		zio_dva_throttle_done(zio);
	}

	/*
	 * If the allocation throttle is enabled, verify that
	 * we have decremented the refcounts for every I/O that was throttled.
	 */
	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(bp != NULL);

		metaslab_group_alloc_verify(spa, zio->io_bp, zio,
		    zio->io_allocator);
		VERIFY(zfs_refcount_not_held(
		    &zio->io_metaslab_class->mc_alloc_slots[zio->io_allocator],
		    zio));
	}

	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL && !BP_IS_EMBEDDED(bp)) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    zio->io_bp_override == NULL &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
	}

	/*
	 * If there were child vdev/gang/ddt errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);

	/*
	 * If the I/O on the transformed data was successful, generate any
	 * checksum reports now while we still have the transformed data.
	 */
	if (zio->io_error == 0) {
		while (zio->io_cksum_report != NULL) {
			zio_cksum_report_t *zcr = zio->io_cksum_report;
			uint64_t align = zcr->zcr_align;
			uint64_t asize = P2ROUNDUP(psize, align);
			char *abuf = NULL;
			abd_t *adata = zio->io_abd;

			if (asize != psize) {
				adata = abd_alloc_linear(asize, B_TRUE);
				abd_copy(adata, zio->io_abd, psize);
				abd_zero_off(adata, psize, asize - psize);
			}

			if (adata != NULL)
				abuf = abd_borrow_buf_copy(adata, asize);

			zio->io_cksum_report = zcr->zcr_next;
			zcr->zcr_next = NULL;
			zcr->zcr_finish(zcr, abuf);
			zfs_ereport_free_checksum(zcr);

			if (adata != NULL)
				abd_return_buf(adata, abuf, asize);

			if (asize != psize)
				abd_free(adata);
		}
	}

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO || !(zio->io_flags &
		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
		    zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}

	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted.  This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio) &&
		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
		}

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
		    zio->io_error == ENXIO &&
		    spa_load_state(spa) == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		/*
		 * Here is a possibly good place to attempt to do
		 * either combinatorial reconstruction or error correction
		 * based on checksums.  It also might be a good place
		 * to send out preliminary ereports before we suspend
		 * processing.
		 */
	}

	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) &&
	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);

	zio_gang_tree_free(&zio->io_gang_tree);

	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;

	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down.  When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same.  This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them.  It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended).  This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		zl = NULL;
		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
		    pio = pio_next) {
			zio_link_t *remove_zl = zl;
			pio_next = zio_walk_parents(zio, &zl);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, remove_zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent.  Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			ASSERT(zio->io_tqent.tqent_next == NULL);
			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
			    0, &zio->io_tqent);
		}
		return (ZIO_PIPELINE_STOP);
	}

	ASSERT(zio->io_child_count == 0);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * Report any checksum errors, since the I/O is complete.
	 */
	while (zio->io_cksum_report != NULL) {
		zio_cksum_report_t *zcr = zio->io_cksum_report;
		zio->io_cksum_report = zcr->zcr_next;
		zcr->zcr_next = NULL;
		zcr->zcr_finish(zcr, NULL);
		zfs_ereport_free_checksum(zcr);
	}

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	zl = NULL;
	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
		zio_link_t *remove_zl = zl;
		pio_next = zio_walk_parents(zio, &zl);
		zio_remove_child(pio, zio, remove_zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}

/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[] = {
	NULL,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_free_bp_init,
	zio_issue_async,
	zio_write_compress,
	zio_checksum_generate,
	zio_nop_write,
	zio_ddt_read_start,
	zio_ddt_read_done,
	zio_ddt_write,
	zio_ddt_free,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_throttle,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};

/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object.  For all
 * other objects, we traverse them in order (object 1 before object 2, and
 * so on).  However, all of these objects are traversed while traversing
 * object 0, since the data it points to is the list of objects.  Thus, we
 * need to convert to a canonical representation so we can compare
 * meta-dnode bookmarks to non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained
 * in the blocks this bookmark refers to) by multiplying their blkid by
 * their span (the number of L0 blocks contained within one block at their
 * level).  zbookmarks inside the meta-dnode calculate their object
 * equivalent (which is L0equiv * dnodes per data block), use 0 for their
 * L0equiv, and use level + (1 << 31) (any value larger than a level could
 * ever be) for their level.  This causes them to always compare before a
 * bookmark in their object equivalent, compare appropriately to bookmarks
 * in other objects, and compare appropriately to other bookmarks in the
 * meta-dnode.
 */
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the
	 * zbookmark, after converting zbookmarks inside the meta dnode
	 * to their normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	/*
	 * BP_SPANB calculates the span in blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);

	/*
	 * This can (theoretically) happen if the bookmarks have the same
	 * object and level, but different blkids, if the block sizes are
	 * not the same.  There is presently no way to change the indirect
	 * block sizes.
	 */
	return (0);
}
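/*
 * Editorial sketch (not in the original source): how a caller might create
 * a "godfather" I/O.  A root zio created with ZIO_FLAG_GODFATHER tracks
 * its children but, as zio_done() shows above, severs its ties to any
 * child that suspends, so the godfather itself never suspends.  This is
 * modeled on the async root zios the SPA keeps, but the exact flag
 * combination here is illustrative.
 */
#if 0
	zio_t *gio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
#endif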
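/*
 * Editorial sketch (not in the original source): how this table is
 * indexed.  Each zio carries a bitmask of pending stages (io_pipeline);
 * the dispatcher in zio_execute() advances io_stage to the next set bit
 * and uses the bit position as the index into zio_pipeline[], so clearing
 * a bit -- as zio_checksum_verified() does -- skips that stage entirely.
 * A sketch of the dispatch step, under those assumptions:
 */
#if 0
	enum zio_stage stage = zio->io_stage;	/* a one-hot ZIO_STAGE_* bit */
	int rv = zio_pipeline[highbit64(stage) - 1](zio);
#endif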
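/*
 * Editorial worked example (not in the original source), assuming 16K
 * meta-dnode data blocks (dbss = 32 sectors) and 512-byte dnodes, so each
 * L0 meta-dnode block holds 32 dnodes:
 *
 *	A meta-dnode bookmark at level 0, blkid 3 canonicalizes to
 *	zb1obj = 3 * 32 = 96, zb1L0 = 0, zb1level = 0 + COMPARE_META_LEVEL.
 *
 *	Compared with a bookmark in object 100, it sorts first (96 < 100):
 *	the dnode block holding dnodes 96-127 is visited before the objects
 *	it describes.  Compared with a bookmark in object 90, it sorts last
 *	(96 > 90), since object 90 is fully traversed under an earlier
 *	meta-dnode block.
 */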
/*
 * This function checks the following: given that last_block is the place
 * that our traversal stopped last time, does that guarantee that we've
 * visited every node under subtree_root?  We can't answer that with the
 * raw output of zbookmark_compare().  Instead we pass in a modified
 * version of subtree_root: by incrementing its block id and then checking
 * whether last_block is before or equal to that, we can tell whether or
 * not having visited last_block implies that all of subtree_root's
 * children have been visited.
 */
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT(last_block->zb_level == 0);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode.  Since we don't
	 * know without examining it what object it refers to, and there's
	 * no harm in passing in this value in other cases, we always pass
	 * it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must
	 * be level 0.  The indirect block size is only used to calculate
	 * the span of the bookmark, but since the bookmark must be level 0,
	 * the span is always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be
	 * sure that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}
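/*
 * Editorial sketch (not in the original source): typical use when resuming
 * a traversal.  Given the bookmark where a previous scan stopped, a caller
 * can skip an entire subtree whose contents it has already passed.  The
 * variable names are hypothetical.
 */
#if 0
	if (zbookmark_subtree_completed(dnp, &subtree_root_zb, &last_visited))
		return (0);	/* whole subtree already visited; skip it */
#endif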