1 /* 2 * CDDL HEADER START 3 * 4 * This file and its contents are supplied under the terms of the 5 * Common Development and Distribution License ("CDDL"), version 1.0. 6 * You may only use this file in accordance with the terms of version 7 * 1.0 of the CDDL. 8 * 9 * A full copy of the text of the CDDL should have accompanied this 10 * source. A copy of the CDDL is also available via the Internet at 11 * http://www.illumos.org/license/CDDL. 12 * 13 * CDDL HEADER END 14 */ 15 16 /* 17 * Copyright (c) 2014, 2017 by Delphix. All rights reserved. 18 */ 19 20 #include <sys/zfs_context.h> 21 #include <sys/spa.h> 22 #include <sys/spa_impl.h> 23 #include <sys/vdev_impl.h> 24 #include <sys/fs/zfs.h> 25 #include <sys/zio.h> 26 #include <sys/zio_checksum.h> 27 #include <sys/metaslab.h> 28 #include <sys/refcount.h> 29 #include <sys/dmu.h> 30 #include <sys/vdev_indirect_mapping.h> 31 #include <sys/dmu_tx.h> 32 #include <sys/dsl_synctask.h> 33 #include <sys/zap.h> 34 #include <sys/abd.h> 35 #include <sys/zthr.h> 36 37 /* 38 * An indirect vdev corresponds to a vdev that has been removed. Since 39 * we cannot rewrite block pointers of snapshots, etc., we keep a 40 * mapping from old location on the removed device to the new location 41 * on another device in the pool and use this mapping whenever we need 42 * to access the DVA. Unfortunately, this mapping did not respect 43 * logical block boundaries when it was first created, and so a DVA on 44 * this indirect vdev may be "split" into multiple sections that each 45 * map to a different location. As a consequence, not all DVAs can be 46 * translated to an equivalent new DVA. Instead we must provide a 47 * "vdev_remap" operation that executes a callback on each contiguous 48 * segment of the new location. This function is used in multiple ways: 49 * 50 * - i/os to this vdev use the callback to determine where the 51 * data is now located, and issue child i/os for each segment's new 52 * location. 53 * 54 * - frees and claims to this vdev use the callback to free or claim 55 * each mapped segment. (Note that we don't actually need to claim 56 * log blocks on indirect vdevs, because we don't allocate to 57 * removing vdevs. However, zdb uses zio_claim() for its leak 58 * detection.) 59 */ 60 61 /* 62 * "Big theory statement" for how we mark blocks obsolete. 63 * 64 * When a block on an indirect vdev is freed or remapped, a section of 65 * that vdev's mapping may no longer be referenced (aka "obsolete"). We 66 * keep track of how much of each mapping entry is obsolete. When 67 * an entry becomes completely obsolete, we can remove it, thus reducing 68 * the memory used by the mapping. The complete picture of obsolescence 69 * is given by the following data structures, described below: 70 * - the entry-specific obsolete count 71 * - the vdev-specific obsolete spacemap 72 * - the pool-specific obsolete bpobj 73 * 74 * == On disk data structures used == 75 * 76 * We track the obsolete space for the pool using several objects. Each 77 * of these objects is created on demand and freed when no longer 78 * needed, and is assumed to be empty if it does not exist. 79 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects. 80 * 81 * - Each vic_mapping_object (associated with an indirect vdev) can 82 * have a vimp_counts_object. This is an array of uint32_t's 83 * with the same number of entries as the vic_mapping_object. When 84 * the mapping is condensed, entries from the vic_obsolete_sm_object 85 * (see below) are folded into the counts. Therefore, each 86 * obsolete_counts entry tells us the number of bytes in the 87 * corresponding mapping entry that were not referenced when the 88 * mapping was last condensed. 89 * 90 * - Each indirect or removing vdev can have a vic_obsolete_sm_object. 91 * This is a space map containing an alloc entry for every DVA that 92 * has been obsoleted since the last time this indirect vdev was 93 * condensed. We use this object in order to improve performance 94 * when marking a DVA as obsolete. Instead of modifying an arbitrary 95 * offset of the vimp_counts_object, we only need to append an entry 96 * to the end of this object. When a DVA becomes obsolete, it is 97 * added to the obsolete space map. This happens when the DVA is 98 * freed, remapped and not referenced by a snapshot, or the last 99 * snapshot referencing it is destroyed. 100 * 101 * - Each dataset can have a ds_remap_deadlist object. This is a 102 * deadlist object containing all blocks that were remapped in this 103 * dataset but referenced in a previous snapshot. Blocks can *only* 104 * appear on this list if they were remapped (dsl_dataset_block_remapped); 105 * blocks that were killed in a head dataset are put on the normal 106 * ds_deadlist and marked obsolete when they are freed. 107 * 108 * - The pool can have a dp_obsolete_bpobj. This is a list of blocks 109 * in the pool that need to be marked obsolete. When a snapshot is 110 * destroyed, we move some of the ds_remap_deadlist to the obsolete 111 * bpobj (see dsl_destroy_snapshot_handle_remaps()). We then 112 * asynchronously process the obsolete bpobj, moving its entries to 113 * the specific vdevs' obsolete space maps. 114 * 115 * == Summary of how we mark blocks as obsolete == 116 * 117 * - When freeing a block: if any DVA is on an indirect vdev, append to 118 * vic_obsolete_sm_object. 119 * - When remapping a block, add dva to ds_remap_deadlist (if prev snap 120 * references; otherwise append to vic_obsolete_sm_object). 121 * - When freeing a snapshot: move parts of ds_remap_deadlist to 122 * dp_obsolete_bpobj (same algorithm as ds_deadlist). 123 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to 124 * individual vdev's vic_obsolete_sm_object. 125 */ 126 127 /* 128 * "Big theory statement" for how we condense indirect vdevs. 129 * 130 * Condensing an indirect vdev's mapping is the process of determining 131 * the precise counts of obsolete space for each mapping entry (by 132 * integrating the obsolete spacemap into the obsolete counts) and 133 * writing out a new mapping that contains only referenced entries. 134 * 135 * We condense a vdev when we expect the mapping to shrink (see 136 * vdev_indirect_should_condense()), but only perform one condense at a 137 * time to limit the memory usage. In addition, we use a separate 138 * open-context thread (spa_condense_indirect_thread) to incrementally 139 * create the new mapping object in a way that minimizes the impact on 140 * the rest of the system. 141 * 142 * == Generating a new mapping == 143 * 144 * To generate a new mapping, we follow these steps: 145 * 146 * 1. Save the old obsolete space map and create a new mapping object 147 * (see spa_condense_indirect_start_sync()). This initializes the 148 * spa_condensing_indirect_phys with the "previous obsolete space map", 149 * which is now read only. Newly obsolete DVAs will be added to a 150 * new (initially empty) obsolete space map, and will not be 151 * considered as part of this condense operation. 152 * 153 * 2. Construct in memory the precise counts of obsolete space for each 154 * mapping entry, by incorporating the obsolete space map into the 155 * counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().) 156 * 157 * 3. Iterate through each mapping entry, writing to the new mapping any 158 * entries that are not completely obsolete (i.e. which don't have 159 * obsolete count == mapping length). (See 160 * spa_condense_indirect_generate_new_mapping().) 161 * 162 * 4. Destroy the old mapping object and switch over to the new one 163 * (spa_condense_indirect_complete_sync). 164 * 165 * == Restarting from failure == 166 * 167 * To restart the condense when we import/open the pool, we must start 168 * at the 2nd step above: reconstruct the precise counts in memory, 169 * based on the space map + counts. Then in the 3rd step, we start 170 * iterating where we left off: at vimp_max_offset of the new mapping 171 * object. 172 */ 173 174 boolean_t zfs_condense_indirect_vdevs_enable = B_TRUE; 175 176 /* 177 * Condense if at least this percent of the bytes in the mapping is 178 * obsolete. With the default of 25%, the amount of space mapped 179 * will be reduced to 1% of its original size after at most 16 180 * condenses. Higher values will condense less often (causing less 181 * i/o); lower values will reduce the mapping size more quickly. 182 */ 183 int zfs_indirect_condense_obsolete_pct = 25; 184 185 /* 186 * Condense if the obsolete space map takes up more than this amount of 187 * space on disk (logically). This limits the amount of disk space 188 * consumed by the obsolete space map; the default of 1GB is small enough 189 * that we typically don't mind "wasting" it. 190 */ 191 uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024; 192 193 /* 194 * Don't bother condensing if the mapping uses less than this amount of 195 * memory. The default of 128KB is considered a "trivial" amount of 196 * memory and not worth reducing. 197 */ 198 uint64_t zfs_condense_min_mapping_bytes = 128 * 1024; 199 200 /* 201 * This is used by the test suite so that it can ensure that certain 202 * actions happen while in the middle of a condense (which might otherwise 203 * complete too quickly). If used to reduce the performance impact of 204 * condensing in production, a maximum value of 1 should be sufficient. 205 */ 206 int zfs_condense_indirect_commit_entry_delay_ticks = 0; 207 208 /* 209 * If an indirect split block contains more than this many possible unique 210 * combinations when being reconstructed, consider it too computationally 211 * expensive to check them all. Instead, try at most 100 randomly-selected 212 * combinations each time the block is accessed. This allows all segment 213 * copies to participate fairly in the reconstruction when all combinations 214 * cannot be checked and prevents repeated use of one bad copy. 215 */ 216 int zfs_reconstruct_indirect_combinations_max = 256; 217 218 219 /* 220 * Enable to simulate damaged segments and validate reconstruction. 221 * Used by ztest 222 */ 223 unsigned long zfs_reconstruct_indirect_damage_fraction = 0; 224 225 /* 226 * The indirect_child_t represents the vdev that we will read from, when we 227 * need to read all copies of the data (e.g. for scrub or reconstruction). 228 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror), 229 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs, 230 * ic_vdev is a child of the mirror. 231 */ 232 typedef struct indirect_child { 233 abd_t *ic_data; 234 vdev_t *ic_vdev; 235 236 /* 237 * ic_duplicate is NULL when the ic_data contents are unique, when it 238 * is determined to be a duplicate it references the primary child. 239 */ 240 struct indirect_child *ic_duplicate; 241 list_node_t ic_node; /* node on is_unique_child */ 242 } indirect_child_t; 243 244 /* 245 * The indirect_split_t represents one mapped segment of an i/o to the 246 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be 247 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size. 248 * For split blocks, there will be several of these. 249 */ 250 typedef struct indirect_split { 251 list_node_t is_node; /* link on iv_splits */ 252 253 /* 254 * is_split_offset is the offset into the i/o. 255 * This is the sum of the previous splits' is_size's. 256 */ 257 uint64_t is_split_offset; 258 259 vdev_t *is_vdev; /* top-level vdev */ 260 uint64_t is_target_offset; /* offset on is_vdev */ 261 uint64_t is_size; 262 int is_children; /* number of entries in is_child[] */ 263 int is_unique_children; /* number of entries in is_unique_child */ 264 list_t is_unique_child; 265 266 /* 267 * is_good_child is the child that we are currently using to 268 * attempt reconstruction. 269 */ 270 indirect_child_t *is_good_child; 271 272 indirect_child_t is_child[1]; /* variable-length */ 273 } indirect_split_t; 274 275 /* 276 * The indirect_vsd_t is associated with each i/o to the indirect vdev. 277 * It is the "Vdev-Specific Data" in the zio_t's io_vsd. 278 */ 279 typedef struct indirect_vsd { 280 boolean_t iv_split_block; 281 boolean_t iv_reconstruct; 282 uint64_t iv_unique_combinations; 283 uint64_t iv_attempts; 284 uint64_t iv_attempts_max; 285 286 list_t iv_splits; /* list of indirect_split_t's */ 287 } indirect_vsd_t; 288 289 static void 290 vdev_indirect_map_free(zio_t *zio) 291 { 292 indirect_vsd_t *iv = zio->io_vsd; 293 294 indirect_split_t *is; 295 while ((is = list_head(&iv->iv_splits)) != NULL) { 296 for (int c = 0; c < is->is_children; c++) { 297 indirect_child_t *ic = &is->is_child[c]; 298 if (ic->ic_data != NULL) 299 abd_free(ic->ic_data); 300 } 301 list_remove(&iv->iv_splits, is); 302 303 indirect_child_t *ic; 304 while ((ic = list_head(&is->is_unique_child)) != NULL) 305 list_remove(&is->is_unique_child, ic); 306 307 list_destroy(&is->is_unique_child); 308 309 kmem_free(is, 310 offsetof(indirect_split_t, is_child[is->is_children])); 311 } 312 kmem_free(iv, sizeof (*iv)); 313 } 314 315 static const zio_vsd_ops_t vdev_indirect_vsd_ops = { 316 vdev_indirect_map_free, 317 zio_vsd_default_cksum_report 318 }; 319 /* 320 * Mark the given offset and size as being obsolete. 321 */ 322 void 323 vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size) 324 { 325 spa_t *spa = vd->vdev_spa; 326 327 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0); 328 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops); 329 ASSERT(size > 0); 330 VERIFY(vdev_indirect_mapping_entry_for_offset( 331 vd->vdev_indirect_mapping, offset) != NULL); 332 333 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { 334 mutex_enter(&vd->vdev_obsolete_lock); 335 range_tree_add(vd->vdev_obsolete_segments, offset, size); 336 mutex_exit(&vd->vdev_obsolete_lock); 337 vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa)); 338 } 339 } 340 341 /* 342 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This 343 * wrapper is provided because the DMU does not know about vdev_t's and 344 * cannot directly call vdev_indirect_mark_obsolete. 345 */ 346 void 347 spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset, 348 uint64_t size, dmu_tx_t *tx) 349 { 350 vdev_t *vd = vdev_lookup_top(spa, vdev_id); 351 ASSERT(dmu_tx_is_syncing(tx)); 352 353 /* The DMU can only remap indirect vdevs. */ 354 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 355 vdev_indirect_mark_obsolete(vd, offset, size); 356 } 357 358 static spa_condensing_indirect_t * 359 spa_condensing_indirect_create(spa_t *spa) 360 { 361 spa_condensing_indirect_phys_t *scip = 362 &spa->spa_condensing_indirect_phys; 363 spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP); 364 objset_t *mos = spa->spa_meta_objset; 365 366 for (int i = 0; i < TXG_SIZE; i++) { 367 list_create(&sci->sci_new_mapping_entries[i], 368 sizeof (vdev_indirect_mapping_entry_t), 369 offsetof(vdev_indirect_mapping_entry_t, vime_node)); 370 } 371 372 sci->sci_new_mapping = 373 vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object); 374 375 return (sci); 376 } 377 378 static void 379 spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci) 380 { 381 for (int i = 0; i < TXG_SIZE; i++) 382 list_destroy(&sci->sci_new_mapping_entries[i]); 383 384 if (sci->sci_new_mapping != NULL) 385 vdev_indirect_mapping_close(sci->sci_new_mapping); 386 387 kmem_free(sci, sizeof (*sci)); 388 } 389 390 boolean_t 391 vdev_indirect_should_condense(vdev_t *vd) 392 { 393 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 394 spa_t *spa = vd->vdev_spa; 395 396 ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool)); 397 398 if (!zfs_condense_indirect_vdevs_enable) 399 return (B_FALSE); 400 401 /* 402 * We can only condense one indirect vdev at a time. 403 */ 404 if (spa->spa_condensing_indirect != NULL) 405 return (B_FALSE); 406 407 if (spa_shutting_down(spa)) 408 return (B_FALSE); 409 410 /* 411 * The mapping object size must not change while we are 412 * condensing, so we can only condense indirect vdevs 413 * (not vdevs that are still in the middle of being removed). 414 */ 415 if (vd->vdev_ops != &vdev_indirect_ops) 416 return (B_FALSE); 417 418 /* 419 * If nothing new has been marked obsolete, there is no 420 * point in condensing. 421 */ 422 if (vd->vdev_obsolete_sm == NULL) { 423 ASSERT0(vdev_obsolete_sm_object(vd)); 424 return (B_FALSE); 425 } 426 427 ASSERT(vd->vdev_obsolete_sm != NULL); 428 429 ASSERT3U(vdev_obsolete_sm_object(vd), ==, 430 space_map_object(vd->vdev_obsolete_sm)); 431 432 uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim); 433 uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm); 434 uint64_t mapping_size = vdev_indirect_mapping_size(vim); 435 uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm); 436 437 ASSERT3U(bytes_obsolete, <=, bytes_mapped); 438 439 /* 440 * If a high percentage of the bytes that are mapped have become 441 * obsolete, condense (unless the mapping is already small enough). 442 * This has a good chance of reducing the amount of memory used 443 * by the mapping. 444 */ 445 if (bytes_obsolete * 100 / bytes_mapped >= 446 zfs_indirect_condense_obsolete_pct && 447 mapping_size > zfs_condense_min_mapping_bytes) { 448 zfs_dbgmsg("should condense vdev %llu because obsolete " 449 "spacemap covers %d%% of %lluMB mapping", 450 (u_longlong_t)vd->vdev_id, 451 (int)(bytes_obsolete * 100 / bytes_mapped), 452 (u_longlong_t)bytes_mapped / 1024 / 1024); 453 return (B_TRUE); 454 } 455 456 /* 457 * If the obsolete space map takes up too much space on disk, 458 * condense in order to free up this disk space. 459 */ 460 if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) { 461 zfs_dbgmsg("should condense vdev %llu because obsolete sm " 462 "length %lluMB >= max size %lluMB", 463 (u_longlong_t)vd->vdev_id, 464 (u_longlong_t)obsolete_sm_size / 1024 / 1024, 465 (u_longlong_t)zfs_condense_max_obsolete_bytes / 466 1024 / 1024); 467 return (B_TRUE); 468 } 469 470 return (B_FALSE); 471 } 472 473 /* 474 * This sync task completes (finishes) a condense, deleting the old 475 * mapping and replacing it with the new one. 476 */ 477 static void 478 spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx) 479 { 480 spa_condensing_indirect_t *sci = arg; 481 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 482 spa_condensing_indirect_phys_t *scip = 483 &spa->spa_condensing_indirect_phys; 484 vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev); 485 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 486 objset_t *mos = spa->spa_meta_objset; 487 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping; 488 uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping); 489 uint64_t new_count = 490 vdev_indirect_mapping_num_entries(sci->sci_new_mapping); 491 492 ASSERT(dmu_tx_is_syncing(tx)); 493 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 494 ASSERT3P(sci, ==, spa->spa_condensing_indirect); 495 for (int i = 0; i < TXG_SIZE; i++) { 496 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i])); 497 } 498 ASSERT(vic->vic_mapping_object != 0); 499 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev); 500 ASSERT(scip->scip_next_mapping_object != 0); 501 ASSERT(scip->scip_prev_obsolete_sm_object != 0); 502 503 /* 504 * Reset vdev_indirect_mapping to refer to the new object. 505 */ 506 rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER); 507 vdev_indirect_mapping_close(vd->vdev_indirect_mapping); 508 vd->vdev_indirect_mapping = sci->sci_new_mapping; 509 rw_exit(&vd->vdev_indirect_rwlock); 510 511 sci->sci_new_mapping = NULL; 512 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx); 513 vic->vic_mapping_object = scip->scip_next_mapping_object; 514 scip->scip_next_mapping_object = 0; 515 516 space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx); 517 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 518 scip->scip_prev_obsolete_sm_object = 0; 519 520 scip->scip_vdev = 0; 521 522 VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT, 523 DMU_POOL_CONDENSING_INDIRECT, tx)); 524 spa_condensing_indirect_destroy(spa->spa_condensing_indirect); 525 spa->spa_condensing_indirect = NULL; 526 527 zfs_dbgmsg("finished condense of vdev %llu in txg %llu: " 528 "new mapping object %llu has %llu entries " 529 "(was %llu entries)", 530 vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object, 531 new_count, old_count); 532 533 vdev_config_dirty(spa->spa_root_vdev); 534 } 535 536 /* 537 * This sync task appends entries to the new mapping object. 538 */ 539 static void 540 spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx) 541 { 542 spa_condensing_indirect_t *sci = arg; 543 uint64_t txg = dmu_tx_get_txg(tx); 544 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 545 546 ASSERT(dmu_tx_is_syncing(tx)); 547 ASSERT3P(sci, ==, spa->spa_condensing_indirect); 548 549 vdev_indirect_mapping_add_entries(sci->sci_new_mapping, 550 &sci->sci_new_mapping_entries[txg & TXG_MASK], tx); 551 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK])); 552 } 553 554 /* 555 * Open-context function to add one entry to the new mapping. The new 556 * entry will be remembered and written from syncing context. 557 */ 558 static void 559 spa_condense_indirect_commit_entry(spa_t *spa, 560 vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count) 561 { 562 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect; 563 564 ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst)); 565 566 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 567 dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count)); 568 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 569 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 570 571 /* 572 * If we are the first entry committed this txg, kick off the sync 573 * task to write to the MOS on our behalf. 574 */ 575 if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) { 576 dsl_sync_task_nowait(dmu_tx_pool(tx), 577 spa_condense_indirect_commit_sync, sci, 578 0, ZFS_SPACE_CHECK_NONE, tx); 579 } 580 581 vdev_indirect_mapping_entry_t *vime = 582 kmem_alloc(sizeof (*vime), KM_SLEEP); 583 vime->vime_mapping = *vimep; 584 vime->vime_obsolete_count = count; 585 list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime); 586 587 dmu_tx_commit(tx); 588 } 589 590 static void 591 spa_condense_indirect_generate_new_mapping(vdev_t *vd, 592 uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr) 593 { 594 spa_t *spa = vd->vdev_spa; 595 uint64_t mapi = start_index; 596 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping; 597 uint64_t old_num_entries = 598 vdev_indirect_mapping_num_entries(old_mapping); 599 600 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 601 ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev); 602 603 zfs_dbgmsg("starting condense of vdev %llu from index %llu", 604 (u_longlong_t)vd->vdev_id, 605 (u_longlong_t)mapi); 606 607 while (mapi < old_num_entries) { 608 609 if (zthr_iscancelled(zthr)) { 610 zfs_dbgmsg("pausing condense of vdev %llu " 611 "at index %llu", (u_longlong_t)vd->vdev_id, 612 (u_longlong_t)mapi); 613 break; 614 } 615 616 vdev_indirect_mapping_entry_phys_t *entry = 617 &old_mapping->vim_entries[mapi]; 618 uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst); 619 ASSERT3U(obsolete_counts[mapi], <=, entry_size); 620 if (obsolete_counts[mapi] < entry_size) { 621 spa_condense_indirect_commit_entry(spa, entry, 622 obsolete_counts[mapi]); 623 624 /* 625 * This delay may be requested for testing, debugging, 626 * or performance reasons. 627 */ 628 delay(zfs_condense_indirect_commit_entry_delay_ticks); 629 } 630 631 mapi++; 632 } 633 } 634 635 /* ARGSUSED */ 636 static boolean_t 637 spa_condense_indirect_thread_check(void *arg, zthr_t *zthr) 638 { 639 spa_t *spa = arg; 640 641 return (spa->spa_condensing_indirect != NULL); 642 } 643 644 /* ARGSUSED */ 645 static int 646 spa_condense_indirect_thread(void *arg, zthr_t *zthr) 647 { 648 spa_t *spa = arg; 649 vdev_t *vd; 650 651 ASSERT3P(spa->spa_condensing_indirect, !=, NULL); 652 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 653 vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev); 654 ASSERT3P(vd, !=, NULL); 655 spa_config_exit(spa, SCL_VDEV, FTAG); 656 657 spa_condensing_indirect_t *sci = spa->spa_condensing_indirect; 658 spa_condensing_indirect_phys_t *scip = 659 &spa->spa_condensing_indirect_phys; 660 uint32_t *counts; 661 uint64_t start_index; 662 vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping; 663 space_map_t *prev_obsolete_sm = NULL; 664 665 ASSERT3U(vd->vdev_id, ==, scip->scip_vdev); 666 ASSERT(scip->scip_next_mapping_object != 0); 667 ASSERT(scip->scip_prev_obsolete_sm_object != 0); 668 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 669 670 for (int i = 0; i < TXG_SIZE; i++) { 671 /* 672 * The list must start out empty in order for the 673 * _commit_sync() sync task to be properly registered 674 * on the first call to _commit_entry(); so it's wise 675 * to double check and ensure we actually are starting 676 * with empty lists. 677 */ 678 ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i])); 679 } 680 681 VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset, 682 scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0)); 683 counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping); 684 if (prev_obsolete_sm != NULL) { 685 vdev_indirect_mapping_load_obsolete_spacemap(old_mapping, 686 counts, prev_obsolete_sm); 687 } 688 space_map_close(prev_obsolete_sm); 689 690 /* 691 * Generate new mapping. Determine what index to continue from 692 * based on the max offset that we've already written in the 693 * new mapping. 694 */ 695 uint64_t max_offset = 696 vdev_indirect_mapping_max_offset(sci->sci_new_mapping); 697 if (max_offset == 0) { 698 /* We haven't written anything to the new mapping yet. */ 699 start_index = 0; 700 } else { 701 /* 702 * Pick up from where we left off. _entry_for_offset() 703 * returns a pointer into the vim_entries array. If 704 * max_offset is greater than any of the mappings 705 * contained in the table NULL will be returned and 706 * that indicates we've exhausted our iteration of the 707 * old_mapping. 708 */ 709 710 vdev_indirect_mapping_entry_phys_t *entry = 711 vdev_indirect_mapping_entry_for_offset_or_next(old_mapping, 712 max_offset); 713 714 if (entry == NULL) { 715 /* 716 * We've already written the whole new mapping. 717 * This special value will cause us to skip the 718 * generate_new_mapping step and just do the sync 719 * task to complete the condense. 720 */ 721 start_index = UINT64_MAX; 722 } else { 723 start_index = entry - old_mapping->vim_entries; 724 ASSERT3U(start_index, <, 725 vdev_indirect_mapping_num_entries(old_mapping)); 726 } 727 } 728 729 spa_condense_indirect_generate_new_mapping(vd, counts, 730 start_index, zthr); 731 732 vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts); 733 734 /* 735 * If the zthr has received a cancellation signal while running 736 * in generate_new_mapping() or at any point after that, then bail 737 * early. We don't want to complete the condense if the spa is 738 * shutting down. 739 */ 740 if (zthr_iscancelled(zthr)) 741 return (0); 742 743 VERIFY0(dsl_sync_task(spa_name(spa), NULL, 744 spa_condense_indirect_complete_sync, sci, 0, 745 ZFS_SPACE_CHECK_EXTRA_RESERVED)); 746 747 return (0); 748 } 749 750 /* 751 * Sync task to begin the condensing process. 752 */ 753 void 754 spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx) 755 { 756 spa_t *spa = vd->vdev_spa; 757 spa_condensing_indirect_phys_t *scip = 758 &spa->spa_condensing_indirect_phys; 759 760 ASSERT0(scip->scip_next_mapping_object); 761 ASSERT0(scip->scip_prev_obsolete_sm_object); 762 ASSERT0(scip->scip_vdev); 763 ASSERT(dmu_tx_is_syncing(tx)); 764 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 765 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS)); 766 ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping)); 767 768 uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd); 769 ASSERT(obsolete_sm_obj != 0); 770 771 scip->scip_vdev = vd->vdev_id; 772 scip->scip_next_mapping_object = 773 vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx); 774 775 scip->scip_prev_obsolete_sm_object = obsolete_sm_obj; 776 777 /* 778 * We don't need to allocate a new space map object, since 779 * vdev_indirect_sync_obsolete will allocate one when needed. 780 */ 781 space_map_close(vd->vdev_obsolete_sm); 782 vd->vdev_obsolete_sm = NULL; 783 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 784 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx)); 785 786 VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset, 787 DMU_POOL_DIRECTORY_OBJECT, 788 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t), 789 sizeof (*scip) / sizeof (uint64_t), scip, tx)); 790 791 ASSERT3P(spa->spa_condensing_indirect, ==, NULL); 792 spa->spa_condensing_indirect = spa_condensing_indirect_create(spa); 793 794 zfs_dbgmsg("starting condense of vdev %llu in txg %llu: " 795 "posm=%llu nm=%llu", 796 vd->vdev_id, dmu_tx_get_txg(tx), 797 (u_longlong_t)scip->scip_prev_obsolete_sm_object, 798 (u_longlong_t)scip->scip_next_mapping_object); 799 800 zthr_wakeup(spa->spa_condense_zthr); 801 } 802 803 /* 804 * Sync to the given vdev's obsolete space map any segments that are no longer 805 * referenced as of the given txg. 806 * 807 * If the obsolete space map doesn't exist yet, create and open it. 808 */ 809 void 810 vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx) 811 { 812 spa_t *spa = vd->vdev_spa; 813 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 814 815 ASSERT3U(vic->vic_mapping_object, !=, 0); 816 ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0); 817 ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops); 818 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)); 819 820 if (vdev_obsolete_sm_object(vd) == 0) { 821 uint64_t obsolete_sm_object = 822 space_map_alloc(spa->spa_meta_objset, 823 vdev_standard_sm_blksz, tx); 824 825 ASSERT(vd->vdev_top_zap != 0); 826 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap, 827 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, 828 sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx)); 829 ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0); 830 831 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 832 VERIFY0(space_map_open(&vd->vdev_obsolete_sm, 833 spa->spa_meta_objset, obsolete_sm_object, 834 0, vd->vdev_asize, 0)); 835 } 836 837 ASSERT(vd->vdev_obsolete_sm != NULL); 838 ASSERT3U(vdev_obsolete_sm_object(vd), ==, 839 space_map_object(vd->vdev_obsolete_sm)); 840 841 space_map_write(vd->vdev_obsolete_sm, 842 vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx); 843 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL); 844 } 845 846 int 847 spa_condense_init(spa_t *spa) 848 { 849 int error = zap_lookup(spa->spa_meta_objset, 850 DMU_POOL_DIRECTORY_OBJECT, 851 DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t), 852 sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t), 853 &spa->spa_condensing_indirect_phys); 854 if (error == 0) { 855 if (spa_writeable(spa)) { 856 spa->spa_condensing_indirect = 857 spa_condensing_indirect_create(spa); 858 } 859 return (0); 860 } else if (error == ENOENT) { 861 return (0); 862 } else { 863 return (error); 864 } 865 } 866 867 void 868 spa_condense_fini(spa_t *spa) 869 { 870 if (spa->spa_condensing_indirect != NULL) { 871 spa_condensing_indirect_destroy(spa->spa_condensing_indirect); 872 spa->spa_condensing_indirect = NULL; 873 } 874 } 875 876 void 877 spa_start_indirect_condensing_thread(spa_t *spa) 878 { 879 ASSERT3P(spa->spa_condense_zthr, ==, NULL); 880 spa->spa_condense_zthr = zthr_create(spa_condense_indirect_thread_check, 881 spa_condense_indirect_thread, spa); 882 } 883 884 /* 885 * Gets the obsolete spacemap object from the vdev's ZAP. 886 * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't 887 * exist yet. 888 */ 889 int 890 vdev_obsolete_sm_object(vdev_t *vd) 891 { 892 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER)); 893 if (vd->vdev_top_zap == 0) { 894 return (0); 895 } 896 897 uint64_t sm_obj = 0; 898 int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap, 899 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj); 900 901 ASSERT(err == 0 || err == ENOENT); 902 903 return (sm_obj); 904 } 905 906 boolean_t 907 vdev_obsolete_counts_are_precise(vdev_t *vd) 908 { 909 ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER)); 910 if (vd->vdev_top_zap == 0) { 911 return (B_FALSE); 912 } 913 914 uint64_t val = 0; 915 int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap, 916 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val); 917 918 ASSERT(err == 0 || err == ENOENT); 919 920 return (val != 0); 921 } 922 923 /* ARGSUSED */ 924 static void 925 vdev_indirect_close(vdev_t *vd) 926 { 927 } 928 929 /* ARGSUSED */ 930 static int 931 vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize, 932 uint64_t *ashift) 933 { 934 *psize = *max_psize = vd->vdev_asize + 935 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; 936 *ashift = vd->vdev_ashift; 937 return (0); 938 } 939 940 typedef struct remap_segment { 941 vdev_t *rs_vd; 942 uint64_t rs_offset; 943 uint64_t rs_asize; 944 uint64_t rs_split_offset; 945 list_node_t rs_node; 946 } remap_segment_t; 947 948 remap_segment_t * 949 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset) 950 { 951 remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP); 952 rs->rs_vd = vd; 953 rs->rs_offset = offset; 954 rs->rs_asize = asize; 955 rs->rs_split_offset = split_offset; 956 return (rs); 957 } 958 959 /* 960 * Given an indirect vdev and an extent on that vdev, it duplicates the 961 * physical entries of the indirect mapping that correspond to the extent 962 * to a new array and returns a pointer to it. In addition, copied_entries 963 * is populated with the number of mapping entries that were duplicated. 964 * 965 * Note that the function assumes that the caller holds vdev_indirect_rwlock. 966 * This ensures that the mapping won't change due to condensing as we 967 * copy over its contents. 968 * 969 * Finally, since we are doing an allocation, it is up to the caller to 970 * free the array allocated in this function. 971 */ 972 vdev_indirect_mapping_entry_phys_t * 973 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset, 974 uint64_t asize, uint64_t *copied_entries) 975 { 976 vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL; 977 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 978 uint64_t entries = 0; 979 980 ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock)); 981 982 vdev_indirect_mapping_entry_phys_t *first_mapping = 983 vdev_indirect_mapping_entry_for_offset(vim, offset); 984 ASSERT3P(first_mapping, !=, NULL); 985 986 vdev_indirect_mapping_entry_phys_t *m = first_mapping; 987 while (asize > 0) { 988 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst); 989 990 ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m)); 991 ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size); 992 993 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m); 994 uint64_t inner_size = MIN(asize, size - inner_offset); 995 996 offset += inner_size; 997 asize -= inner_size; 998 entries++; 999 m++; 1000 } 1001 1002 size_t copy_length = entries * sizeof (*first_mapping); 1003 duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP); 1004 bcopy(first_mapping, duplicate_mappings, copy_length); 1005 *copied_entries = entries; 1006 1007 return (duplicate_mappings); 1008 } 1009 1010 /* 1011 * Goes through the relevant indirect mappings until it hits a concrete vdev 1012 * and issues the callback. On the way to the concrete vdev, if any other 1013 * indirect vdevs are encountered, then the callback will also be called on 1014 * each of those indirect vdevs. For example, if the segment is mapped to 1015 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is 1016 * mapped to segment B on concrete vdev 2, then the callback will be called on 1017 * both vdev 1 and vdev 2. 1018 * 1019 * While the callback passed to vdev_indirect_remap() is called on every vdev 1020 * the function encounters, certain callbacks only care about concrete vdevs. 1021 * These types of callbacks should return immediately and explicitly when they 1022 * are called on an indirect vdev. 1023 * 1024 * Because there is a possibility that a DVA section in the indirect device 1025 * has been split into multiple sections in our mapping, we keep track 1026 * of the relevant contiguous segments of the new location (remap_segment_t) 1027 * in a stack. This way we can call the callback for each of the new sections 1028 * created by a single section of the indirect device. Note though, that in 1029 * this scenario the callbacks in each split block won't occur in-order in 1030 * terms of offset, so callers should not make any assumptions about that. 1031 * 1032 * For callbacks that don't handle split blocks and immediately return when 1033 * they encounter them (as is the case for remap_blkptr_cb), the caller can 1034 * assume that its callback will be applied from the first indirect vdev 1035 * encountered to the last one and then the concrete vdev, in that order. 1036 */ 1037 static void 1038 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, 1039 void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg) 1040 { 1041 list_t stack; 1042 spa_t *spa = vd->vdev_spa; 1043 1044 list_create(&stack, sizeof (remap_segment_t), 1045 offsetof(remap_segment_t, rs_node)); 1046 1047 for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0); 1048 rs != NULL; rs = list_remove_head(&stack)) { 1049 vdev_t *v = rs->rs_vd; 1050 uint64_t num_entries = 0; 1051 1052 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1053 ASSERT(rs->rs_asize > 0); 1054 1055 /* 1056 * Note: As this function can be called from open context 1057 * (e.g. zio_read()), we need the following rwlock to 1058 * prevent the mapping from being changed by condensing. 1059 * 1060 * So we grab the lock and we make a copy of the entries 1061 * that are relevant to the extent that we are working on. 1062 * Once that is done, we drop the lock and iterate over 1063 * our copy of the mapping. Once we are done with the with 1064 * the remap segment and we free it, we also free our copy 1065 * of the indirect mapping entries that are relevant to it. 1066 * 1067 * This way we don't need to wait until the function is 1068 * finished with a segment, to condense it. In addition, we 1069 * don't need a recursive rwlock for the case that a call to 1070 * vdev_indirect_remap() needs to call itself (through the 1071 * codepath of its callback) for the same vdev in the middle 1072 * of its execution. 1073 */ 1074 rw_enter(&v->vdev_indirect_rwlock, RW_READER); 1075 vdev_indirect_mapping_t *vim = v->vdev_indirect_mapping; 1076 ASSERT3P(vim, !=, NULL); 1077 1078 vdev_indirect_mapping_entry_phys_t *mapping = 1079 vdev_indirect_mapping_duplicate_adjacent_entries(v, 1080 rs->rs_offset, rs->rs_asize, &num_entries); 1081 ASSERT3P(mapping, !=, NULL); 1082 ASSERT3U(num_entries, >, 0); 1083 rw_exit(&v->vdev_indirect_rwlock); 1084 1085 for (uint64_t i = 0; i < num_entries; i++) { 1086 /* 1087 * Note: the vdev_indirect_mapping can not change 1088 * while we are running. It only changes while the 1089 * removal is in progress, and then only from syncing 1090 * context. While a removal is in progress, this 1091 * function is only called for frees, which also only 1092 * happen from syncing context. 1093 */ 1094 vdev_indirect_mapping_entry_phys_t *m = &mapping[i]; 1095 1096 ASSERT3P(m, !=, NULL); 1097 ASSERT3U(rs->rs_asize, >, 0); 1098 1099 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst); 1100 uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst); 1101 uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst); 1102 1103 ASSERT3U(rs->rs_offset, >=, 1104 DVA_MAPPING_GET_SRC_OFFSET(m)); 1105 ASSERT3U(rs->rs_offset, <, 1106 DVA_MAPPING_GET_SRC_OFFSET(m) + size); 1107 ASSERT3U(dst_vdev, !=, v->vdev_id); 1108 1109 uint64_t inner_offset = rs->rs_offset - 1110 DVA_MAPPING_GET_SRC_OFFSET(m); 1111 uint64_t inner_size = 1112 MIN(rs->rs_asize, size - inner_offset); 1113 1114 vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev); 1115 ASSERT3P(dst_v, !=, NULL); 1116 1117 if (dst_v->vdev_ops == &vdev_indirect_ops) { 1118 list_insert_head(&stack, 1119 rs_alloc(dst_v, dst_offset + inner_offset, 1120 inner_size, rs->rs_split_offset)); 1121 1122 } 1123 1124 if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) && 1125 IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) { 1126 /* 1127 * Note: This clause exists only solely for 1128 * testing purposes. We use it to ensure that 1129 * split blocks work and that the callbacks 1130 * using them yield the same result if issued 1131 * in reverse order. 1132 */ 1133 uint64_t inner_half = inner_size / 2; 1134 1135 func(rs->rs_split_offset + inner_half, dst_v, 1136 dst_offset + inner_offset + inner_half, 1137 inner_half, arg); 1138 1139 func(rs->rs_split_offset, dst_v, 1140 dst_offset + inner_offset, 1141 inner_half, arg); 1142 } else { 1143 func(rs->rs_split_offset, dst_v, 1144 dst_offset + inner_offset, 1145 inner_size, arg); 1146 } 1147 1148 rs->rs_offset += inner_size; 1149 rs->rs_asize -= inner_size; 1150 rs->rs_split_offset += inner_size; 1151 } 1152 VERIFY0(rs->rs_asize); 1153 1154 kmem_free(mapping, num_entries * sizeof (*mapping)); 1155 kmem_free(rs, sizeof (remap_segment_t)); 1156 } 1157 list_destroy(&stack); 1158 } 1159 1160 static void 1161 vdev_indirect_child_io_done(zio_t *zio) 1162 { 1163 zio_t *pio = zio->io_private; 1164 1165 mutex_enter(&pio->io_lock); 1166 pio->io_error = zio_worst_error(pio->io_error, zio->io_error); 1167 mutex_exit(&pio->io_lock); 1168 1169 abd_put(zio->io_abd); 1170 } 1171 1172 /* 1173 * This is a callback for vdev_indirect_remap() which allocates an 1174 * indirect_split_t for each split segment and adds it to iv_splits. 1175 */ 1176 static void 1177 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset, 1178 uint64_t size, void *arg) 1179 { 1180 zio_t *zio = arg; 1181 indirect_vsd_t *iv = zio->io_vsd; 1182 1183 ASSERT3P(vd, !=, NULL); 1184 1185 if (vd->vdev_ops == &vdev_indirect_ops) 1186 return; 1187 1188 int n = 1; 1189 if (vd->vdev_ops == &vdev_mirror_ops) 1190 n = vd->vdev_children; 1191 1192 indirect_split_t *is = 1193 kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP); 1194 1195 is->is_children = n; 1196 is->is_size = size; 1197 is->is_split_offset = split_offset; 1198 is->is_target_offset = offset; 1199 is->is_vdev = vd; 1200 list_create(&is->is_unique_child, sizeof (indirect_child_t), 1201 offsetof(indirect_child_t, ic_node)); 1202 1203 /* 1204 * Note that we only consider multiple copies of the data for 1205 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even 1206 * though they use the same ops as mirror, because there's only one 1207 * "good" copy under the replacing/spare. 1208 */ 1209 if (vd->vdev_ops == &vdev_mirror_ops) { 1210 for (int i = 0; i < n; i++) { 1211 is->is_child[i].ic_vdev = vd->vdev_child[i]; 1212 list_link_init(&is->is_child[i].ic_node); 1213 } 1214 } else { 1215 is->is_child[0].ic_vdev = vd; 1216 } 1217 1218 list_insert_tail(&iv->iv_splits, is); 1219 } 1220 1221 static void 1222 vdev_indirect_read_split_done(zio_t *zio) 1223 { 1224 indirect_child_t *ic = zio->io_private; 1225 1226 if (zio->io_error != 0) { 1227 /* 1228 * Clear ic_data to indicate that we do not have data for this 1229 * child. 1230 */ 1231 abd_free(ic->ic_data); 1232 ic->ic_data = NULL; 1233 } 1234 } 1235 1236 /* 1237 * Issue reads for all copies (mirror children) of all splits. 1238 */ 1239 static void 1240 vdev_indirect_read_all(zio_t *zio) 1241 { 1242 indirect_vsd_t *iv = zio->io_vsd; 1243 1244 for (indirect_split_t *is = list_head(&iv->iv_splits); 1245 is != NULL; is = list_next(&iv->iv_splits, is)) { 1246 for (int i = 0; i < is->is_children; i++) { 1247 indirect_child_t *ic = &is->is_child[i]; 1248 1249 if (!vdev_readable(ic->ic_vdev)) 1250 continue; 1251 1252 /* 1253 * Note, we may read from a child whose DTL 1254 * indicates that the data may not be present here. 1255 * While this might result in a few i/os that will 1256 * likely return incorrect data, it simplifies the 1257 * code since we can treat scrub and resilver 1258 * identically. (The incorrect data will be 1259 * detected and ignored when we verify the 1260 * checksum.) 1261 */ 1262 1263 ic->ic_data = abd_alloc_sametype(zio->io_abd, 1264 is->is_size); 1265 ic->ic_duplicate = NULL; 1266 1267 zio_nowait(zio_vdev_child_io(zio, NULL, 1268 ic->ic_vdev, is->is_target_offset, ic->ic_data, 1269 is->is_size, zio->io_type, zio->io_priority, 0, 1270 vdev_indirect_read_split_done, ic)); 1271 } 1272 } 1273 iv->iv_reconstruct = B_TRUE; 1274 } 1275 1276 static void 1277 vdev_indirect_io_start(zio_t *zio) 1278 { 1279 spa_t *spa = zio->io_spa; 1280 indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP); 1281 list_create(&iv->iv_splits, 1282 sizeof (indirect_split_t), offsetof(indirect_split_t, is_node)); 1283 1284 zio->io_vsd = iv; 1285 zio->io_vsd_ops = &vdev_indirect_vsd_ops; 1286 1287 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1288 if (zio->io_type != ZIO_TYPE_READ) { 1289 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 1290 /* 1291 * Note: this code can handle other kinds of writes, 1292 * but we don't expect them. 1293 */ 1294 ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL | 1295 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0); 1296 } 1297 1298 vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size, 1299 vdev_indirect_gather_splits, zio); 1300 1301 indirect_split_t *first = list_head(&iv->iv_splits); 1302 if (first->is_size == zio->io_size) { 1303 /* 1304 * This is not a split block; we are pointing to the entire 1305 * data, which will checksum the same as the original data. 1306 * Pass the BP down so that the child i/o can verify the 1307 * checksum, and try a different location if available 1308 * (e.g. on a mirror). 1309 * 1310 * While this special case could be handled the same as the 1311 * general (split block) case, doing it this way ensures 1312 * that the vast majority of blocks on indirect vdevs 1313 * (which are not split) are handled identically to blocks 1314 * on non-indirect vdevs. This allows us to be less strict 1315 * about performance in the general (but rare) case. 1316 */ 1317 ASSERT0(first->is_split_offset); 1318 ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL); 1319 zio_nowait(zio_vdev_child_io(zio, zio->io_bp, 1320 first->is_vdev, first->is_target_offset, 1321 abd_get_offset(zio->io_abd, 0), 1322 zio->io_size, zio->io_type, zio->io_priority, 0, 1323 vdev_indirect_child_io_done, zio)); 1324 } else { 1325 iv->iv_split_block = B_TRUE; 1326 if (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) { 1327 /* 1328 * Read all copies. Note that for simplicity, 1329 * we don't bother consulting the DTL in the 1330 * resilver case. 1331 */ 1332 vdev_indirect_read_all(zio); 1333 } else { 1334 /* 1335 * Read one copy of each split segment, from the 1336 * top-level vdev. Since we don't know the 1337 * checksum of each split individually, the child 1338 * zio can't ensure that we get the right data. 1339 * E.g. if it's a mirror, it will just read from a 1340 * random (healthy) leaf vdev. We have to verify 1341 * the checksum in vdev_indirect_io_done(). 1342 */ 1343 for (indirect_split_t *is = list_head(&iv->iv_splits); 1344 is != NULL; is = list_next(&iv->iv_splits, is)) { 1345 zio_nowait(zio_vdev_child_io(zio, NULL, 1346 is->is_vdev, is->is_target_offset, 1347 abd_get_offset(zio->io_abd, 1348 is->is_split_offset), 1349 is->is_size, zio->io_type, 1350 zio->io_priority, 0, 1351 vdev_indirect_child_io_done, zio)); 1352 } 1353 } 1354 } 1355 1356 zio_execute(zio); 1357 } 1358 1359 /* 1360 * Report a checksum error for a child. 1361 */ 1362 static void 1363 vdev_indirect_checksum_error(zio_t *zio, 1364 indirect_split_t *is, indirect_child_t *ic) 1365 { 1366 vdev_t *vd = ic->ic_vdev; 1367 1368 if (zio->io_flags & ZIO_FLAG_SPECULATIVE) 1369 return; 1370 1371 mutex_enter(&vd->vdev_stat_lock); 1372 vd->vdev_stat.vs_checksum_errors++; 1373 mutex_exit(&vd->vdev_stat_lock); 1374 1375 zio_bad_cksum_t zbc = { 0 }; 1376 void *bad_buf = abd_borrow_buf_copy(ic->ic_data, is->is_size); 1377 abd_t *good_abd = is->is_good_child->ic_data; 1378 void *good_buf = abd_borrow_buf_copy(good_abd, is->is_size); 1379 zfs_ereport_post_checksum(zio->io_spa, vd, zio, 1380 is->is_target_offset, is->is_size, good_buf, bad_buf, &zbc); 1381 abd_return_buf(ic->ic_data, bad_buf, is->is_size); 1382 abd_return_buf(good_abd, good_buf, is->is_size); 1383 } 1384 1385 /* 1386 * Issue repair i/os for any incorrect copies. We do this by comparing 1387 * each split segment's correct data (is_good_child's ic_data) with each 1388 * other copy of the data. If they differ, then we overwrite the bad data 1389 * with the good copy. Note that we do this without regard for the DTL's, 1390 * which simplifies this code and also issues the optimal number of writes 1391 * (based on which copies actually read bad data, as opposed to which we 1392 * think might be wrong). For the same reason, we always use 1393 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start(). 1394 */ 1395 static void 1396 vdev_indirect_repair(zio_t *zio) 1397 { 1398 indirect_vsd_t *iv = zio->io_vsd; 1399 1400 enum zio_flag flags = ZIO_FLAG_IO_REPAIR; 1401 1402 if (!(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) 1403 flags |= ZIO_FLAG_SELF_HEAL; 1404 1405 if (!spa_writeable(zio->io_spa)) 1406 return; 1407 1408 for (indirect_split_t *is = list_head(&iv->iv_splits); 1409 is != NULL; is = list_next(&iv->iv_splits, is)) { 1410 for (int c = 0; c < is->is_children; c++) { 1411 indirect_child_t *ic = &is->is_child[c]; 1412 if (ic == is->is_good_child) 1413 continue; 1414 if (ic->ic_data == NULL) 1415 continue; 1416 if (ic->ic_duplicate == is->is_good_child) 1417 continue; 1418 1419 zio_nowait(zio_vdev_child_io(zio, NULL, 1420 ic->ic_vdev, is->is_target_offset, 1421 is->is_good_child->ic_data, is->is_size, 1422 ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE, 1423 ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL, 1424 NULL, NULL)); 1425 1426 vdev_indirect_checksum_error(zio, is, ic); 1427 } 1428 } 1429 } 1430 1431 /* 1432 * Report checksum errors on all children that we read from. 1433 */ 1434 static void 1435 vdev_indirect_all_checksum_errors(zio_t *zio) 1436 { 1437 indirect_vsd_t *iv = zio->io_vsd; 1438 1439 if (zio->io_flags & ZIO_FLAG_SPECULATIVE) 1440 return; 1441 1442 for (indirect_split_t *is = list_head(&iv->iv_splits); 1443 is != NULL; is = list_next(&iv->iv_splits, is)) { 1444 for (int c = 0; c < is->is_children; c++) { 1445 indirect_child_t *ic = &is->is_child[c]; 1446 1447 if (ic->ic_data == NULL) 1448 continue; 1449 1450 vdev_t *vd = ic->ic_vdev; 1451 1452 mutex_enter(&vd->vdev_stat_lock); 1453 vd->vdev_stat.vs_checksum_errors++; 1454 mutex_exit(&vd->vdev_stat_lock); 1455 1456 zfs_ereport_post_checksum(zio->io_spa, vd, zio, 1457 is->is_target_offset, is->is_size, 1458 NULL, NULL, NULL); 1459 } 1460 } 1461 } 1462 1463 /* 1464 * Copy data from all the splits to a main zio then validate the checksum. 1465 * If then checksum is successfully validated return success. 1466 */ 1467 static int 1468 vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio) 1469 { 1470 zio_bad_cksum_t zbc; 1471 1472 for (indirect_split_t *is = list_head(&iv->iv_splits); 1473 is != NULL; is = list_next(&iv->iv_splits, is)) { 1474 1475 ASSERT3P(is->is_good_child->ic_data, !=, NULL); 1476 ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL); 1477 1478 abd_copy_off(zio->io_abd, is->is_good_child->ic_data, 1479 is->is_split_offset, 0, is->is_size); 1480 } 1481 1482 return (zio_checksum_error(zio, &zbc)); 1483 } 1484 1485 /* 1486 * There are relatively few possible combinations making it feasible to 1487 * deterministically check them all. We do this by setting the good_child 1488 * to the next unique split version. If we reach the end of the list then 1489 * "carry over" to the next unique split version (like counting in base 1490 * is_unique_children, but each digit can have a different base). 1491 */ 1492 static int 1493 vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio) 1494 { 1495 boolean_t more = B_TRUE; 1496 1497 iv->iv_attempts = 0; 1498 1499 for (indirect_split_t *is = list_head(&iv->iv_splits); 1500 is != NULL; is = list_next(&iv->iv_splits, is)) 1501 is->is_good_child = list_head(&is->is_unique_child); 1502 1503 while (more == B_TRUE) { 1504 iv->iv_attempts++; 1505 more = B_FALSE; 1506 1507 if (vdev_indirect_splits_checksum_validate(iv, zio) == 0) 1508 return (0); 1509 1510 for (indirect_split_t *is = list_head(&iv->iv_splits); 1511 is != NULL; is = list_next(&iv->iv_splits, is)) { 1512 is->is_good_child = list_next(&is->is_unique_child, 1513 is->is_good_child); 1514 if (is->is_good_child != NULL) { 1515 more = B_TRUE; 1516 break; 1517 } 1518 1519 is->is_good_child = list_head(&is->is_unique_child); 1520 } 1521 } 1522 1523 ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations); 1524 1525 return (SET_ERROR(ECKSUM)); 1526 } 1527 1528 /* 1529 * There are too many combinations to try all of them in a reasonable amount 1530 * of time. So try a fixed number of random combinations from the unique 1531 * split versions, after which we'll consider the block unrecoverable. 1532 */ 1533 static int 1534 vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio) 1535 { 1536 iv->iv_attempts = 0; 1537 1538 while (iv->iv_attempts < iv->iv_attempts_max) { 1539 iv->iv_attempts++; 1540 1541 for (indirect_split_t *is = list_head(&iv->iv_splits); 1542 is != NULL; is = list_next(&iv->iv_splits, is)) { 1543 indirect_child_t *ic = list_head(&is->is_unique_child); 1544 int children = is->is_unique_children; 1545 1546 for (int i = spa_get_random(children); i > 0; i--) 1547 ic = list_next(&is->is_unique_child, ic); 1548 1549 ASSERT3P(ic, !=, NULL); 1550 is->is_good_child = ic; 1551 } 1552 1553 if (vdev_indirect_splits_checksum_validate(iv, zio) == 0) 1554 return (0); 1555 } 1556 1557 return (SET_ERROR(ECKSUM)); 1558 } 1559 1560 /* 1561 * This is a validation function for reconstruction. It randomly selects 1562 * a good combination, if one can be found, and then it intentionally 1563 * damages all other segment copes by zeroing them. This forces the 1564 * reconstruction algorithm to locate the one remaining known good copy. 1565 */ 1566 static int 1567 vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio) 1568 { 1569 /* Presume all the copies are unique for initial selection. */ 1570 for (indirect_split_t *is = list_head(&iv->iv_splits); 1571 is != NULL; is = list_next(&iv->iv_splits, is)) { 1572 is->is_unique_children = 0; 1573 1574 for (int i = 0; i < is->is_children; i++) { 1575 indirect_child_t *ic = &is->is_child[i]; 1576 if (ic->ic_data != NULL) { 1577 is->is_unique_children++; 1578 list_insert_tail(&is->is_unique_child, ic); 1579 } 1580 } 1581 } 1582 1583 /* 1584 * Set each is_good_child to a randomly-selected child which 1585 * is known to contain validated data. 1586 */ 1587 int error = vdev_indirect_splits_enumerate_randomly(iv, zio); 1588 if (error) 1589 goto out; 1590 1591 /* 1592 * Damage all but the known good copy by zeroing it. This will 1593 * result in two or less unique copies per indirect_child_t. 1594 * Both may need to be checked in order to reconstruct the block. 1595 * Set iv->iv_attempts_max such that all unique combinations will 1596 * enumerated, but limit the damage to at most 16 indirect splits. 1597 */ 1598 iv->iv_attempts_max = 1; 1599 1600 for (indirect_split_t *is = list_head(&iv->iv_splits); 1601 is != NULL; is = list_next(&iv->iv_splits, is)) { 1602 for (int c = 0; c < is->is_children; c++) { 1603 indirect_child_t *ic = &is->is_child[c]; 1604 1605 if (ic == is->is_good_child) 1606 continue; 1607 if (ic->ic_data == NULL) 1608 continue; 1609 1610 abd_zero(ic->ic_data, ic->ic_data->abd_size); 1611 } 1612 1613 iv->iv_attempts_max *= 2; 1614 if (iv->iv_attempts_max > (1ULL << 16)) { 1615 iv->iv_attempts_max = UINT64_MAX; 1616 break; 1617 } 1618 } 1619 1620 out: 1621 /* Empty the unique children lists so they can be reconstructed. */ 1622 for (indirect_split_t *is = list_head(&iv->iv_splits); 1623 is != NULL; is = list_next(&iv->iv_splits, is)) { 1624 indirect_child_t *ic; 1625 while ((ic = list_head(&is->is_unique_child)) != NULL) 1626 list_remove(&is->is_unique_child, ic); 1627 1628 is->is_unique_children = 0; 1629 } 1630 1631 return (error); 1632 } 1633 1634 /* 1635 * This function is called when we have read all copies of the data and need 1636 * to try to find a combination of copies that gives us the right checksum. 1637 * 1638 * If we pointed to any mirror vdevs, this effectively does the job of the 1639 * mirror. The mirror vdev code can't do its own job because we don't know 1640 * the checksum of each split segment individually. 1641 * 1642 * We have to try every unique combination of copies of split segments, until 1643 * we find one that checksums correctly. Duplicate segment copies are first 1644 * identified and latter skipped during reconstruction. This optimization 1645 * reduces the search space and ensures that of the remaining combinations 1646 * at most one is correct. 1647 * 1648 * When the total number of combinations is small they can all be checked. 1649 * For example, if we have 3 segments in the split, and each points to a 1650 * 2-way mirror with unique copies, we will have the following pieces of data: 1651 * 1652 * | mirror child 1653 * split | [0] [1] 1654 * ======|===================== 1655 * A | data_A_0 data_A_1 1656 * B | data_B_0 data_B_1 1657 * C | data_C_0 data_C_1 1658 * 1659 * We will try the following (mirror children)^(number of splits) (2^3=8) 1660 * combinations, which is similar to bitwise-little-endian counting in 1661 * binary. In general each "digit" corresponds to a split segment, and the 1662 * base of each digit is is_children, which can be different for each 1663 * digit. 1664 * 1665 * "low bit" "high bit" 1666 * v v 1667 * data_A_0 data_B_0 data_C_0 1668 * data_A_1 data_B_0 data_C_0 1669 * data_A_0 data_B_1 data_C_0 1670 * data_A_1 data_B_1 data_C_0 1671 * data_A_0 data_B_0 data_C_1 1672 * data_A_1 data_B_0 data_C_1 1673 * data_A_0 data_B_1 data_C_1 1674 * data_A_1 data_B_1 data_C_1 1675 * 1676 * Note that the split segments may be on the same or different top-level 1677 * vdevs. In either case, we may need to try lots of combinations (see 1678 * zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror 1679 * has small silent errors on all of its children, we can still reconstruct 1680 * the correct data, as long as those errors are at sufficiently-separated 1681 * offsets (specifically, separated by the largest block size - default of 1682 * 128KB, but up to 16MB). 1683 */ 1684 static void 1685 vdev_indirect_reconstruct_io_done(zio_t *zio) 1686 { 1687 indirect_vsd_t *iv = zio->io_vsd; 1688 boolean_t known_good = B_FALSE; 1689 int error; 1690 1691 iv->iv_unique_combinations = 1; 1692 iv->iv_attempts_max = UINT64_MAX; 1693 1694 if (zfs_reconstruct_indirect_combinations_max > 0) 1695 iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max; 1696 1697 /* 1698 * If nonzero, every 1/x blocks will be damaged, in order to validate 1699 * reconstruction when there are split segments with damaged copies. 1700 * Known_good will TRUE when reconstruction is known to be possible. 1701 */ 1702 if (zfs_reconstruct_indirect_damage_fraction != 0 && 1703 spa_get_random(zfs_reconstruct_indirect_damage_fraction) == 0) 1704 known_good = (vdev_indirect_splits_damage(iv, zio) == 0); 1705 1706 /* 1707 * Determine the unique children for a split segment and add them 1708 * to the is_unique_child list. By restricting reconstruction 1709 * to these children, only unique combinations will be considered. 1710 * This can vastly reduce the search space when there are a large 1711 * number of indirect splits. 1712 */ 1713 for (indirect_split_t *is = list_head(&iv->iv_splits); 1714 is != NULL; is = list_next(&iv->iv_splits, is)) { 1715 is->is_unique_children = 0; 1716 1717 for (int i = 0; i < is->is_children; i++) { 1718 indirect_child_t *ic_i = &is->is_child[i]; 1719 1720 if (ic_i->ic_data == NULL || 1721 ic_i->ic_duplicate != NULL) 1722 continue; 1723 1724 for (int j = i + 1; j < is->is_children; j++) { 1725 indirect_child_t *ic_j = &is->is_child[j]; 1726 1727 if (ic_j->ic_data == NULL || 1728 ic_j->ic_duplicate != NULL) 1729 continue; 1730 1731 if (abd_cmp(ic_i->ic_data, ic_j->ic_data, 1732 is->is_size) == 0) { 1733 ic_j->ic_duplicate = ic_i; 1734 } 1735 } 1736 1737 is->is_unique_children++; 1738 list_insert_tail(&is->is_unique_child, ic_i); 1739 } 1740 1741 /* Reconstruction is impossible, no valid children */ 1742 EQUIV(list_is_empty(&is->is_unique_child), 1743 is->is_unique_children == 0); 1744 if (list_is_empty(&is->is_unique_child)) { 1745 zio->io_error = EIO; 1746 vdev_indirect_all_checksum_errors(zio); 1747 zio_checksum_verified(zio); 1748 return; 1749 } 1750 1751 iv->iv_unique_combinations *= is->is_unique_children; 1752 } 1753 1754 if (iv->iv_unique_combinations <= iv->iv_attempts_max) 1755 error = vdev_indirect_splits_enumerate_all(iv, zio); 1756 else 1757 error = vdev_indirect_splits_enumerate_randomly(iv, zio); 1758 1759 if (error != 0) { 1760 /* All attempted combinations failed. */ 1761 ASSERT3B(known_good, ==, B_FALSE); 1762 zio->io_error = error; 1763 vdev_indirect_all_checksum_errors(zio); 1764 } else { 1765 /* 1766 * The checksum has been successfully validated. Issue 1767 * repair I/Os to any copies of splits which don't match 1768 * the validated version. 1769 */ 1770 ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio)); 1771 vdev_indirect_repair(zio); 1772 zio_checksum_verified(zio); 1773 } 1774 } 1775 1776 static void 1777 vdev_indirect_io_done(zio_t *zio) 1778 { 1779 indirect_vsd_t *iv = zio->io_vsd; 1780 1781 if (iv->iv_reconstruct) { 1782 /* 1783 * We have read all copies of the data (e.g. from mirrors), 1784 * either because this was a scrub/resilver, or because the 1785 * one-copy read didn't checksum correctly. 1786 */ 1787 vdev_indirect_reconstruct_io_done(zio); 1788 return; 1789 } 1790 1791 if (!iv->iv_split_block) { 1792 /* 1793 * This was not a split block, so we passed the BP down, 1794 * and the checksum was handled by the (one) child zio. 1795 */ 1796 return; 1797 } 1798 1799 zio_bad_cksum_t zbc; 1800 int ret = zio_checksum_error(zio, &zbc); 1801 if (ret == 0) { 1802 zio_checksum_verified(zio); 1803 return; 1804 } 1805 1806 /* 1807 * The checksum didn't match. Read all copies of all splits, and 1808 * then we will try to reconstruct. The next time 1809 * vdev_indirect_io_done() is called, iv_reconstruct will be set. 1810 */ 1811 vdev_indirect_read_all(zio); 1812 1813 zio_vdev_io_redone(zio); 1814 } 1815 1816 vdev_ops_t vdev_indirect_ops = { 1817 vdev_indirect_open, 1818 vdev_indirect_close, 1819 vdev_default_asize, 1820 vdev_indirect_io_start, 1821 vdev_indirect_io_done, 1822 NULL, 1823 NULL, 1824 NULL, 1825 vdev_indirect_remap, 1826 NULL, 1827 VDEV_TYPE_INDIRECT, /* name of this vdev type */ 1828 B_FALSE /* leaf vdev */ 1829 }; 1830