/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/*
 * Grand theory statement on scan queue sorting
 *
 * Scanning is implemented by recursively traversing all indirection levels
 * in an object and reading all blocks referenced from said objects. This
 * results in us approximately traversing the object from lowest logical
 * offset to the highest. For best performance, we would want the logical
 * blocks to be physically contiguous. However, this is frequently not the
 * case with pools given the allocation patterns of copy-on-write filesystems.
 * So instead, we put the I/Os into a reordering queue and issue them in a
 * way that will most benefit physical disks (LBA-order).
 *
 * Queue management:
 *
 * Ideally, we would want to scan all metadata and queue up all block I/O
 * prior to starting to issue it, because that allows us to do an optimal
 * sorting job. This can however consume large amounts of memory. Therefore
 * we continuously monitor the size of the queues and constrain them to 5%
 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
 * limit, we clear out a few of the largest extents at the head of the queues
 * to make room for more scanning. Hopefully, these extents will be fairly
 * large and contiguous, allowing us to approach sequential I/O throughput
 * even without a fully sorted tree.
 *
 * Metadata scanning takes place in dsl_scan_visit(), which is called from
 * dsl_scan_sync() every spa_sync().
 * If we have either fully scanned all
 * metadata on the pool, or we need to make room in memory because our
 * queues are too large, dsl_scan_visit() is postponed and
 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
 * that metadata scanning and queued I/O issuing are mutually exclusive. This
 * allows us to provide maximum sequential I/O throughput for the majority of
 * I/Os issued since sequential I/O performance is significantly negatively
 * impacted if it is interleaved with random I/O.
 *
 * Implementation Notes
 *
 * One side effect of the queued scanning algorithm is that the scanning code
 * needs to be notified whenever a block is freed. This is needed to allow
 * the scanning code to remove these I/Os from the issuing queue. Additionally,
 * we do not attempt to queue gang blocks to be issued sequentially since this
 * is very hard to do and would have an extremely limited performance benefit.
 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
 * algorithm.
 *
 * Backwards compatibility
 *
 * This new algorithm is backwards compatible with the legacy on-disk data
 * structures (and therefore does not require a new feature flag).
 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
 * will stop scanning metadata (in logical order) and wait for all outstanding
 * sorted I/O to complete. Once this is done, we write out a checkpoint
 * bookmark, indicating that we have scanned everything logically before it.
 * If the pool is imported on a machine without the new sorting algorithm,
 * the scan simply resumes from the last checkpoint using the legacy algorithm.
 */

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;

static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
    uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);

extern int zfs_vdev_async_write_active_min_dirty_percent;

/*
 * By default zfs will check to ensure it is not over the hard memory
 * limit before each txg. If finer-grained control of this is needed
 * this value can be set to 1 to enable checking before scanning each
 * block.
 */
int zfs_scan_strict_mem_lim = B_FALSE;

/*
 * Maximum number of concurrently executing I/Os per top-level vdev.
 * Tune with care. Very high settings (hundreds) are known to trigger
 * some firmware bugs and resets on certain SSDs.
 */
int zfs_top_maxinflight = 32;		/* maximum I/Os per top-level */
unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */

/*
 * Maximum number of concurrently executed bytes per leaf vdev. We attempt
 * to strike a balance here between keeping the vdev queues full of I/Os
 * at all times and not overflowing the queues to cause long latency,
 * which would cause long txg sync times.
 * No matter what, we will not
 * overload the drives with I/O, since that is protected by
 * zfs_vdev_scrub_max_active.
 */
unsigned long zfs_scan_vdev_limit = 4 << 20;

int zfs_scan_issue_strategy = 0;
int zfs_scan_legacy = B_FALSE;	/* don't queue & sort zios, go direct */
uint64_t zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */

unsigned int zfs_scan_checkpoint_intval = 7200;	/* seconds */
#define	ZFS_SCAN_CHECKPOINT_INTVAL	SEC_TO_TICK(zfs_scan_checkpoint_intval)

/*
 * fill_weight is non-tunable at runtime, so we copy it at module init from
 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
 * break queue sorting.
 */
uint64_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;

/* See dsl_scan_should_clear() for details on the memory limit tunables */
uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */

unsigned int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */
unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
/* min millisecs to obsolete per txg */
unsigned int zfs_obsolete_min_time_ms = 500;
/* min millisecs to resilver per txg */
unsigned int zfs_resilver_min_time_ms = 3000;
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
uint64_t zfs_async_block_max_blocks = UINT64_MAX;

/*
 * We wait a few txgs after importing a pool to begin scanning so that
 * the import / mounting code isn't held up by scrub / resilver IO.
 * Unfortunately, it is a bit difficult to determine exactly how long
 * this will take since userspace will trigger fs mounts asynchronously
 * and the kernel will create zvol minors asynchronously. As a result,
 * the value provided here is a bit arbitrary, but represents a
 * reasonable estimate of how many txgs it will take to finish fully
 * importing a pool
 */
#define	SCAN_IMPORT_WAIT_TXGS		5


#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/*
 * Enable/disable the processing of the free_bpobj object.
 */
boolean_t zfs_free_bpobj_enabled = B_TRUE;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
	uint64_t	sds_dsobj;
	uint64_t	sds_txg;
	avl_node_t	sds_node;
} scan_ds_t;

/*
 * This controls what conditions are placed on dsl_scan_sync_state():
 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
 *	write out the scn_phys_cached version.
 * See dsl_scan_sync_state for details.
 */
typedef enum {
	SYNC_OPTIONAL,
	SYNC_MANDATORY,
	SYNC_CACHED
} state_sync_type_t;

/*
 * This struct represents the minimum information needed to reconstruct a
 * zio for sequential scanning. This is useful because many of these will
 * accumulate in the sequential IO queues before being issued, so saving
 * memory matters here.
 */
typedef struct scan_io {
	/* fields from blkptr_t */
	uint64_t		sio_offset;
	uint64_t		sio_blk_prop;
	uint64_t		sio_phys_birth;
	uint64_t		sio_birth;
	zio_cksum_t		sio_cksum;
	uint32_t		sio_asize;

	/* fields from zio_t */
	int			sio_flags;
	zbookmark_phys_t	sio_zb;

	/* members for queue sorting */
	union {
		avl_node_t	sio_addr_node;	/* link into issuing queue */
		list_node_t	sio_list_node;	/* link for issuing to disk */
	} sio_nodes;
} scan_io_t;

struct dsl_scan_io_queue {
	dsl_scan_t	*q_scn;	/* associated dsl_scan_t */
	vdev_t		*q_vd;	/* top-level vdev that this queue represents */

	/* trees used for sorting I/Os and extents of I/Os */
	range_tree_t	*q_exts_by_addr;
	avl_tree_t	q_exts_by_size;
	avl_tree_t	q_sios_by_addr;

	/* members for zio rate limiting */
	uint64_t	q_maxinflight_bytes;
	uint64_t	q_inflight_bytes;
	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */

	/* per txg statistics */
	uint64_t	q_total_seg_size_this_txg;
	uint64_t	q_segs_this_txg;
	uint64_t	q_total_zio_size_this_txg;
	uint64_t	q_zios_this_txg;
};

/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
	boolean_t spc_root;		/* is this prefetch for an objset? */
	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
	uint16_t spc_datablkszsec;	/* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;

/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
	blkptr_t spic_bp;		/* bp to prefetch */
	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;

static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
    scan_io_t *sio);

static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);

static kmem_cache_t	*sio_cache;

void
scan_init(void)
{
	/*
	 * This is used in ext_size_compare() to weight segments
	 * based on how sparse they are. This cannot be changed
	 * mid-scan and the tree comparison functions don't currently
	 * have a mechanism for passing additional context to the
	 * compare functions.
	 * Thus we store this value globally and
	 * we only allow it to be set at module initialization time.
	 */
	fill_weight = zfs_scan_fill_weight;

	sio_cache = kmem_cache_create("sio_cache",
	    sizeof (scan_io_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
scan_fini(void)
{
	kmem_cache_destroy(sio_cache);
}

static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
	return (scn->scn_phys.scn_state == DSS_SCANNING);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dsl_scan_is_running(dp->dp_scan) &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp, uint64_t vdev_id)
{
	bzero(bp, sizeof (*bp));
	DVA_SET_ASIZE(&bp->blk_dva[0], sio->sio_asize);
	DVA_SET_VDEV(&bp->blk_dva[0], vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], sio->sio_offset);
	bp->blk_prop = sio->sio_blk_prop;
	bp->blk_phys_birth = sio->sio_phys_birth;
	bp->blk_birth = sio->sio_birth;
	bp->blk_fill = 1;	/* we always only work with data pointers */
	bp->blk_cksum = sio->sio_cksum;
}

static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
	/* we discard the vdev id, since we can deduce it from the queue */
	sio->sio_offset = DVA_GET_OFFSET(&bp->blk_dva[dva_i]);
	sio->sio_asize = DVA_GET_ASIZE(&bp->blk_dva[dva_i]);
	sio->sio_blk_prop = bp->blk_prop;
	sio->sio_phys_birth = bp->blk_phys_birth;
	sio->sio_birth = bp->blk_birth;
	sio->sio_cksum = bp->blk_cksum;
}

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
	    offsetof(scan_ds_t, sds_node));
	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
	    sizeof (scan_prefetch_issue_ctx_t),
	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress. Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    (longlong_t)scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		/*
		 * We might be restarting after a reboot, so jump the issued
		 * counter to how far we've scanned.
		 * We know we're consistent
		 * up to here.
		 */
		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;

		if (dsl_scan_is_running(scn) &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software. Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    (longlong_t)scn->scn_restart_txg);
		}
	}

	/* reload the queue into the in-core state */
	if (scn->scn_phys.scn_queue_obj != 0) {
		zap_cursor_t zc;
		zap_attribute_t za;

		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    (void) zap_cursor_advance(&zc)) {
			scan_ds_queue_insert(scn,
			    zfs_strtonum(za.za_name, NULL),
			    za.za_first_integer);
		}
		zap_cursor_fini(&zc);
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan != NULL) {
		dsl_scan_t *scn = dp->dp_scan;

		if (scn->scn_taskq != NULL)
			taskq_destroy(scn->scn_taskq);
		scan_ds_queue_clear(scn);
		avl_destroy(&scn->scn_queue);
		avl_destroy(&scn->scn_prefetch_queue);

		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;

	return (scn_phys->scn_state == DSS_SCANNING &&
	    scn_phys->scn_func == POOL_SCAN_SCRUB);
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	return (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}

/*
 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
 * Because we can be running in the block sorting algorithm, we do not always
 * want to write out the record, only when it is "safe" to do so. This safety
 * condition is achieved by making sure that the sorting queues are empty
 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
 * is inconsistent with how much actual scanning progress has been made. The
 * kind of sync to be performed is specified by the sync_type argument. If the
 * sync is optional, we only sync if the queues are empty. If the sync is
 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
 * third possible state is a "cached" sync. This is done in response to:
 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	destroyed, so we wouldn't be able to restart scanning from it.
 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
 *	superseded by a newer snapshot.
 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
 *	swapped with its clone.
 * In all cases, a cached sync simply rewrites the last record we've written,
 * just slightly modified. For the modifications that are performed to the
 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
	int i;
	spa_t *spa = scn->scn_dp->dp_spa;

	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
	if (scn->scn_bytes_pending == 0) {
		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;

			if (q == NULL)
				continue;

			mutex_enter(&vd->vdev_scan_io_queue_lock);
			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
			ASSERT3P(avl_first(&q->q_exts_by_size), ==, NULL);
			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
			mutex_exit(&vd->vdev_scan_io_queue_lock);
		}

		if (scn->scn_phys.scn_queue_obj != 0)
			scan_ds_queue_sync(scn, tx);
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys, tx));
		bcopy(&scn->scn_phys, &scn->scn_phys_cached,
		    sizeof (scn->scn_phys));

		if (scn->scn_checkpointing)
			zfs_dbgmsg("finish scan checkpoint");

		scn->scn_checkpointing = B_FALSE;
		scn->scn_last_checkpoint = ddi_get_lbolt();
	} else if (sync_type == SYNC_CACHED) {
		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys_cached, tx));
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (dsl_scan_is_running(scn))
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(!dsl_scan_is_running(scn));
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_issued_before_pass = 0;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	scn->scn_last_checkpoint = 0;
	scn->scn_checkpointing = B_FALSE;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, NULL,
			    ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}
	bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));

	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

/*
 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
 * Can also be called to resume a paused scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices. We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context. The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (ECANCELED);
		}
		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY0(dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}
	scan_ds_queue_clear(scn);

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (!dsl_scan_is_running(scn)) {
		ASSERT(!scn->scn_is_sorted);
		return;
	}

	if (scn->scn_is_sorted) {
		scan_io_queues_destroy(scn);
		scn->scn_is_sorted = B_FALSE;

		if (scn->scn_taskq != NULL) {
			taskq_destroy(scn->scn_taskq);
			scn->scn_taskq = NULL;
		}
	}

	scn->scn_phys.scn_state = complete ?
	    DSS_FINISHED : DSS_CANCELED;

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", spa_get_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this. Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 *
		 * As the scrub does not currently support traversing
		 * data that have been freed but are part of a checkpoint,
		 * we don't mark the scrub as done in the DTLs as faults
		 * may still exist in those vdevs.
		 */
		if (complete &&
		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    scn->scn_phys.scn_max_txg, B_TRUE);

			spa_event_notify(spa, NULL, NULL,
			    scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		} else {
			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
			    0, B_TRUE);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	ASSERT(!dsl_scan_is_running(scn));
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (!dsl_scan_is_running(scn))
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		if (!dsl_scan_scrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused scrub */
		if (dsl_scan_is_paused_scrub(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* record when the scrub was paused and mark it paused */
		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
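		/* POOL_SCRUB_NORMAL resumes a scrub only if one is paused */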
		if (dsl_scan_is_paused_scrub(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the scrub rate
			 * shown in the output of 'zpool status'
			 */
			spa->spa_scan_pass_scrub_spent_paused +=
			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
			spa->spa_scan_pass_scrub_pause = 0;
			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
		}
	}
}

/*
 * Set scrub pause/resume state if it makes sense to do so
 */
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
	return (dsl_sync_task(spa_name(dp->dp_spa),
	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
	    ZFS_SPACE_CHECK_RESERVED));
}


/* start a new scan, or restart an existing one. */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static int
scan_ds_queue_compare(const void *a, const void *b)
{
	const scan_ds_t *sds_a = a, *sds_b = b;

	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
		return (-1);
	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
		return (0);
	return (1);
}

static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
	void *cookie = NULL;
	scan_ds_t *sds;
	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
		kmem_free(sds, sizeof (*sds));
	}
}

static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;
	sds = avl_find(&scn->scn_queue, &srch, NULL);
	if (sds != NULL && txg != NULL)
		*txg = sds->sds_txg;
	return (sds != NULL);
}

static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
	scan_ds_t *sds;
	avl_index_t where;

	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
	sds->sds_dsobj = dsobj;
	sds->sds_txg = txg;

	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
	avl_insert(&scn->scn_queue, sds, where);
}

static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
	scan_ds_t srch, *sds;

	srch.sds_dsobj = dsobj;

	sds = avl_find(&scn->scn_queue, &srch, NULL);
	VERIFY(sds != NULL);
	avl_remove(&scn->scn_queue, sds);
	kmem_free(sds, sizeof (*sds));
}

static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;

	ASSERT0(scn->scn_bytes_pending);
	ASSERT(scn->scn_phys.scn_queue_obj != 0);

	VERIFY0(dmu_object_free(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, tx));
	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
	    DMU_OT_NONE, 0, tx);
	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
		    sds->sds_txg, tx));
	}
}

/*
 * Computes the memory limit state that we're currently in. A sorted scan
 * needs quite a bit of memory to hold the sorting queue, so we need to
 * reasonably constrain the size so it doesn't impact overall system
 * performance. We compute two limits:
 * 1) Hard memory limit: if the amount of memory used by the sorting
 *	queues on a pool gets above this value, we stop the metadata
 *	scanning portion and start issuing the queued up and sorted
 *	I/Os to reduce memory usage.
 *	This limit is calculated as a fraction of physmem (by default 5%).
 *	We constrain the lower bound of the hard limit to an absolute
 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
 *	the upper bound to 5% of the total pool size - no chance we'll
 *	ever need that much memory, but just to keep the value in check.
 * 2) Soft memory limit: once we hit the hard memory limit, we start
 *	issuing I/O to reduce queue memory usage, but we don't want to
 *	completely empty out the queues, since we might be able to find I/Os
 *	that will fill in the gaps of our non-sequential IOs at some point
 *	in the future. So we stop the issuing of I/Os once the amount of
 *	memory used drops below the soft limit (at which point we stop issuing
 *	I/O and start scanning metadata again).
 *
 *	This limit is calculated by subtracting a fraction of the hard
 *	limit from the hard limit. By default this fraction is 5%, so
 *	the soft limit is 95% of the hard limit. We cap the size of the
 *	difference between the hard and soft limits at an absolute
 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
 *	sufficient to not cause too frequent switching between the
 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
 *	that should take at least a decent fraction of a second).
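 *
 * As a rough, illustrative example (not a guarantee), with the default
 * tunables a system with 8 GiB of physmem gets a hard limit of
 * MAX(8 GiB / zfs_scan_mem_lim_fact, zfs_scan_mem_lim_min) = ~410 MiB
 * (further capped at 5% of the pool's allocated space), and a soft limit
 * of hard - MIN(hard / zfs_scan_mem_lim_soft_fact, 128 MiB) = ~390 MiB.
 * See dsl_scan_should_clear() below for the exact computation.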
 */
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
	uint64_t mlim_hard, mlim_soft, mused;
	uint64_t alloc = metaslab_class_get_alloc(spa_normal_class(
	    scn->scn_dp->dp_spa));

	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
	    zfs_scan_mem_lim_min);
	mlim_hard = MIN(mlim_hard, alloc / 20);
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);
	mused = 0;
	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
		vdev_t *tvd = rvd->vdev_child[i];
		dsl_scan_io_queue_t *queue;

		mutex_enter(&tvd->vdev_scan_io_queue_lock);
		queue = tvd->vdev_scan_io_queue;
		if (queue != NULL) {
			/* #extents in exts_by_size = # in exts_by_addr */
			mused += avl_numnodes(&queue->q_exts_by_size) *
			    sizeof (range_seg_t) +
			    avl_numnodes(&queue->q_sios_by_addr) *
			    sizeof (scan_io_t);
		}
		mutex_exit(&tvd->vdev_scan_io_queue_lock);
	}

	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);

	if (mused == 0)
		ASSERT0(scn->scn_bytes_pending);

	/*
	 * If we are above our hard limit, we need to clear out memory.
	 * If we are below our soft limit, we need to accumulate sequential IOs.
	 * Otherwise, we should keep doing whatever we are currently doing.
	 */
	if (mused >= mlim_hard)
		return (B_TRUE);
	else if (mused < mlim_soft)
		return (B_FALSE);
	else
		return (scn->scn_clearing);
}

static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_suspending)
		return (B_TRUE); /* we're already suspending */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We suspend if:
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), or someone is explicitly waiting for this txg
	 *    to complete.
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 *  or
	 *  - the scan queue has reached its memory use limit
	 */
	hrtime_t curr_time_ns = gethrtime();
	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
	uint64_t sync_time_ns = curr_time_ns -
	    scn->scn_dp->dp_spa->spa_sync_starttime;

	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;

	if ((NSEC2MSEC(scan_time_ns) > mintime &&
	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
	    txg_sync_waiting(scn->scn_dp) ||
	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa) ||
	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
		if (zb) {
			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		} else {
			dsl_scan_phys_t *scnp = &scn->scn_phys;

			dprintf("suspending at DDT bookmark "
			    "%llx/%llx/%llx/%llx\n",
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
		}
		scn->scn_suspending = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	ASSERT(spa_writeable(dp->dp_spa));

	/*
	 * We only want to visit blocks that have been claimed
	 * but not yet replayed.
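	 * A zero zh_claim_txg means this ZIL was never claimed, so there
	 * are no such blocks and we can return immediately.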
	 */
	if (claim_txg == 0)
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}

/*
 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
 * here is to sort the AVL tree by the order each block will be needed.
 */
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;

	return (zbookmark_compare(spc_a->spc_datablkszsec,
	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}

static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
{
	if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
		zfs_refcount_destroy(&spc->spc_refcnt);
		kmem_free(spc, sizeof (scan_prefetch_ctx_t));
	}
}

static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
{
	scan_prefetch_ctx_t *spc;

	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
	zfs_refcount_create(&spc->spc_refcnt);
	zfs_refcount_add(&spc->spc_refcnt, tag);
	spc->spc_scn = scn;
	if (dnp != NULL) {
		spc->spc_datablkszsec = dnp->dn_datablkszsec;
		spc->spc_indblkshift = dnp->dn_indblkshift;
		spc->spc_root = B_FALSE;
	} else {
		spc->spc_datablkszsec = 0;
		spc->spc_indblkshift = 0;
		spc->spc_root = B_TRUE;
	}

	return (spc);
}

static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
{
	zfs_refcount_add(&spc->spc_refcnt, tag);
}

static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
    const zbookmark_phys_t *zb)
{
	zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
	dnode_phys_t tmp_dnp;
	dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;

	if (zb->zb_objset != last_zb->zb_objset)
		return (B_TRUE);
	if ((int64_t)zb->zb_object < 0)
		return (B_FALSE);

	tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
	tmp_dnp.dn_indblkshift = spc->spc_indblkshift;

	if (zbookmark_subtree_completed(dnp, zb, last_zb))
		return (B_TRUE);

	return (B_FALSE);
}

static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
	avl_index_t idx;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;
	scan_prefetch_issue_ctx_t *spic;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
	    BP_GET_TYPE(bp) != DMU_OT_OBJSET))
		return;

	if (dsl_scan_check_prefetch_resume(spc, zb))
		return;

	scan_prefetch_ctx_add_ref(spc, scn);
	spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
	spic->spic_spc = spc;
	spic->spic_bp = *bp;
	spic->spic_zb = *zb;

	/*
	 * Add the IO to the queue of blocks to prefetch. This allows us to
	 * prioritize blocks that we will need first for the main traversal
	 * thread.
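	 * The queue is kept in bookmark order by
	 * scan_prefetch_queue_compare(), and the avl_find() below discards
	 * duplicates so a block is only ever queued once.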
	 */
	mutex_enter(&spa->spa_scrub_lock);
	if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
		/* this block is already queued for prefetch */
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
		scan_prefetch_ctx_rele(spc, scn);
		mutex_exit(&spa->spa_scrub_lock);
		return;
	}

	avl_insert(&scn->scn_prefetch_queue, spic, idx);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int i;
	zbookmark_phys_t zb;
	scan_prefetch_ctx_t *spc;

	if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
		return;

	SET_BOOKMARK(&zb, objset, object, 0, 0);

	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);

	for (i = 0; i < dnp->dn_nblkptr; i++) {
		zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
		zb.zb_blkid = i;
		dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zb.zb_level = 0;
		zb.zb_blkid = DMU_SPILL_BLKID;
		dsl_scan_prefetch(spc, &dnp->dn_spill, &zb);
	}

	scan_prefetch_ctx_rele(spc, FTAG);
}

void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *private)
{
	scan_prefetch_ctx_t *spc = private;
	dsl_scan_t *scn = spc->spc_scn;
	spa_t *spa = scn->scn_dp->dp_spa;

	/* broadcast that the IO has completed for rate limiting purposes */
	mutex_enter(&spa->spa_scrub_lock);
	ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
	spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);

	/* if there was an error or we are done prefetching, just cleanup */
	if (buf == NULL || scn->scn_suspending)
		goto out;

	if (BP_GET_LEVEL(bp) > 0) {
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t czb;

		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1, zb->zb_blkid * epb + i);
			dsl_scan_prefetch(spc, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		dnode_phys_t *cdnp = buf->b_data;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_prefetch_dnode(scn, cdnp,
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		objset_phys_t *osp = buf->b_data;

		dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
		    zb->zb_objset, DMU_META_DNODE_OBJECT);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_groupused_dnode, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
			dsl_scan_prefetch_dnode(scn,
			    &osp->os_userused_dnode, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}

out:
	if (buf != NULL)
		arc_buf_destroy(buf, private);
	scan_prefetch_ctx_rele(spc, scn);
}

/* ARGSUSED */
static void
dsl_scan_prefetch_thread(void *arg)
{
	dsl_scan_t *scn = arg;
	spa_t *spa = scn->scn_dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t maxinflight =
	    rvd->vdev_children * zfs_top_maxinflight;
	scan_prefetch_issue_ctx_t *spic;

	/* loop until we are told to stop */
	while (!scn->scn_prefetch_stop) {
		arc_flags_t flags = ARC_FLAG_NOWAIT |
		    ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
		int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;

		mutex_enter(&spa->spa_scrub_lock);

		/*
		 * Wait until we have an IO to issue and are not above our
		 * maximum in flight limit.
		 */
		while (!scn->scn_prefetch_stop &&
		    (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
		    spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		}

		/* recheck if we should stop since we waited for the cv */
		if (scn->scn_prefetch_stop) {
			mutex_exit(&spa->spa_scrub_lock);
			break;
		}

		/* remove the prefetch IO from the tree */
		spic = avl_first(&scn->scn_prefetch_queue);
		spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
		avl_remove(&scn->scn_prefetch_queue, spic);

		mutex_exit(&spa->spa_scrub_lock);

		/* issue the prefetch asynchronously */
		(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
		    &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);

		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}

	ASSERT(scn->scn_prefetch_stop);

	/* free any prefetches we didn't get to complete */
	mutex_enter(&spa->spa_scrub_lock);
	while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
		avl_remove(&scn->scn_prefetch_queue, spic);
		scan_prefetch_ctx_rele(spic->spic_spc, scn);
		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
	}
	ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
	mutex_exit(&spa->spa_scrub_lock);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for suspending
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
static void dsl_scan_visitdnode(
    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

/*
 * Return nonzero on i/o error.
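 * (The read is done via the ARC; indirect, dnode and objset blocks are
 * descended into through dsl_scan_visitbp() and dsl_scan_visitdnode().)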
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * suspending. This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		arc_buf_destroy(buf, &buf);
	}

	return (0);
}

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
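 *
 * Visits a single block pointer: honors the suspend/resume bookmarks,
 * skips holes and blocks born at or before scn_cur_min_txg, recurses
 * through dsl_scan_recurse(), and finally hands the block to the scan
 * callback unless it was already covered by the DDT portion of the scan.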
1705 */
1706 static void
1707 dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
1708 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
1709 dmu_objset_type_t ostype, dmu_tx_t *tx)
1710 {
1711 dsl_pool_t *dp = scn->scn_dp;
1712 blkptr_t *bp_toread = NULL;
1713
1714 if (dsl_scan_check_suspend(scn, zb))
1715 return;
1716
1717 if (dsl_scan_check_resume(scn, dnp, zb))
1718 return;
1719
1720 scn->scn_visited_this_txg++;
1721
1722 /*
1723 * This debugging is commented out to conserve stack space. This
1724 * function is called recursively and the debugging adds several
1725 * bytes to the stack for each call. It can be commented back in
1726 * if required to debug an issue in dsl_scan_visitbp().
1727 *
1728 * dprintf_bp(bp,
1729 * "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
1730 * ds, ds ? ds->ds_object : 0,
1731 * zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
1732 * bp);
1733 */
1734
1735 if (BP_IS_HOLE(bp)) {
1736 scn->scn_holes_this_txg++;
1737 return;
1738 }
1739
1740 if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
1741 scn->scn_lt_min_this_txg++;
1742 return;
1743 }
1744
1745 bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
1746 *bp_toread = *bp;
1747
1748 if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
1749 goto out;
1750
1751 /*
1752 * If dsl_scan_ddt() has already visited this block, it will have
1753 * already done any translations or scrubbing, so don't call the
1754 * callback again.
1755 */
1756 if (ddt_class_contains(dp->dp_spa,
1757 scn->scn_phys.scn_ddt_class_max, bp)) {
1758 scn->scn_ddt_contained_this_txg++;
1759 goto out;
1760 }
1761
1762 /*
1763 * If this block is from the future (after cur_max_txg), then we
1764 * are doing this on behalf of a deleted snapshot, and we will
1765 * revisit the future block on the next pass of this dataset.
1766 * Don't scan it now unless we need to because something
1767 * under it was modified.
1768 */
1769 if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
1770 scn->scn_gt_max_this_txg++;
1771 goto out;
1772 }
1773
1774 scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
1775
1776 out:
1777 kmem_free(bp_toread, sizeof (blkptr_t));
1778 }
1779
1780 static void
1781 dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
1782 dmu_tx_t *tx)
1783 {
1784 zbookmark_phys_t zb;
1785 scan_prefetch_ctx_t *spc;
1786
1787 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1788 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1789
1790 if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
1791 SET_BOOKMARK(&scn->scn_prefetch_bookmark,
1792 zb.zb_objset, 0, 0, 0);
1793 } else {
1794 scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
1795 }
1796
1797 scn->scn_objsets_visited_this_txg++;
1798
1799 spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
1800 dsl_scan_prefetch(spc, bp, &zb);
1801 scan_prefetch_ctx_rele(spc, FTAG);
1802
1803 dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
1804
1805 dprintf_ds(ds, "finished scan%s", "");
1806 }
1807
1808 static void
1809 ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
1810 {
1811 if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
1812 if (ds->ds_is_snapshot) {
1813 /*
1814 * Note:
1815 * - scn_cur_{min,max}_txg stays the same.
1816 * - Setting the flag is not really necessary if
1817 * scn_cur_max_txg == scn_max_txg, because there
1818 * is nothing after this snapshot that we care
1819 * about.
However, we set it anyway and then
1820 * ignore it when we retraverse it in
1821 * dsl_scan_visitds().
1822 */
1823 scn_phys->scn_bookmark.zb_objset =
1824 dsl_dataset_phys(ds)->ds_next_snap_obj;
1825 zfs_dbgmsg("destroying ds %llu; currently traversing; "
1826 "reset zb_objset to %llu",
1827 (u_longlong_t)ds->ds_object,
1828 (u_longlong_t)dsl_dataset_phys(ds)->
1829 ds_next_snap_obj);
1830 scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
1831 } else {
1832 SET_BOOKMARK(&scn_phys->scn_bookmark,
1833 ZB_DESTROYED_OBJSET, 0, 0, 0);
1834 zfs_dbgmsg("destroying ds %llu; currently traversing; "
1835 "reset bookmark to -1,0,0,0",
1836 (u_longlong_t)ds->ds_object);
1837 }
1838 }
1839 }
1840
1841 /*
1842 * Invoked when a dataset is destroyed. We need to make sure that:
1843 *
1844 * 1) If it is the dataset that was currently being scanned, we write
1845 * a new dsl_scan_phys_t, marking the objset reference in it
1846 * as destroyed.
1847 * 2) Remove it from the work queue, if it was present.
1848 *
1849 * If the dataset was actually a snapshot, instead of marking the dataset
1850 * as destroyed, we instead substitute the next snapshot in line.
1851 */
1852 void
1853 dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
1854 {
1855 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1856 dsl_scan_t *scn = dp->dp_scan;
1857 uint64_t mintxg;
1858
1859 if (!dsl_scan_is_running(scn))
1860 return;
1861
1862 ds_destroyed_scn_phys(ds, &scn->scn_phys);
1863 ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
1864
1865 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
1866 scan_ds_queue_remove(scn, ds->ds_object);
1867 if (ds->ds_is_snapshot)
1868 scan_ds_queue_insert(scn,
1869 dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
1870 }
1871
1872 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
1873 ds->ds_object, &mintxg) == 0) {
1874 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
1875 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
1876 scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
1877 if (ds->ds_is_snapshot) {
1878 /*
1879 * We keep the same mintxg; it could be >
1880 * ds_creation_txg if the previous snapshot was
1881 * deleted too.
1882 */
1883 VERIFY(zap_add_int_key(dp->dp_meta_objset,
1884 scn->scn_phys.scn_queue_obj,
1885 dsl_dataset_phys(ds)->ds_next_snap_obj,
1886 mintxg, tx) == 0);
1887 zfs_dbgmsg("destroying ds %llu; in queue; "
1888 "replacing with %llu",
1889 (u_longlong_t)ds->ds_object,
1890 (u_longlong_t)dsl_dataset_phys(ds)->
1891 ds_next_snap_obj);
1892 } else {
1893 zfs_dbgmsg("destroying ds %llu; in queue; removing",
1894 (u_longlong_t)ds->ds_object);
1895 }
1896 }
1897
1898 /*
1899 * dsl_scan_sync() should be called after this, and should sync
1900 * out our changed state, but just to be safe, do it here.
1901 */
1902 dsl_scan_sync_state(scn, tx, SYNC_CACHED);
1903 }
1904
1905 static void
1906 ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
1907 {
1908 if (scn_bookmark->zb_objset == ds->ds_object) {
1909 scn_bookmark->zb_objset =
1910 dsl_dataset_phys(ds)->ds_prev_snap_obj;
1911 zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
1912 "reset zb_objset to %llu",
1913 (u_longlong_t)ds->ds_object,
1914 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
1915 }
1916 }
1917
1918 /*
1919 * Called when a dataset is snapshotted. If we were currently traversing
1920 * this dataset, we reset our bookmark to point at the newly created
1921 * snapshot.
We also modify our work queue to remove the old snapshot and 1922 * replace with the new one. 1923 */ 1924 void 1925 dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx) 1926 { 1927 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1928 dsl_scan_t *scn = dp->dp_scan; 1929 uint64_t mintxg; 1930 1931 if (!dsl_scan_is_running(scn)) 1932 return; 1933 1934 ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0); 1935 1936 ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark); 1937 ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark); 1938 1939 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { 1940 scan_ds_queue_remove(scn, ds->ds_object); 1941 scan_ds_queue_insert(scn, 1942 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg); 1943 } 1944 1945 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 1946 ds->ds_object, &mintxg) == 0) { 1947 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 1948 scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); 1949 VERIFY(zap_add_int_key(dp->dp_meta_objset, 1950 scn->scn_phys.scn_queue_obj, 1951 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0); 1952 zfs_dbgmsg("snapshotting ds %llu; in queue; " 1953 "replacing with %llu", 1954 (u_longlong_t)ds->ds_object, 1955 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); 1956 } 1957 1958 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 1959 } 1960 1961 static void 1962 ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2, 1963 zbookmark_phys_t *scn_bookmark) 1964 { 1965 if (scn_bookmark->zb_objset == ds1->ds_object) { 1966 scn_bookmark->zb_objset = ds2->ds_object; 1967 zfs_dbgmsg("clone_swap ds %llu; currently traversing; " 1968 "reset zb_objset to %llu", 1969 (u_longlong_t)ds1->ds_object, 1970 (u_longlong_t)ds2->ds_object); 1971 } else if (scn_bookmark->zb_objset == ds2->ds_object) { 1972 scn_bookmark->zb_objset = ds1->ds_object; 1973 zfs_dbgmsg("clone_swap ds %llu; currently traversing; " 1974 "reset zb_objset to %llu", 1975 (u_longlong_t)ds2->ds_object, 1976 (u_longlong_t)ds1->ds_object); 1977 } 1978 } 1979 1980 /* 1981 * Called when a parent dataset and its clone are swapped. If we were 1982 * currently traversing the dataset, we need to switch to traversing the 1983 * newly promoted parent. 
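 *
 * For example (hypothetical object numbers): if a parent with objset 21
 * and its clone with objset 75 are swapped, a bookmark or queue entry
 * that referred to objset 21 must now refer to objset 75, and vice versa;
 * the helpers below simply apply that exchange to both the live and
 * cached bookmarks and to both work queue entries.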
1984 */ 1985 void 1986 dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) 1987 { 1988 dsl_pool_t *dp = ds1->ds_dir->dd_pool; 1989 dsl_scan_t *scn = dp->dp_scan; 1990 uint64_t mintxg; 1991 1992 if (!dsl_scan_is_running(scn)) 1993 return; 1994 1995 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark); 1996 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark); 1997 1998 if (scan_ds_queue_contains(scn, ds1->ds_object, &mintxg)) { 1999 scan_ds_queue_remove(scn, ds1->ds_object); 2000 scan_ds_queue_insert(scn, ds2->ds_object, mintxg); 2001 } 2002 if (scan_ds_queue_contains(scn, ds2->ds_object, &mintxg)) { 2003 scan_ds_queue_remove(scn, ds2->ds_object); 2004 scan_ds_queue_insert(scn, ds1->ds_object, mintxg); 2005 } 2006 2007 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 2008 ds1->ds_object, &mintxg) == 0) { 2009 int err; 2010 ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2011 ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2012 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2013 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx)); 2014 err = zap_add_int_key(dp->dp_meta_objset, 2015 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx); 2016 VERIFY(err == 0 || err == EEXIST); 2017 if (err == EEXIST) { 2018 /* Both were there to begin with */ 2019 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset, 2020 scn->scn_phys.scn_queue_obj, 2021 ds1->ds_object, mintxg, tx)); 2022 } 2023 zfs_dbgmsg("clone_swap ds %llu; in queue; " 2024 "replacing with %llu", 2025 (u_longlong_t)ds1->ds_object, 2026 (u_longlong_t)ds2->ds_object); 2027 } 2028 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 2029 ds2->ds_object, &mintxg) == 0) { 2030 ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2031 ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2032 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2033 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx)); 2034 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset, 2035 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx)); 2036 zfs_dbgmsg("clone_swap ds %llu; in queue; " 2037 "replacing with %llu", 2038 (u_longlong_t)ds2->ds_object, 2039 (u_longlong_t)ds1->ds_object); 2040 } 2041 2042 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2043 } 2044 2045 /* ARGSUSED */ 2046 static int 2047 enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2048 { 2049 uint64_t originobj = *(uint64_t *)arg; 2050 dsl_dataset_t *ds; 2051 int err; 2052 dsl_scan_t *scn = dp->dp_scan; 2053 2054 if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj) 2055 return (0); 2056 2057 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2058 if (err) 2059 return (err); 2060 2061 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) { 2062 dsl_dataset_t *prev; 2063 err = dsl_dataset_hold_obj(dp, 2064 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2065 2066 dsl_dataset_rele(ds, FTAG); 2067 if (err) 2068 return (err); 2069 ds = prev; 2070 } 2071 scan_ds_queue_insert(scn, ds->ds_object, 2072 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2073 dsl_dataset_rele(ds, FTAG); 2074 return (0); 2075 } 2076 2077 static void 2078 dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) 2079 { 2080 dsl_pool_t *dp = scn->scn_dp; 2081 dsl_dataset_t *ds; 2082 2083 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2084 2085 if (scn->scn_phys.scn_cur_min_txg >= 2086 scn->scn_phys.scn_max_txg) { 2087 /* 2088 * This can happen 
if this snapshot was created after the
2089 * scan started, and we already completed a previous snapshot
2090 * that was created after the scan started. This snapshot
2091 * only references blocks with:
2092 *
2093 * birth < our ds_creation_txg
2094 * cur_min_txg is no less than ds_creation_txg.
2095 * We have already visited these blocks.
2096 * or
2097 * birth > scn_max_txg
2098 * The scan requested not to visit these blocks.
2099 *
2100 * Subsequent snapshots (and clones) can reference our
2101 * blocks, or blocks with even higher birth times.
2102 * Therefore we do not need to visit them either,
2103 * so we do not add them to the work queue.
2104 *
2105 * Note that checking for cur_min_txg >= cur_max_txg
2106 * is not sufficient, because in that case we may need to
2107 * visit subsequent snapshots. This happens when min_txg > 0,
2108 * which raises cur_min_txg. In this case we will visit
2109 * this dataset but skip all of its blocks, because the
2110 * rootbp's birth time is < cur_min_txg. Then we will
2111 * add the next snapshots/clones to the work queue.
2112 */
2113 char *dsname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
2114 dsl_dataset_name(ds, dsname);
2115 zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
2116 "cur_min_txg (%llu) >= max_txg (%llu)",
2117 (longlong_t)dsobj, dsname,
2118 (longlong_t)scn->scn_phys.scn_cur_min_txg,
2119 (longlong_t)scn->scn_phys.scn_max_txg);
2120 kmem_free(dsname, MAXNAMELEN);
2121
2122 goto out;
2123 }
2124
2125 /*
2126 * Only the ZIL in the head (non-snapshot) is valid. Even though
2127 * snapshots can have ZIL block pointers (which may be the same
2128 * BP as in the head), they must be ignored. In addition, $ORIGIN
2129 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
2130 * need to look for a ZIL in it either. So we traverse the ZIL here,
2131 * rather than in dsl_scan_recurse(), because the regular snapshot
2132 * block-sharing rules don't apply to it.
2133 */
2134 if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds) &&
2135 (dp->dp_origin_snap == NULL ||
2136 ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
2137 objset_t *os;
2138 if (dmu_objset_from_ds(ds, &os) != 0) {
2139 goto out;
2140 }
2141 dsl_scan_zil(dp, &os->os_zil_header);
2142 }
2143
2144 /*
2145 * Iterate over the bps in this ds.
2146 */
2147 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2148 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2149 dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
2150 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2151
2152 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
2153 dsl_dataset_name(ds, dsname);
2154 zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
2155 "suspending=%u",
2156 (longlong_t)dsobj, dsname,
2157 (longlong_t)scn->scn_phys.scn_cur_min_txg,
2158 (longlong_t)scn->scn_phys.scn_cur_max_txg,
2159 (int)scn->scn_suspending);
2160 kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
2161
2162 if (scn->scn_suspending)
2163 goto out;
2164
2165 /*
2166 * We've finished this pass over this dataset.
2167 */
2168
2169 /*
2170 * If we did not completely visit this dataset, do another pass.
2171 */
2172 if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
2173 zfs_dbgmsg("incomplete pass; visiting again");
2174 scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
2175 scan_ds_queue_insert(scn, ds->ds_object,
2176 scn->scn_phys.scn_cur_max_txg);
2177 goto out;
2178 }
2179
2180 /*
2181 * Add descendant datasets to work queue.
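 *
 * For example (hypothetical txgs): after finishing a snapshot that was
 * created in txg 1000, its next snapshot and any clones of it are queued
 * with a minimum txg of 1000, so that when they are visited only blocks
 * born after txg 1000 (i.e. blocks not shared with the snapshot we just
 * finished) are actually traversed.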
2182 */ 2183 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) { 2184 scan_ds_queue_insert(scn, 2185 dsl_dataset_phys(ds)->ds_next_snap_obj, 2186 dsl_dataset_phys(ds)->ds_creation_txg); 2187 } 2188 if (dsl_dataset_phys(ds)->ds_num_children > 1) { 2189 boolean_t usenext = B_FALSE; 2190 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) { 2191 uint64_t count; 2192 /* 2193 * A bug in a previous version of the code could 2194 * cause upgrade_clones_cb() to not set 2195 * ds_next_snap_obj when it should, leading to a 2196 * missing entry. Therefore we can only use the 2197 * next_clones_obj when its count is correct. 2198 */ 2199 int err = zap_count(dp->dp_meta_objset, 2200 dsl_dataset_phys(ds)->ds_next_clones_obj, &count); 2201 if (err == 0 && 2202 count == dsl_dataset_phys(ds)->ds_num_children - 1) 2203 usenext = B_TRUE; 2204 } 2205 2206 if (usenext) { 2207 zap_cursor_t zc; 2208 zap_attribute_t za; 2209 for (zap_cursor_init(&zc, dp->dp_meta_objset, 2210 dsl_dataset_phys(ds)->ds_next_clones_obj); 2211 zap_cursor_retrieve(&zc, &za) == 0; 2212 (void) zap_cursor_advance(&zc)) { 2213 scan_ds_queue_insert(scn, 2214 zfs_strtonum(za.za_name, NULL), 2215 dsl_dataset_phys(ds)->ds_creation_txg); 2216 } 2217 zap_cursor_fini(&zc); 2218 } else { 2219 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2220 enqueue_clones_cb, &ds->ds_object, 2221 DS_FIND_CHILDREN)); 2222 } 2223 } 2224 2225 out: 2226 dsl_dataset_rele(ds, FTAG); 2227 } 2228 2229 /* ARGSUSED */ 2230 static int 2231 enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2232 { 2233 dsl_dataset_t *ds; 2234 int err; 2235 dsl_scan_t *scn = dp->dp_scan; 2236 2237 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2238 if (err) 2239 return (err); 2240 2241 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) { 2242 dsl_dataset_t *prev; 2243 err = dsl_dataset_hold_obj(dp, 2244 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2245 if (err) { 2246 dsl_dataset_rele(ds, FTAG); 2247 return (err); 2248 } 2249 2250 /* 2251 * If this is a clone, we don't need to worry about it for now. 2252 */ 2253 if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) { 2254 dsl_dataset_rele(ds, FTAG); 2255 dsl_dataset_rele(prev, FTAG); 2256 return (0); 2257 } 2258 dsl_dataset_rele(ds, FTAG); 2259 ds = prev; 2260 } 2261 2262 scan_ds_queue_insert(scn, ds->ds_object, 2263 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2264 dsl_dataset_rele(ds, FTAG); 2265 return (0); 2266 } 2267 2268 /* ARGSUSED */ 2269 void 2270 dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, 2271 ddt_entry_t *dde, dmu_tx_t *tx) 2272 { 2273 const ddt_key_t *ddk = &dde->dde_key; 2274 ddt_phys_t *ddp = dde->dde_phys; 2275 blkptr_t bp; 2276 zbookmark_phys_t zb = { 0 }; 2277 int p; 2278 2279 if (scn->scn_phys.scn_state != DSS_SCANNING) 2280 return; 2281 2282 for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2283 if (ddp->ddp_phys_birth == 0 || 2284 ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg) 2285 continue; 2286 ddt_bp_create(checksum, ddk, ddp, &bp); 2287 2288 scn->scn_visited_this_txg++; 2289 scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb); 2290 } 2291 } 2292 2293 /* 2294 * Scrub/dedup interaction. 2295 * 2296 * If there are N references to a deduped block, we don't want to scrub it 2297 * N times -- ideally, we should scrub it exactly once. 2298 * 2299 * We leverage the fact that the dde's replication class (enum ddt_class) 2300 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest 2301 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order. 
2302 * 2303 * To prevent excess scrubbing, the scrub begins by walking the DDT 2304 * to find all blocks with refcnt > 1, and scrubs each of these once. 2305 * Since there are two replication classes which contain blocks with 2306 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first. 2307 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1. 2308 * 2309 * There would be nothing more to say if a block's refcnt couldn't change 2310 * during a scrub, but of course it can so we must account for changes 2311 * in a block's replication class. 2312 * 2313 * Here's an example of what can occur: 2314 * 2315 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1 2316 * when visited during the top-down scrub phase, it will be scrubbed twice. 2317 * This negates our scrub optimization, but is otherwise harmless. 2318 * 2319 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1 2320 * on each visit during the top-down scrub phase, it will never be scrubbed. 2321 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's 2322 * reference class transitions to a higher level (i.e DDT_CLASS_UNIQUE to 2323 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1 2324 * while a scrub is in progress, it scrubs the block right then. 2325 */ 2326 static void 2327 dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx) 2328 { 2329 ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark; 2330 ddt_entry_t dde = { 0 }; 2331 int error; 2332 uint64_t n = 0; 2333 2334 while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) { 2335 ddt_t *ddt; 2336 2337 if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max) 2338 break; 2339 dprintf("visiting ddb=%llu/%llu/%llu/%llx\n", 2340 (longlong_t)ddb->ddb_class, 2341 (longlong_t)ddb->ddb_type, 2342 (longlong_t)ddb->ddb_checksum, 2343 (longlong_t)ddb->ddb_cursor); 2344 2345 /* There should be no pending changes to the dedup table */ 2346 ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum]; 2347 ASSERT(avl_first(&ddt->ddt_tree) == NULL); 2348 2349 dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx); 2350 n++; 2351 2352 if (dsl_scan_check_suspend(scn, NULL)) 2353 break; 2354 } 2355 2356 zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; " 2357 "suspending=%u", (longlong_t)n, 2358 (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending); 2359 2360 ASSERT(error == 0 || error == ENOENT); 2361 ASSERT(error != ENOENT || 2362 ddb->ddb_class > scn->scn_phys.scn_ddt_class_max); 2363 } 2364 2365 static uint64_t 2366 dsl_scan_ds_maxtxg(dsl_dataset_t *ds) 2367 { 2368 uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg; 2369 if (ds->ds_is_snapshot) 2370 return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg)); 2371 return (smt); 2372 } 2373 2374 static void 2375 dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) 2376 { 2377 scan_ds_t *sds; 2378 dsl_pool_t *dp = scn->scn_dp; 2379 2380 if (scn->scn_phys.scn_ddt_bookmark.ddb_class <= 2381 scn->scn_phys.scn_ddt_class_max) { 2382 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 2383 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 2384 dsl_scan_ddt(scn, tx); 2385 if (scn->scn_suspending) 2386 return; 2387 } 2388 2389 if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) { 2390 /* First do the MOS & ORIGIN */ 2391 2392 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 2393 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 2394 dsl_scan_visit_rootbp(scn, NULL, 2395 
&dp->dp_meta_rootbp, tx); 2396 spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp); 2397 if (scn->scn_suspending) 2398 return; 2399 2400 if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { 2401 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2402 enqueue_cb, NULL, DS_FIND_CHILDREN)); 2403 } else { 2404 dsl_scan_visitds(scn, 2405 dp->dp_origin_snap->ds_object, tx); 2406 } 2407 ASSERT(!scn->scn_suspending); 2408 } else if (scn->scn_phys.scn_bookmark.zb_objset != 2409 ZB_DESTROYED_OBJSET) { 2410 uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset; 2411 /* 2412 * If we were suspended, continue from here. Note if the 2413 * ds we were suspended on was deleted, the zb_objset may 2414 * be -1, so we will skip this and find a new objset 2415 * below. 2416 */ 2417 dsl_scan_visitds(scn, dsobj, tx); 2418 if (scn->scn_suspending) 2419 return; 2420 } 2421 2422 /* 2423 * In case we suspended right at the end of the ds, zero the 2424 * bookmark so we don't think that we're still trying to resume. 2425 */ 2426 bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t)); 2427 2428 /* 2429 * Keep pulling things out of the dataset avl queue. Updates to the 2430 * persistent zap-object-as-queue happen only at checkpoints. 2431 */ 2432 while ((sds = avl_first(&scn->scn_queue)) != NULL) { 2433 dsl_dataset_t *ds; 2434 uint64_t dsobj = sds->sds_dsobj; 2435 uint64_t txg = sds->sds_txg; 2436 2437 /* dequeue and free the ds from the queue */ 2438 scan_ds_queue_remove(scn, dsobj); 2439 sds = NULL; /* must not be touched after removal */ 2440 2441 /* Set up min / max txg */ 2442 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2443 if (txg != 0) { 2444 scn->scn_phys.scn_cur_min_txg = 2445 MAX(scn->scn_phys.scn_min_txg, txg); 2446 } else { 2447 scn->scn_phys.scn_cur_min_txg = 2448 MAX(scn->scn_phys.scn_min_txg, 2449 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2450 } 2451 scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds); 2452 dsl_dataset_rele(ds, FTAG); 2453 2454 dsl_scan_visitds(scn, dsobj, tx); 2455 if (scn->scn_suspending) 2456 return; 2457 } 2458 /* No more objsets to fetch, we're done */ 2459 scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET; 2460 ASSERT0(scn->scn_suspending); 2461 } 2462 2463 static uint64_t 2464 dsl_scan_count_leaves(vdev_t *vd) 2465 { 2466 uint64_t i, leaves = 0; 2467 2468 /* we only count leaves that belong to the main pool and are readable */ 2469 if (vd->vdev_islog || vd->vdev_isspare || 2470 vd->vdev_isl2cache || !vdev_readable(vd)) 2471 return (0); 2472 2473 if (vd->vdev_ops->vdev_op_leaf) 2474 return (1); 2475 2476 for (i = 0; i < vd->vdev_children; i++) { 2477 leaves += dsl_scan_count_leaves(vd->vdev_child[i]); 2478 } 2479 2480 return (leaves); 2481 } 2482 2483 2484 static void 2485 scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp) 2486 { 2487 int i; 2488 uint64_t cur_size = 0; 2489 2490 for (i = 0; i < BP_GET_NDVAS(bp); i++) { 2491 cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]); 2492 } 2493 2494 q->q_total_zio_size_this_txg += cur_size; 2495 q->q_zios_this_txg++; 2496 } 2497 2498 static void 2499 scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start, 2500 uint64_t end) 2501 { 2502 q->q_total_seg_size_this_txg += end - start; 2503 q->q_segs_this_txg++; 2504 } 2505 2506 static boolean_t 2507 scan_io_queue_check_suspend(dsl_scan_t *scn) 2508 { 2509 /* See comment in dsl_scan_check_suspend() */ 2510 uint64_t curr_time_ns = gethrtime(); 2511 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; 2512 uint64_t 
sync_time_ns = curr_time_ns - 2513 scn->scn_dp->dp_spa->spa_sync_starttime; 2514 int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max; 2515 int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 2516 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 2517 2518 return ((NSEC2MSEC(scan_time_ns) > mintime && 2519 (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent || 2520 txg_sync_waiting(scn->scn_dp) || 2521 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 2522 spa_shutting_down(scn->scn_dp->dp_spa)); 2523 } 2524 2525 /* 2526 * Given a list of scan_io_t's in io_list, this issues the io's out to 2527 * disk. This consumes the io_list and frees the scan_io_t's. This is 2528 * called when emptying queues, either when we're up against the memory 2529 * limit or when we have finished scanning. Returns B_TRUE if we stopped 2530 * processing the list before we finished. Any zios that were not issued 2531 * will remain in the io_list. 2532 */ 2533 static boolean_t 2534 scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list) 2535 { 2536 dsl_scan_t *scn = queue->q_scn; 2537 scan_io_t *sio; 2538 int64_t bytes_issued = 0; 2539 boolean_t suspended = B_FALSE; 2540 2541 while ((sio = list_head(io_list)) != NULL) { 2542 blkptr_t bp; 2543 2544 if (scan_io_queue_check_suspend(scn)) { 2545 suspended = B_TRUE; 2546 break; 2547 } 2548 2549 sio2bp(sio, &bp, queue->q_vd->vdev_id); 2550 bytes_issued += sio->sio_asize; 2551 scan_exec_io(scn->scn_dp, &bp, sio->sio_flags, 2552 &sio->sio_zb, queue); 2553 (void) list_remove_head(io_list); 2554 scan_io_queues_update_zio_stats(queue, &bp); 2555 kmem_free(sio, sizeof (*sio)); 2556 } 2557 2558 atomic_add_64(&scn->scn_bytes_pending, -bytes_issued); 2559 2560 return (suspended); 2561 } 2562 2563 /* 2564 * Given a range_seg_t (extent) and a list, this function passes over a 2565 * scan queue and gathers up the appropriate ios which fit into that 2566 * scan seg (starting from lowest LBA). At the end, we remove the segment 2567 * from the q_exts_by_addr range tree. 2568 */ 2569 static boolean_t 2570 scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list) 2571 { 2572 scan_io_t srch_sio, *sio, *next_sio; 2573 avl_index_t idx; 2574 uint_t num_sios = 0; 2575 int64_t bytes_issued = 0; 2576 2577 ASSERT(rs != NULL); 2578 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 2579 2580 srch_sio.sio_offset = rs->rs_start; 2581 2582 /* 2583 * The exact start of the extent might not contain any matching zios, 2584 * so if that's the case, examine the next one in the tree. 2585 */ 2586 sio = avl_find(&queue->q_sios_by_addr, &srch_sio, &idx); 2587 if (sio == NULL) 2588 sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER); 2589 2590 while (sio != NULL && sio->sio_offset < rs->rs_end && num_sios <= 32) { 2591 ASSERT3U(sio->sio_offset, >=, rs->rs_start); 2592 ASSERT3U(sio->sio_offset + sio->sio_asize, <=, rs->rs_end); 2593 2594 next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio); 2595 avl_remove(&queue->q_sios_by_addr, sio); 2596 2597 bytes_issued += sio->sio_asize; 2598 num_sios++; 2599 list_insert_tail(list, sio); 2600 sio = next_sio; 2601 } 2602 2603 /* 2604 * We limit the number of sios we process at once to 32 to avoid 2605 * biting off more than we can chew. If we didn't take everything 2606 * in the segment we update it to reflect the work we were able to 2607 * complete. Otherwise, we remove it from the range tree entirely. 
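 *
 * As an illustration (numbers invented): if the chosen extent covers
 * offsets [0, 16M) and holds 100 queued sios, a single call gathers the
 * first 32 sios (say they end at offset 5M), subtracts their size from
 * the extent's fill, resizes the extent to [5M, 16M) and returns B_TRUE
 * so the caller knows to come back for the remainder.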
2608 */ 2609 if (sio != NULL && sio->sio_offset < rs->rs_end) { 2610 range_tree_adjust_fill(queue->q_exts_by_addr, rs, 2611 -bytes_issued); 2612 range_tree_resize_segment(queue->q_exts_by_addr, rs, 2613 sio->sio_offset, rs->rs_end - sio->sio_offset); 2614 2615 return (B_TRUE); 2616 } else { 2617 range_tree_remove(queue->q_exts_by_addr, rs->rs_start, 2618 rs->rs_end - rs->rs_start); 2619 return (B_FALSE); 2620 } 2621 } 2622 2623 2624 /* 2625 * This is called from the queue emptying thread and selects the next 2626 * extent from which we are to issue io's. The behavior of this function 2627 * depends on the state of the scan, the current memory consumption and 2628 * whether or not we are performing a scan shutdown. 2629 * 1) We select extents in an elevator algorithm (LBA-order) if the scan 2630 * needs to perform a checkpoint 2631 * 2) We select the largest available extent if we are up against the 2632 * memory limit. 2633 * 3) Otherwise we don't select any extents. 2634 */ 2635 static const range_seg_t * 2636 scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue) 2637 { 2638 dsl_scan_t *scn = queue->q_scn; 2639 2640 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 2641 ASSERT(scn->scn_is_sorted); 2642 2643 /* handle tunable overrides */ 2644 if (scn->scn_checkpointing || scn->scn_clearing) { 2645 if (zfs_scan_issue_strategy == 1) { 2646 return (range_tree_first(queue->q_exts_by_addr)); 2647 } else if (zfs_scan_issue_strategy == 2) { 2648 return (avl_first(&queue->q_exts_by_size)); 2649 } 2650 } 2651 2652 /* 2653 * During normal clearing, we want to issue our largest segments 2654 * first, keeping IO as sequential as possible, and leaving the 2655 * smaller extents for later with the hope that they might eventually 2656 * grow to larger sequential segments. However, when the scan is 2657 * checkpointing, no new extents will be added to the sorting queue, 2658 * so the way we are sorted now is as good as it will ever get. 2659 * In this case, we instead switch to issuing extents in LBA order. 
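 *
 * For example (illustrative sizes): with queued extents of 8M, 1M and
 * 64K, a clearing pass issues the 8M extent first and leaves the small
 * ones to (hopefully) coalesce into something larger; once checkpointing
 * begins, extents are instead taken strictly by offset, front to back,
 * until the queue is drained.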
2660 */ 2661 if (scn->scn_checkpointing) { 2662 return (range_tree_first(queue->q_exts_by_addr)); 2663 } else if (scn->scn_clearing) { 2664 return (avl_first(&queue->q_exts_by_size)); 2665 } else { 2666 return (NULL); 2667 } 2668 } 2669 2670 static void 2671 scan_io_queues_run_one(void *arg) 2672 { 2673 dsl_scan_io_queue_t *queue = arg; 2674 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; 2675 boolean_t suspended = B_FALSE; 2676 range_seg_t *rs = NULL; 2677 scan_io_t *sio = NULL; 2678 list_t sio_list; 2679 uint64_t bytes_per_leaf = zfs_scan_vdev_limit; 2680 uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd); 2681 2682 ASSERT(queue->q_scn->scn_is_sorted); 2683 2684 list_create(&sio_list, sizeof (scan_io_t), 2685 offsetof(scan_io_t, sio_nodes.sio_list_node)); 2686 mutex_enter(q_lock); 2687 2688 /* calculate maximum in-flight bytes for this txg (min 1MB) */ 2689 queue->q_maxinflight_bytes = 2690 MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); 2691 2692 /* reset per-queue scan statistics for this txg */ 2693 queue->q_total_seg_size_this_txg = 0; 2694 queue->q_segs_this_txg = 0; 2695 queue->q_total_zio_size_this_txg = 0; 2696 queue->q_zios_this_txg = 0; 2697 2698 /* loop until we have run out of time or sios */ 2699 while ((rs = (range_seg_t *)scan_io_queue_fetch_ext(queue)) != NULL) { 2700 uint64_t seg_start = 0, seg_end = 0; 2701 boolean_t more_left = B_TRUE; 2702 2703 ASSERT(list_is_empty(&sio_list)); 2704 2705 /* loop while we still have sios left to process in this rs */ 2706 while (more_left) { 2707 scan_io_t *first_sio, *last_sio; 2708 2709 /* 2710 * We have selected which extent needs to be 2711 * processed next. Gather up the corresponding sios. 2712 */ 2713 more_left = scan_io_queue_gather(queue, rs, &sio_list); 2714 ASSERT(!list_is_empty(&sio_list)); 2715 first_sio = list_head(&sio_list); 2716 last_sio = list_tail(&sio_list); 2717 2718 seg_end = last_sio->sio_offset + last_sio->sio_asize; 2719 if (seg_start == 0) 2720 seg_start = first_sio->sio_offset; 2721 2722 /* 2723 * Issuing sios can take a long time so drop the 2724 * queue lock. The sio queue won't be updated by 2725 * other threads since we're in syncing context so 2726 * we can be sure that our trees will remain exactly 2727 * as we left them. 2728 */ 2729 mutex_exit(q_lock); 2730 suspended = scan_io_queue_issue(queue, &sio_list); 2731 mutex_enter(q_lock); 2732 2733 if (suspended) 2734 break; 2735 } 2736 /* update statistics for debugging purposes */ 2737 scan_io_queues_update_seg_stats(queue, seg_start, seg_end); 2738 2739 if (suspended) 2740 break; 2741 } 2742 2743 2744 /* 2745 * If we were suspended in the middle of processing, 2746 * requeue any unfinished sios and exit. 2747 */ 2748 while ((sio = list_head(&sio_list)) != NULL) { 2749 list_remove(&sio_list, sio); 2750 scan_io_queue_insert_impl(queue, sio); 2751 } 2752 2753 mutex_exit(q_lock); 2754 list_destroy(&sio_list); 2755 } 2756 2757 /* 2758 * Performs an emptying run on all scan queues in the pool. This just 2759 * punches out one thread per top-level vdev, each of which processes 2760 * only that vdev's scan queue. We can parallelize the I/O here because 2761 * we know that each queue's io's only affect its own top-level vdev. 2762 * 2763 * This function waits for the queue runs to complete, and must be 2764 * called from dsl_scan_sync (or in general, syncing context). 
2765 */
2766 static void
2767 scan_io_queues_run(dsl_scan_t *scn)
2768 {
2769 spa_t *spa = scn->scn_dp->dp_spa;
2770
2771 ASSERT(scn->scn_is_sorted);
2772 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2773
2774 if (scn->scn_bytes_pending == 0)
2775 return;
2776
2777 if (scn->scn_taskq == NULL) {
2778 char *tq_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN + 16,
2779 KM_SLEEP);
2780 int nthreads = spa->spa_root_vdev->vdev_children;
2781
2782 /*
2783 * We need to make this taskq *always* execute as many
2784 * threads in parallel as we have top-level vdevs and no
2785 * less, otherwise strange serialization of the calls to
2786 * scan_io_queues_run_one can occur during spa_sync runs
2787 * and that significantly impacts performance.
2788 */
2789 (void) snprintf(tq_name, ZFS_MAX_DATASET_NAME_LEN + 16,
2790 "dsl_scan_tq_%s", spa->spa_name);
2791 scn->scn_taskq = taskq_create(tq_name, nthreads, minclsyspri,
2792 nthreads, nthreads, TASKQ_PREPOPULATE);
2793 kmem_free(tq_name, ZFS_MAX_DATASET_NAME_LEN + 16);
2794 }
2795
2796 for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
2797 vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
2798
2799 mutex_enter(&vd->vdev_scan_io_queue_lock);
2800 if (vd->vdev_scan_io_queue != NULL) {
2801 VERIFY(taskq_dispatch(scn->scn_taskq,
2802 scan_io_queues_run_one, vd->vdev_scan_io_queue,
2803 TQ_SLEEP) != TASKQID_INVALID);
2804 }
2805 mutex_exit(&vd->vdev_scan_io_queue_lock);
2806 }
2807
2808 /*
2809 * Wait for the queues to finish issuing their IOs for this run
2810 * before we return. There may still be IOs in flight at this
2811 * point.
2812 */
2813 taskq_wait(scn->scn_taskq);
2814 }
2815
2816 static boolean_t
2817 dsl_scan_async_block_should_pause(dsl_scan_t *scn)
2818 {
2819 uint64_t elapsed_nanosecs;
2820
2821 if (zfs_recover)
2822 return (B_FALSE);
2823
2824 if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks)
2825 return (B_TRUE);
2826
2827 elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
2828 return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
2829 (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
2830 txg_sync_waiting(scn->scn_dp)) ||
2831 spa_shutting_down(scn->scn_dp->dp_spa));
2832 }
2833
2834 static int
2835 dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2836 {
2837 dsl_scan_t *scn = arg;
2838
2839 if (!scn->scn_is_bptree ||
2840 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
2841 if (dsl_scan_async_block_should_pause(scn))
2842 return (SET_ERROR(ERESTART));
2843 }
2844
2845 zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
2846 dmu_tx_get_txg(tx), bp, 0));
2847 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
2848 -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
2849 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
2850 scn->scn_visited_this_txg++;
2851 return (0);
2852 }
2853
2854 static void
2855 dsl_scan_update_stats(dsl_scan_t *scn)
2856 {
2857 spa_t *spa = scn->scn_dp->dp_spa;
2858 uint64_t i;
2859 uint64_t seg_size_total = 0, zio_size_total = 0;
2860 uint64_t seg_count_total = 0, zio_count_total = 0;
2861
2862 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
2863 vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
2864 dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
2865
2866 if (queue == NULL)
2867 continue;
2868
2869 seg_size_total += queue->q_total_seg_size_this_txg;
2870 zio_size_total += queue->q_total_zio_size_this_txg;
2871 seg_count_total += queue->q_segs_this_txg;
2872 zio_count_total += queue->q_zios_this_txg;
2873 }
2874 2875 if (seg_count_total == 0 || zio_count_total == 0) { 2876 scn->scn_avg_seg_size_this_txg = 0; 2877 scn->scn_avg_zio_size_this_txg = 0; 2878 scn->scn_segs_this_txg = 0; 2879 scn->scn_zios_this_txg = 0; 2880 return; 2881 } 2882 2883 scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total; 2884 scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total; 2885 scn->scn_segs_this_txg = seg_count_total; 2886 scn->scn_zios_this_txg = zio_count_total; 2887 } 2888 2889 static int 2890 dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 2891 { 2892 dsl_scan_t *scn = arg; 2893 const dva_t *dva = &bp->blk_dva[0]; 2894 2895 if (dsl_scan_async_block_should_pause(scn)) 2896 return (SET_ERROR(ERESTART)); 2897 2898 spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa, 2899 DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), 2900 DVA_GET_ASIZE(dva), tx); 2901 scn->scn_visited_this_txg++; 2902 return (0); 2903 } 2904 2905 boolean_t 2906 dsl_scan_active(dsl_scan_t *scn) 2907 { 2908 spa_t *spa = scn->scn_dp->dp_spa; 2909 uint64_t used = 0, comp, uncomp; 2910 2911 if (spa->spa_load_state != SPA_LOAD_NONE) 2912 return (B_FALSE); 2913 if (spa_shutting_down(spa)) 2914 return (B_FALSE); 2915 if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) || 2916 (scn->scn_async_destroying && !scn->scn_async_stalled)) 2917 return (B_TRUE); 2918 2919 if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) { 2920 (void) bpobj_space(&scn->scn_dp->dp_free_bpobj, 2921 &used, &comp, &uncomp); 2922 } 2923 return (used != 0); 2924 } 2925 2926 static boolean_t 2927 dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize, 2928 uint64_t phys_birth) 2929 { 2930 vdev_t *vd; 2931 2932 vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2933 2934 if (vd->vdev_ops == &vdev_indirect_ops) { 2935 /* 2936 * The indirect vdev can point to multiple 2937 * vdevs. For simplicity, always create 2938 * the resilver zio_t. zio_vdev_io_start() 2939 * will bypass the child resilver i/o's if 2940 * they are on vdevs that don't have DTL's. 2941 */ 2942 return (B_TRUE); 2943 } 2944 2945 if (DVA_GET_GANG(dva)) { 2946 /* 2947 * Gang members may be spread across multiple 2948 * vdevs, so the best estimate we have is the 2949 * scrub range, which has already been checked. 2950 * XXX -- it would be better to change our 2951 * allocation policy to ensure that all 2952 * gang members reside on the same vdev. 2953 */ 2954 return (B_TRUE); 2955 } 2956 2957 /* 2958 * Check if the txg falls within the range which must be 2959 * resilvered. DVAs outside this range can always be skipped. 2960 */ 2961 if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1)) 2962 return (B_FALSE); 2963 2964 /* 2965 * Check if the top-level vdev must resilver this offset. 2966 * When the offset does not intersect with a dirty leaf DTL 2967 * then it may be possible to skip the resilver IO. The psize 2968 * is provided instead of asize to simplify the check for RAIDZ. 
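 *
 * As a concrete (hypothetical) example: if a child of this top-level
 * vdev was offline from txg 1000 to txg 1500, only DVAs whose phys_birth
 * falls in that window can be missing from it, so everything else was
 * already skipped by the DTL_PARTIAL check above; of the remaining DVAs,
 * only those whose offset overlaps a dirty leaf DTL need a resilver I/O.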
2969 */ 2970 if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize)) 2971 return (B_FALSE); 2972 2973 return (B_TRUE); 2974 } 2975 2976 static int 2977 dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx) 2978 { 2979 int err = 0; 2980 dsl_scan_t *scn = dp->dp_scan; 2981 spa_t *spa = dp->dp_spa; 2982 2983 if (spa_suspend_async_destroy(spa)) 2984 return (0); 2985 2986 if (zfs_free_bpobj_enabled && 2987 spa_version(spa) >= SPA_VERSION_DEADLISTS) { 2988 scn->scn_is_bptree = B_FALSE; 2989 scn->scn_async_block_min_time_ms = zfs_free_min_time_ms; 2990 scn->scn_zio_root = zio_root(spa, NULL, 2991 NULL, ZIO_FLAG_MUSTSUCCEED); 2992 err = bpobj_iterate(&dp->dp_free_bpobj, 2993 dsl_scan_free_block_cb, scn, tx); 2994 VERIFY0(zio_wait(scn->scn_zio_root)); 2995 scn->scn_zio_root = NULL; 2996 2997 if (err != 0 && err != ERESTART) 2998 zfs_panic_recover("error %u from bpobj_iterate()", err); 2999 } 3000 3001 if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 3002 ASSERT(scn->scn_async_destroying); 3003 scn->scn_is_bptree = B_TRUE; 3004 scn->scn_zio_root = zio_root(spa, NULL, 3005 NULL, ZIO_FLAG_MUSTSUCCEED); 3006 err = bptree_iterate(dp->dp_meta_objset, 3007 dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx); 3008 VERIFY0(zio_wait(scn->scn_zio_root)); 3009 scn->scn_zio_root = NULL; 3010 3011 if (err == EIO || err == ECKSUM) { 3012 err = 0; 3013 } else if (err != 0 && err != ERESTART) { 3014 zfs_panic_recover("error %u from " 3015 "traverse_dataset_destroyed()", err); 3016 } 3017 3018 if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) { 3019 /* finished; deactivate async destroy feature */ 3020 spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx); 3021 ASSERT(!spa_feature_is_active(spa, 3022 SPA_FEATURE_ASYNC_DESTROY)); 3023 VERIFY0(zap_remove(dp->dp_meta_objset, 3024 DMU_POOL_DIRECTORY_OBJECT, 3025 DMU_POOL_BPTREE_OBJ, tx)); 3026 VERIFY0(bptree_free(dp->dp_meta_objset, 3027 dp->dp_bptree_obj, tx)); 3028 dp->dp_bptree_obj = 0; 3029 scn->scn_async_destroying = B_FALSE; 3030 scn->scn_async_stalled = B_FALSE; 3031 } else { 3032 /* 3033 * If we didn't make progress, mark the async 3034 * destroy as stalled, so that we will not initiate 3035 * a spa_sync() on its behalf. Note that we only 3036 * check this if we are not finished, because if the 3037 * bptree had no blocks for us to visit, we can 3038 * finish without "making progress". 3039 */ 3040 scn->scn_async_stalled = 3041 (scn->scn_visited_this_txg == 0); 3042 } 3043 } 3044 if (scn->scn_visited_this_txg) { 3045 zfs_dbgmsg("freed %llu blocks in %llums from " 3046 "free_bpobj/bptree txg %llu; err=%d", 3047 (longlong_t)scn->scn_visited_this_txg, 3048 (longlong_t) 3049 NSEC2MSEC(gethrtime() - scn->scn_sync_start_time), 3050 (longlong_t)tx->tx_txg, err); 3051 scn->scn_visited_this_txg = 0; 3052 3053 /* 3054 * Write out changes to the DDT that may be required as a 3055 * result of the blocks freed. This ensures that the DDT 3056 * is clean when a scrub/resilver runs. 3057 */ 3058 ddt_sync(spa, tx->tx_txg); 3059 } 3060 if (err != 0) 3061 return (err); 3062 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && 3063 zfs_free_leak_on_eio && 3064 (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 || 3065 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 || 3066 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) { 3067 /* 3068 * We have finished background destroying, but there is still 3069 * some space left in the dp_free_dir. Transfer this leaked 3070 * space to the dp_leak_dir. 
3071 */
3072 if (dp->dp_leak_dir == NULL) {
3073 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
3074 (void) dsl_dir_create_sync(dp, dp->dp_root_dir,
3075 LEAK_DIR_NAME, tx);
3076 VERIFY0(dsl_pool_open_special_dir(dp,
3077 LEAK_DIR_NAME, &dp->dp_leak_dir));
3078 rrw_exit(&dp->dp_config_rwlock, FTAG);
3079 }
3080 dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
3081 dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3082 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3083 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3084 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
3085 -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3086 -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3087 -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3088 }
3089
3090 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
3091 /* finished; verify that space accounting went to zero */
3092 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
3093 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
3094 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
3095 }
3096
3097 EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
3098 0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3099 DMU_POOL_OBSOLETE_BPOBJ));
3100 if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
3101 ASSERT(spa_feature_is_active(dp->dp_spa,
3102 SPA_FEATURE_OBSOLETE_COUNTS));
3103
3104 scn->scn_is_bptree = B_FALSE;
3105 scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
3106 err = bpobj_iterate(&dp->dp_obsolete_bpobj,
3107 dsl_scan_obsolete_block_cb, scn, tx);
3108 if (err != 0 && err != ERESTART)
3109 zfs_panic_recover("error %u from bpobj_iterate()", err);
3110
3111 if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
3112 dsl_pool_destroy_obsolete_bpobj(dp, tx);
3113 }
3114
3115 return (0);
3116 }
3117
3118 /*
3119 * This is the primary entry point for scans; it is called from syncing
3120 * context. Scans must happen entirely during syncing context so that we
3121 * can guarantee that blocks we are currently scanning will not change out
3122 * from under us. While a scan is active, this function controls how quickly
3123 * transaction groups proceed, instead of the normal handling provided by
3124 * txg_sync_thread().
3125 */
3126 void
3127 dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
3128 {
3129 dsl_scan_t *scn = dp->dp_scan;
3130 spa_t *spa = dp->dp_spa;
3131 int err = 0;
3132 state_sync_type_t sync_type = SYNC_OPTIONAL;
3133
3134 /*
3135 * Check for scn_restart_txg before checking spa_load_state, so
3136 * that we can restart an old-style scan while the pool is being
3137 * imported (see dsl_scan_init).
3138 */
3139 if (dsl_scan_restarting(scn, tx)) {
3140 pool_scan_func_t func = POOL_SCAN_SCRUB;
3141 dsl_scan_done(scn, B_FALSE, tx);
3142 if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
3143 func = POOL_SCAN_RESILVER;
3144 zfs_dbgmsg("restarting scan func=%u txg=%llu",
3145 func, (longlong_t)tx->tx_txg);
3146 dsl_scan_setup_sync(&func, tx);
3147 }
3148
3149 /*
3150 * Only process scans in sync pass 1.
3151 */
3152 if (spa_sync_pass(dp->dp_spa) > 1)
3153 return;
3154
3155 /*
3156 * If the spa is shutting down, then stop scanning. This will
3157 * ensure that the scan does not dirty any new data during the
3158 * shutdown phase.
3159 */
3160 if (spa_shutting_down(spa))
3161 return;
3162
3163 /*
3164 * If the scan is inactive due to a stalled async destroy, try again.
3165 */
3166 if (!scn->scn_async_stalled && !dsl_scan_active(scn))
3167 return;
3168
3169 /* reset scan statistics */
3170 scn->scn_visited_this_txg = 0;
3171 scn->scn_holes_this_txg = 0;
3172 scn->scn_lt_min_this_txg = 0;
3173 scn->scn_gt_max_this_txg = 0;
3174 scn->scn_ddt_contained_this_txg = 0;
3175 scn->scn_objsets_visited_this_txg = 0;
3176 scn->scn_avg_seg_size_this_txg = 0;
3177 scn->scn_segs_this_txg = 0;
3178 scn->scn_avg_zio_size_this_txg = 0;
3179 scn->scn_zios_this_txg = 0;
3180 scn->scn_suspending = B_FALSE;
3181 scn->scn_sync_start_time = gethrtime();
3182 spa->spa_scrub_active = B_TRUE;
3183
3184 /*
3185 * First process the async destroys. If we pause, don't do
3186 * any scrubbing or resilvering. This ensures that there are no
3187 * async destroys while we are scanning, so the scan code doesn't
3188 * have to worry about traversing them. It is also faster to free the
3189 * blocks than to scrub them.
3190 */
3191 err = dsl_process_async_destroys(dp, tx);
3192 if (err != 0)
3193 return;
3194
3195 if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
3196 return;
3197
3198 /*
3199 * Wait a few txgs after importing to begin scanning so that
3200 * we can get the pool imported quickly.
3201 */
3202 if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
3203 return;
3204
3205 /*
3206 * It is possible to switch from unsorted to sorted at any time,
3207 * but afterwards the scan will remain sorted unless reloaded from
3208 * a checkpoint after a reboot.
3209 */
3210 if (!zfs_scan_legacy) {
3211 scn->scn_is_sorted = B_TRUE;
3212 if (scn->scn_last_checkpoint == 0)
3213 scn->scn_last_checkpoint = ddi_get_lbolt();
3214 }
3215
3216 /*
3217 * For sorted scans, determine what kind of work we will be doing
3218 * this txg based on our memory limitations and whether or not we
3219 * need to perform a checkpoint.
3220 */
3221 if (scn->scn_is_sorted) {
3222 /*
3223 * If we are over our checkpoint interval, set scn_clearing
3224 * so that we can begin checkpointing immediately. The
3225 * checkpoint allows us to save a consistent bookmark
3226 * representing how much data we have scrubbed so far.
3227 * Otherwise, use the memory limit to determine if we should
3228 * scan for metadata or start issuing scrub IOs. We accumulate
3229 * metadata until we hit our hard memory limit, at which point
3230 * we issue scrub IOs until we are at our soft memory limit.
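 *
 * Put differently (illustrative flow): while under the memory limit
 * and inside the checkpoint interval, each txg only gathers metadata;
 * once dsl_scan_should_clear() reports we are over the limit,
 * scn_clearing is set and subsequent txgs drain the sorted queues
 * until we drop back under it; and once the checkpoint interval
 * expires, scn_checkpointing forces the queues to drain completely so
 * that a new on-disk bookmark can be saved.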
3231 */ 3232 if (scn->scn_checkpointing || 3233 ddi_get_lbolt() - scn->scn_last_checkpoint > 3234 SEC_TO_TICK(zfs_scan_checkpoint_intval)) { 3235 if (!scn->scn_checkpointing) 3236 zfs_dbgmsg("begin scan checkpoint"); 3237 3238 scn->scn_checkpointing = B_TRUE; 3239 scn->scn_clearing = B_TRUE; 3240 } else { 3241 boolean_t should_clear = dsl_scan_should_clear(scn); 3242 if (should_clear && !scn->scn_clearing) { 3243 zfs_dbgmsg("begin scan clearing"); 3244 scn->scn_clearing = B_TRUE; 3245 } else if (!should_clear && scn->scn_clearing) { 3246 zfs_dbgmsg("finish scan clearing"); 3247 scn->scn_clearing = B_FALSE; 3248 } 3249 } 3250 } else { 3251 ASSERT0(scn->scn_checkpointing); 3252 ASSERT0(scn->scn_clearing); 3253 } 3254 3255 if (!scn->scn_clearing && scn->scn_done_txg == 0) { 3256 /* Need to scan metadata for more blocks to scrub */ 3257 dsl_scan_phys_t *scnp = &scn->scn_phys; 3258 taskqid_t prefetch_tqid; 3259 uint64_t bytes_per_leaf = zfs_scan_vdev_limit; 3260 uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev); 3261 3262 /* 3263 * Calculate the max number of in-flight bytes for pool-wide 3264 * scanning operations (minimum 1MB). Limits for the issuing 3265 * phase are done per top-level vdev and are handled separately. 3266 */ 3267 scn->scn_maxinflight_bytes = 3268 MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); 3269 3270 if (scnp->scn_ddt_bookmark.ddb_class <= 3271 scnp->scn_ddt_class_max) { 3272 ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark)); 3273 zfs_dbgmsg("doing scan sync txg %llu; " 3274 "ddt bm=%llu/%llu/%llu/%llx", 3275 (longlong_t)tx->tx_txg, 3276 (longlong_t)scnp->scn_ddt_bookmark.ddb_class, 3277 (longlong_t)scnp->scn_ddt_bookmark.ddb_type, 3278 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, 3279 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); 3280 } else { 3281 zfs_dbgmsg("doing scan sync txg %llu; " 3282 "bm=%llu/%llu/%llu/%llu", 3283 (longlong_t)tx->tx_txg, 3284 (longlong_t)scnp->scn_bookmark.zb_objset, 3285 (longlong_t)scnp->scn_bookmark.zb_object, 3286 (longlong_t)scnp->scn_bookmark.zb_level, 3287 (longlong_t)scnp->scn_bookmark.zb_blkid); 3288 } 3289 3290 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 3291 NULL, ZIO_FLAG_CANFAIL); 3292 3293 scn->scn_prefetch_stop = B_FALSE; 3294 prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq, 3295 dsl_scan_prefetch_thread, scn, TQ_SLEEP); 3296 ASSERT(prefetch_tqid != TASKQID_INVALID); 3297 3298 dsl_pool_config_enter(dp, FTAG); 3299 dsl_scan_visit(scn, tx); 3300 dsl_pool_config_exit(dp, FTAG); 3301 3302 mutex_enter(&dp->dp_spa->spa_scrub_lock); 3303 scn->scn_prefetch_stop = B_TRUE; 3304 cv_broadcast(&spa->spa_scrub_io_cv); 3305 mutex_exit(&dp->dp_spa->spa_scrub_lock); 3306 3307 taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid); 3308 (void) zio_wait(scn->scn_zio_root); 3309 scn->scn_zio_root = NULL; 3310 3311 zfs_dbgmsg("scan visited %llu blocks in %llums " 3312 "(%llu os's, %llu holes, %llu < mintxg, " 3313 "%llu in ddt, %llu > maxtxg)", 3314 (longlong_t)scn->scn_visited_this_txg, 3315 (longlong_t)NSEC2MSEC(gethrtime() - 3316 scn->scn_sync_start_time), 3317 (longlong_t)scn->scn_objsets_visited_this_txg, 3318 (longlong_t)scn->scn_holes_this_txg, 3319 (longlong_t)scn->scn_lt_min_this_txg, 3320 (longlong_t)scn->scn_ddt_contained_this_txg, 3321 (longlong_t)scn->scn_gt_max_this_txg); 3322 3323 if (!scn->scn_suspending) { 3324 ASSERT0(avl_numnodes(&scn->scn_queue)); 3325 scn->scn_done_txg = tx->tx_txg + 1; 3326 if (scn->scn_is_sorted) { 3327 scn->scn_checkpointing = B_TRUE; 3328 scn->scn_clearing = B_TRUE; 3329 } 3330 zfs_dbgmsg("scan complete txg 
%llu", 3331 (longlong_t)tx->tx_txg); 3332 } 3333 } else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) { 3334 /* need to issue scrubbing IOs from per-vdev queues */ 3335 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 3336 NULL, ZIO_FLAG_CANFAIL); 3337 scan_io_queues_run(scn); 3338 (void) zio_wait(scn->scn_zio_root); 3339 scn->scn_zio_root = NULL; 3340 3341 /* calculate and dprintf the current memory usage */ 3342 (void) dsl_scan_should_clear(scn); 3343 dsl_scan_update_stats(scn); 3344 3345 zfs_dbgmsg("scrubbed %llu blocks (%llu segs) in %llums " 3346 "(avg_block_size = %llu, avg_seg_size = %llu)", 3347 (longlong_t)scn->scn_zios_this_txg, 3348 (longlong_t)scn->scn_segs_this_txg, 3349 (longlong_t)NSEC2MSEC(gethrtime() - 3350 scn->scn_sync_start_time), 3351 (longlong_t)scn->scn_avg_zio_size_this_txg, 3352 (longlong_t)scn->scn_avg_seg_size_this_txg); 3353 } else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) { 3354 /* Finished with everything. Mark the scrub as complete */ 3355 zfs_dbgmsg("scan issuing complete txg %llu", 3356 (longlong_t)tx->tx_txg); 3357 ASSERT3U(scn->scn_done_txg, !=, 0); 3358 ASSERT0(spa->spa_scrub_inflight); 3359 ASSERT0(scn->scn_bytes_pending); 3360 dsl_scan_done(scn, B_TRUE, tx); 3361 sync_type = SYNC_MANDATORY; 3362 } 3363 3364 dsl_scan_sync_state(scn, tx, sync_type); 3365 } 3366 3367 static void 3368 count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp) 3369 { 3370 int i; 3371 3372 /* update the spa's stats on how many bytes we have issued */ 3373 for (i = 0; i < BP_GET_NDVAS(bp); i++) { 3374 atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued, 3375 DVA_GET_ASIZE(&bp->blk_dva[i])); 3376 } 3377 3378 /* 3379 * If we resume after a reboot, zab will be NULL; don't record 3380 * incomplete stats in that case. 3381 */ 3382 if (zab == NULL) 3383 return; 3384 3385 mutex_enter(&zab->zab_lock); 3386 3387 for (i = 0; i < 4; i++) { 3388 int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; 3389 int t = (i & 1) ? 
BP_GET_TYPE(bp) : DMU_OT_TOTAL; 3390 if (t & DMU_OT_NEWTYPE) 3391 t = DMU_OT_OTHER; 3392 zfs_blkstat_t *zb = &zab->zab_type[l][t]; 3393 int equal; 3394 3395 zb->zb_count++; 3396 zb->zb_asize += BP_GET_ASIZE(bp); 3397 zb->zb_lsize += BP_GET_LSIZE(bp); 3398 zb->zb_psize += BP_GET_PSIZE(bp); 3399 zb->zb_gangs += BP_COUNT_GANG(bp); 3400 3401 switch (BP_GET_NDVAS(bp)) { 3402 case 2: 3403 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 3404 DVA_GET_VDEV(&bp->blk_dva[1])) 3405 zb->zb_ditto_2_of_2_samevdev++; 3406 break; 3407 case 3: 3408 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 3409 DVA_GET_VDEV(&bp->blk_dva[1])) + 3410 (DVA_GET_VDEV(&bp->blk_dva[0]) == 3411 DVA_GET_VDEV(&bp->blk_dva[2])) + 3412 (DVA_GET_VDEV(&bp->blk_dva[1]) == 3413 DVA_GET_VDEV(&bp->blk_dva[2])); 3414 if (equal == 1) 3415 zb->zb_ditto_2_of_3_samevdev++; 3416 else if (equal == 3) 3417 zb->zb_ditto_3_of_3_samevdev++; 3418 break; 3419 } 3420 } 3421 3422 mutex_exit(&zab->zab_lock); 3423 } 3424 3425 static void 3426 scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio) 3427 { 3428 avl_index_t idx; 3429 int64_t asize = sio->sio_asize; 3430 dsl_scan_t *scn = queue->q_scn; 3431 3432 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3433 3434 if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) { 3435 /* block is already scheduled for reading */ 3436 atomic_add_64(&scn->scn_bytes_pending, -asize); 3437 kmem_free(sio, sizeof (*sio)); 3438 return; 3439 } 3440 avl_insert(&queue->q_sios_by_addr, sio, idx); 3441 range_tree_add(queue->q_exts_by_addr, sio->sio_offset, asize); 3442 } 3443 3444 /* 3445 * Given all the info we got from our metadata scanning process, we 3446 * construct a scan_io_t and insert it into the scan sorting queue. The 3447 * I/O must already be suitable for us to process. This is controlled 3448 * by dsl_scan_enqueue(). 3449 */ 3450 static void 3451 scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i, 3452 int zio_flags, const zbookmark_phys_t *zb) 3453 { 3454 dsl_scan_t *scn = queue->q_scn; 3455 scan_io_t *sio = kmem_zalloc(sizeof (*sio), KM_SLEEP); 3456 3457 ASSERT0(BP_IS_GANG(bp)); 3458 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3459 3460 bp2sio(bp, sio, dva_i); 3461 sio->sio_flags = zio_flags; 3462 sio->sio_zb = *zb; 3463 3464 /* 3465 * Increment the bytes pending counter now so that we can't 3466 * get an integer underflow in case the worker processes the 3467 * zio before we get to incrementing this counter. 3468 */ 3469 atomic_add_64(&scn->scn_bytes_pending, sio->sio_asize); 3470 3471 scan_io_queue_insert_impl(queue, sio); 3472 } 3473 3474 /* 3475 * Given a set of I/O parameters as discovered by the metadata traversal 3476 * process, attempts to place the I/O into the sorted queues (if allowed), 3477 * or immediately executes the I/O. 3478 */ 3479 static void 3480 dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 3481 const zbookmark_phys_t *zb) 3482 { 3483 spa_t *spa = dp->dp_spa; 3484 3485 ASSERT(!BP_IS_EMBEDDED(bp)); 3486 3487 /* 3488 * Gang blocks are hard to issue sequentially, so we just issue them 3489 * here immediately instead of queuing them. 
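 * Sortable (non-gang) blocks are instead split up per DVA below, and each
 * DVA is queued on the scan I/O queue of its own top-level vdev.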
3490 */ 3491 if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) { 3492 scan_exec_io(dp, bp, zio_flags, zb, NULL); 3493 return; 3494 } 3495 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 3496 dva_t dva; 3497 vdev_t *vdev; 3498 3499 dva = bp->blk_dva[i]; 3500 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva)); 3501 ASSERT(vdev != NULL); 3502 3503 mutex_enter(&vdev->vdev_scan_io_queue_lock); 3504 if (vdev->vdev_scan_io_queue == NULL) 3505 vdev->vdev_scan_io_queue = scan_io_queue_create(vdev); 3506 ASSERT(dp->dp_scan != NULL); 3507 scan_io_queue_insert(vdev->vdev_scan_io_queue, bp, 3508 i, zio_flags, zb); 3509 mutex_exit(&vdev->vdev_scan_io_queue_lock); 3510 } 3511 } 3512 3513 static int 3514 dsl_scan_scrub_cb(dsl_pool_t *dp, 3515 const blkptr_t *bp, const zbookmark_phys_t *zb) 3516 { 3517 dsl_scan_t *scn = dp->dp_scan; 3518 spa_t *spa = dp->dp_spa; 3519 uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp); 3520 size_t psize = BP_GET_PSIZE(bp); 3521 boolean_t needs_io; 3522 int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; 3523 int d; 3524 3525 if (phys_birth <= scn->scn_phys.scn_min_txg || 3526 phys_birth >= scn->scn_phys.scn_max_txg) { 3527 count_block(scn, dp->dp_blkstats, bp); 3528 return (0); 3529 } 3530 3531 /* Embedded BP's have phys_birth==0, so we reject them above. */ 3532 ASSERT(!BP_IS_EMBEDDED(bp)); 3533 3534 ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn)); 3535 if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) { 3536 zio_flags |= ZIO_FLAG_SCRUB; 3537 needs_io = B_TRUE; 3538 } else { 3539 ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER); 3540 zio_flags |= ZIO_FLAG_RESILVER; 3541 needs_io = B_FALSE; 3542 } 3543 3544 /* If it's an intent log block, failure is expected. */ 3545 if (zb->zb_level == ZB_ZIL_LEVEL) 3546 zio_flags |= ZIO_FLAG_SPECULATIVE; 3547 3548 for (d = 0; d < BP_GET_NDVAS(bp); d++) { 3549 const dva_t *dva = &bp->blk_dva[d]; 3550 3551 /* 3552 * Keep track of how much data we've examined so that 3553 * zpool(1M) status can make useful progress reports. 3554 */ 3555 scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva); 3556 spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva); 3557 3558 /* if it's a resilver, this may not be in the target range */ 3559 if (!needs_io) 3560 needs_io = dsl_scan_need_resilver(spa, dva, psize, 3561 phys_birth); 3562 } 3563 3564 if (needs_io && !zfs_no_scrub_io) { 3565 dsl_scan_enqueue(dp, bp, zio_flags, zb); 3566 } else { 3567 count_block(scn, dp->dp_blkstats, bp); 3568 } 3569 3570 /* do not relocate this block */ 3571 return (0); 3572 } 3573 3574 static void 3575 dsl_scan_scrub_done(zio_t *zio) 3576 { 3577 spa_t *spa = zio->io_spa; 3578 blkptr_t *bp = zio->io_bp; 3579 dsl_scan_io_queue_t *queue = zio->io_private; 3580 3581 abd_free(zio->io_abd); 3582 3583 if (queue == NULL) { 3584 mutex_enter(&spa->spa_scrub_lock); 3585 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); 3586 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); 3587 cv_broadcast(&spa->spa_scrub_io_cv); 3588 mutex_exit(&spa->spa_scrub_lock); 3589 } else { 3590 mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock); 3591 ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp)); 3592 queue->q_inflight_bytes -= BP_GET_PSIZE(bp); 3593 cv_broadcast(&queue->q_zio_cv); 3594 mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock); 3595 } 3596 3597 if (zio->io_error && (zio->io_error != ECKSUM || 3598 !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) { 3599 atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors); 3600 } 3601 } 3602 3603 /* 3604 * Given a scanning zio's information, executes the zio. 
The zio need
3605 * not be sortable; this function simply executes the
3606 * zio, no matter what it is. The optional queue argument allows the
3607 * caller to specify that they want per-top-level-vdev I/O rate limiting
3608 * instead of the legacy global limiting.
3609 */
3610 static void
3611 scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
3612 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
3613 {
3614 spa_t *spa = dp->dp_spa;
3615 dsl_scan_t *scn = dp->dp_scan;
3616 size_t size = BP_GET_PSIZE(bp);
3617 abd_t *data = abd_alloc_for_io(size, B_FALSE);
3618
3619 if (queue == NULL) {
3620 mutex_enter(&spa->spa_scrub_lock);
3621 while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
3622 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
3623 spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
3624 mutex_exit(&spa->spa_scrub_lock);
3625 } else {
3626 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
3627
3628 mutex_enter(q_lock);
3629 while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
3630 cv_wait(&queue->q_zio_cv, q_lock);
3631 queue->q_inflight_bytes += BP_GET_PSIZE(bp);
3632 mutex_exit(q_lock);
3633 }
3634
3635 count_block(dp->dp_scan, dp->dp_blkstats, bp);
3636 zio_nowait(zio_read(dp->dp_scan->scn_zio_root, spa, bp, data, size,
3637 dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
3638 }
3639
3640 /*
3641 * This is the primary extent sorting algorithm. We balance two parameters:
3642 * 1) how many bytes of I/O are in an extent
3643 * 2) how well the extent is filled with I/O (as a fraction of its total size)
3644 * Since we allow extents to have gaps between their constituent I/Os, it's
3645 * possible to have a fairly large extent that contains the same amount of
3646 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
3647 * The algorithm sorts based on a score calculated from the extent's size,
3648 * the relative fill volume (in %) and a "fill weight" parameter that controls
3649 * the split between whether we prefer larger extents or more fully populated
3650 * extents:
3651 *
3652 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT) / 100
3653 *
3654 * Example:
3655 * 1) assume extsz = 64 MiB
3656 * 2) assume fill = 32 MiB (extent is half full)
3657 * 3) assume fill_weight = 3
3658 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
3659 * SCORE = 32M + (50 * 3 * 32M) / 100
3660 * SCORE = 32M + (4800M / 100)
3661 * SCORE = 32M + 48M
3662 * ^ ^
3663 * | +--- final total relative fill-based score
3664 * +--------- final total fill-based score
3665 * SCORE = 80M
3666 *
3667 * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
3668 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
3669 * Note that as an optimization, we replace multiplication and division by
3670 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
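 *
 * Running the example above through the shifted form used in
 * ext_size_compare() below gives the same result:
 *   ((32M << 7) / 64M) = 64
 *   (64 * 3 * 32M) >> 7 = 48M
 *   SCORE = 32M + 48M = 80M
 * In general the shifted form only approximates the percentage math (the
 * integer divisions round differently), but the bias it introduces between
 * extent size and extent fill is the same.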
3671 */
3672 static int
3673 ext_size_compare(const void *x, const void *y)
3674 {
3675 const range_seg_t *rsa = x, *rsb = y;
3676 uint64_t sa = rsa->rs_end - rsa->rs_start,
3677 sb = rsb->rs_end - rsb->rs_start;
3678 uint64_t score_a, score_b;
3679
3680 score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
3681 fill_weight * rsa->rs_fill) >> 7);
3682 score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
3683 fill_weight * rsb->rs_fill) >> 7);
3684
3685 if (score_a > score_b)
3686 return (-1);
3687 if (score_a == score_b) {
3688 if (rsa->rs_start < rsb->rs_start)
3689 return (-1);
3690 if (rsa->rs_start == rsb->rs_start)
3691 return (0);
3692 return (1);
3693 }
3694 return (1);
3695 }
3696
3697 /*
3698 * Comparator for the q_sios_by_addr tree. Sorting is performed in
3699 * LBA order (from lowest to highest).
3700 */
3701 static int
3702 io_addr_compare(const void *x, const void *y)
3703 {
3704 const scan_io_t *a = x, *b = y;
3705
3706 if (a->sio_offset < b->sio_offset)
3707 return (-1);
3708 if (a->sio_offset == b->sio_offset)
3709 return (0);
3710 return (1);
3711 }
3712
3713 /* I/O queues are created on demand. */
3714 static dsl_scan_io_queue_t *
3715 scan_io_queue_create(vdev_t *vd)
3716 {
3717 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
3718 dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
3719
3720 q->q_scn = scn;
3721 q->q_vd = vd;
3722 cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
3723 q->q_exts_by_addr = range_tree_create_impl(&rt_avl_ops,
3724 &q->q_exts_by_size, ext_size_compare, zfs_scan_max_ext_gap);
3725 avl_create(&q->q_sios_by_addr, io_addr_compare,
3726 sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
3727
3728 return (q);
3729 }
3730
3731 /*
3732 * Destroys a scan queue and all segments and scan_io_t's contained in it.
3733 * No further execution of I/O occurs; anything pending in the queue is
3734 * simply freed without being executed.
3735 */
3736 void
3737 dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
3738 {
3739 dsl_scan_t *scn = queue->q_scn;
3740 scan_io_t *sio;
3741 void *cookie = NULL;
3742 int64_t bytes_dequeued = 0;
3743
3744 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3745
3746 while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
3747 NULL) {
3748 ASSERT(range_tree_contains(queue->q_exts_by_addr,
3749 sio->sio_offset, sio->sio_asize));
3750 bytes_dequeued += sio->sio_asize;
3751 kmem_free(sio, sizeof (*sio));
3752 }
3753
3754 atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
3755 range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
3756 range_tree_destroy(queue->q_exts_by_addr);
3757 avl_destroy(&queue->q_sios_by_addr);
3758 cv_destroy(&queue->q_zio_cv);
3759
3760 kmem_free(queue, sizeof (*queue));
3761 }
3762
3763 /*
3764 * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
3765 * called on behalf of vdev_top_transfer() when creating or destroying
3766 * a mirror vdev due to zpool attach/detach.
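 * Both vdevs' queue locks are held across the hand-off so that no scan
 * I/O can be queued or freed on either vdev while the queue pointer (and
 * its q_vd back-reference) is being updated.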
3767 */
3768 void
3769 dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
3770 {
3771 mutex_enter(&svd->vdev_scan_io_queue_lock);
3772 mutex_enter(&tvd->vdev_scan_io_queue_lock);
3773
3774 VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
3775 tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
3776 svd->vdev_scan_io_queue = NULL;
3777 if (tvd->vdev_scan_io_queue != NULL)
3778 tvd->vdev_scan_io_queue->q_vd = tvd;
3779
3780 mutex_exit(&tvd->vdev_scan_io_queue_lock);
3781 mutex_exit(&svd->vdev_scan_io_queue_lock);
3782 }
3783
3784 static void
3785 scan_io_queues_destroy(dsl_scan_t *scn)
3786 {
3787 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
3788
3789 for (uint64_t i = 0; i < rvd->vdev_children; i++) {
3790 vdev_t *tvd = rvd->vdev_child[i];
3791
3792 mutex_enter(&tvd->vdev_scan_io_queue_lock);
3793 if (tvd->vdev_scan_io_queue != NULL)
3794 dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
3795 tvd->vdev_scan_io_queue = NULL;
3796 mutex_exit(&tvd->vdev_scan_io_queue_lock);
3797 }
3798 }
3799
3800 static void
3801 dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
3802 {
3803 dsl_pool_t *dp = spa->spa_dsl_pool;
3804 dsl_scan_t *scn = dp->dp_scan;
3805 vdev_t *vdev;
3806 kmutex_t *q_lock;
3807 dsl_scan_io_queue_t *queue;
3808 scan_io_t srch, *sio;
3809 avl_index_t idx;
3810 uint64_t start, size;
3811
3812 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
3813 ASSERT(vdev != NULL);
3814 q_lock = &vdev->vdev_scan_io_queue_lock;
3815 queue = vdev->vdev_scan_io_queue;
3816
3817 mutex_enter(q_lock);
3818 if (queue == NULL) {
3819 mutex_exit(q_lock);
3820 return;
3821 }
3822
3823 bp2sio(bp, &srch, dva_i);
3824 start = srch.sio_offset;
3825 size = srch.sio_asize;
3826
3827 /*
3828 * We can find the zio in two states:
3829 * 1) Cold, just sitting in the queue of zio's to be issued at
3830 * some point in the future. In this case, all we do is
3831 * remove the zio from the q_sios_by_addr tree, decrement
3832 * its data volume from the containing range_seg_t and
3833 * resort the q_exts_by_size tree to reflect that the
3834 * range_seg_t has lost some of its 'fill'. We don't shorten
3835 * the range_seg_t - this is usually rare enough not to be
3836 * worth the extra hassle of trying to keep track of precise
3837 * extent boundaries.
3838 * 2) Hot, where the zio is currently in-flight in
3839 * dsl_scan_issue_ios(). In this case, we can't simply
3840 * reach in and stop the in-flight zio's, so we instead
3841 * block the caller. Eventually, dsl_scan_issue_ios() will
3842 * be done with issuing the zio's it gathered and will
3843 * signal us.
3844 */
3845 sio = avl_find(&queue->q_sios_by_addr, &srch, &idx);
3846 if (sio != NULL) {
3847 int64_t asize = sio->sio_asize;
3848 blkptr_t tmpbp;
3849
3850 /* Got it while it was cold in the queue */
3851 ASSERT3U(start, ==, sio->sio_offset);
3852 ASSERT3U(size, ==, asize);
3853 avl_remove(&queue->q_sios_by_addr, sio);
3854
3855 ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
3856 range_tree_remove_fill(queue->q_exts_by_addr, start, size);
3857
3858 /*
3859 * We only update scn_bytes_pending in the cold path;
3860 * otherwise it will already have been accounted for as
3861 * part of the zio's execution.
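 * (scn_bytes_pending is a pool-global counter shared by all per-vdev
 * queues, which is why it is adjusted with atomic_add_64() rather than
 * being protected by any single queue lock.)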
3862 */
3863 atomic_add_64(&scn->scn_bytes_pending, -asize);
3864
3865 /* count the block as though we issued it */
3866 sio2bp(sio, &tmpbp, dva_i);
3867 count_block(scn, dp->dp_blkstats, &tmpbp);
3868
3869 kmem_free(sio, sizeof (*sio));
3870 }
3871 mutex_exit(q_lock);
3872 }
3873
3874 /*
3875 * Callback invoked when a zio_free() zio is executing. Frees need to be
3876 * intercepted here so that the block is removed from our queues;
3877 * otherwise the freed portion of disk space could be reallocated and
3878 * written to while we still have it queued up for processing.
3879 */
3880 void
3881 dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
3882 {
3883 dsl_pool_t *dp = spa->spa_dsl_pool;
3884 dsl_scan_t *scn = dp->dp_scan;
3885
3886 ASSERT(!BP_IS_EMBEDDED(bp));
3887 ASSERT(scn != NULL);
3888 if (!dsl_scan_is_running(scn))
3889 return;
3890
3891 for (int i = 0; i < BP_GET_NDVAS(bp); i++)
3892 dsl_scan_freed_dva(spa, bp, i);
3893 }
3894