xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_scan.c (revision ee2f9ca4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
24  * Copyright 2016 Gary Mills
25  * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
26  * Copyright 2017 Joyent, Inc.
27  * Copyright (c) 2017 Datto Inc.
28  */
29 
30 #include <sys/dsl_scan.h>
31 #include <sys/dsl_pool.h>
32 #include <sys/dsl_dataset.h>
33 #include <sys/dsl_prop.h>
34 #include <sys/dsl_dir.h>
35 #include <sys/dsl_synctask.h>
36 #include <sys/dnode.h>
37 #include <sys/dmu_tx.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/arc.h>
40 #include <sys/zap.h>
41 #include <sys/zio.h>
42 #include <sys/zfs_context.h>
43 #include <sys/fs/zfs.h>
44 #include <sys/zfs_znode.h>
45 #include <sys/spa_impl.h>
46 #include <sys/vdev_impl.h>
47 #include <sys/zil_impl.h>
48 #include <sys/zio_checksum.h>
49 #include <sys/ddt.h>
50 #include <sys/sa.h>
51 #include <sys/sa_impl.h>
52 #include <sys/zfeature.h>
53 #include <sys/abd.h>
54 #include <sys/range_tree.h>
55 #ifdef _KERNEL
56 #include <sys/zfs_vfsops.h>
57 #endif
58 
59 /*
60  * Grand theory statement on scan queue sorting
61  *
62  * Scanning is implemented by recursively traversing all indirection levels
63  * in an object and reading all blocks referenced from said objects. This
64  * results in us approximately traversing the object from lowest logical
65  * offset to the highest. For best performance, we would want the logical
66  * blocks to be physically contiguous. However, this is frequently not the
67  * case with pools given the allocation patterns of copy-on-write filesystems.
68  * So instead, we put the I/Os into a reordering queue and issue them in a
69  * way that will most benefit physical disks (LBA-order).
70  *
71  * Queue management:
72  *
73  * Ideally, we would want to scan all metadata and queue up all block I/O
74  * prior to starting to issue it, because that allows us to do an optimal
75  * sorting job. This can however consume large amounts of memory. Therefore
76  * we continuously monitor the size of the queues and constrain them to 5%
77  * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
78  * limit, we clear out a few of the largest extents at the head of the queues
79  * to make room for more scanning. Hopefully, these extents will be fairly
80  * large and contiguous, allowing us to approach sequential I/O throughput
81  * even without a fully sorted tree.
82  *
83  * Metadata scanning takes place in dsl_scan_visit(), which is called from
84  * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
85  * metadata on the pool, or we need to make room in memory because our
86  * queues are too large, dsl_scan_visit() is postponed and
87  * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
88  * that metadata scanning and queued I/O issuing are mutually exclusive. This
89  * allows us to provide maximum sequential I/O throughput for the majority of
90  * I/Os issued since sequential I/O performance is significantly negatively
91  * impacted if it is interleaved with random I/O.
92  *
93  * Implementation Notes
94  *
95  * One side effect of the queued scanning algorithm is that the scanning code
96  * needs to be notified whenever a block is freed. This is needed to allow
97  * the scanning code to remove these I/Os from the issuing queue. Additionally,
98  * we do not attempt to queue gang blocks to be issued sequentially since this
99  * is very hard to do and would have an extremely limited performance benefit.
100  * Instead, we simply issue gang I/Os as soon as we find them using the legacy
101  * algorithm.
102  *
103  * Backwards compatibility
104  *
105  * This new algorithm is backwards compatible with the legacy on-disk data
106  * structures (and therefore does not require a new feature flag).
107  * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
108  * will stop scanning metadata (in logical order) and wait for all outstanding
109  * sorted I/O to complete. Once this is done, we write out a checkpoint
110  * bookmark, indicating that we have scanned everything logically before it.
111  * If the pool is imported on a machine without the new sorting algorithm,
112  * the scan simply resumes from the last checkpoint using the legacy algorithm.
113  */
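
/*
 * Illustrative sketch (not part of the implementation): the per-spa_sync()
 * decision described above can be thought of roughly as follows; the
 * condition names here are only for illustration:
 *
 *	if (queues are over the memory limit, or all metadata is scanned)
 *		scan_io_queues_run();	-- drain sorted I/O to the disks
 *	else
 *		dsl_scan_visit();	-- keep scanning metadata, queueing I/O
 *
 * The real logic lives in dsl_scan_sync() and dsl_scan_should_clear() below.
 */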
114 
115 typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
116     const zbookmark_phys_t *);
117 
118 static scan_cb_t dsl_scan_scrub_cb;
119 
120 static int scan_ds_queue_compare(const void *a, const void *b);
121 static int scan_prefetch_queue_compare(const void *a, const void *b);
122 static void scan_ds_queue_clear(dsl_scan_t *scn);
123 static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
124     uint64_t *txg);
125 static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
126 static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
127 static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
128 
129 extern int zfs_vdev_async_write_active_min_dirty_percent;
130 
131 /*
132  * By default zfs will check to ensure it is not over the hard memory
133  * limit before each txg. If finer-grained control of this is needed
134  * this value can be set to 1 to enable checking before scanning each
135  * block.
136  */
137 int zfs_scan_strict_mem_lim = B_FALSE;
138 
139 /*
140  * Maximum number of parallelly executing I/Os per top-level vdev.
141  * Tune with care. Very high settings (hundreds) are known to trigger
142  * some firmware bugs and resets on certain SSDs.
143  */
144 int zfs_top_maxinflight = 32;		/* maximum I/Os per top-level */
145 unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
146 unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
147 unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */
148 
149 /*
150  * Maximum number of concurrently issued bytes per leaf vdev. We attempt
151  * to strike a balance here between keeping the vdev queues full of I/Os
152  * at all times and not overflowing the queues, which would cause long
153  * latency and therefore long txg sync times. No matter what, we will not
154  * overload the drives with I/O, since that is protected by
155  * zfs_vdev_scrub_max_active.
156  */
157 unsigned long zfs_scan_vdev_limit = 4 << 20;
158 
159 int zfs_scan_issue_strategy = 0;
160 int zfs_scan_legacy = B_FALSE;	/* don't queue & sort zios, go direct */
161 uint64_t zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */
162 
163 unsigned int zfs_scan_checkpoint_intval = 7200;	/* seconds */
164 #define	ZFS_SCAN_CHECKPOINT_INTVAL	SEC_TO_TICK(zfs_scan_checkpoint_intval)
165 
166 /*
167  * fill_weight is non-tunable at runtime, so we copy it at module init from
168  * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
169  * break queue sorting.
170  */
171 uint64_t zfs_scan_fill_weight = 3;
172 static uint64_t fill_weight;
173 
174 /* See dsl_scan_should_clear() for details on the memory limit tunables */
175 uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
176 uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
177 int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
178 int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */
179 
180 unsigned int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */
181 unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
182 /* min millisecs to obsolete per txg */
183 unsigned int zfs_obsolete_min_time_ms = 500;
184 /* min millisecs to resilver per txg */
185 unsigned int zfs_resilver_min_time_ms = 3000;
186 boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
187 boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
188 enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
189 /* max number of blocks to free in a single TXG */
190 uint64_t zfs_async_block_max_blocks = UINT64_MAX;
191 
192 /*
193  * We wait a few txgs after importing a pool to begin scanning so that
194  * the import / mounting code isn't held up by scrub / resilver IO.
195  * Unfortunately, it is a bit difficult to determine exactly how long
196  * this will take since userspace will trigger fs mounts asynchronously
197  * and the kernel will create zvol minors asynchronously. As a result,
198  * the value provided here is a bit arbitrary, but represents a
199  * reasonable estimate of how many txgs it will take to finish fully
200  * importing a pool.
201  */
202 #define	SCAN_IMPORT_WAIT_TXGS		5
203 
204 
205 #define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
206 	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
207 	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
208 
209 extern int zfs_txg_timeout;
210 
211 /*
212  * Enable/disable the processing of the free_bpobj object.
213  */
214 boolean_t zfs_free_bpobj_enabled = B_TRUE;
215 
216 /* the order has to match pool_scan_func_t */
217 static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
218 	NULL,
219 	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
220 	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
221 };
222 
223 /* In core node for the scn->scn_queue. Represents a dataset to be scanned */
224 typedef struct {
225 	uint64_t	sds_dsobj;
226 	uint64_t	sds_txg;
227 	avl_node_t	sds_node;
228 } scan_ds_t;
229 
230 /*
231  * This controls what conditions are placed on dsl_scan_sync_state():
232  * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
233  * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
234  * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
235  *	write out the scn_phys_cached version.
236  * See dsl_scan_sync_state for details.
237  */
238 typedef enum {
239 	SYNC_OPTIONAL,
240 	SYNC_MANDATORY,
241 	SYNC_CACHED
242 } state_sync_type_t;
243 
244 /*
245  * This struct represents the minimum information needed to reconstruct a
246  * zio for sequential scanning. This is useful because many of these will
247  * accumulate in the sequential IO queues before being issued, so saving
248  * memory matters here.
249  */
250 typedef struct scan_io {
251 	/* fields from blkptr_t */
252 	uint64_t		sio_blk_prop;
253 	uint64_t		sio_phys_birth;
254 	uint64_t		sio_birth;
255 	zio_cksum_t		sio_cksum;
256 	uint32_t		sio_nr_dvas;
257 
258 	/* fields from zio_t */
259 	uint32_t		sio_flags;
260 	zbookmark_phys_t	sio_zb;
261 
262 	/* members for queue sorting */
263 	union {
264 		avl_node_t	sio_addr_node; /* link into issuing queue */
265 		list_node_t	sio_list_node; /* link for issuing to disk */
266 	} sio_nodes;
267 
268 	/*
269 	 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
270 	 * depending on how many were in the original bp. Only the
271 	 * first DVA is really used for sorting and issuing purposes.
272 	 * The other DVAs (if provided) simply exist so that the zio
273 	 * layer can find additional copies to repair from in the
274 	 * event of an error. This array must go at the end of the
275 	 * struct to accommodate the variable number of elements.
276 	 */
277 	dva_t			sio_dva[0];
278 } scan_io_t;
279 
280 #define	SIO_SET_OFFSET(sio, x)		DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
281 #define	SIO_SET_ASIZE(sio, x)		DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
282 #define	SIO_GET_OFFSET(sio)		DVA_GET_OFFSET(&(sio)->sio_dva[0])
283 #define	SIO_GET_ASIZE(sio)		DVA_GET_ASIZE(&(sio)->sio_dva[0])
284 #define	SIO_GET_END_OFFSET(sio)		\
285 	(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
286 #define	SIO_GET_MUSED(sio)		\
287 	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
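
/*
 * Worked example (illustrative only, not part of the implementation): for a
 * block pointer carrying 3 DVAs, bp2sio() sets sio_nr_dvas = 3, sio_alloc()
 * hands out the sio from sio_cache[2] (see scan_init() below), and its
 * memory footprint is
 *
 *	SIO_GET_MUSED(sio) == sizeof (scan_io_t) + 3 * sizeof (dva_t)
 *
 * i.e. the flexible sio_dva[] array is sized to exactly the number of DVAs
 * carried over from the bp.
 */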
288 
289 struct dsl_scan_io_queue {
290 	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
291 	vdev_t		*q_vd; /* top-level vdev that this queue represents */
292 
293 	/* trees used for sorting I/Os and extents of I/Os */
294 	range_tree_t	*q_exts_by_addr;
295 	avl_tree_t	q_exts_by_size;
296 	avl_tree_t	q_sios_by_addr;
297 	uint64_t	q_sio_memused;
298 
299 	/* members for zio rate limiting */
300 	uint64_t	q_maxinflight_bytes;
301 	uint64_t	q_inflight_bytes;
302 	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
303 
304 	/* per txg statistics */
305 	uint64_t	q_total_seg_size_this_txg;
306 	uint64_t	q_segs_this_txg;
307 	uint64_t	q_total_zio_size_this_txg;
308 	uint64_t	q_zios_this_txg;
309 };
310 
311 /* private data for dsl_scan_prefetch_cb() */
312 typedef struct scan_prefetch_ctx {
313 	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
314 	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
315 	boolean_t spc_root;		/* is this prefetch for an objset? */
316 	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
317 	uint16_t spc_datablkszsec;	/* dn_idatablkszsec of current dnode */
318 } scan_prefetch_ctx_t;
319 
320 /* private data for dsl_scan_prefetch() */
321 typedef struct scan_prefetch_issue_ctx {
322 	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
323 	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
324 	blkptr_t spic_bp;		/* bp to prefetch */
325 	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
326 } scan_prefetch_issue_ctx_t;
327 
328 static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
329     const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
330 static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
331     scan_io_t *sio);
332 
333 static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
334 static void scan_io_queues_destroy(dsl_scan_t *scn);
335 
336 static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
337 
338 /* sio->sio_nr_dvas must be set so we know which cache to free from */
339 static void
340 sio_free(scan_io_t *sio)
341 {
342 	ASSERT3U(sio->sio_nr_dvas, >, 0);
343 	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
344 
345 	kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
346 }
347 
348 /* It is up to the caller to set sio->sio_nr_dvas for freeing */
349 static scan_io_t *
350 sio_alloc(unsigned short nr_dvas)
351 {
352 	ASSERT3U(nr_dvas, >, 0);
353 	ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
354 
355 	return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
356 }
357 
358 void
359 scan_init(void)
360 {
361 	/*
362 	 * This is used in ext_size_compare() to weight segments
363 	 * based on how sparse they are. This cannot be changed
364 	 * mid-scan and the tree comparison functions don't currently
365 	 * have a mechanism for passing additional context to the
366 	 * compare functions. Thus we store this value globally and
367 	 * we only allow it to be set at module initialization time.
368 	 */
369 	fill_weight = zfs_scan_fill_weight;
370 
371 	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
372 		char name[36];
373 
374 		(void) sprintf(name, "sio_cache_%d", i);
375 		sio_cache[i] = kmem_cache_create(name,
376 		    (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
377 		    0, NULL, NULL, NULL, NULL, NULL, 0);
378 	}
379 }
380 
381 void
382 scan_fini(void)
383 {
384 	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
385 		kmem_cache_destroy(sio_cache[i]);
386 	}
387 }
388 
389 static inline boolean_t
390 dsl_scan_is_running(const dsl_scan_t *scn)
391 {
392 	return (scn->scn_phys.scn_state == DSS_SCANNING);
393 }
394 
395 boolean_t
396 dsl_scan_resilvering(dsl_pool_t *dp)
397 {
398 	return (dsl_scan_is_running(dp->dp_scan) &&
399 	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
400 }
401 
402 static inline void
403 sio2bp(const scan_io_t *sio, blkptr_t *bp)
404 {
405 	bzero(bp, sizeof (*bp));
406 	bp->blk_prop = sio->sio_blk_prop;
407 	bp->blk_phys_birth = sio->sio_phys_birth;
408 	bp->blk_birth = sio->sio_birth;
409 	bp->blk_fill = 1;	/* we always only work with data pointers */
410 	bp->blk_cksum = sio->sio_cksum;
411 
412 	ASSERT3U(sio->sio_nr_dvas, >, 0);
413 	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
414 
415 	bcopy(sio->sio_dva, bp->blk_dva, sio->sio_nr_dvas * sizeof (dva_t));
416 }
417 
418 static inline void
419 bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
420 {
421 	sio->sio_blk_prop = bp->blk_prop;
422 	sio->sio_phys_birth = bp->blk_phys_birth;
423 	sio->sio_birth = bp->blk_birth;
424 	sio->sio_cksum = bp->blk_cksum;
425 	sio->sio_nr_dvas = BP_GET_NDVAS(bp);
426 
427 	/*
428 	 * Copy the DVAs to the sio. We need all copies of the block so
429 	 * that the self healing code can use the alternate copies if the
430 	 * first is corrupted. We want the DVA at index dva_i to be first
431 	 * in the sio since this is the primary one that we want to issue.
432 	 */
433 	for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
434 		sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
435 	}
436 }
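
/*
 * Illustrative example (not part of the implementation): with dva_i = 1 and
 * a 3-DVA bp, the rotation above stores the DVAs in the sio in the order
 * {1, 2, 0}, so the copy being scanned is sorted and issued first while the
 * remaining copies stay available to the zio layer for self-healing repair.
 */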
437 
438 int
439 dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
440 {
441 	int err;
442 	dsl_scan_t *scn;
443 	spa_t *spa = dp->dp_spa;
444 	uint64_t f;
445 
446 	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
447 	scn->scn_dp = dp;
448 
449 	/*
450 	 * It's possible that we're resuming a scan after a reboot so
451 	 * make sure that the scan_async_destroying flag is initialized
452 	 * appropriately.
453 	 */
454 	ASSERT(!scn->scn_async_destroying);
455 	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
456 	    SPA_FEATURE_ASYNC_DESTROY);
457 
458 	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
459 	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
460 	    offsetof(scan_ds_t, sds_node));
461 	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
462 	    sizeof (scan_prefetch_issue_ctx_t),
463 	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
464 
465 	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
466 	    "scrub_func", sizeof (uint64_t), 1, &f);
467 	if (err == 0) {
468 		/*
469 		 * There was an old-style scrub in progress.  Restart a
470 		 * new-style scrub from the beginning.
471 		 */
472 		scn->scn_restart_txg = txg;
473 		zfs_dbgmsg("old-style scrub was in progress; "
474 		    "restarting new-style scrub in txg %llu",
475 		    (longlong_t)scn->scn_restart_txg);
476 
477 		/*
478 		 * Load the queue obj from the old location so that it
479 		 * can be freed by dsl_scan_done().
480 		 */
481 		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
482 		    "scrub_queue", sizeof (uint64_t), 1,
483 		    &scn->scn_phys.scn_queue_obj);
484 	} else {
485 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
486 		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
487 		    &scn->scn_phys);
488 		if (err == ENOENT)
489 			return (0);
490 		else if (err)
491 			return (err);
492 
493 		/*
494 		 * We might be restarting after a reboot, so jump the issued
495 		 * counter to how far we've scanned. We know we're consistent
496 		 * up to here.
497 		 */
498 		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;
499 
500 		if (dsl_scan_is_running(scn) &&
501 		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
502 			/*
503 			 * A new-type scrub was in progress on an old
504 			 * pool, and the pool was accessed by old
505 			 * software.  Restart from the beginning, since
506 			 * the old software may have changed the pool in
507 			 * the meantime.
508 			 */
509 			scn->scn_restart_txg = txg;
510 			zfs_dbgmsg("new-style scrub was modified "
511 			    "by old software; restarting in txg %llu",
512 			    (longlong_t)scn->scn_restart_txg);
513 		}
514 	}
515 
516 	/* reload the queue into the in-core state */
517 	if (scn->scn_phys.scn_queue_obj != 0) {
518 		zap_cursor_t zc;
519 		zap_attribute_t za;
520 
521 		for (zap_cursor_init(&zc, dp->dp_meta_objset,
522 		    scn->scn_phys.scn_queue_obj);
523 		    zap_cursor_retrieve(&zc, &za) == 0;
524 		    (void) zap_cursor_advance(&zc)) {
525 			scan_ds_queue_insert(scn,
526 			    zfs_strtonum(za.za_name, NULL),
527 			    za.za_first_integer);
528 		}
529 		zap_cursor_fini(&zc);
530 	}
531 
532 	spa_scan_stat_init(spa);
533 	return (0);
534 }
535 
536 void
537 dsl_scan_fini(dsl_pool_t *dp)
538 {
539 	if (dp->dp_scan != NULL) {
540 		dsl_scan_t *scn = dp->dp_scan;
541 
542 		if (scn->scn_taskq != NULL)
543 			taskq_destroy(scn->scn_taskq);
544 		scan_ds_queue_clear(scn);
545 		avl_destroy(&scn->scn_queue);
546 		avl_destroy(&scn->scn_prefetch_queue);
547 
548 		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
549 		dp->dp_scan = NULL;
550 	}
551 }
552 
553 static boolean_t
554 dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
555 {
556 	return (scn->scn_restart_txg != 0 &&
557 	    scn->scn_restart_txg <= tx->tx_txg);
558 }
559 
560 boolean_t
561 dsl_scan_scrubbing(const dsl_pool_t *dp)
562 {
563 	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
564 
565 	return (scn_phys->scn_state == DSS_SCANNING &&
566 	    scn_phys->scn_func == POOL_SCAN_SCRUB);
567 }
568 
569 boolean_t
570 dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
571 {
572 	return (dsl_scan_scrubbing(scn->scn_dp) &&
573 	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
574 }
575 
576 /*
577  * Writes out a persistent dsl_scan_phys_t record to the pool directory.
578  * Because we may be running with the block sorting algorithm, we do not always
579  * want to write out the record, only when it is "safe" to do so. This safety
580  * condition is achieved by making sure that the sorting queues are empty
581  * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
582  * is inconsistent with how much actual scanning progress has been made. The
583  * kind of sync to be performed is specified by the sync_type argument. If the
584  * sync is optional, we only sync if the queues are empty. If the sync is
585  * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
586  * third possible state is a "cached" sync. This is done in response to:
587  * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
588  *	destroyed, so we wouldn't be able to restart scanning from it.
589  * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
590  *	superseded by a newer snapshot.
591  * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
592  *	swapped with its clone.
593  * In all cases, a cached sync simply rewrites the last record we've written,
594  * just slightly modified. For the modifications that are performed to the
595  * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
596  * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
597  */
598 static void
599 dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
600 {
601 	int i;
602 	spa_t *spa = scn->scn_dp->dp_spa;
603 
604 	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
605 	if (scn->scn_bytes_pending == 0) {
606 		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
607 			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
608 			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
609 
610 			if (q == NULL)
611 				continue;
612 
613 			mutex_enter(&vd->vdev_scan_io_queue_lock);
614 			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
615 			ASSERT3P(avl_first(&q->q_exts_by_size), ==, NULL);
616 			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
617 			mutex_exit(&vd->vdev_scan_io_queue_lock);
618 		}
619 
620 		if (scn->scn_phys.scn_queue_obj != 0)
621 			scan_ds_queue_sync(scn, tx);
622 		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
623 		    DMU_POOL_DIRECTORY_OBJECT,
624 		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
625 		    &scn->scn_phys, tx));
626 		bcopy(&scn->scn_phys, &scn->scn_phys_cached,
627 		    sizeof (scn->scn_phys));
628 
629 		if (scn->scn_checkpointing)
630 			zfs_dbgmsg("finish scan checkpoint");
631 
632 		scn->scn_checkpointing = B_FALSE;
633 		scn->scn_last_checkpoint = ddi_get_lbolt();
634 	} else if (sync_type == SYNC_CACHED) {
635 		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
636 		    DMU_POOL_DIRECTORY_OBJECT,
637 		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
638 		    &scn->scn_phys_cached, tx));
639 	}
640 }
641 
642 /* ARGSUSED */
643 static int
644 dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
645 {
646 	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
647 
648 	if (dsl_scan_is_running(scn))
649 		return (SET_ERROR(EBUSY));
650 
651 	return (0);
652 }
653 
654 static void
655 dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
656 {
657 	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
658 	pool_scan_func_t *funcp = arg;
659 	dmu_object_type_t ot = 0;
660 	dsl_pool_t *dp = scn->scn_dp;
661 	spa_t *spa = dp->dp_spa;
662 
663 	ASSERT(!dsl_scan_is_running(scn));
664 	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
665 	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
666 	scn->scn_phys.scn_func = *funcp;
667 	scn->scn_phys.scn_state = DSS_SCANNING;
668 	scn->scn_phys.scn_min_txg = 0;
669 	scn->scn_phys.scn_max_txg = tx->tx_txg;
670 	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
671 	scn->scn_phys.scn_start_time = gethrestime_sec();
672 	scn->scn_phys.scn_errors = 0;
673 	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
674 	scn->scn_issued_before_pass = 0;
675 	scn->scn_restart_txg = 0;
676 	scn->scn_done_txg = 0;
677 	scn->scn_last_checkpoint = 0;
678 	scn->scn_checkpointing = B_FALSE;
679 	spa_scan_stat_init(spa);
680 
681 	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
682 		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
683 
684 		/* rewrite all disk labels */
685 		vdev_config_dirty(spa->spa_root_vdev);
686 
687 		if (vdev_resilver_needed(spa->spa_root_vdev,
688 		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
689 			spa_event_notify(spa, NULL, NULL,
690 			    ESC_ZFS_RESILVER_START);
691 		} else {
692 			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
693 		}
694 
695 		spa->spa_scrub_started = B_TRUE;
696 		/*
697 		 * If this is an incremental scrub, limit the DDT scrub phase
698 		 * to just the auto-ditto class (for correctness); the rest
699 		 * of the scrub should go faster using top-down pruning.
700 		 */
701 		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
702 			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
703 
704 	}
705 
706 	/* back to the generic stuff */
707 
708 	if (dp->dp_blkstats == NULL) {
709 		dp->dp_blkstats =
710 		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
711 		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
712 		    MUTEX_DEFAULT, NULL);
713 	}
714 	bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));
715 
716 	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
717 		ot = DMU_OT_ZAP_OTHER;
718 
719 	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
720 	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
721 
722 	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
723 
724 	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
725 
726 	spa_history_log_internal(spa, "scan setup", tx,
727 	    "func=%u mintxg=%llu maxtxg=%llu",
728 	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
729 }
730 
731 /*
732  * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
733  * Can also be called to resume a paused scrub.
734  */
735 int
736 dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
737 {
738 	spa_t *spa = dp->dp_spa;
739 	dsl_scan_t *scn = dp->dp_scan;
740 
741 	/*
742 	 * Purge all vdev caches and probe all devices.  We do this here
743 	 * rather than in sync context because this requires a writer lock
744 	 * on the spa_config lock, which we can't do from sync context.  The
745 	 * spa_scrub_reopen flag indicates that vdev_open() should not
746 	 * attempt to start another scrub.
747 	 */
748 	spa_vdev_state_enter(spa, SCL_NONE);
749 	spa->spa_scrub_reopen = B_TRUE;
750 	vdev_reopen(spa->spa_root_vdev);
751 	spa->spa_scrub_reopen = B_FALSE;
752 	(void) spa_vdev_state_exit(spa, NULL, 0);
753 
754 	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
755 		/* got scrub start cmd, resume paused scrub */
756 		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
757 		    POOL_SCRUB_NORMAL);
758 		if (err == 0) {
759 			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
760 			return (ECANCELED);
761 		}
762 		return (SET_ERROR(err));
763 	}
764 
765 	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
766 	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
767 }
768 
769 /* ARGSUSED */
770 static void
771 dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
772 {
773 	static const char *old_names[] = {
774 		"scrub_bookmark",
775 		"scrub_ddt_bookmark",
776 		"scrub_ddt_class_max",
777 		"scrub_queue",
778 		"scrub_min_txg",
779 		"scrub_max_txg",
780 		"scrub_func",
781 		"scrub_errors",
782 		NULL
783 	};
784 
785 	dsl_pool_t *dp = scn->scn_dp;
786 	spa_t *spa = dp->dp_spa;
787 	int i;
788 
789 	/* Remove any remnants of an old-style scrub. */
790 	for (i = 0; old_names[i]; i++) {
791 		(void) zap_remove(dp->dp_meta_objset,
792 		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
793 	}
794 
795 	if (scn->scn_phys.scn_queue_obj != 0) {
796 		VERIFY0(dmu_object_free(dp->dp_meta_objset,
797 		    scn->scn_phys.scn_queue_obj, tx));
798 		scn->scn_phys.scn_queue_obj = 0;
799 	}
800 	scan_ds_queue_clear(scn);
801 
802 	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
803 
804 	/*
805 	 * If we were "restarted" from a stopped state, don't bother
806 	 * with anything else.
807 	 */
808 	if (!dsl_scan_is_running(scn)) {
809 		ASSERT(!scn->scn_is_sorted);
810 		return;
811 	}
812 
813 	if (scn->scn_is_sorted) {
814 		scan_io_queues_destroy(scn);
815 		scn->scn_is_sorted = B_FALSE;
816 
817 		if (scn->scn_taskq != NULL) {
818 			taskq_destroy(scn->scn_taskq);
819 			scn->scn_taskq = NULL;
820 		}
821 	}
822 
823 	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
824 
825 	if (dsl_scan_restarting(scn, tx))
826 		spa_history_log_internal(spa, "scan aborted, restarting", tx,
827 		    "errors=%llu", spa_get_errlog_size(spa));
828 	else if (!complete)
829 		spa_history_log_internal(spa, "scan cancelled", tx,
830 		    "errors=%llu", spa_get_errlog_size(spa));
831 	else
832 		spa_history_log_internal(spa, "scan done", tx,
833 		    "errors=%llu", spa_get_errlog_size(spa));
834 
835 	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
836 		spa->spa_scrub_started = B_FALSE;
837 		spa->spa_scrub_active = B_FALSE;
838 
839 		/*
840 		 * If the scrub/resilver completed, update all DTLs to
841 		 * reflect this.  Whether it succeeded or not, vacate
842 		 * all temporary scrub DTLs.
843 		 *
844 		 * As the scrub does not currently support traversing
845 		 * data that have been freed but are part of a checkpoint,
846 		 * we don't mark the scrub as done in the DTLs as faults
847 		 * may still exist in those vdevs.
848 		 */
849 		if (complete &&
850 		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
851 			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
852 			    scn->scn_phys.scn_max_txg, B_TRUE);
853 
854 			spa_event_notify(spa, NULL, NULL,
855 			    scn->scn_phys.scn_min_txg ?
856 			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
857 		} else {
858 			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
859 			    0, B_TRUE);
860 		}
861 		spa_errlog_rotate(spa);
862 
863 		/*
864 		 * We may have finished replacing a device.
865 		 * Let the async thread assess this and handle the detach.
866 		 */
867 		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
868 	}
869 
870 	scn->scn_phys.scn_end_time = gethrestime_sec();
871 
872 	ASSERT(!dsl_scan_is_running(scn));
873 }
874 
875 /* ARGSUSED */
876 static int
877 dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
878 {
879 	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
880 
881 	if (!dsl_scan_is_running(scn))
882 		return (SET_ERROR(ENOENT));
883 	return (0);
884 }
885 
886 /* ARGSUSED */
887 static void
888 dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
889 {
890 	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
891 
892 	dsl_scan_done(scn, B_FALSE, tx);
893 	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
894 	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
895 }
896 
897 int
898 dsl_scan_cancel(dsl_pool_t *dp)
899 {
900 	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
901 	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
902 }
903 
904 static int
905 dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
906 {
907 	pool_scrub_cmd_t *cmd = arg;
908 	dsl_pool_t *dp = dmu_tx_pool(tx);
909 	dsl_scan_t *scn = dp->dp_scan;
910 
911 	if (*cmd == POOL_SCRUB_PAUSE) {
912 		/* can't pause a scrub when there is no in-progress scrub */
913 		if (!dsl_scan_scrubbing(dp))
914 			return (SET_ERROR(ENOENT));
915 
916 		/* can't pause a paused scrub */
917 		if (dsl_scan_is_paused_scrub(scn))
918 			return (SET_ERROR(EBUSY));
919 	} else if (*cmd != POOL_SCRUB_NORMAL) {
920 		return (SET_ERROR(ENOTSUP));
921 	}
922 
923 	return (0);
924 }
925 
926 static void
927 dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
928 {
929 	pool_scrub_cmd_t *cmd = arg;
930 	dsl_pool_t *dp = dmu_tx_pool(tx);
931 	spa_t *spa = dp->dp_spa;
932 	dsl_scan_t *scn = dp->dp_scan;
933 
934 	if (*cmd == POOL_SCRUB_PAUSE) {
935 		/* record the time at which the scrub was paused */
936 		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
937 		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
938 		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
939 		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
940 	} else {
941 		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
942 		if (dsl_scan_is_paused_scrub(scn)) {
943 			/*
944 			 * We need to keep track of how much time we spend
945 			 * paused per pass so that we can adjust the scrub rate
946 			 * shown in the output of 'zpool status'.
947 			 */
948 			spa->spa_scan_pass_scrub_spent_paused +=
949 			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
950 			spa->spa_scan_pass_scrub_pause = 0;
951 			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
952 			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
953 		}
954 	}
955 }
956 
957 /*
958  * Set scrub pause/resume state if it makes sense to do so
959  */
960 int
961 dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
962 {
963 	return (dsl_sync_task(spa_name(dp->dp_spa),
964 	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
965 	    ZFS_SPACE_CHECK_RESERVED));
966 }
967 
968 
969 /* start a new scan, or restart an existing one. */
970 void
971 dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
972 {
973 	if (txg == 0) {
974 		dmu_tx_t *tx;
975 		tx = dmu_tx_create_dd(dp->dp_mos_dir);
976 		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
977 
978 		txg = dmu_tx_get_txg(tx);
979 		dp->dp_scan->scn_restart_txg = txg;
980 		dmu_tx_commit(tx);
981 	} else {
982 		dp->dp_scan->scn_restart_txg = txg;
983 	}
984 	zfs_dbgmsg("restarting resilver txg=%llu", txg);
985 }
986 
987 void
988 dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
989 {
990 	zio_free(dp->dp_spa, txg, bp);
991 }
992 
993 void
994 dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
995 {
996 	ASSERT(dsl_pool_sync_context(dp));
997 	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
998 }
999 
1000 static int
1001 scan_ds_queue_compare(const void *a, const void *b)
1002 {
1003 	const scan_ds_t *sds_a = a, *sds_b = b;
1004 
1005 	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
1006 		return (-1);
1007 	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
1008 		return (0);
1009 	return (1);
1010 }
1011 
1012 static void
1013 scan_ds_queue_clear(dsl_scan_t *scn)
1014 {
1015 	void *cookie = NULL;
1016 	scan_ds_t *sds;
1017 	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
1018 		kmem_free(sds, sizeof (*sds));
1019 	}
1020 }
1021 
1022 static boolean_t
1023 scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
1024 {
1025 	scan_ds_t srch, *sds;
1026 
1027 	srch.sds_dsobj = dsobj;
1028 	sds = avl_find(&scn->scn_queue, &srch, NULL);
1029 	if (sds != NULL && txg != NULL)
1030 		*txg = sds->sds_txg;
1031 	return (sds != NULL);
1032 }
1033 
1034 static void
1035 scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
1036 {
1037 	scan_ds_t *sds;
1038 	avl_index_t where;
1039 
1040 	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
1041 	sds->sds_dsobj = dsobj;
1042 	sds->sds_txg = txg;
1043 
1044 	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
1045 	avl_insert(&scn->scn_queue, sds, where);
1046 }
1047 
1048 static void
1049 scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
1050 {
1051 	scan_ds_t srch, *sds;
1052 
1053 	srch.sds_dsobj = dsobj;
1054 
1055 	sds = avl_find(&scn->scn_queue, &srch, NULL);
1056 	VERIFY(sds != NULL);
1057 	avl_remove(&scn->scn_queue, sds);
1058 	kmem_free(sds, sizeof (*sds));
1059 }
1060 
1061 static void
1062 scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
1063 {
1064 	dsl_pool_t *dp = scn->scn_dp;
1065 	spa_t *spa = dp->dp_spa;
1066 	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
1067 	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
1068 
1069 	ASSERT0(scn->scn_bytes_pending);
1070 	ASSERT(scn->scn_phys.scn_queue_obj != 0);
1071 
1072 	VERIFY0(dmu_object_free(dp->dp_meta_objset,
1073 	    scn->scn_phys.scn_queue_obj, tx));
1074 	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
1075 	    DMU_OT_NONE, 0, tx);
1076 	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
1077 	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
1078 		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
1079 		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
1080 		    sds->sds_txg, tx));
1081 	}
1082 }
1083 
1084 /*
1085  * Computes the memory limit state that we're currently in. A sorted scan
1086  * needs quite a bit of memory to hold the sorting queue, so we need to
1087  * reasonably constrain the size so it doesn't impact overall system
1088  * performance. We compute two limits:
1089  * 1) Hard memory limit: if the amount of memory used by the sorting
1090  *	queues on a pool gets above this value, we stop the metadata
1091  *	scanning portion and start issuing the queued up and sorted
1092  *	I/Os to reduce memory usage.
1093  *	This limit is calculated as a fraction of physmem (by default 5%).
1094  *	We constrain the lower bound of the hard limit to an absolute
1095  *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
1096  *	the upper bound to 5% of the total pool size - no chance we'll
1097  *	the upper bound to 5% of the pool's allocated space - no chance we'll
1098  * 2) Soft memory limit: once we hit the hard memory limit, we start
1099  *	issuing I/O to reduce queue memory usage, but we don't want to
1100  *	completely empty out the queues, since we might be able to find I/Os
1101  *	that will fill in the gaps of our non-sequential IOs at some point
1102  *	in the future. So we stop issuing I/Os once the amount of memory
1103  *	used drops below the soft limit, at which point we switch back to
1104  *	scanning metadata.
1105  *
1106  *	This limit is calculated by subtracting a fraction of the hard
1107  *	limit from the hard limit. By default this fraction is 5%, so
1108  *	the soft limit is 95% of the hard limit. We cap the size of the
1109  *	difference between the hard and soft limits at an absolute
1110  *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
1111  *	sufficient to not cause too frequent switching between the
1112  *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
1113  *	worth of queues is about 1.2 GiB of on-pool data, so scanning
1114  *	that should take at least a decent fraction of a second).
1115  */
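
/*
 * Worked example, assuming the default tunables above and illustrative
 * machine/pool sizes (not part of the implementation):
 *
 *	physmem = 16 GiB, allocated pool space = 1 TiB
 *	mlim_hard = MAX(16 GiB / 20, 16 MiB)			= ~819 MiB
 *	mlim_hard = MIN(~819 MiB, 1 TiB / 20)			= ~819 MiB
 *	mlim_soft = mlim_hard - MIN(mlim_hard / 20, 128 MiB)
 *		  = ~819 MiB - ~41 MiB				= ~778 MiB
 *
 * Queue memory use above ~819 MiB forces I/O issuing; issuing continues
 * until use drops below ~778 MiB, at which point metadata scanning resumes.
 */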
1116 static boolean_t
1117 dsl_scan_should_clear(dsl_scan_t *scn)
1118 {
1119 	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
1120 	uint64_t mlim_hard, mlim_soft, mused;
1121 	uint64_t alloc = metaslab_class_get_alloc(spa_normal_class(
1122 	    scn->scn_dp->dp_spa));
1123 
1124 	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
1125 	    zfs_scan_mem_lim_min);
1126 	mlim_hard = MIN(mlim_hard, alloc / 20);
1127 	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
1128 	    zfs_scan_mem_lim_soft_max);
1129 	mused = 0;
1130 	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
1131 		vdev_t *tvd = rvd->vdev_child[i];
1132 		dsl_scan_io_queue_t *queue;
1133 
1134 		mutex_enter(&tvd->vdev_scan_io_queue_lock);
1135 		queue = tvd->vdev_scan_io_queue;
1136 		if (queue != NULL) {
1137 			/* # extents in exts_by_size = # in exts_by_addr */
1138 			mused += avl_numnodes(&queue->q_exts_by_size) *
1139 			    sizeof (range_seg_t) + queue->q_sio_memused;
1140 		}
1141 		mutex_exit(&tvd->vdev_scan_io_queue_lock);
1142 	}
1143 
1144 	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
1145 
1146 	if (mused == 0)
1147 		ASSERT0(scn->scn_bytes_pending);
1148 
1149 	/*
1150 	 * If we are above our hard limit, we need to clear out memory.
1151 	 * If we are below our soft limit, we need to accumulate sequential IOs.
1152 	 * Otherwise, we should keep doing whatever we are currently doing.
1153 	 */
1154 	if (mused >= mlim_hard)
1155 		return (B_TRUE);
1156 	else if (mused < mlim_soft)
1157 		return (B_FALSE);
1158 	else
1159 		return (scn->scn_clearing);
1160 }
1161 
1162 static boolean_t
1163 dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
1164 {
1165 	/* we never skip user/group accounting objects */
1166 	if (zb && (int64_t)zb->zb_object < 0)
1167 		return (B_FALSE);
1168 
1169 	if (scn->scn_suspending)
1170 		return (B_TRUE); /* we're already suspending */
1171 
1172 	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
1173 		return (B_FALSE); /* we're resuming */
1174 
1175 	/* We only know how to resume from level-0 blocks. */
1176 	if (zb && zb->zb_level != 0)
1177 		return (B_FALSE);
1178 
1179 	/*
1180 	 * We suspend if:
1181 	 *  - we have scanned for at least the minimum time (default 1 sec
1182 	 *    for scrub, 3 sec for resilver), and either we have sufficient
1183 	 *    dirty data that we are starting to write more quickly
1184 	 *    (default 30%), or someone is explicitly waiting for this txg
1185 	 *    to complete.
1186 	 *  or
1187 	 *  - the spa is shutting down because this pool is being exported
1188 	 *    or the machine is rebooting.
1189 	 *  or
1190 	 *  - the scan queue has reached its memory use limit
1191 	 */
1192 	hrtime_t curr_time_ns = gethrtime();
1193 	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
1194 	uint64_t sync_time_ns = curr_time_ns -
1195 	    scn->scn_dp->dp_spa->spa_sync_starttime;
1196 
1197 	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
1198 	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
1199 	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
1200 
1201 	if ((NSEC2MSEC(scan_time_ns) > mintime &&
1202 	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
1203 	    txg_sync_waiting(scn->scn_dp) ||
1204 	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
1205 	    spa_shutting_down(scn->scn_dp->dp_spa) ||
1206 	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
1207 		if (zb) {
1208 			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
1209 			    (longlong_t)zb->zb_objset,
1210 			    (longlong_t)zb->zb_object,
1211 			    (longlong_t)zb->zb_level,
1212 			    (longlong_t)zb->zb_blkid);
1213 			scn->scn_phys.scn_bookmark = *zb;
1214 		} else {
1215 			dsl_scan_phys_t *scnp = &scn->scn_phys;
1216 
1217 			dprintf("suspending at DDT bookmark "
1218 			    "%llx/%llx/%llx/%llx\n",
1219 			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
1220 			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
1221 			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
1222 			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
1223 		}
1224 		scn->scn_suspending = B_TRUE;
1225 		return (B_TRUE);
1226 	}
1227 	return (B_FALSE);
1228 }
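
/*
 * Worked example for dsl_scan_check_suspend() above (illustrative only,
 * assuming default tunables): with zfs_dirty_data_max = 4 GiB and 1.5 GiB
 * of outstanding dirty data, dirty_pct = 1.5 GiB * 100 / 4 GiB = 37, which
 * exceeds the default zfs_vdev_async_write_active_min_dirty_percent of 30.
 * A scrub that has already run for more than zfs_scrub_min_time_ms
 * (1000 ms) in this txg will therefore suspend and let the sync complete.
 */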
1229 
1230 typedef struct zil_scan_arg {
1231 	dsl_pool_t	*zsa_dp;
1232 	zil_header_t	*zsa_zh;
1233 } zil_scan_arg_t;
1234 
1235 /* ARGSUSED */
1236 static int
1237 dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
1238 {
1239 	zil_scan_arg_t *zsa = arg;
1240 	dsl_pool_t *dp = zsa->zsa_dp;
1241 	dsl_scan_t *scn = dp->dp_scan;
1242 	zil_header_t *zh = zsa->zsa_zh;
1243 	zbookmark_phys_t zb;
1244 
1245 	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
1246 		return (0);
1247 
1248 	/*
1249 	 * One block ("stubby") may have been allocated a long time ago; we
1250 	 * want to visit that one because it has been allocated
1251 	 * (on-disk) even if it hasn't been claimed (even though for
1252 	 * scrub there's nothing to do to it).
1253 	 */
1254 	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
1255 		return (0);
1256 
1257 	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1258 	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
1259 
1260 	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
1261 	return (0);
1262 }
1263 
1264 /* ARGSUSED */
1265 static int
1266 dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
1267 {
1268 	if (lrc->lrc_txtype == TX_WRITE) {
1269 		zil_scan_arg_t *zsa = arg;
1270 		dsl_pool_t *dp = zsa->zsa_dp;
1271 		dsl_scan_t *scn = dp->dp_scan;
1272 		zil_header_t *zh = zsa->zsa_zh;
1273 		lr_write_t *lr = (lr_write_t *)lrc;
1274 		blkptr_t *bp = &lr->lr_blkptr;
1275 		zbookmark_phys_t zb;
1276 
1277 		if (BP_IS_HOLE(bp) ||
1278 		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
1279 			return (0);
1280 
1281 		/*
1282 		 * birth can be < claim_txg if this record's txg is
1283 		 * already txg sync'ed (but this log block contains
1284 		 * other records that are not synced)
1285 		 */
1286 		if (claim_txg == 0 || bp->blk_birth < claim_txg)
1287 			return (0);
1288 
1289 		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1290 		    lr->lr_foid, ZB_ZIL_LEVEL,
1291 		    lr->lr_offset / BP_GET_LSIZE(bp));
1292 
1293 		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
1294 	}
1295 	return (0);
1296 }
1297 
1298 static void
1299 dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
1300 {
1301 	uint64_t claim_txg = zh->zh_claim_txg;
1302 	zil_scan_arg_t zsa = { dp, zh };
1303 	zilog_t *zilog;
1304 
1305 	ASSERT(spa_writeable(dp->dp_spa));
1306 
1307 	/*
1308 	 * We only want to visit blocks that have been claimed
1309 	 * but not yet replayed.
1310 	 */
1311 	if (claim_txg == 0)
1312 		return;
1313 
1314 	zilog = zil_alloc(dp->dp_meta_objset, zh);
1315 
1316 	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
1317 	    claim_txg);
1318 
1319 	zil_free(zilog);
1320 }
1321 
1322 /*
1323  * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
1324  * here is to sort the AVL tree by the order each block will be needed.
1325  */
1326 static int
1327 scan_prefetch_queue_compare(const void *a, const void *b)
1328 {
1329 	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
1330 	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
1331 	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
1332 
1333 	return (zbookmark_compare(spc_a->spc_datablkszsec,
1334 	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
1335 	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
1336 }
1337 
1338 static void
1339 scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
1340 {
1341 	if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
1342 		zfs_refcount_destroy(&spc->spc_refcnt);
1343 		kmem_free(spc, sizeof (scan_prefetch_ctx_t));
1344 	}
1345 }
1346 
1347 static scan_prefetch_ctx_t *
1348 scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
1349 {
1350 	scan_prefetch_ctx_t *spc;
1351 
1352 	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
1353 	zfs_refcount_create(&spc->spc_refcnt);
1354 	zfs_refcount_add(&spc->spc_refcnt, tag);
1355 	spc->spc_scn = scn;
1356 	if (dnp != NULL) {
1357 		spc->spc_datablkszsec = dnp->dn_datablkszsec;
1358 		spc->spc_indblkshift = dnp->dn_indblkshift;
1359 		spc->spc_root = B_FALSE;
1360 	} else {
1361 		spc->spc_datablkszsec = 0;
1362 		spc->spc_indblkshift = 0;
1363 		spc->spc_root = B_TRUE;
1364 	}
1365 
1366 	return (spc);
1367 }
1368 
1369 static void
1370 scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
1371 {
1372 	zfs_refcount_add(&spc->spc_refcnt, tag);
1373 }
1374 
1375 static boolean_t
1376 dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
1377     const zbookmark_phys_t *zb)
1378 {
1379 	zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
1380 	dnode_phys_t tmp_dnp;
1381 	dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
1382 
1383 	if (zb->zb_objset != last_zb->zb_objset)
1384 		return (B_TRUE);
1385 	if ((int64_t)zb->zb_object < 0)
1386 		return (B_FALSE);
1387 
1388 	tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
1389 	tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
1390 
1391 	if (zbookmark_subtree_completed(dnp, zb, last_zb))
1392 		return (B_TRUE);
1393 
1394 	return (B_FALSE);
1395 }
1396 
1397 static void
1398 dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
1399 {
1400 	avl_index_t idx;
1401 	dsl_scan_t *scn = spc->spc_scn;
1402 	spa_t *spa = scn->scn_dp->dp_spa;
1403 	scan_prefetch_issue_ctx_t *spic;
1404 
1405 	if (zfs_no_scrub_prefetch)
1406 		return;
1407 
1408 	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
1409 	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
1410 	    BP_GET_TYPE(bp) != DMU_OT_OBJSET))
1411 		return;
1412 
1413 	if (dsl_scan_check_prefetch_resume(spc, zb))
1414 		return;
1415 
1416 	scan_prefetch_ctx_add_ref(spc, scn);
1417 	spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
1418 	spic->spic_spc = spc;
1419 	spic->spic_bp = *bp;
1420 	spic->spic_zb = *zb;
1421 
1422 	/*
1423 	 * Add the IO to the queue of blocks to prefetch. This allows us to
1424 	 * prioritize blocks that we will need first for the main traversal
1425 	 * thread.
1426 	 */
1427 	mutex_enter(&spa->spa_scrub_lock);
1428 	if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
1429 		/* this block is already queued for prefetch */
1430 		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1431 		scan_prefetch_ctx_rele(spc, scn);
1432 		mutex_exit(&spa->spa_scrub_lock);
1433 		return;
1434 	}
1435 
1436 	avl_insert(&scn->scn_prefetch_queue, spic, idx);
1437 	cv_broadcast(&spa->spa_scrub_io_cv);
1438 	mutex_exit(&spa->spa_scrub_lock);
1439 }
1440 
1441 static void
1442 dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
1443     uint64_t objset, uint64_t object)
1444 {
1445 	int i;
1446 	zbookmark_phys_t zb;
1447 	scan_prefetch_ctx_t *spc;
1448 
1449 	if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1450 		return;
1451 
1452 	SET_BOOKMARK(&zb, objset, object, 0, 0);
1453 
1454 	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
1455 
1456 	for (i = 0; i < dnp->dn_nblkptr; i++) {
1457 		zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
1458 		zb.zb_blkid = i;
1459 		dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
1460 	}
1461 
1462 	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1463 		zb.zb_level = 0;
1464 		zb.zb_blkid = DMU_SPILL_BLKID;
1465 		dsl_scan_prefetch(spc, &dnp->dn_spill, &zb);
1466 	}
1467 
1468 	scan_prefetch_ctx_rele(spc, FTAG);
1469 }
1470 
1471 void
1472 dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1473     arc_buf_t *buf, void *private)
1474 {
1475 	scan_prefetch_ctx_t *spc = private;
1476 	dsl_scan_t *scn = spc->spc_scn;
1477 	spa_t *spa = scn->scn_dp->dp_spa;
1478 
1479 	/* broadcast that the IO has completed for rate limiting purposes */
1480 	mutex_enter(&spa->spa_scrub_lock);
1481 	ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
1482 	spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
1483 	cv_broadcast(&spa->spa_scrub_io_cv);
1484 	mutex_exit(&spa->spa_scrub_lock);
1485 
1486 	/* if there was an error or we are done prefetching, just cleanup */
1487 	if (buf == NULL || scn->scn_suspending)
1488 		goto out;
1489 
1490 	if (BP_GET_LEVEL(bp) > 0) {
1491 		int i;
1492 		blkptr_t *cbp;
1493 		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
1494 		zbookmark_phys_t czb;
1495 
1496 		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
1497 			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
1498 			    zb->zb_level - 1, zb->zb_blkid * epb + i);
1499 			dsl_scan_prefetch(spc, cbp, &czb);
1500 		}
1501 	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
1502 		dnode_phys_t *cdnp = buf->b_data;
1503 		int i;
1504 		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
1505 
1506 		for (i = 0, cdnp = buf->b_data; i < epb;
1507 		    i += cdnp->dn_extra_slots + 1,
1508 		    cdnp += cdnp->dn_extra_slots + 1) {
1509 			dsl_scan_prefetch_dnode(scn, cdnp,
1510 			    zb->zb_objset, zb->zb_blkid * epb + i);
1511 		}
1512 	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
1513 		objset_phys_t *osp = buf->b_data;
1514 
1515 		dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
1516 		    zb->zb_objset, DMU_META_DNODE_OBJECT);
1517 
1518 		if (OBJSET_BUF_HAS_USERUSED(buf)) {
1519 			dsl_scan_prefetch_dnode(scn,
1520 			    &osp->os_groupused_dnode, zb->zb_objset,
1521 			    DMU_GROUPUSED_OBJECT);
1522 			dsl_scan_prefetch_dnode(scn,
1523 			    &osp->os_userused_dnode, zb->zb_objset,
1524 			    DMU_USERUSED_OBJECT);
1525 		}
1526 	}
1527 
1528 out:
1529 	if (buf != NULL)
1530 		arc_buf_destroy(buf, private);
1531 	scan_prefetch_ctx_rele(spc, scn);
1532 }
1533 
1534 /* ARGSUSED */
1535 static void
1536 dsl_scan_prefetch_thread(void *arg)
1537 {
1538 	dsl_scan_t *scn = arg;
1539 	spa_t *spa = scn->scn_dp->dp_spa;
1540 	vdev_t *rvd = spa->spa_root_vdev;
1541 	uint64_t maxinflight = rvd->vdev_children * zfs_top_maxinflight;
1542 	scan_prefetch_issue_ctx_t *spic;
1543 
1544 	/* loop until we are told to stop */
1545 	while (!scn->scn_prefetch_stop) {
1546 		arc_flags_t flags = ARC_FLAG_NOWAIT |
1547 		    ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
1548 		int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
1549 
1550 		mutex_enter(&spa->spa_scrub_lock);
1551 
1552 		/*
1553 		 * Wait until we have an IO to issue and are not above our
1554 		 * maximum in flight limit.
1555 		 */
1556 		while (!scn->scn_prefetch_stop &&
1557 		    (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
1558 		    spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
1559 			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
1560 		}
1561 
1562 		/* recheck if we should stop since we waited for the cv */
1563 		if (scn->scn_prefetch_stop) {
1564 			mutex_exit(&spa->spa_scrub_lock);
1565 			break;
1566 		}
1567 
1568 		/* remove the prefetch IO from the tree */
1569 		spic = avl_first(&scn->scn_prefetch_queue);
1570 		spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
1571 		avl_remove(&scn->scn_prefetch_queue, spic);
1572 
1573 		mutex_exit(&spa->spa_scrub_lock);
1574 
1575 		/* issue the prefetch asynchronously */
1576 		(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
1577 		    &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
1578 		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
1579 
1580 		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1581 	}
1582 
1583 	ASSERT(scn->scn_prefetch_stop);
1584 
1585 	/* free any prefetches we didn't get to complete */
1586 	mutex_enter(&spa->spa_scrub_lock);
1587 	while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
1588 		avl_remove(&scn->scn_prefetch_queue, spic);
1589 		scan_prefetch_ctx_rele(spic->spic_spc, scn);
1590 		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1591 	}
1592 	ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
1593 	mutex_exit(&spa->spa_scrub_lock);
1594 }
1595 
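/*
 * Resume handling: returns B_TRUE if this block (and everything below it)
 * was already visited before the scan was suspended and can be skipped.
 * Once we catch back up to the resume bookmark, it is cleared so that
 * suspension checks can begin again.
 */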
1596 static boolean_t
1597 dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
1598     const zbookmark_phys_t *zb)
1599 {
1600 	/*
1601 	 * We never skip over user/group accounting objects (obj<0)
1602 	 */
1603 	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
1604 	    (int64_t)zb->zb_object >= 0) {
1605 		/*
1606 		 * If we already visited this bp & everything below (in
1607 		 * a prior txg sync), don't bother doing it again.
1608 		 */
1609 		if (zbookmark_subtree_completed(dnp, zb,
1610 		    &scn->scn_phys.scn_bookmark))
1611 			return (B_TRUE);
1612 
1613 		/*
1614 		 * If we found the block we're trying to resume from, or
1615 		 * we went past it to a different object, zero it out to
1616 		 * indicate that it's OK to start checking for suspending
1617 		 * again.
1618 		 */
1619 		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
1620 		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
1621 			dprintf("resuming at %llx/%llx/%llx/%llx\n",
1622 			    (longlong_t)zb->zb_objset,
1623 			    (longlong_t)zb->zb_object,
1624 			    (longlong_t)zb->zb_level,
1625 			    (longlong_t)zb->zb_blkid);
1626 			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
1627 		}
1628 	}
1629 	return (B_FALSE);
1630 }
1631 
1632 static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
1633     dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
1634     dmu_objset_type_t ostype, dmu_tx_t *tx);
1635 static void dsl_scan_visitdnode(
1636     dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
1637     dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
1638 
1639 /*
1640  * Recursively visit the indirect blocks, dnodes or objset referenced
1641  * by *bp.  Return nonzero on i/o error.
1642  */
1643 static int
1644 dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
1645     dnode_phys_t *dnp, const blkptr_t *bp,
1646     const zbookmark_phys_t *zb, dmu_tx_t *tx)
1647 {
1648 	dsl_pool_t *dp = scn->scn_dp;
1649 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
1650 	int err;
1651 
1652 	if (BP_GET_LEVEL(bp) > 0) {
1653 		arc_flags_t flags = ARC_FLAG_WAIT;
1654 		int i;
1655 		blkptr_t *cbp;
1656 		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
1657 		arc_buf_t *buf;
1658 
1659 		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1660 		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1661 		if (err) {
1662 			scn->scn_phys.scn_errors++;
1663 			return (err);
1664 		}
1665 		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
1666 			zbookmark_phys_t czb;
1667 
1668 			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
1669 			    zb->zb_level - 1,
1670 			    zb->zb_blkid * epb + i);
1671 			dsl_scan_visitbp(cbp, &czb, dnp,
1672 			    ds, scn, ostype, tx);
1673 		}
1674 		arc_buf_destroy(buf, &buf);
1675 	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
1676 		arc_flags_t flags = ARC_FLAG_WAIT;
1677 		dnode_phys_t *cdnp;
1678 		int i;
1679 		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
1680 		arc_buf_t *buf;
1681 
1682 		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1683 		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1684 		if (err) {
1685 			scn->scn_phys.scn_errors++;
1686 			return (err);
1687 		}
1688 		for (i = 0, cdnp = buf->b_data; i < epb;
1689 		    i += cdnp->dn_extra_slots + 1,
1690 		    cdnp += cdnp->dn_extra_slots + 1) {
1691 			dsl_scan_visitdnode(scn, ds, ostype,
1692 			    cdnp, zb->zb_blkid * epb + i, tx);
1693 		}
1694 
1695 		arc_buf_destroy(buf, &buf);
1696 	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
1697 		arc_flags_t flags = ARC_FLAG_WAIT;
1698 		objset_phys_t *osp;
1699 		arc_buf_t *buf;
1700 
1701 		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1702 		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1703 		if (err) {
1704 			scn->scn_phys.scn_errors++;
1705 			return (err);
1706 		}
1707 
1708 		osp = buf->b_data;
1709 
1710 		dsl_scan_visitdnode(scn, ds, osp->os_type,
1711 		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
1712 
1713 		if (OBJSET_BUF_HAS_USERUSED(buf)) {
1714 			/*
1715 			 * We also always visit user/group accounting
1716 			 * objects, and never skip them, even if we are
1717 			 * suspending.  This is necessary so that the space
1718 			 * deltas from this txg get integrated.
1719 			 */
1720 			dsl_scan_visitdnode(scn, ds, osp->os_type,
1721 			    &osp->os_groupused_dnode,
1722 			    DMU_GROUPUSED_OBJECT, tx);
1723 			dsl_scan_visitdnode(scn, ds, osp->os_type,
1724 			    &osp->os_userused_dnode,
1725 			    DMU_USERUSED_OBJECT, tx);
1726 		}
1727 		arc_buf_destroy(buf, &buf);
1728 	}
1729 
1730 	return (0);
1731 }
1732 
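/*
 * Visit each of the dnode's block pointers (and its spill block, if any),
 * constructing the appropriate bookmark for each one.
 */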
1733 static void
1734 dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
1735     dmu_objset_type_t ostype, dnode_phys_t *dnp,
1736     uint64_t object, dmu_tx_t *tx)
1737 {
1738 	int j;
1739 
1740 	for (j = 0; j < dnp->dn_nblkptr; j++) {
1741 		zbookmark_phys_t czb;
1742 
1743 		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
1744 		    dnp->dn_nlevels - 1, j);
1745 		dsl_scan_visitbp(&dnp->dn_blkptr[j],
1746 		    &czb, dnp, ds, scn, ostype, tx);
1747 	}
1748 
1749 	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1750 		zbookmark_phys_t czb;
1751 		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
1752 		    0, DMU_SPILL_BLKID);
1753 		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
1754 		    &czb, dnp, ds, scn, ostype, tx);
1755 	}
1756 }
1757 
1758 /*
1759  * The arguments are in this order because mdb can only print the
1760  * first 5; we want them to be useful.
1761  */
1762 static void
1763 dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
1764     dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
1765     dmu_objset_type_t ostype, dmu_tx_t *tx)
1766 {
1767 	dsl_pool_t *dp = scn->scn_dp;
1768 	blkptr_t *bp_toread = NULL;
1769 
1770 	if (dsl_scan_check_suspend(scn, zb))
1771 		return;
1772 
1773 	if (dsl_scan_check_resume(scn, dnp, zb))
1774 		return;
1775 
1776 	scn->scn_visited_this_txg++;
1777 
1778 	/*
1779 	 * This debugging is commented out to conserve stack space.  This
1780 	 * function is called recursively and the debugging adds several
1781 	 * bytes to the stack for each call.  It can be commented back in
1782 	 * if required to debug an issue in dsl_scan_visitbp().
1783 	 *
1784 	 * dprintf_bp(bp,
1785 	 *	"visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
1786 	 *	ds, ds ? ds->ds_object : 0,
1787 	 *	zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
1788 	 *	bp);
1789 	 */
1790 
1791 	if (BP_IS_HOLE(bp)) {
1792 		scn->scn_holes_this_txg++;
1793 		return;
1794 	}
1795 
1796 	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
1797 		scn->scn_lt_min_this_txg++;
1798 		return;
1799 	}
1800 
1801 	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
1802 	*bp_toread = *bp;
1803 
1804 	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
1805 		goto out;
1806 
1807 	/*
1808 	 * If dsl_scan_ddt() has already visited this block, it will have
1809 	 * already done any translations or scrubbing, so don't call the
1810 	 * callback again.
1811 	 */
1812 	if (ddt_class_contains(dp->dp_spa,
1813 	    scn->scn_phys.scn_ddt_class_max, bp)) {
1814 		scn->scn_ddt_contained_this_txg++;
1815 		goto out;
1816 	}
1817 
1818 	/*
1819 	 * If this block is from the future (after cur_max_txg), then we
1820 	 * are doing this on behalf of a deleted snapshot, and we will
1821 	 * revisit the future block on the next pass of this dataset.
1822 	 * Don't scan it now unless we need to because something
1823 	 * under it was modified.
1824 	 */
1825 	if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
1826 		scn->scn_gt_max_this_txg++;
1827 		goto out;
1828 	}
1829 
1830 	scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
1831 
1832 out:
1833 	kmem_free(bp_toread, sizeof (blkptr_t));
1834 }
1835 
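/*
 * Visit an objset's root block pointer: set up the root bookmark, point
 * the prefetcher at either the start of the objset or the saved resume
 * bookmark, kick off prefetching, and then traverse the tree itself.
 */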
1836 static void
1837 dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
1838     dmu_tx_t *tx)
1839 {
1840 	zbookmark_phys_t zb;
1841 	scan_prefetch_ctx_t *spc;
1842 
1843 	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1844 	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1845 
1846 	if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
1847 		SET_BOOKMARK(&scn->scn_prefetch_bookmark,
1848 		    zb.zb_objset, 0, 0, 0);
1849 	} else {
1850 		scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
1851 	}
1852 
1853 	scn->scn_objsets_visited_this_txg++;
1854 
1855 	spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
1856 	dsl_scan_prefetch(spc, bp, &zb);
1857 	scan_prefetch_ctx_rele(spc, FTAG);
1858 
1859 	dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
1860 
1861 	dprintf_ds(ds, "finished scan%s", "");
1862 }
1863 
1864 static void
1865 ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
1866 {
1867 	if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
1868 		if (ds->ds_is_snapshot) {
1869 			/*
1870 			 * Note:
1871 			 *  - scn_cur_{min,max}_txg stays the same.
1872 			 *  - Setting the flag is not really necessary if
1873 			 *    scn_cur_max_txg == scn_max_txg, because there
1874 			 *    is nothing after this snapshot that we care
1875 			 *    about.  However, we set it anyway and then
1876 			 *    ignore it when we retraverse it in
1877 			 *    dsl_scan_visitds().
1878 			 */
1879 			scn_phys->scn_bookmark.zb_objset =
1880 			    dsl_dataset_phys(ds)->ds_next_snap_obj;
1881 			zfs_dbgmsg("destroying ds %llu; currently traversing; "
1882 			    "reset zb_objset to %llu",
1883 			    (u_longlong_t)ds->ds_object,
1884 			    (u_longlong_t)dsl_dataset_phys(ds)->
1885 			    ds_next_snap_obj);
1886 			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
1887 		} else {
1888 			SET_BOOKMARK(&scn_phys->scn_bookmark,
1889 			    ZB_DESTROYED_OBJSET, 0, 0, 0);
1890 			zfs_dbgmsg("destroying ds %llu; currently traversing; "
1891 			    "reset bookmark to -1,0,0,0",
1892 			    (u_longlong_t)ds->ds_object);
1893 		}
1894 	}
1895 }
1896 
1897 /*
1898  * Invoked when a dataset is destroyed. We need to make sure that:
1899  *
1900  * 1) If it is the dataset that was currently being scanned, we write
1901  *	a new dsl_scan_phys_t and mark the objset reference in it
1902  *	as destroyed.
1903  * 2) Remove it from the work queue, if it was present.
1904  *
1905  * If the dataset was actually a snapshot, instead of marking the dataset
1906  * as destroyed, we instead substitute the next snapshot in line.
1907  */
1908 void
1909 dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
1910 {
1911 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1912 	dsl_scan_t *scn = dp->dp_scan;
1913 	uint64_t mintxg;
1914 
1915 	if (!dsl_scan_is_running(scn))
1916 		return;
1917 
1918 	ds_destroyed_scn_phys(ds, &scn->scn_phys);
1919 	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
1920 
1921 	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
1922 		scan_ds_queue_remove(scn, ds->ds_object);
1923 		if (ds->ds_is_snapshot)
1924 			scan_ds_queue_insert(scn,
1925 			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
1926 	}
1927 
1928 	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
1929 	    ds->ds_object, &mintxg) == 0) {
1930 		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
1931 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
1932 		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
1933 		if (ds->ds_is_snapshot) {
1934 			/*
1935 			 * We keep the same mintxg; it could be >
1936 			 * ds_creation_txg if the previous snapshot was
1937 			 * deleted too.
1938 			 */
1939 			VERIFY(zap_add_int_key(dp->dp_meta_objset,
1940 			    scn->scn_phys.scn_queue_obj,
1941 			    dsl_dataset_phys(ds)->ds_next_snap_obj,
1942 			    mintxg, tx) == 0);
1943 			zfs_dbgmsg("destroying ds %llu; in queue; "
1944 			    "replacing with %llu",
1945 			    (u_longlong_t)ds->ds_object,
1946 			    (u_longlong_t)dsl_dataset_phys(ds)->
1947 			    ds_next_snap_obj);
1948 		} else {
1949 			zfs_dbgmsg("destroying ds %llu; in queue; removing",
1950 			    (u_longlong_t)ds->ds_object);
1951 		}
1952 	}
1953 
1954 	/*
1955 	 * dsl_scan_sync() should be called after this, and should sync
1956 	 * out our changed state, but just to be safe, do it here.
1957 	 */
1958 	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
1959 }
1960 
1961 static void
1962 ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
1963 {
1964 	if (scn_bookmark->zb_objset == ds->ds_object) {
1965 		scn_bookmark->zb_objset =
1966 		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
1967 		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
1968 		    "reset zb_objset to %llu",
1969 		    (u_longlong_t)ds->ds_object,
1970 		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
1971 	}
1972 }
1973 
1974 /*
1975  * Called when a dataset is snapshotted. If we were currently traversing
1976  * this snapshot, we reset our bookmark to point at the newly created
1977  * snapshot. We also modify our work queue to remove the old snapshot and
1978  * replace with the new one.
1979  */
1980 void
1981 dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
1982 {
1983 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1984 	dsl_scan_t *scn = dp->dp_scan;
1985 	uint64_t mintxg;
1986 
1987 	if (!dsl_scan_is_running(scn))
1988 		return;
1989 
1990 	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
1991 
1992 	ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
1993 	ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
1994 
1995 	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
1996 		scan_ds_queue_remove(scn, ds->ds_object);
1997 		scan_ds_queue_insert(scn,
1998 		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
1999 	}
2000 
2001 	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2002 	    ds->ds_object, &mintxg) == 0) {
2003 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2004 		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
2005 		VERIFY(zap_add_int_key(dp->dp_meta_objset,
2006 		    scn->scn_phys.scn_queue_obj,
2007 		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
2008 		zfs_dbgmsg("snapshotting ds %llu; in queue; "
2009 		    "replacing with %llu",
2010 		    (u_longlong_t)ds->ds_object,
2011 		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
2012 	}
2013 
2014 	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2015 }
2016 
2017 static void
2018 ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
2019     zbookmark_phys_t *scn_bookmark)
2020 {
2021 	if (scn_bookmark->zb_objset == ds1->ds_object) {
2022 		scn_bookmark->zb_objset = ds2->ds_object;
2023 		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
2024 		    "reset zb_objset to %llu",
2025 		    (u_longlong_t)ds1->ds_object,
2026 		    (u_longlong_t)ds2->ds_object);
2027 	} else if (scn_bookmark->zb_objset == ds2->ds_object) {
2028 		scn_bookmark->zb_objset = ds1->ds_object;
2029 		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
2030 		    "reset zb_objset to %llu",
2031 		    (u_longlong_t)ds2->ds_object,
2032 		    (u_longlong_t)ds1->ds_object);
2033 	}
2034 }
2035 
2036 /*
2037  * Called when a parent dataset and its clone are swapped. If we were
2038  * currently traversing the dataset, we need to switch to traversing the
2039  * newly promoted parent.
2040  */
2041 void
2042 dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
2043 {
2044 	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
2045 	dsl_scan_t *scn = dp->dp_scan;
2046 	uint64_t mintxg;
2047 
2048 	if (!dsl_scan_is_running(scn))
2049 		return;
2050 
2051 	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
2052 	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
2053 
2054 	if (scan_ds_queue_contains(scn, ds1->ds_object, &mintxg)) {
2055 		scan_ds_queue_remove(scn, ds1->ds_object);
2056 		scan_ds_queue_insert(scn, ds2->ds_object, mintxg);
2057 	}
2058 	if (scan_ds_queue_contains(scn, ds2->ds_object, &mintxg)) {
2059 		scan_ds_queue_remove(scn, ds2->ds_object);
2060 		scan_ds_queue_insert(scn, ds1->ds_object, mintxg);
2061 	}
2062 
2063 	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2064 	    ds1->ds_object, &mintxg) == 0) {
2065 		int err;
2066 		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2067 		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2068 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2069 		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
2070 		err = zap_add_int_key(dp->dp_meta_objset,
2071 		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
2072 		VERIFY(err == 0 || err == EEXIST);
2073 		if (err == EEXIST) {
2074 			/* Both were there to begin with */
2075 			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
2076 			    scn->scn_phys.scn_queue_obj,
2077 			    ds1->ds_object, mintxg, tx));
2078 		}
2079 		zfs_dbgmsg("clone_swap ds %llu; in queue; "
2080 		    "replacing with %llu",
2081 		    (u_longlong_t)ds1->ds_object,
2082 		    (u_longlong_t)ds2->ds_object);
2083 	}
2084 	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2085 	    ds2->ds_object, &mintxg) == 0) {
2086 		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2087 		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2088 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2089 		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
2090 		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
2091 		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
2092 		zfs_dbgmsg("clone_swap ds %llu; in queue; "
2093 		    "replacing with %llu",
2094 		    (u_longlong_t)ds2->ds_object,
2095 		    (u_longlong_t)ds1->ds_object);
2096 	}
2097 
2098 	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2099 }
2100 
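/*
 * dmu_objset_find_dp() callback: for each dataset cloned from originobj,
 * walk back through its snapshots to the earliest one whose previous
 * snapshot is the origin, and add that dataset to the scan queue.
 */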
2101 /* ARGSUSED */
2102 static int
2103 enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
2104 {
2105 	uint64_t originobj = *(uint64_t *)arg;
2106 	dsl_dataset_t *ds;
2107 	int err;
2108 	dsl_scan_t *scn = dp->dp_scan;
2109 
2110 	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
2111 		return (0);
2112 
2113 	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
2114 	if (err)
2115 		return (err);
2116 
2117 	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
2118 		dsl_dataset_t *prev;
2119 		err = dsl_dataset_hold_obj(dp,
2120 		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2121 
2122 		dsl_dataset_rele(ds, FTAG);
2123 		if (err)
2124 			return (err);
2125 		ds = prev;
2126 	}
2127 	scan_ds_queue_insert(scn, ds->ds_object,
2128 	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2129 	dsl_dataset_rele(ds, FTAG);
2130 	return (0);
2131 }
2132 
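/*
 * Scan a single dataset: traverse its ZIL (for scrubs/resilvers of head
 * datasets) and its root block pointer, then add the next snapshot and
 * any clones to the work queue.
 */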
2133 static void
2134 dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
2135 {
2136 	dsl_pool_t *dp = scn->scn_dp;
2137 	dsl_dataset_t *ds;
2138 
2139 	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
2140 
2141 	if (scn->scn_phys.scn_cur_min_txg >=
2142 	    scn->scn_phys.scn_max_txg) {
2143 		/*
2144 		 * This can happen if this snapshot was created after the
2145 		 * scan started, and we already completed a previous snapshot
2146 		 * that was created after the scan started.  This snapshot
2147 		 * only references blocks with:
2148 		 *
2149 		 *	birth < our ds_creation_txg
2150 		 *	cur_min_txg is no less than ds_creation_txg.
2151 		 *	We have already visited these blocks.
2152 		 * or
2153 		 *	birth > scn_max_txg
2154 		 *	The scan requested not to visit these blocks.
2155 		 *
2156 		 * Subsequent snapshots (and clones) can reference our
2157 		 * blocks, or blocks with even higher birth times.
2158 		 * Therefore we do not need to visit them either,
2159 		 * so we do not add them to the work queue.
2160 		 *
2161 		 * Note that checking for cur_min_txg >= cur_max_txg
2162 		 * is not sufficient, because in that case we may need to
2163 		 * visit subsequent snapshots.  This happens when min_txg > 0,
2164 		 * which raises cur_min_txg.  In this case we will visit
2165 		 * this dataset but skip all of its blocks, because the
2166 		 * rootbp's birth time is < cur_min_txg.  Then we will
2167 		 * add the next snapshots/clones to the work queue.
2168 		 */
2169 		char *dsname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
2170 		dsl_dataset_name(ds, dsname);
2171 		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
2172 		    "cur_min_txg (%llu) >= max_txg (%llu)",
2173 		    (longlong_t)dsobj, dsname,
2174 		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
2175 		    (longlong_t)scn->scn_phys.scn_max_txg);
2176 		kmem_free(dsname, MAXNAMELEN);
2177 
2178 		goto out;
2179 	}
2180 
2181 	/*
2182 	 * Only the ZIL in the head (non-snapshot) is valid. Even though
2183 	 * snapshots can have ZIL block pointers (which may be the same
2184 	 * BP as in the head), they must be ignored. In addition, $ORIGIN
2185  * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
2186  * need to look for a ZIL in it either. So we traverse the ZIL here,
2187  * rather than in dsl_scan_recurse(), because the regular snapshot
2188 	 * block-sharing rules don't apply to it.
2189 	 */
2190 	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds) &&
2191 	    (dp->dp_origin_snap == NULL ||
2192 	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
2193 		objset_t *os;
2194 		if (dmu_objset_from_ds(ds, &os) != 0) {
2195 			goto out;
2196 		}
2197 		dsl_scan_zil(dp, &os->os_zil_header);
2198 	}
2199 
2200 	/*
2201 	 * Iterate over the bps in this ds.
2202 	 */
2203 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2204 	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2205 	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
2206 	rrw_exit(&ds->ds_bp_rwlock, FTAG);
2207 
2208 	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
2209 	dsl_dataset_name(ds, dsname);
2210 	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
2211 	    "suspending=%u",
2212 	    (longlong_t)dsobj, dsname,
2213 	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
2214 	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
2215 	    (int)scn->scn_suspending);
2216 	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
2217 
2218 	if (scn->scn_suspending)
2219 		goto out;
2220 
2221 	/*
2222 	 * We've finished this pass over this dataset.
2223 	 */
2224 
2225 	/*
2226 	 * If we did not completely visit this dataset, do another pass.
2227 	 */
2228 	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
2229 		zfs_dbgmsg("incomplete pass; visiting again");
2230 		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
2231 		scan_ds_queue_insert(scn, ds->ds_object,
2232 		    scn->scn_phys.scn_cur_max_txg);
2233 		goto out;
2234 	}
2235 
2236 	/*
2237 	 * Add descendant datasets to the work queue.
2238 	 */
2239 	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
2240 		scan_ds_queue_insert(scn,
2241 		    dsl_dataset_phys(ds)->ds_next_snap_obj,
2242 		    dsl_dataset_phys(ds)->ds_creation_txg);
2243 	}
2244 	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
2245 		boolean_t usenext = B_FALSE;
2246 		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
2247 			uint64_t count;
2248 			/*
2249 			 * A bug in a previous version of the code could
2250 			 * cause upgrade_clones_cb() to not set
2251 			 * ds_next_snap_obj when it should, leading to a
2252 			 * missing entry.  Therefore we can only use the
2253 			 * next_clones_obj when its count is correct.
2254 			 */
2255 			int err = zap_count(dp->dp_meta_objset,
2256 			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
2257 			if (err == 0 &&
2258 			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
2259 				usenext = B_TRUE;
2260 		}
2261 
2262 		if (usenext) {
2263 			zap_cursor_t zc;
2264 			zap_attribute_t za;
2265 			for (zap_cursor_init(&zc, dp->dp_meta_objset,
2266 			    dsl_dataset_phys(ds)->ds_next_clones_obj);
2267 			    zap_cursor_retrieve(&zc, &za) == 0;
2268 			    (void) zap_cursor_advance(&zc)) {
2269 				scan_ds_queue_insert(scn,
2270 				    zfs_strtonum(za.za_name, NULL),
2271 				    dsl_dataset_phys(ds)->ds_creation_txg);
2272 			}
2273 			zap_cursor_fini(&zc);
2274 		} else {
2275 			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2276 			    enqueue_clones_cb, &ds->ds_object,
2277 			    DS_FIND_CHILDREN));
2278 		}
2279 	}
2280 
2281 out:
2282 	dsl_dataset_rele(ds, FTAG);
2283 }
2284 
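/*
 * dmu_objset_find_dp() callback that walks a filesystem back along its
 * snapshot chain and enqueues the oldest snapshot; clones are skipped
 * here since they are reached through their origin.
 */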
2285 /* ARGSUSED */
2286 static int
2287 enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
2288 {
2289 	dsl_dataset_t *ds;
2290 	int err;
2291 	dsl_scan_t *scn = dp->dp_scan;
2292 
2293 	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
2294 	if (err)
2295 		return (err);
2296 
2297 	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
2298 		dsl_dataset_t *prev;
2299 		err = dsl_dataset_hold_obj(dp,
2300 		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2301 		if (err) {
2302 			dsl_dataset_rele(ds, FTAG);
2303 			return (err);
2304 		}
2305 
2306 		/*
2307 		 * If this is a clone, we don't need to worry about it for now.
2308 		 */
2309 		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
2310 			dsl_dataset_rele(ds, FTAG);
2311 			dsl_dataset_rele(prev, FTAG);
2312 			return (0);
2313 		}
2314 		dsl_dataset_rele(ds, FTAG);
2315 		ds = prev;
2316 	}
2317 
2318 	scan_ds_queue_insert(scn, ds->ds_object,
2319 	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2320 	dsl_dataset_rele(ds, FTAG);
2321 	return (0);
2322 }
2323 
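/*
 * Scrub/resilver each physical copy of this dedup-table entry that was
 * born within the scan's txg range.
 */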
2324 /* ARGSUSED */
2325 void
2326 dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
2327     ddt_entry_t *dde, dmu_tx_t *tx)
2328 {
2329 	const ddt_key_t *ddk = &dde->dde_key;
2330 	ddt_phys_t *ddp = dde->dde_phys;
2331 	blkptr_t bp;
2332 	zbookmark_phys_t zb = { 0 };
2333 	int p;
2334 
2335 	if (scn->scn_phys.scn_state != DSS_SCANNING)
2336 		return;
2337 
2338 	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
2339 		if (ddp->ddp_phys_birth == 0 ||
2340 		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
2341 			continue;
2342 		ddt_bp_create(checksum, ddk, ddp, &bp);
2343 
2344 		scn->scn_visited_this_txg++;
2345 		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
2346 	}
2347 }
2348 
2349 /*
2350  * Scrub/dedup interaction.
2351  *
2352  * If there are N references to a deduped block, we don't want to scrub it
2353  * N times -- ideally, we should scrub it exactly once.
2354  *
2355  * We leverage the fact that the dde's replication class (enum ddt_class)
2356  * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
2357  * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
2358  *
2359  * To prevent excess scrubbing, the scrub begins by walking the DDT
2360  * to find all blocks with refcnt > 1, and scrubs each of these once.
2361  * Since there are two replication classes which contain blocks with
2362  * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
2363  * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
2364  *
2365  * There would be nothing more to say if a block's refcnt couldn't change
2366  * during a scrub, but of course it can so we must account for changes
2367  * in a block's replication class.
2368  *
2369  * Here's an example of what can occur:
2370  *
2371  * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
2372  * when visited during the top-down scrub phase, it will be scrubbed twice.
2373  * This negates our scrub optimization, but is otherwise harmless.
2374  *
2375  * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
2376  * on each visit during the top-down scrub phase, it will never be scrubbed.
2377  * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
2378  * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
2379  * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
2380  * while a scrub is in progress, it scrubs the block right then.
2381  */
2382 static void
2383 dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
2384 {
2385 	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
2386 	ddt_entry_t dde = { 0 };
2387 	int error;
2388 	uint64_t n = 0;
2389 
2390 	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
2391 		ddt_t *ddt;
2392 
2393 		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
2394 			break;
2395 		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
2396 		    (longlong_t)ddb->ddb_class,
2397 		    (longlong_t)ddb->ddb_type,
2398 		    (longlong_t)ddb->ddb_checksum,
2399 		    (longlong_t)ddb->ddb_cursor);
2400 
2401 		/* There should be no pending changes to the dedup table */
2402 		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
2403 		ASSERT(avl_first(&ddt->ddt_tree) == NULL);
2404 
2405 		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
2406 		n++;
2407 
2408 		if (dsl_scan_check_suspend(scn, NULL))
2409 			break;
2410 	}
2411 
2412 	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; "
2413 	    "suspending=%u", (longlong_t)n,
2414 	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);
2415 
2416 	ASSERT(error == 0 || error == ENOENT);
2417 	ASSERT(error != ENOENT ||
2418 	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
2419 }
2420 
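/*
 * The highest txg we need to visit for this dataset: for snapshots this
 * is capped at the snapshot's creation txg.
 */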
2421 static uint64_t
2422 dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
2423 {
2424 	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
2425 	if (ds->ds_is_snapshot)
2426 		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
2427 	return (smt);
2428 }
2429 
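/*
 * Top-level metadata traversal for one sync pass: finish any remaining
 * DDT-based work, then the MOS and $ORIGIN, then keep pulling datasets
 * off the in-memory queue until we either finish or decide to suspend.
 */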
2430 static void
2431 dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
2432 {
2433 	scan_ds_t *sds;
2434 	dsl_pool_t *dp = scn->scn_dp;
2435 
2436 	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
2437 	    scn->scn_phys.scn_ddt_class_max) {
2438 		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
2439 		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
2440 		dsl_scan_ddt(scn, tx);
2441 		if (scn->scn_suspending)
2442 			return;
2443 	}
2444 
2445 	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
2446 		/* First do the MOS & ORIGIN */
2447 
2448 		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
2449 		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
2450 		dsl_scan_visit_rootbp(scn, NULL,
2451 		    &dp->dp_meta_rootbp, tx);
2452 		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
2453 		if (scn->scn_suspending)
2454 			return;
2455 
2456 		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
2457 			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2458 			    enqueue_cb, NULL, DS_FIND_CHILDREN));
2459 		} else {
2460 			dsl_scan_visitds(scn,
2461 			    dp->dp_origin_snap->ds_object, tx);
2462 		}
2463 		ASSERT(!scn->scn_suspending);
2464 	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
2465 	    ZB_DESTROYED_OBJSET) {
2466 		uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
2467 		/*
2468 		 * If we were suspended, continue from here. Note if the
2469 		 * ds we were suspended on was deleted, the zb_objset may
2470 		 * be -1, so we will skip this and find a new objset
2471 		 * below.
2472 		 */
2473 		dsl_scan_visitds(scn, dsobj, tx);
2474 		if (scn->scn_suspending)
2475 			return;
2476 	}
2477 
2478 	/*
2479 	 * In case we suspended right at the end of the ds, zero the
2480 	 * bookmark so we don't think that we're still trying to resume.
2481 	 */
2482 	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
2483 
2484 	/*
2485 	 * Keep pulling things out of the dataset avl queue. Updates to the
2486 	 * persistent zap-object-as-queue happen only at checkpoints.
2487 	 */
2488 	while ((sds = avl_first(&scn->scn_queue)) != NULL) {
2489 		dsl_dataset_t *ds;
2490 		uint64_t dsobj = sds->sds_dsobj;
2491 		uint64_t txg = sds->sds_txg;
2492 
2493 		/* dequeue and free the ds from the queue */
2494 		scan_ds_queue_remove(scn, dsobj);
2495 		sds = NULL;	/* must not be touched after removal */
2496 
2497 		/* Set up min / max txg */
2498 		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
2499 		if (txg != 0) {
2500 			scn->scn_phys.scn_cur_min_txg =
2501 			    MAX(scn->scn_phys.scn_min_txg, txg);
2502 		} else {
2503 			scn->scn_phys.scn_cur_min_txg =
2504 			    MAX(scn->scn_phys.scn_min_txg,
2505 			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2506 		}
2507 		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
2508 		dsl_dataset_rele(ds, FTAG);
2509 
2510 		dsl_scan_visitds(scn, dsobj, tx);
2511 		if (scn->scn_suspending)
2512 			return;
2513 	}
2514 	/* No more objsets to fetch, we're done */
2515 	scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
2516 	ASSERT0(scn->scn_suspending);
2517 }
2518 
2519 static uint64_t
2520 dsl_scan_count_leaves(vdev_t *vd)
2521 {
2522 	uint64_t i, leaves = 0;
2523 
2524 	/* we only count leaves that belong to the main pool and are readable */
2525 	if (vd->vdev_islog || vd->vdev_isspare ||
2526 	    vd->vdev_isl2cache || !vdev_readable(vd))
2527 		return (0);
2528 
2529 	if (vd->vdev_ops->vdev_op_leaf)
2530 		return (1);
2531 
2532 	for (i = 0; i < vd->vdev_children; i++) {
2533 		leaves += dsl_scan_count_leaves(vd->vdev_child[i]);
2534 	}
2535 
2536 	return (leaves);
2537 }
2538 
2539 
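/*
 * Per-queue accounting helpers: track the total allocated size and count
 * of zios and segments issued from this queue during the current txg.
 */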
2540 static void
2541 scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
2542 {
2543 	int i;
2544 	uint64_t cur_size = 0;
2545 
2546 	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
2547 		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
2548 	}
2549 
2550 	q->q_total_zio_size_this_txg += cur_size;
2551 	q->q_zios_this_txg++;
2552 }
2553 
2554 static void
2555 scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
2556     uint64_t end)
2557 {
2558 	q->q_total_seg_size_this_txg += end - start;
2559 	q->q_segs_this_txg++;
2560 }
2561 
2562 static boolean_t
2563 scan_io_queue_check_suspend(dsl_scan_t *scn)
2564 {
2565 	/* See comment in dsl_scan_check_suspend() */
2566 	uint64_t curr_time_ns = gethrtime();
2567 	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
2568 	uint64_t sync_time_ns = curr_time_ns -
2569 	    scn->scn_dp->dp_spa->spa_sync_starttime;
2570 	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
2571 	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
2572 	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
2573 
2574 	return ((NSEC2MSEC(scan_time_ns) > mintime &&
2575 	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
2576 	    txg_sync_waiting(scn->scn_dp) ||
2577 	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
2578 	    spa_shutting_down(scn->scn_dp->dp_spa));
2579 }
2580 
2581 /*
2582  * Given a list of scan_io_t's in io_list, this issues the io's out to
2583  * disk. This consumes the io_list and frees the scan_io_t's. This is
2584  * called when emptying queues, either when we're up against the memory
2585  * limit or when we have finished scanning. Returns B_TRUE if we stopped
2586  * processing the list before we finished. Any zios that were not issued
2587  * will remain in the io_list.
2588  */
2589 static boolean_t
2590 scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
2591 {
2592 	dsl_scan_t *scn = queue->q_scn;
2593 	scan_io_t *sio;
2594 	int64_t bytes_issued = 0;
2595 	boolean_t suspended = B_FALSE;
2596 
2597 	while ((sio = list_head(io_list)) != NULL) {
2598 		blkptr_t bp;
2599 
2600 		if (scan_io_queue_check_suspend(scn)) {
2601 			suspended = B_TRUE;
2602 			break;
2603 		}
2604 
2605 		sio2bp(sio, &bp);
2606 		bytes_issued += SIO_GET_ASIZE(sio);
2607 		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
2608 		    &sio->sio_zb, queue);
2609 		(void) list_remove_head(io_list);
2610 		scan_io_queues_update_zio_stats(queue, &bp);
2611 		sio_free(sio);
2612 	}
2613 
2614 	atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);
2615 
2616 	return (suspended);
2617 }
2618 
2619 /*
2620  * Given a range_seg_t (extent) and a list, this function passes over a
2621  * scan queue and gathers up the appropriate ios which fit into that
2622  * scan seg (starting from lowest LBA). At the end, we remove the segment
2623  * from the q_exts_by_addr range tree.
2624  */
2625 static boolean_t
2626 scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
2627 {
2628 	scan_io_t *srch_sio, *sio, *next_sio;
2629 	avl_index_t idx;
2630 	uint_t num_sios = 0;
2631 	int64_t bytes_issued = 0;
2632 
2633 	ASSERT(rs != NULL);
2634 	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
2635 
2636 	srch_sio = sio_alloc(1);
2637 	srch_sio->sio_nr_dvas = 1;
2638 	SIO_SET_OFFSET(srch_sio, rs->rs_start);
2639 
2640 	/*
2641 	 * The exact start of the extent might not contain any matching zios,
2642 	 * so if that's the case, examine the next one in the tree.
2643 	 */
2644 	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
2645 	sio_free(srch_sio);
2646 
2647 	if (sio == NULL)
2648 		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
2649 
2650 	while (sio != NULL &&
2651 	    SIO_GET_OFFSET(sio) < rs->rs_end && num_sios <= 32) {
2652 		ASSERT3U(SIO_GET_OFFSET(sio), >=, rs->rs_start);
2653 		ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs->rs_end);
2654 
2655 		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
2656 		avl_remove(&queue->q_sios_by_addr, sio);
2657 		queue->q_sio_memused -= SIO_GET_MUSED(sio);
2658 
2659 		bytes_issued += SIO_GET_ASIZE(sio);
2660 		num_sios++;
2661 		list_insert_tail(list, sio);
2662 		sio = next_sio;
2663 	}
2664 
2665 	/*
2666 	 * We limit the number of sios we process at once to 32 to avoid
2667 	 * biting off more than we can chew. If we didn't take everything
2668 	 * in the segment we update it to reflect the work we were able to
2669 	 * complete. Otherwise, we remove it from the range tree entirely.
2670 	 */
2671 	if (sio != NULL && SIO_GET_OFFSET(sio) < rs->rs_end) {
2672 		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
2673 		    -bytes_issued);
2674 		range_tree_resize_segment(queue->q_exts_by_addr, rs,
2675 		    SIO_GET_OFFSET(sio), rs->rs_end - SIO_GET_OFFSET(sio));
2676 
2677 		return (B_TRUE);
2678 	} else {
2679 		range_tree_remove(queue->q_exts_by_addr, rs->rs_start,
2680 		    rs->rs_end - rs->rs_start);
2681 		return (B_FALSE);
2682 	}
2683 }
2684 
2685 
2686 /*
2687  * This is called from the queue emptying thread and selects the next
2688  * extent from which we are to issue io's. The behavior of this function
2689  * depends on the state of the scan, the current memory consumption and
2690  * whether or not we are performing a scan shutdown.
2691  * 1) We select extents in an elevator algorithm (LBA-order) if the scan
2692  *	needs to perform a checkpoint
2693  * 2) We select the largest available extent if we are up against the
2694  *	memory limit.
2695  * 3) Otherwise we don't select any extents.
2696  */
2697 static const range_seg_t *
2698 scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
2699 {
2700 	dsl_scan_t *scn = queue->q_scn;
2701 
2702 	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
2703 	ASSERT(scn->scn_is_sorted);
2704 
2705 	/* handle tunable overrides */
2706 	if (scn->scn_checkpointing || scn->scn_clearing) {
2707 		if (zfs_scan_issue_strategy == 1) {
2708 			return (range_tree_first(queue->q_exts_by_addr));
2709 		} else if (zfs_scan_issue_strategy == 2) {
2710 			return (avl_first(&queue->q_exts_by_size));
2711 		}
2712 	}
2713 
2714 	/*
2715 	 * During normal clearing, we want to issue our largest segments
2716 	 * first, keeping IO as sequential as possible, and leaving the
2717 	 * smaller extents for later with the hope that they might eventually
2718 	 * grow to larger sequential segments. However, when the scan is
2719 	 * checkpointing, no new extents will be added to the sorting queue,
2720 	 * so the way we are sorted now is as good as it will ever get.
2721 	 * In this case, we instead switch to issuing extents in LBA order.
2722 	 */
2723 	if (scn->scn_checkpointing) {
2724 		return (range_tree_first(queue->q_exts_by_addr));
2725 	} else if (scn->scn_clearing) {
2726 		return (avl_first(&queue->q_exts_by_size));
2727 	} else {
2728 		return (NULL);
2729 	}
2730 }
2731 
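/*
 * Taskq worker that drains a single top-level vdev's scan queue: it
 * repeatedly selects an extent, gathers the sios that fall inside it and
 * issues them, until the queue is empty or we decide to suspend.  Any
 * sios gathered but not issued are put back on the queue.
 */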
2732 static void
2733 scan_io_queues_run_one(void *arg)
2734 {
2735 	dsl_scan_io_queue_t *queue = arg;
2736 	kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
2737 	boolean_t suspended = B_FALSE;
2738 	range_seg_t *rs = NULL;
2739 	scan_io_t *sio = NULL;
2740 	list_t sio_list;
2741 	uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
2742 	uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd);
2743 
2744 	ASSERT(queue->q_scn->scn_is_sorted);
2745 
2746 	list_create(&sio_list, sizeof (scan_io_t),
2747 	    offsetof(scan_io_t, sio_nodes.sio_list_node));
2748 	mutex_enter(q_lock);
2749 
2750 	/* calculate maximum in-flight bytes for this txg (min 1MB) */
2751 	queue->q_maxinflight_bytes =
2752 	    MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);
2753 
2754 	/* reset per-queue scan statistics for this txg */
2755 	queue->q_total_seg_size_this_txg = 0;
2756 	queue->q_segs_this_txg = 0;
2757 	queue->q_total_zio_size_this_txg = 0;
2758 	queue->q_zios_this_txg = 0;
2759 
2760 	/* loop until we have run out of time or sios */
2761 	while ((rs = (range_seg_t *)scan_io_queue_fetch_ext(queue)) != NULL) {
2762 		uint64_t seg_start = 0, seg_end = 0;
2763 		boolean_t more_left = B_TRUE;
2764 
2765 		ASSERT(list_is_empty(&sio_list));
2766 
2767 		/* loop while we still have sios left to process in this rs */
2768 		while (more_left) {
2769 			scan_io_t *first_sio, *last_sio;
2770 
2771 			/*
2772 			 * We have selected which extent needs to be
2773 			 * processed next. Gather up the corresponding sios.
2774 			 */
2775 			more_left = scan_io_queue_gather(queue, rs, &sio_list);
2776 			ASSERT(!list_is_empty(&sio_list));
2777 			first_sio = list_head(&sio_list);
2778 			last_sio = list_tail(&sio_list);
2779 
2780 			seg_end = SIO_GET_END_OFFSET(last_sio);
2781 			if (seg_start == 0)
2782 				seg_start = SIO_GET_OFFSET(first_sio);
2783 
2784 			/*
2785 			 * Issuing sios can take a long time so drop the
2786 			 * queue lock. The sio queue won't be updated by
2787 			 * other threads since we're in syncing context so
2788 			 * we can be sure that our trees will remain exactly
2789 			 * as we left them.
2790 			 */
2791 			mutex_exit(q_lock);
2792 			suspended = scan_io_queue_issue(queue, &sio_list);
2793 			mutex_enter(q_lock);
2794 
2795 			if (suspended)
2796 				break;
2797 		}
2798 		/* update statistics for debugging purposes */
2799 		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
2800 
2801 		if (suspended)
2802 			break;
2803 	}
2804 
2805 
2806 	/*
2807 	 * If we were suspended in the middle of processing,
2808 	 * requeue any unfinished sios and exit.
2809 	 */
2810 	while ((sio = list_head(&sio_list)) != NULL) {
2811 		list_remove(&sio_list, sio);
2812 		scan_io_queue_insert_impl(queue, sio);
2813 	}
2814 
2815 	mutex_exit(q_lock);
2816 	list_destroy(&sio_list);
2817 }
2818 
2819 /*
2820  * Performs an emptying run on all scan queues in the pool. This just
2821  * punches out one thread per top-level vdev, each of which processes
2822  * only that vdev's scan queue. We can parallelize the I/O here because
2823  * we know that each queue's io's only affect its own top-level vdev.
2824  *
2825  * This function waits for the queue runs to complete, and must be
2826  * called from dsl_scan_sync (or in general, syncing context).
2827  */
2828 static void
2829 scan_io_queues_run(dsl_scan_t *scn)
2830 {
2831 	spa_t *spa = scn->scn_dp->dp_spa;
2832 
2833 	ASSERT(scn->scn_is_sorted);
2834 	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2835 
2836 	if (scn->scn_bytes_pending == 0)
2837 		return;
2838 
2839 	if (scn->scn_taskq == NULL) {
2840 		char *tq_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN + 16,
2841 		    KM_SLEEP);
2842 		int nthreads = spa->spa_root_vdev->vdev_children;
2843 
2844 		/*
2845 		 * We need to make this taskq *always* execute as many
2846 		 * threads in parallel as we have top-level vdevs and no
2847 		 * less, otherwise strange serialization of the calls to
2848 		 * scan_io_queues_run_one can occur during spa_sync runs
2849 		 * and that significantly impacts performance.
2850 		 */
2851 		(void) snprintf(tq_name, ZFS_MAX_DATASET_NAME_LEN + 16,
2852 		    "dsl_scan_tq_%s", spa->spa_name);
2853 		scn->scn_taskq = taskq_create(tq_name, nthreads, minclsyspri,
2854 		    nthreads, nthreads, TASKQ_PREPOPULATE);
2855 		kmem_free(tq_name, ZFS_MAX_DATASET_NAME_LEN + 16);
2856 	}
2857 
2858 	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
2859 		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
2860 
2861 		mutex_enter(&vd->vdev_scan_io_queue_lock);
2862 		if (vd->vdev_scan_io_queue != NULL) {
2863 			VERIFY(taskq_dispatch(scn->scn_taskq,
2864 			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
2865 			    TQ_SLEEP) != TASKQID_INVALID);
2866 		}
2867 		mutex_exit(&vd->vdev_scan_io_queue_lock);
2868 	}
2869 
2870 	/*
2871 	 * Wait for the queues to finish issuing their IOs for this run
2872 	 * before we return. There may still be IOs in flight at this
2873 	 * point.
2874 	 */
2875 	taskq_wait(scn->scn_taskq);
2876 }
2877 
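/*
 * Decide whether the async destroy/free work should yield for this txg,
 * based on elapsed time, the number of blocks visited, and whether the
 * txg sync is waiting or the pool is shutting down.
 */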
2878 static boolean_t
2879 dsl_scan_async_block_should_pause(dsl_scan_t *scn)
2880 {
2881 	uint64_t elapsed_nanosecs;
2882 
2883 	if (zfs_recover)
2884 		return (B_FALSE);
2885 
2886 	if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks)
2887 		return (B_TRUE);
2888 
2889 	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
2890 	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
2891 	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
2892 	    txg_sync_waiting(scn->scn_dp)) ||
2893 	    spa_shutting_down(scn->scn_dp->dp_spa));
2894 }
2895 
2896 static int
2897 dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2898 {
2899 	dsl_scan_t *scn = arg;
2900 
2901 	if (!scn->scn_is_bptree ||
2902 	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
2903 		if (dsl_scan_async_block_should_pause(scn))
2904 			return (SET_ERROR(ERESTART));
2905 	}
2906 
2907 	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
2908 	    dmu_tx_get_txg(tx), bp, 0));
2909 	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
2910 	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
2911 	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
2912 	scn->scn_visited_this_txg++;
2913 	return (0);
2914 }
2915 
2916 static void
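/*
 * Roll the per-queue segment and zio statistics up into pool-wide
 * averages and totals for this txg.
 */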
2917 dsl_scan_update_stats(dsl_scan_t *scn)
2918 {
2919 	spa_t *spa = scn->scn_dp->dp_spa;
2920 	uint64_t i;
2921 	uint64_t seg_size_total = 0, zio_size_total = 0;
2922 	uint64_t seg_count_total = 0, zio_count_total = 0;
2923 
2924 	for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
2925 		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
2926 		dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
2927 
2928 		if (queue == NULL)
2929 			continue;
2930 
2931 		seg_size_total += queue->q_total_seg_size_this_txg;
2932 		zio_size_total += queue->q_total_zio_size_this_txg;
2933 		seg_count_total += queue->q_segs_this_txg;
2934 		zio_count_total += queue->q_zios_this_txg;
2935 	}
2936 
2937 	if (seg_count_total == 0 || zio_count_total == 0) {
2938 		scn->scn_avg_seg_size_this_txg = 0;
2939 		scn->scn_avg_zio_size_this_txg = 0;
2940 		scn->scn_segs_this_txg = 0;
2941 		scn->scn_zios_this_txg = 0;
2942 		return;
2943 	}
2944 
2945 	scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
2946 	scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
2947 	scn->scn_segs_this_txg = seg_count_total;
2948 	scn->scn_zios_this_txg = zio_count_total;
2949 }
2950 
2951 static int
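/*
 * bpobj_iterate() callback for the obsolete bpobj: record each obsolete
 * DVA against its indirect vdev.
 */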
2952 dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
2953 {
2954 	dsl_scan_t *scn = arg;
2955 	const dva_t *dva = &bp->blk_dva[0];
2956 
2957 	if (dsl_scan_async_block_should_pause(scn))
2958 		return (SET_ERROR(ERESTART));
2959 
2960 	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
2961 	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
2962 	    DVA_GET_ASIZE(dva), tx);
2963 	scn->scn_visited_this_txg++;
2964 	return (0);
2965 }
2966 
2967 boolean_t
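/*
 * Returns B_TRUE if a scrub/resilver, async destroy, or pending frees in
 * the free bpobj still have work to do in this pool.
 */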
2968 dsl_scan_active(dsl_scan_t *scn)
2969 {
2970 	spa_t *spa = scn->scn_dp->dp_spa;
2971 	uint64_t used = 0, comp, uncomp;
2972 
2973 	if (spa->spa_load_state != SPA_LOAD_NONE)
2974 		return (B_FALSE);
2975 	if (spa_shutting_down(spa))
2976 		return (B_FALSE);
2977 	if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
2978 	    (scn->scn_async_destroying && !scn->scn_async_stalled))
2979 		return (B_TRUE);
2980 
2981 	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
2982 		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
2983 		    &used, &comp, &uncomp);
2984 	}
2985 	return (used != 0);
2986 }
2987 
2988 static boolean_t
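/*
 * Decide whether the block copy referenced by this DVA needs to be
 * resilvered, based on the vdev it lives on and that vdev's DTLs.
 */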
2989 dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
2990     uint64_t phys_birth)
2991 {
2992 	vdev_t *vd;
2993 
2994 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
2995 
2996 	if (vd->vdev_ops == &vdev_indirect_ops) {
2997 		/*
2998 		 * The indirect vdev can point to multiple
2999 		 * vdevs.  For simplicity, always create
3000 		 * the resilver zio_t. zio_vdev_io_start()
3001 		 * will bypass the child resilver i/o's if
3002 		 * they are on vdevs that don't have DTL's.
3003 		 */
3004 		return (B_TRUE);
3005 	}
3006 
3007 	if (DVA_GET_GANG(dva)) {
3008 		/*
3009 		 * Gang members may be spread across multiple
3010 		 * vdevs, so the best estimate we have is the
3011 		 * scrub range, which has already been checked.
3012 		 * XXX -- it would be better to change our
3013 		 * allocation policy to ensure that all
3014 		 * gang members reside on the same vdev.
3015 		 */
3016 		return (B_TRUE);
3017 	}
3018 
3019 	/*
3020 	 * Check if the txg falls within the range which must be
3021 	 * resilvered.  DVAs outside this range can always be skipped.
3022 	 */
3023 	if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
3024 		return (B_FALSE);
3025 
3026 	/*
3027 	 * Check if the top-level vdev must resilver this offset.
3028 	 * When the offset does not intersect with a dirty leaf DTL
3029 	 * then it may be possible to skip the resilver IO.  The psize
3030 	 * is provided instead of asize to simplify the check for RAIDZ.
3031 	 */
3032 	if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize))
3033 		return (B_FALSE);
3034 
3035 	return (B_TRUE);
3036 }
3037 
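/*
 * Process the async-destroy work for this txg: free blocks from the free
 * bpobj and the async-destroy bptree, then handle the obsolete bpobj used
 * by device removal.  Returns nonzero (e.g. ERESTART) if the work was
 * interrupted and should continue in a later txg.
 */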
3038 static int
3039 dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
3040 {
3041 	int err = 0;
3042 	dsl_scan_t *scn = dp->dp_scan;
3043 	spa_t *spa = dp->dp_spa;
3044 
3045 	if (spa_suspend_async_destroy(spa))
3046 		return (0);
3047 
3048 	if (zfs_free_bpobj_enabled &&
3049 	    spa_version(spa) >= SPA_VERSION_DEADLISTS) {
3050 		scn->scn_is_bptree = B_FALSE;
3051 		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
3052 		scn->scn_zio_root = zio_root(spa, NULL,
3053 		    NULL, ZIO_FLAG_MUSTSUCCEED);
3054 		err = bpobj_iterate(&dp->dp_free_bpobj,
3055 		    dsl_scan_free_block_cb, scn, tx);
3056 		VERIFY0(zio_wait(scn->scn_zio_root));
3057 		scn->scn_zio_root = NULL;
3058 
3059 		if (err != 0 && err != ERESTART)
3060 			zfs_panic_recover("error %u from bpobj_iterate()", err);
3061 	}
3062 
3063 	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
3064 		ASSERT(scn->scn_async_destroying);
3065 		scn->scn_is_bptree = B_TRUE;
3066 		scn->scn_zio_root = zio_root(spa, NULL,
3067 		    NULL, ZIO_FLAG_MUSTSUCCEED);
3068 		err = bptree_iterate(dp->dp_meta_objset,
3069 		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
3070 		VERIFY0(zio_wait(scn->scn_zio_root));
3071 		scn->scn_zio_root = NULL;
3072 
3073 		if (err == EIO || err == ECKSUM) {
3074 			err = 0;
3075 		} else if (err != 0 && err != ERESTART) {
3076 			zfs_panic_recover("error %u from "
3077 			    "traverse_dataset_destroyed()", err);
3078 		}
3079 
3080 		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
3081 			/* finished; deactivate async destroy feature */
3082 			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
3083 			ASSERT(!spa_feature_is_active(spa,
3084 			    SPA_FEATURE_ASYNC_DESTROY));
3085 			VERIFY0(zap_remove(dp->dp_meta_objset,
3086 			    DMU_POOL_DIRECTORY_OBJECT,
3087 			    DMU_POOL_BPTREE_OBJ, tx));
3088 			VERIFY0(bptree_free(dp->dp_meta_objset,
3089 			    dp->dp_bptree_obj, tx));
3090 			dp->dp_bptree_obj = 0;
3091 			scn->scn_async_destroying = B_FALSE;
3092 			scn->scn_async_stalled = B_FALSE;
3093 		} else {
3094 			/*
3095 			 * If we didn't make progress, mark the async
3096 			 * destroy as stalled, so that we will not initiate
3097 			 * a spa_sync() on its behalf.  Note that we only
3098 			 * check this if we are not finished, because if the
3099 			 * bptree had no blocks for us to visit, we can
3100 			 * finish without "making progress".
3101 			 */
3102 			scn->scn_async_stalled =
3103 			    (scn->scn_visited_this_txg == 0);
3104 		}
3105 	}
3106 	if (scn->scn_visited_this_txg) {
3107 		zfs_dbgmsg("freed %llu blocks in %llums from "
3108 		    "free_bpobj/bptree txg %llu; err=%d",
3109 		    (longlong_t)scn->scn_visited_this_txg,
3110 		    (longlong_t)
3111 		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
3112 		    (longlong_t)tx->tx_txg, err);
3113 		scn->scn_visited_this_txg = 0;
3114 
3115 		/*
3116 		 * Write out changes to the DDT that may be required as a
3117 		 * result of the blocks freed.  This ensures that the DDT
3118 		 * is clean when a scrub/resilver runs.
3119 		 */
3120 		ddt_sync(spa, tx->tx_txg);
3121 	}
3122 	if (err != 0)
3123 		return (err);
3124 	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
3125 	    zfs_free_leak_on_eio &&
3126 	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
3127 	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
3128 	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
3129 		/*
3130 		 * We have finished background destroying, but there is still
3131 		 * some space left in the dp_free_dir. Transfer this leaked
3132 		 * space to the dp_leak_dir.
3133 		 */
3134 		if (dp->dp_leak_dir == NULL) {
3135 			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
3136 			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
3137 			    LEAK_DIR_NAME, tx);
3138 			VERIFY0(dsl_pool_open_special_dir(dp,
3139 			    LEAK_DIR_NAME, &dp->dp_leak_dir));
3140 			rrw_exit(&dp->dp_config_rwlock, FTAG);
3141 		}
3142 		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
3143 		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3144 		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3145 		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3146 		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
3147 		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3148 		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3149 		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3150 	}
3151 
3152 	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
3153 		/* finished; verify that space accounting went to zero */
3154 		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
3155 		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
3156 		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
3157 	}
3158 
3159 	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
3160 	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3161 	    DMU_POOL_OBSOLETE_BPOBJ));
3162 	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
3163 		ASSERT(spa_feature_is_active(dp->dp_spa,
3164 		    SPA_FEATURE_OBSOLETE_COUNTS));
3165 
3166 		scn->scn_is_bptree = B_FALSE;
3167 		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
3168 		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
3169 		    dsl_scan_obsolete_block_cb, scn, tx);
3170 		if (err != 0 && err != ERESTART)
3171 			zfs_panic_recover("error %u from bpobj_iterate()", err);
3172 
3173 		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
3174 			dsl_pool_destroy_obsolete_bpobj(dp, tx);
3175 	}
3176 
3177 	return (0);
3178 }
3179 
3180 /*
3181  * This is the primary entry point for scans that is called from syncing
3182  * context. Scans must happen entirely during syncing context so that we
3183  * can guarantee that blocks we are currently scanning will not change out
3184  * from under us. While a scan is active, this function controls how quickly
3185  * transaction groups proceed, instead of the normal handling provided by
3186  * txg_sync_thread().
3187  */
3188 void
3189 dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
3190 {
3191 	dsl_scan_t *scn = dp->dp_scan;
3192 	spa_t *spa = dp->dp_spa;
3193 	int err = 0;
3194 	state_sync_type_t sync_type = SYNC_OPTIONAL;
3195 
3196 	/*
3197 	 * Check for scn_restart_txg before checking spa_load_state, so
3198 	 * that we can restart an old-style scan while the pool is being
3199 	 * imported (see dsl_scan_init).
3200 	 */
3201 	if (dsl_scan_restarting(scn, tx)) {
3202 		pool_scan_func_t func = POOL_SCAN_SCRUB;
3203 		dsl_scan_done(scn, B_FALSE, tx);
3204 		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
3205 			func = POOL_SCAN_RESILVER;
3206 		zfs_dbgmsg("restarting scan func=%u txg=%llu",
3207 		    func, (longlong_t)tx->tx_txg);
3208 		dsl_scan_setup_sync(&func, tx);
3209 	}
3210 
3211 	/*
3212 	 * Only process scans in sync pass 1.
3213 	 */
3214 	if (spa_sync_pass(dp->dp_spa) > 1)
3215 		return;
3216 
3217 	/*
3218 	 * If the spa is shutting down, then stop scanning. This will
3219 	 * ensure that the scan does not dirty any new data during the
3220 	 * shutdown phase.
3221 	 */
3222 	if (spa_shutting_down(spa))
3223 		return;
3224 
3225 	/*
3226 	 * If the scan is inactive due to a stalled async destroy, try again.
3227 	 */
3228 	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
3229 		return;
3230 
3231 	/* reset scan statistics */
3232 	scn->scn_visited_this_txg = 0;
3233 	scn->scn_holes_this_txg = 0;
3234 	scn->scn_lt_min_this_txg = 0;
3235 	scn->scn_gt_max_this_txg = 0;
3236 	scn->scn_ddt_contained_this_txg = 0;
3237 	scn->scn_objsets_visited_this_txg = 0;
3238 	scn->scn_avg_seg_size_this_txg = 0;
3239 	scn->scn_segs_this_txg = 0;
3240 	scn->scn_avg_zio_size_this_txg = 0;
3241 	scn->scn_zios_this_txg = 0;
3242 	scn->scn_suspending = B_FALSE;
3243 	scn->scn_sync_start_time = gethrtime();
3244 	spa->spa_scrub_active = B_TRUE;
3245 
3246 	/*
3247 	 * First process the async destroys.  If we pause, don't do
3248 	 * any scrubbing or resilvering.  This ensures that there are no
3249 	 * async destroys while we are scanning, so the scan code doesn't
3250 	 * have to worry about traversing it.  It is also faster to free the
3251 	 * blocks than to scrub them.
3252 	 */
3253 	err = dsl_process_async_destroys(dp, tx);
3254 	if (err != 0)
3255 		return;
3256 
3257 	if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
3258 		return;
3259 
3260 	/*
3261 	 * Wait a few txgs after importing to begin scanning so that
3262 	 * we can get the pool imported quickly.
3263 	 */
3264 	if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
3265 		return;
3266 
3267 	/*
3268 	 * It is possible to switch from unsorted to sorted at any time,
3269 	 * but afterwards the scan will remain sorted unless reloaded from
3270 	 * a checkpoint after a reboot.
3271 	 */
3272 	if (!zfs_scan_legacy) {
3273 		scn->scn_is_sorted = B_TRUE;
3274 		if (scn->scn_last_checkpoint == 0)
3275 			scn->scn_last_checkpoint = ddi_get_lbolt();
3276 	}
3277 
3278 	/*
3279 	 * For sorted scans, determine what kind of work we will be doing
3280 	 * this txg based on our memory limitations and whether or not we
3281 	 * need to perform a checkpoint.
3282 	 */
3283 	if (scn->scn_is_sorted) {
3284 		/*
3285 		 * If we are over our checkpoint interval, set scn_clearing
3286 		 * so that we can begin checkpointing immediately. The
3287 		 * checkpoint allows us to save a consistent bookmark
3288 		 * representing how much data we have scrubbed so far.
3289 		 * Otherwise, use the memory limit to determine if we should
3290 		 * scan for metadata or start issuing scrub IOs. We accumulate
3291 		 * metadata until we hit our hard memory limit at which point
3292 		 * we issue scrub IOs until we are at our soft memory limit.
3293 		 */
3294 		if (scn->scn_checkpointing ||
3295 		    ddi_get_lbolt() - scn->scn_last_checkpoint >
3296 		    SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
3297 			if (!scn->scn_checkpointing)
3298 				zfs_dbgmsg("begin scan checkpoint");
3299 
3300 			scn->scn_checkpointing = B_TRUE;
3301 			scn->scn_clearing = B_TRUE;
3302 		} else {
3303 			boolean_t should_clear = dsl_scan_should_clear(scn);
3304 			if (should_clear && !scn->scn_clearing) {
3305 				zfs_dbgmsg("begin scan clearing");
3306 				scn->scn_clearing = B_TRUE;
3307 			} else if (!should_clear && scn->scn_clearing) {
3308 				zfs_dbgmsg("finish scan clearing");
3309 				scn->scn_clearing = B_FALSE;
3310 			}
3311 		}
3312 	} else {
3313 		ASSERT0(scn->scn_checkpointing);
3314 		ASSERT0(scn->scn_clearing);
3315 	}
3316 
3317 	if (!scn->scn_clearing && scn->scn_done_txg == 0) {
3318 		/* Need to scan metadata for more blocks to scrub */
3319 		dsl_scan_phys_t *scnp = &scn->scn_phys;
3320 		taskqid_t prefetch_tqid;
3321 		uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
3322 		uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev);
3323 
3324 		/*
3325 		 * Calculate the max number of in-flight bytes for pool-wide
3326 		 * scanning operations (minimum 1MB). Limits for the issuing
3327 		 * phase are done per top-level vdev and are handled separately.
3328 		 */
3329 		scn->scn_maxinflight_bytes =
3330 		    MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);
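		/*
		 * A worked example (hypothetical values): with
		 * zfs_scan_vdev_limit = 4 MiB and 12 leaf vdevs,
		 * scn_maxinflight_bytes comes out to 48 MiB.
		 */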
3331 
3332 		if (scnp->scn_ddt_bookmark.ddb_class <=
3333 		    scnp->scn_ddt_class_max) {
3334 			ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
3335 			zfs_dbgmsg("doing scan sync txg %llu; "
3336 			    "ddt bm=%llu/%llu/%llu/%llx",
3337 			    (longlong_t)tx->tx_txg,
3338 			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
3339 			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
3340 			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
3341 			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
3342 		} else {
3343 			zfs_dbgmsg("doing scan sync txg %llu; "
3344 			    "bm=%llu/%llu/%llu/%llu",
3345 			    (longlong_t)tx->tx_txg,
3346 			    (longlong_t)scnp->scn_bookmark.zb_objset,
3347 			    (longlong_t)scnp->scn_bookmark.zb_object,
3348 			    (longlong_t)scnp->scn_bookmark.zb_level,
3349 			    (longlong_t)scnp->scn_bookmark.zb_blkid);
3350 		}
3351 
3352 		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
3353 		    NULL, ZIO_FLAG_CANFAIL);
3354 
3355 		scn->scn_prefetch_stop = B_FALSE;
3356 		prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
3357 		    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
3358 		ASSERT(prefetch_tqid != TASKQID_INVALID);
3359 
3360 		dsl_pool_config_enter(dp, FTAG);
3361 		dsl_scan_visit(scn, tx);
3362 		dsl_pool_config_exit(dp, FTAG);
3363 
3364 		mutex_enter(&dp->dp_spa->spa_scrub_lock);
3365 		scn->scn_prefetch_stop = B_TRUE;
3366 		cv_broadcast(&spa->spa_scrub_io_cv);
3367 		mutex_exit(&dp->dp_spa->spa_scrub_lock);
3368 
3369 		taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
3370 		(void) zio_wait(scn->scn_zio_root);
3371 		scn->scn_zio_root = NULL;
3372 
3373 		zfs_dbgmsg("scan visited %llu blocks in %llums "
3374 		    "(%llu os's, %llu holes, %llu < mintxg, "
3375 		    "%llu in ddt, %llu > maxtxg)",
3376 		    (longlong_t)scn->scn_visited_this_txg,
3377 		    (longlong_t)NSEC2MSEC(gethrtime() -
3378 		    scn->scn_sync_start_time),
3379 		    (longlong_t)scn->scn_objsets_visited_this_txg,
3380 		    (longlong_t)scn->scn_holes_this_txg,
3381 		    (longlong_t)scn->scn_lt_min_this_txg,
3382 		    (longlong_t)scn->scn_ddt_contained_this_txg,
3383 		    (longlong_t)scn->scn_gt_max_this_txg);
3384 
3385 		if (!scn->scn_suspending) {
3386 			ASSERT0(avl_numnodes(&scn->scn_queue));
3387 			scn->scn_done_txg = tx->tx_txg + 1;
3388 			if (scn->scn_is_sorted) {
3389 				scn->scn_checkpointing = B_TRUE;
3390 				scn->scn_clearing = B_TRUE;
3391 			}
3392 			zfs_dbgmsg("scan complete txg %llu",
3393 			    (longlong_t)tx->tx_txg);
3394 		}
3395 	} else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
3396 		/* need to issue scrubbing IOs from per-vdev queues */
3397 		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
3398 		    NULL, ZIO_FLAG_CANFAIL);
3399 		scan_io_queues_run(scn);
3400 		(void) zio_wait(scn->scn_zio_root);
3401 		scn->scn_zio_root = NULL;
3402 
3403 		/* calculate and dprintf the current memory usage */
3404 		(void) dsl_scan_should_clear(scn);
3405 		dsl_scan_update_stats(scn);
3406 
3407 		zfs_dbgmsg("scrubbed %llu blocks (%llu segs) in %llums "
3408 		    "(avg_block_size = %llu, avg_seg_size = %llu)",
3409 		    (longlong_t)scn->scn_zios_this_txg,
3410 		    (longlong_t)scn->scn_segs_this_txg,
3411 		    (longlong_t)NSEC2MSEC(gethrtime() -
3412 		    scn->scn_sync_start_time),
3413 		    (longlong_t)scn->scn_avg_zio_size_this_txg,
3414 		    (longlong_t)scn->scn_avg_seg_size_this_txg);
3415 	} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
3416 		/* Finished with everything. Mark the scrub as complete */
3417 		zfs_dbgmsg("scan issuing complete txg %llu",
3418 		    (longlong_t)tx->tx_txg);
3419 		ASSERT3U(scn->scn_done_txg, !=, 0);
3420 		ASSERT0(spa->spa_scrub_inflight);
3421 		ASSERT0(scn->scn_bytes_pending);
3422 		dsl_scan_done(scn, B_TRUE, tx);
3423 		sync_type = SYNC_MANDATORY;
3424 	}
3425 
3426 	dsl_scan_sync_state(scn, tx, sync_type);
3427 }
3428 
3429 static void
3430 count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
3431 {
3432 	int i;
3433 
3434 	/*
3435 	 * Don't count embedded bp's, since we already did the work of
3436 	 * scanning these when we scanned the containing block.
3437 	 */
3438 	if (BP_IS_EMBEDDED(bp))
3439 		return;
3440 
3441 	/*
3442 	 * Update the spa's stats on how many bytes we have issued.
3443 	 * Sequential scrubs create a zio for each DVA of the bp. Each
3444 	 * of these will include all DVAs for repair purposes, but the
3445 	 * zio code will only try the first one unless there is an issue.
3446 	 * Therefore, we should only count the first DVA for these IOs.
3447 	 */
3448 	if (scn->scn_is_sorted) {
3449 		atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
3450 		    DVA_GET_ASIZE(&bp->blk_dva[0]));
3451 	} else {
3452 		spa_t *spa = scn->scn_dp->dp_spa;
3453 
3454 		for (i = 0; i < BP_GET_NDVAS(bp); i++) {
3455 			atomic_add_64(&spa->spa_scan_pass_issued,
3456 			    DVA_GET_ASIZE(&bp->blk_dva[i]));
3457 		}
3458 	}
3459 
3460 	/*
3461 	 * If we resume after a reboot, zab will be NULL; don't record
3462 	 * incomplete stats in that case.
3463 	 */
3464 	if (zab == NULL)
3465 		return;
3466 
3467 	mutex_enter(&zab->zab_lock);
3468 
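	/*
	 * Accumulate the block's stats into four buckets: the exact
	 * (level, type) slot, the level's total (DMU_OT_TOTAL column),
	 * the type's total across all levels (DN_MAX_LEVELS row), and
	 * the grand total, as selected by the loop indices below.
	 */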
3469 	for (i = 0; i < 4; i++) {
3470 		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
3471 		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
3472 		if (t & DMU_OT_NEWTYPE)
3473 			t = DMU_OT_OTHER;
3474 		zfs_blkstat_t *zb = &zab->zab_type[l][t];
3475 		int equal;
3476 
3477 		zb->zb_count++;
3478 		zb->zb_asize += BP_GET_ASIZE(bp);
3479 		zb->zb_lsize += BP_GET_LSIZE(bp);
3480 		zb->zb_psize += BP_GET_PSIZE(bp);
3481 		zb->zb_gangs += BP_COUNT_GANG(bp);
3482 
3483 		switch (BP_GET_NDVAS(bp)) {
3484 		case 2:
3485 			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3486 			    DVA_GET_VDEV(&bp->blk_dva[1]))
3487 				zb->zb_ditto_2_of_2_samevdev++;
3488 			break;
3489 		case 3:
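			/*
			 * Count how many of the three DVA pairs share a
			 * vdev; by transitivity the sum can only be 0, 1
			 * or 3.
			 */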
3490 			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3491 			    DVA_GET_VDEV(&bp->blk_dva[1])) +
3492 			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3493 			    DVA_GET_VDEV(&bp->blk_dva[2])) +
3494 			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
3495 			    DVA_GET_VDEV(&bp->blk_dva[2]));
3496 			if (equal == 1)
3497 				zb->zb_ditto_2_of_3_samevdev++;
3498 			else if (equal == 3)
3499 				zb->zb_ditto_3_of_3_samevdev++;
3500 			break;
3501 		}
3502 	}
3503 
3504 	mutex_exit(&zab->zab_lock);
3505 }
3506 
3507 static void
3508 scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
3509 {
3510 	avl_index_t idx;
3511 	int64_t asize = SIO_GET_ASIZE(sio);
3512 	dsl_scan_t *scn = queue->q_scn;
3513 
3514 	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3515 
3516 	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
3517 		/* block is already scheduled for reading */
3518 		atomic_add_64(&scn->scn_bytes_pending, -asize);
3519 		sio_free(sio);
3520 		return;
3521 	}
3522 	avl_insert(&queue->q_sios_by_addr, sio, idx);
3523 	queue->q_sio_memused += SIO_GET_MUSED(sio);
3524 	range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), asize);
3525 }
3526 
3527 /*
3528  * Given all the info we got from our metadata scanning process, we
3529  * construct a scan_io_t and insert it into the scan sorting queue. The
3530  * I/O must already be suitable for us to process. This is controlled
3531  * by dsl_scan_enqueue().
3532  */
3533 static void
3534 scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
3535     int zio_flags, const zbookmark_phys_t *zb)
3536 {
3537 	dsl_scan_t *scn = queue->q_scn;
3538 	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
3539 
3540 	ASSERT0(BP_IS_GANG(bp));
3541 	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3542 
3543 	bp2sio(bp, sio, dva_i);
3544 	sio->sio_flags = zio_flags;
3545 	sio->sio_zb = *zb;
3546 
3547 	/*
3548 	 * Increment the bytes pending counter now so that we can't
3549 	 * get an integer underflow in case the worker processes the
3550 	 * zio before we get to incrementing this counter.
3551 	 */
3552 	atomic_add_64(&scn->scn_bytes_pending, SIO_GET_ASIZE(sio));
3553 
3554 	scan_io_queue_insert_impl(queue, sio);
3555 }
3556 
3557 /*
3558  * Given a set of I/O parameters as discovered by the metadata traversal
3559  * process, attempts to place the I/O into the sorted queues (if allowed),
3560  * or immediately executes the I/O.
3561  */
3562 static void
3563 dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
3564     const zbookmark_phys_t *zb)
3565 {
3566 	spa_t *spa = dp->dp_spa;
3567 
3568 	ASSERT(!BP_IS_EMBEDDED(bp));
3569 
3570 	/*
3571 	 * Gang blocks are hard to issue sequentially, so we just issue them
3572 	 * here immediately instead of queuing them.
3573 	 */
3574 	if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
3575 		scan_exec_io(dp, bp, zio_flags, zb, NULL);
3576 		return;
3577 	}
3578 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
3579 		dva_t dva;
3580 		vdev_t *vdev;
3581 
3582 		dva = bp->blk_dva[i];
3583 		vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
3584 		ASSERT(vdev != NULL);
3585 
3586 		mutex_enter(&vdev->vdev_scan_io_queue_lock);
3587 		if (vdev->vdev_scan_io_queue == NULL)
3588 			vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
3589 		ASSERT(dp->dp_scan != NULL);
3590 		scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
3591 		    i, zio_flags, zb);
3592 		mutex_exit(&vdev->vdev_scan_io_queue_lock);
3593 	}
3594 }
3595 
3596 static int
3597 dsl_scan_scrub_cb(dsl_pool_t *dp,
3598     const blkptr_t *bp, const zbookmark_phys_t *zb)
3599 {
3600 	dsl_scan_t *scn = dp->dp_scan;
3601 	spa_t *spa = dp->dp_spa;
3602 	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
3603 	size_t psize = BP_GET_PSIZE(bp);
3604 	boolean_t needs_io;
3605 	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
3606 	int d;
3607 
3608 	if (phys_birth <= scn->scn_phys.scn_min_txg ||
3609 	    phys_birth >= scn->scn_phys.scn_max_txg) {
3610 		count_block(scn, dp->dp_blkstats, bp);
3611 		return (0);
3612 	}
3613 
3614 	/* Embedded BP's have phys_birth==0, so we reject them above. */
3615 	ASSERT(!BP_IS_EMBEDDED(bp));
3616 
3617 	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
3618 	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
3619 		zio_flags |= ZIO_FLAG_SCRUB;
3620 		needs_io = B_TRUE;
3621 	} else {
3622 		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
3623 		zio_flags |= ZIO_FLAG_RESILVER;
3624 		needs_io = B_FALSE;
3625 	}
3626 
3627 	/* If it's an intent log block, failure is expected. */
3628 	if (zb->zb_level == ZB_ZIL_LEVEL)
3629 		zio_flags |= ZIO_FLAG_SPECULATIVE;
3630 
3631 	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
3632 		const dva_t *dva = &bp->blk_dva[d];
3633 
3634 		/*
3635 		 * Keep track of how much data we've examined so that
3636 		 * zpool(1M) status can make useful progress reports.
3637 		 */
3638 		scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
3639 		spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);
3640 
3641 		/* if it's a resilver, this may not be in the target range */
3642 		if (!needs_io)
3643 			needs_io = dsl_scan_need_resilver(spa, dva, psize,
3644 			    phys_birth);
3645 	}
3646 
3647 	if (needs_io && !zfs_no_scrub_io) {
3648 		dsl_scan_enqueue(dp, bp, zio_flags, zb);
3649 	} else {
3650 		count_block(scn, dp->dp_blkstats, bp);
3651 	}
3652 
3653 	/* do not relocate this block */
3654 	return (0);
3655 }
3656 
3657 static void
3658 dsl_scan_scrub_done(zio_t *zio)
3659 {
3660 	spa_t *spa = zio->io_spa;
3661 	blkptr_t *bp = zio->io_bp;
3662 	dsl_scan_io_queue_t *queue = zio->io_private;
3663 
3664 	abd_free(zio->io_abd);
3665 
3666 	if (queue == NULL) {
3667 		mutex_enter(&spa->spa_scrub_lock);
3668 		ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
3669 		spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
3670 		cv_broadcast(&spa->spa_scrub_io_cv);
3671 		mutex_exit(&spa->spa_scrub_lock);
3672 	} else {
3673 		mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
3674 		ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
3675 		queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
3676 		cv_broadcast(&queue->q_zio_cv);
3677 		mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
3678 	}
3679 
3680 	if (zio->io_error && (zio->io_error != ECKSUM ||
3681 	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
3682 		atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
3683 	}
3684 }
3685 
3686 /*
3687  * Given a scanning zio's information, executes the zio. The zio need
3688  * not necessarily be sortable; this function simply executes the
3689  * zio, no matter what it is. The optional queue argument allows the
3690  * caller to specify that they want per top level vdev IO rate limiting
3691  * instead of the legacy global limiting.
3692  */
3693 static void
3694 scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
3695     const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
3696 {
3697 	spa_t *spa = dp->dp_spa;
3698 	dsl_scan_t *scn = dp->dp_scan;
3699 	size_t size = BP_GET_PSIZE(bp);
3700 	abd_t *data = abd_alloc_for_io(size, B_FALSE);
3701 
3702 	if (queue == NULL) {
3703 		mutex_enter(&spa->spa_scrub_lock);
3704 		while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
3705 			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
3706 		spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
3707 		mutex_exit(&spa->spa_scrub_lock);
3708 	} else {
3709 		kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
3710 
3711 		mutex_enter(q_lock);
3712 		while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
3713 			cv_wait(&queue->q_zio_cv, q_lock);
3714 		queue->q_inflight_bytes += BP_GET_PSIZE(bp);
3715 		mutex_exit(q_lock);
3716 	}
3717 
3718 	count_block(dp->dp_scan, dp->dp_blkstats, bp);
3719 	zio_nowait(zio_read(dp->dp_scan->scn_zio_root, spa, bp, data, size,
3720 	    dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
3721 }
3722 
3723 /*
3724  * This is the primary extent sorting algorithm. We balance two parameters:
3725  * 1) how many bytes of I/O are in an extent
3726  * 2) how well the extent is filled with I/O (as a fraction of its total size)
3727  * Since we allow extents to have gaps between their constituent I/Os, it's
3728  * possible to have a fairly large extent that contains the same amount of
3729  * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
3730  * The algorithm sorts based on a score calculated from the extent's size,
3731  * the relative fill volume (in %) and a "fill weight" parameter that controls
3732  * the split between whether we prefer larger extents or more well populated
3733  * extents:
3734  *
3735  * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
3736  *
3737  * Example:
3738  * 1) assume extsz = 64 MiB
3739  * 2) assume fill = 32 MiB (extent is half full)
3740  * 3) assume fill_weight = 3
3741  * 4)	SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
3742  *	SCORE = 32M + (50 * 3 * 32M) / 100
3743  *	SCORE = 32M + (4800M / 100)
3744  *	SCORE = 32M + 48M
3745  *		^	^
3746  *		|	+--- final total relative fill-based score
3747  *		+--------- final total fill-based score
3748  *	SCORE = 80M
3749  *
3750  * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
3751  * extents that are more completely filled (in a 3:2 ratio) vs just larger.
3752  * Note that as an optimization, we replace multiplication and division by
3753  * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
3754  */
3755 static int
3756 ext_size_compare(const void *x, const void *y)
3757 {
3758 	const range_seg_t *rsa = x, *rsb = y;
3759 	uint64_t sa = rsa->rs_end - rsa->rs_start,
3760 	    sb = rsb->rs_end - rsb->rs_start;
3761 	uint64_t score_a, score_b;
3762 
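	/*
	 * The fill fraction is kept in 128ths rather than percent: the
	 * << 7 stands in for the multiplication by 100 in the formula
	 * above, and the final >> 7 for the division by 100.
	 */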
3763 	score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
3764 	    fill_weight * rsa->rs_fill) >> 7);
3765 	score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
3766 	    fill_weight * rsb->rs_fill) >> 7);
3767 
3768 	if (score_a > score_b)
3769 		return (-1);
3770 	if (score_a == score_b) {
3771 		if (rsa->rs_start < rsb->rs_start)
3772 			return (-1);
3773 		if (rsa->rs_start == rsb->rs_start)
3774 			return (0);
3775 		return (1);
3776 	}
3777 	return (1);
3778 }
3779 
3780 /*
3781  * Comparator for the q_sios_by_addr tree. Sorting is simply performed
3782  * based on LBA-order (from lowest to highest).
3783  */
3784 static int
3785 sio_addr_compare(const void *x, const void *y)
3786 {
3787 	const scan_io_t *a = x, *b = y;
3788 
3789 	return (AVL_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
3790 }
3791 
3792 /* IO queues are created on demand when they are needed. */
3793 static dsl_scan_io_queue_t *
3794 scan_io_queue_create(vdev_t *vd)
3795 {
3796 	dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
3797 	dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
3798 
3799 	q->q_scn = scn;
3800 	q->q_vd = vd;
3801 	q->q_sio_memused = 0;
3802 	cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
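	/*
	 * The queue maintains two views of its pending work: q_exts_by_addr
	 * is a range tree of extents keyed by offset, with a secondary index
	 * (q_exts_by_size) ordered by ext_size_compare() so the best-scoring
	 * extent can be picked for issue, while q_sios_by_addr holds the
	 * individual scan_io_t's in LBA order.
	 */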
3803 	q->q_exts_by_addr = range_tree_create_impl(&rt_avl_ops,
3804 	    &q->q_exts_by_size, ext_size_compare, zfs_scan_max_ext_gap);
3805 	avl_create(&q->q_sios_by_addr, sio_addr_compare,
3806 	    sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
3807 
3808 	return (q);
3809 }
3810 
3811 /*
3812  * Destroys a scan queue and all segments and scan_io_t's contained in it.
3813  * No further execution of I/O occurs; anything pending in the queue is
3814  * simply freed without being executed.
3815  */
3816 void
3817 dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
3818 {
3819 	dsl_scan_t *scn = queue->q_scn;
3820 	scan_io_t *sio;
3821 	void *cookie = NULL;
3822 	int64_t bytes_dequeued = 0;
3823 
3824 	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3825 
3826 	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
3827 	    NULL) {
3828 		ASSERT(range_tree_contains(queue->q_exts_by_addr,
3829 		    SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
3830 		bytes_dequeued += SIO_GET_ASIZE(sio);
3831 		queue->q_sio_memused -= SIO_GET_MUSED(sio);
3832 		sio_free(sio);
3833 	}
3834 
3835 	ASSERT0(queue->q_sio_memused);
3836 	atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
3837 	range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
3838 	range_tree_destroy(queue->q_exts_by_addr);
3839 	avl_destroy(&queue->q_sios_by_addr);
3840 	cv_destroy(&queue->q_zio_cv);
3841 
3842 	kmem_free(queue, sizeof (*queue));
3843 }
3844 
3845 /*
3846  * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
3847  * called on behalf of vdev_top_transfer when creating or destroying
3848  * a mirror vdev due to zpool attach/detach.
3849  */
3850 void
3851 dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
3852 {
3853 	mutex_enter(&svd->vdev_scan_io_queue_lock);
3854 	mutex_enter(&tvd->vdev_scan_io_queue_lock);
3855 
3856 	VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
3857 	tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
3858 	svd->vdev_scan_io_queue = NULL;
3859 	if (tvd->vdev_scan_io_queue != NULL)
3860 		tvd->vdev_scan_io_queue->q_vd = tvd;
3861 
3862 	mutex_exit(&tvd->vdev_scan_io_queue_lock);
3863 	mutex_exit(&svd->vdev_scan_io_queue_lock);
3864 }
3865 
3866 static void
3867 scan_io_queues_destroy(dsl_scan_t *scn)
3868 {
3869 	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
3870 
3871 	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
3872 		vdev_t *tvd = rvd->vdev_child[i];
3873 
3874 		mutex_enter(&tvd->vdev_scan_io_queue_lock);
3875 		if (tvd->vdev_scan_io_queue != NULL)
3876 			dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
3877 		tvd->vdev_scan_io_queue = NULL;
3878 		mutex_exit(&tvd->vdev_scan_io_queue_lock);
3879 	}
3880 }
3881 
3882 static void
3883 dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
3884 {
3885 	dsl_pool_t *dp = spa->spa_dsl_pool;
3886 	dsl_scan_t *scn = dp->dp_scan;
3887 	vdev_t *vdev;
3888 	kmutex_t *q_lock;
3889 	dsl_scan_io_queue_t *queue;
3890 	scan_io_t *srch_sio, *sio;
3891 	avl_index_t idx;
3892 	uint64_t start, size;
3893 
3894 	vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
3895 	ASSERT(vdev != NULL);
3896 	q_lock = &vdev->vdev_scan_io_queue_lock;
3897 	queue = vdev->vdev_scan_io_queue;
3898 
3899 	mutex_enter(q_lock);
3900 	if (queue == NULL) {
3901 		mutex_exit(q_lock);
3902 		return;
3903 	}
3904 
3905 	srch_sio = sio_alloc(BP_GET_NDVAS(bp));
3906 	bp2sio(bp, srch_sio, dva_i);
3907 	start = SIO_GET_OFFSET(srch_sio);
3908 	size = SIO_GET_ASIZE(srch_sio);
3909 
3910 	/*
3911 	 * We can find the zio in two states:
3912 	 * 1) Cold, just sitting in the queue of zio's to be issued at
3913 	 *	some point in the future. In this case, all we do is
3914 	 *	remove the zio from the q_sios_by_addr tree, decrement
3915 	 *	its data volume from the containing range_seg_t and
3916 	 *	resort the q_exts_by_size tree to reflect that the
3917 	 *	range_seg_t has lost some of its 'fill'. We don't shorten
3918 	 *	the range_seg_t - this is usually rare enough not to be
3919 	 *	worth the extra hassle of trying to keep track of precise
3920 	 *	extent boundaries.
3921 	 * 2) Hot, where the zio is currently in-flight in
3922 	 *	dsl_scan_issue_ios. In this case, we can't simply
3923 	 *	reach in and stop the in-flight zio's, so we instead
3924 	 *	block the caller. Eventually, dsl_scan_issue_ios will
3925 	 *	be done with issuing the zio's it gathered and will
3926 	 *	signal us.
3927 	 */
3928 	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
3929 	sio_free(srch_sio);
3930 
3931 	if (sio != NULL) {
3932 		int64_t asize = SIO_GET_ASIZE(sio);
3933 		blkptr_t tmpbp;
3934 
3935 		/* Got it while it was cold in the queue */
3936 		ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
3937 		ASSERT3U(size, ==, asize);
3938 		avl_remove(&queue->q_sios_by_addr, sio);
3939 		queue->q_sio_memused -= SIO_GET_MUSED(sio);
3940 
3941 		ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
3942 		range_tree_remove_fill(queue->q_exts_by_addr, start, size);
3943 
3944 		/*
3945 		 * We only update scn_bytes_pending in the cold path,
3946 		 * otherwise it will already have been accounted for as
3947 		 * part of the zio's execution.
3948 		 */
3949 		atomic_add_64(&scn->scn_bytes_pending, -asize);
3950 
3951 		/* count the block as though we issued it */
3952 		sio2bp(sio, &tmpbp);
3953 		count_block(scn, dp->dp_blkstats, &tmpbp);
3954 
3955 		sio_free(sio);
3956 	}
3957 	mutex_exit(q_lock);
3958 }
3959 
3960 /*
3961  * Callback invoked when a zio_free() zio is executing. This needs to be
3962  * intercepted to prevent the zio from deallocating a particular portion
3963  * of disk space that then gets reallocated and written to while we
3964  * still have it queued up for processing.
3965  */
3966 void
3967 dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
3968 {
3969 	dsl_pool_t *dp = spa->spa_dsl_pool;
3970 	dsl_scan_t *scn = dp->dp_scan;
3971 
3972 	ASSERT(!BP_IS_EMBEDDED(bp));
3973 	ASSERT(scn != NULL);
3974 	if (!dsl_scan_is_running(scn))
3975 		return;
3976 
3977 	for (int i = 0; i < BP_GET_NDVAS(bp); i++)
3978 		dsl_scan_freed_dva(spa, bp, i);
3979 }
3980