1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
24 * Copyright 2016 Gary Mills
26 * Copyright 2017 Joyent, Inc.
27 * Copyright (c) 2017 Datto Inc.
28 */
29
30#include <sys/dsl_scan.h>
31#include <sys/dsl_pool.h>
32#include <sys/dsl_dataset.h>
33#include <sys/dsl_prop.h>
34#include <sys/dsl_dir.h>
35#include <sys/dsl_synctask.h>
36#include <sys/dnode.h>
37#include <sys/dmu_tx.h>
38#include <sys/dmu_objset.h>
39#include <sys/arc.h>
40#include <sys/zap.h>
41#include <sys/zio.h>
42#include <sys/zfs_context.h>
43#include <sys/fs/zfs.h>
44#include <sys/zfs_znode.h>
45#include <sys/spa_impl.h>
46#include <sys/vdev_impl.h>
47#include <sys/zil_impl.h>
48#include <sys/zio_checksum.h>
49#include <sys/ddt.h>
50#include <sys/sa.h>
51#include <sys/sa_impl.h>
52#include <sys/zfeature.h>
53#include <sys/abd.h>
54#include <sys/range_tree.h>
55#ifdef _KERNEL
56#include <sys/zfs_vfsops.h>
57#endif
58
59/*
60 * Grand theory statement on scan queue sorting
61 *
62 * Scanning is implemented by recursively traversing all indirection levels
63 * in an object and reading all blocks referenced from said objects. This
64 * results in us approximately traversing the object from lowest logical
65 * offset to the highest. For best performance, we would want the logical
66 * blocks to be physically contiguous. However, this is frequently not the
67 * case with pools given the allocation patterns of copy-on-write filesystems.
68 * So instead, we put the I/Os into a reordering queue and issue them in a
69 * way that will most benefit physical disks (LBA-order).
70 *
71 * Queue management:
72 *
73 * Ideally, we would want to scan all metadata and queue up all block I/O
74 * prior to starting to issue it, because that allows us to do an optimal
75 * sorting job. This can however consume large amounts of memory. Therefore
76 * we continuously monitor the size of the queues and constrain them to 5%
77 * of physmem (1 / zfs_scan_mem_lim_fact). If the queues grow larger than this
78 * limit, we clear out a few of the largest extents at the head of the queues
79 * to make room for more scanning. Hopefully, these extents will be fairly
80 * large and contiguous, allowing us to approach sequential I/O throughput
81 * even without a fully sorted tree.
82 *
83 * Metadata scanning takes place in dsl_scan_visit(), which is called from
84 * dsl_scan_sync() every spa_sync(). If we have either fully scanned all
85 * metadata on the pool, or we need to make room in memory because our
86 * queues are too large, dsl_scan_visit() is postponed and
87 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
88 * that metadata scanning and queued I/O issuing are mutually exclusive. This
89 * allows us to provide maximum sequential I/O throughput for the majority of
90 * I/Os issued, since sequential I/O performance is significantly negatively
91 * impacted if it is interleaved with random I/O.
92 *
93 * Implementation Notes
94 *
95 * One side effect of the queued scanning algorithm is that the scanning code
96 * needs to be notified whenever a block is freed. This is needed to allow
97 * the scanning code to remove these I/Os from the issuing queue. Additionally,
98 * we do not attempt to queue gang blocks to be issued sequentially since this
99 * is very hard to do and would have an extremely limited performance benefit.
100 * Instead, we simply issue gang I/Os as soon as we find them using the legacy
101 * algorithm.
102 *
103 * Backwards compatibility
104 *
105 * This new algorithm is backwards compatible with the legacy on-disk data
106 * structures (and therefore does not require a new feature flag).
107 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
108 * will stop scanning metadata (in logical order) and wait for all outstanding
109 * sorted I/O to complete. Once this is done, we write out a checkpoint
110 * bookmark, indicating that we have scanned everything logically before it.
111 * If the pool is imported on a machine without the new sorting algorithm,
112 * the scan simply resumes from the last checkpoint using the legacy algorithm.
113 */
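/*
 * Illustrative sketch only (not the actual control flow): the per-sync
 * decision described above amounts to roughly the following, in terms of
 * the helpers named in this file:
 *
 *	if (<all metadata already scanned> || dsl_scan_should_clear(scn))
 *		scan_io_queues_run(scn);	(issue queued, sorted I/O)
 *	else
 *		dsl_scan_visit(scn, tx);	(continue scanning metadata)
 */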
114
115typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
116    const zbookmark_phys_t *);
117
118static scan_cb_t dsl_scan_scrub_cb;
119
120static int scan_ds_queue_compare(const void *a, const void *b);
121static int scan_prefetch_queue_compare(const void *a, const void *b);
122static void scan_ds_queue_clear(dsl_scan_t *scn);
123static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
124    uint64_t *txg);
125static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
126static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
127static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
128
129extern int zfs_vdev_async_write_active_min_dirty_percent;
130
131/*
132 * By default zfs will check to ensure it is not over the hard memory
133 * limit before each txg. If finer-grained control of this is needed
134 * this value can be set to 1 to enable checking before scanning each
135 * block.
136 */
137int zfs_scan_strict_mem_lim = B_FALSE;
138
139/*
140 * Maximum number of concurrently executing I/Os per top-level vdev.
141 * Tune with care. Very high settings (hundreds) are known to trigger
142 * some firmware bugs and resets on certain SSDs.
143 */
144int zfs_top_maxinflight = 32;		/* maximum I/Os per top-level */
145unsigned int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
146unsigned int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
147unsigned int zfs_scan_idle = 50;	/* idle window in clock ticks */
148
149/*
150 * Maximum number of in-flight bytes per leaf vdev. We attempt
151 * to strike a balance here between keeping the vdev queues full of I/Os
152 * at all times and not overflowing the queues, which would cause long
153 * latency and long txg sync times. No matter what, we will not
154 * overload the drives with I/O, since that is protected by
155 * zfs_vdev_scrub_max_active.
156 */
157unsigned long zfs_scan_vdev_limit = 4 << 20;
158
159int zfs_scan_issue_strategy = 0;
160int zfs_scan_legacy = B_FALSE;	/* don't queue & sort zios, go direct */
161uint64_t zfs_scan_max_ext_gap = 2 << 20;	/* in bytes */
162
163unsigned int zfs_scan_checkpoint_intval = 7200;	/* seconds */
164#define	ZFS_SCAN_CHECKPOINT_INTVAL	SEC_TO_TICK(zfs_scan_checkpoint_intval)
165
166/*
167 * fill_weight is non-tunable at runtime, so we copy it at module init from
168 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
169 * break queue sorting.
170 */
171uint64_t zfs_scan_fill_weight = 3;
172static uint64_t fill_weight;
173
174/* See dsl_scan_should_clear() for details on the memory limit tunables */
175uint64_t zfs_scan_mem_lim_min = 16 << 20;	/* bytes */
176uint64_t zfs_scan_mem_lim_soft_max = 128 << 20;	/* bytes */
177int zfs_scan_mem_lim_fact = 20;		/* fraction of physmem */
178int zfs_scan_mem_lim_soft_fact = 20;	/* fraction of mem lim above */
179
180unsigned int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */
181unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
182/* min millisecs to obsolete per txg */
183unsigned int zfs_obsolete_min_time_ms = 500;
184/* min millisecs to resilver per txg */
185unsigned int zfs_resilver_min_time_ms = 3000;
186int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
187boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
188boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
189enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
190/* max number of blocks to free in a single TXG */
191uint64_t zfs_async_block_max_blocks = UINT64_MAX;
192
193int zfs_resilver_disable_defer = 0; /* set to disable resilver deferring */
194
195/*
196 * We wait a few txgs after importing a pool to begin scanning so that
197 * the import / mounting code isn't held up by scrub / resilver IO.
198 * Unfortunately, it is a bit difficult to determine exactly how long
199 * this will take, since userspace triggers fs mounts and the kernel
200 * creates zvol minors asynchronously.
201 * As a result, the value provided here is a bit arbitrary, but
202 * represents a reasonable estimate of how many txgs it will take to
203 * finish fully importing a pool.
204 */
205#define	SCAN_IMPORT_WAIT_TXGS		5
206
207
208#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
209	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
210	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
211
212extern int zfs_txg_timeout;
213
214/*
215 * Enable/disable the processing of the free_bpobj object.
216 */
217boolean_t zfs_free_bpobj_enabled = B_TRUE;
218
219/* the order has to match pool_scan_type */
220static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
221	NULL,
222	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
223	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
224};
225
226/* In-core node for the scn->scn_queue. Represents a dataset to be scanned */
227typedef struct {
228	uint64_t	sds_dsobj;
229	uint64_t	sds_txg;
230	avl_node_t	sds_node;
231} scan_ds_t;
232
233/*
234 * This controls what conditions are placed on dsl_scan_sync_state():
235 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
236 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
237 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
238 *	write out the scn_phys_cached version.
239 * See dsl_scan_sync_state for details.
240 */
241typedef enum {
242	SYNC_OPTIONAL,
243	SYNC_MANDATORY,
244	SYNC_CACHED
245} state_sync_type_t;
246
247/*
248 * This struct represents the minimum information needed to reconstruct a
249 * zio for sequential scanning. This is useful because many of these will
250 * accumulate in the sequential IO queues before being issued, so saving
251 * memory matters here.
252 */
253typedef struct scan_io {
254	/* fields from blkptr_t */
255	uint64_t		sio_blk_prop;
256	uint64_t		sio_phys_birth;
257	uint64_t		sio_birth;
258	zio_cksum_t		sio_cksum;
259	uint32_t		sio_nr_dvas;
260
261	/* fields from zio_t */
262	uint32_t		sio_flags;
263	zbookmark_phys_t	sio_zb;
264
265	/* members for queue sorting */
266	union {
267		avl_node_t	sio_addr_node; /* link into issuing queue */
268		list_node_t	sio_list_node; /* link for issuing to disk */
269	} sio_nodes;
270
271	/*
272	 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
273	 * depending on how many were in the original bp. Only the
274	 * first DVA is really used for sorting and issuing purposes.
275	 * The other DVAs (if provided) simply exist so that the zio
276	 * layer can find additional copies to repair from in the
277 * event of an error. This array must go at the end of the
278 * struct so that it can hold a variable number of elements.
279	 */
280	dva_t			sio_dva[0];
281} scan_io_t;
282
283#define	SIO_SET_OFFSET(sio, x)		DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
284#define	SIO_SET_ASIZE(sio, x)		DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
285#define	SIO_GET_OFFSET(sio)		DVA_GET_OFFSET(&(sio)->sio_dva[0])
286#define	SIO_GET_ASIZE(sio)		DVA_GET_ASIZE(&(sio)->sio_dva[0])
287#define	SIO_GET_END_OFFSET(sio)		\
288	(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
289#define	SIO_GET_MUSED(sio)		\
290	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
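/*
 * Illustration only: for a block with a single copy, SIO_GET_MUSED() is
 * sizeof (scan_io_t) + 1 * sizeof (dva_t), i.e. the queued record carries
 * one 16-byte DVA rather than the three DVA slots of a full 128-byte
 * blkptr_t. This is what makes it affordable to accumulate large numbers
 * of these records in the sorting queues.
 */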
291
292struct dsl_scan_io_queue {
293	dsl_scan_t	*q_scn; /* associated dsl_scan_t */
294	vdev_t		*q_vd; /* top-level vdev that this queue represents */
295
296	/* trees used for sorting I/Os and extents of I/Os */
297	range_tree_t	*q_exts_by_addr;
298	avl_tree_t	q_exts_by_size;
299	avl_tree_t	q_sios_by_addr;
300	uint64_t	q_sio_memused;
301
302	/* members for zio rate limiting */
303	uint64_t	q_maxinflight_bytes;
304	uint64_t	q_inflight_bytes;
305	kcondvar_t	q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
306
307	/* per txg statistics */
308	uint64_t	q_total_seg_size_this_txg;
309	uint64_t	q_segs_this_txg;
310	uint64_t	q_total_zio_size_this_txg;
311	uint64_t	q_zios_this_txg;
312};
313
314/* private data for dsl_scan_prefetch_cb() */
315typedef struct scan_prefetch_ctx {
316	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
317	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
318	boolean_t spc_root;		/* is this prefetch for an objset? */
319	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
320	uint16_t spc_datablkszsec;	/* dn_datablkszsec of current dnode */
321} scan_prefetch_ctx_t;
322
323/* private data for dsl_scan_prefetch() */
324typedef struct scan_prefetch_issue_ctx {
325	avl_node_t spic_avl_node;	/* link into scn->scn_prefetch_queue */
326	scan_prefetch_ctx_t *spic_spc;	/* spc for the callback */
327	blkptr_t spic_bp;		/* bp to prefetch */
328	zbookmark_phys_t spic_zb;	/* bookmark to prefetch */
329} scan_prefetch_issue_ctx_t;
330
331static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
332    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
333static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
334    scan_io_t *sio);
335
336static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
337static void scan_io_queues_destroy(dsl_scan_t *scn);
338
339static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
340
341/* sio->sio_nr_dvas must be set so we know which cache to free from */
342static void
343sio_free(scan_io_t *sio)
344{
345	ASSERT3U(sio->sio_nr_dvas, >, 0);
346	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
347
348	kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
349}
350
351/* It is up to the caller to set sio->sio_nr_dvas for freeing */
352static scan_io_t *
353sio_alloc(unsigned short nr_dvas)
354{
355	ASSERT3U(nr_dvas, >, 0);
356	ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
357
358	return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
359}
360
361void
362scan_init(void)
363{
364	/*
365	 * This is used in ext_size_compare() to weight segments
366	 * based on how sparse they are. This cannot be changed
367	 * mid-scan and the tree comparison functions don't currently
368	 * have a mechanism for passing additional context to the
369	 * compare functions. Thus we store this value globally and
370	 * only allow it to be set at module initialization time.
371	 */
372	fill_weight = zfs_scan_fill_weight;
373
374	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
375		char name[36];
376
377		(void) sprintf(name, "sio_cache_%d", i);
378		sio_cache[i] = kmem_cache_create(name,
379		    (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
380		    0, NULL, NULL, NULL, NULL, NULL, 0);
381	}
382}
383
384void
385scan_fini(void)
386{
387	for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
388		kmem_cache_destroy(sio_cache[i]);
389	}
390}
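/*
 * Illustrative usage of the per-DVA-count caches above (a sketch, not a
 * fragment of the actual callers):
 *
 *	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
 *	bp2sio(bp, sio, 0);
 *	...
 *	sio_free(sio);		(sio_nr_dvas selects the matching cache)
 */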
391
392static inline boolean_t
393dsl_scan_is_running(const dsl_scan_t *scn)
394{
395	return (scn->scn_phys.scn_state == DSS_SCANNING);
396}
397
398boolean_t
399dsl_scan_resilvering(dsl_pool_t *dp)
400{
401	return (dsl_scan_is_running(dp->dp_scan) &&
402	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
403}
404
405static inline void
406sio2bp(const scan_io_t *sio, blkptr_t *bp)
407{
408	bzero(bp, sizeof (*bp));
409	bp->blk_prop = sio->sio_blk_prop;
410	bp->blk_phys_birth = sio->sio_phys_birth;
411	bp->blk_birth = sio->sio_birth;
412	bp->blk_fill = 1;	/* we always only work with data pointers */
413	bp->blk_cksum = sio->sio_cksum;
414
415	ASSERT3U(sio->sio_nr_dvas, >, 0);
416	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
417
418	bcopy(sio->sio_dva, bp->blk_dva, sio->sio_nr_dvas * sizeof (dva_t));
419}
420
421static inline void
422bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
423{
424	sio->sio_blk_prop = bp->blk_prop;
425	sio->sio_phys_birth = bp->blk_phys_birth;
426	sio->sio_birth = bp->blk_birth;
427	sio->sio_cksum = bp->blk_cksum;
428	sio->sio_nr_dvas = BP_GET_NDVAS(bp);
429
430	/*
431	 * Copy the DVAs to the sio. We need all copies of the block so
432	 * that the self healing code can use the alternate copies if the
433	 * first is corrupted. We want the DVA at index dva_i to be first
434	 * in the sio since this is the primary one that we want to issue.
435	 */
436	for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
437		sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
438	}
439}
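/*
 * For example (illustration only): with dva_i == 1 and a bp holding three
 * DVAs, the copy loop in bp2sio() stores them in the order 1, 2, 0, so
 * DVA 1 becomes the primary copy used for sorting and issuing while the
 * remaining copies stay available for self healing.
 */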
440
441int
442dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
443{
444	int err;
445	dsl_scan_t *scn;
446	spa_t *spa = dp->dp_spa;
447	uint64_t f;
448
449	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
450	scn->scn_dp = dp;
451
452	/*
453	 * It's possible that we're resuming a scan after a reboot so
454	 * make sure that the scan_async_destroying flag is initialized
455	 * appropriately.
456	 */
457	ASSERT(!scn->scn_async_destroying);
458	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
459	    SPA_FEATURE_ASYNC_DESTROY);
460
461	avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
462	    offsetof(scan_ds_t, sds_node));
463	avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
464	    sizeof (scan_prefetch_issue_ctx_t),
465	    offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
466
467	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
468	    "scrub_func", sizeof (uint64_t), 1, &f);
469	if (err == 0) {
470		/*
471		 * There was an old-style scrub in progress.  Restart a
472		 * new-style scrub from the beginning.
473		 */
474		scn->scn_restart_txg = txg;
475		zfs_dbgmsg("old-style scrub was in progress; "
476		    "restarting new-style scrub in txg %llu",
477		    (longlong_t)scn->scn_restart_txg);
478
479		/*
480		 * Load the queue obj from the old location so that it
481		 * can be freed by dsl_scan_done().
482		 */
483		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
484		    "scrub_queue", sizeof (uint64_t), 1,
485		    &scn->scn_phys.scn_queue_obj);
486	} else {
487		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
488		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
489		    &scn->scn_phys);
490
491		/*
492		 * Detect if the pool contains the signature of #2094.  If it
493		 * does, properly update the scn->scn_phys structure and notify
494		 * the administrator by setting an errata for the pool.
495		 */
496		if (err == EOVERFLOW) {
497			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
498			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
499			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
500			    (23 * sizeof (uint64_t)));
501
502			err = zap_lookup(dp->dp_meta_objset,
503			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
504			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
505			if (err == 0) {
506				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
507
508				if (overflow & ~DSF_VISIT_DS_AGAIN ||
509				    scn->scn_async_destroying) {
510					spa->spa_errata =
511					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
512					return (EOVERFLOW);
513				}
514
515				bcopy(zaptmp, &scn->scn_phys,
516				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
517				scn->scn_phys.scn_flags = overflow;
518
519				/* Required scrub already in progress. */
520				if (scn->scn_phys.scn_state == DSS_FINISHED ||
521				    scn->scn_phys.scn_state == DSS_CANCELED)
522					spa->spa_errata =
523					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
524			}
525		}
526
527		if (err == ENOENT)
528			return (0);
529		else if (err)
530			return (err);
531
532		/*
533		 * We might be restarting after a reboot, so jump the issued
534		 * counter to how far we've scanned. We know we're consistent
535		 * up to here.
536		 */
537		scn->scn_issued_before_pass = scn->scn_phys.scn_examined;
538
539		if (dsl_scan_is_running(scn) &&
540		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
541			/*
542			 * A new-type scrub was in progress on an old
543			 * pool, and the pool was accessed by old
544			 * software.  Restart from the beginning, since
545			 * the old software may have changed the pool in
546			 * the meantime.
547			 */
548			scn->scn_restart_txg = txg;
549			zfs_dbgmsg("new-style scrub was modified "
550			    "by old software; restarting in txg %llu",
551			    (longlong_t)scn->scn_restart_txg);
552		}
553	}
554
555	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
556
557	/* reload the queue into the in-core state */
558	if (scn->scn_phys.scn_queue_obj != 0) {
559		zap_cursor_t zc;
560		zap_attribute_t za;
561
562		for (zap_cursor_init(&zc, dp->dp_meta_objset,
563		    scn->scn_phys.scn_queue_obj);
564		    zap_cursor_retrieve(&zc, &za) == 0;
565		    (void) zap_cursor_advance(&zc)) {
566			scan_ds_queue_insert(scn,
567			    zfs_strtonum(za.za_name, NULL),
568			    za.za_first_integer);
569		}
570		zap_cursor_fini(&zc);
571	}
572
573	spa_scan_stat_init(spa);
574	return (0);
575}
576
577void
578dsl_scan_fini(dsl_pool_t *dp)
579{
580	if (dp->dp_scan != NULL) {
581		dsl_scan_t *scn = dp->dp_scan;
582
583		if (scn->scn_taskq != NULL)
584			taskq_destroy(scn->scn_taskq);
585		scan_ds_queue_clear(scn);
586		avl_destroy(&scn->scn_queue);
587		avl_destroy(&scn->scn_prefetch_queue);
588
589		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
590		dp->dp_scan = NULL;
591	}
592}
593
594static boolean_t
595dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
596{
597	return (scn->scn_restart_txg != 0 &&
598	    scn->scn_restart_txg <= tx->tx_txg);
599}
600
601boolean_t
602dsl_scan_scrubbing(const dsl_pool_t *dp)
603{
604	dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
605
606	return (scn_phys->scn_state == DSS_SCANNING &&
607	    scn_phys->scn_func == POOL_SCAN_SCRUB);
608}
609
610boolean_t
611dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
612{
613	return (dsl_scan_scrubbing(scn->scn_dp) &&
614	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
615}
616
617/*
618 * Writes out a persistent dsl_scan_phys_t record to the pool directory.
619 * Because the block sorting algorithm may be running, we do not always
620 * want to write out the record, only when it is "safe" to do so. This safety
621 * condition is achieved by making sure that the sorting queues are empty
622 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state
623 * is inconsistent with how much actual scanning progress has been made. The
624 * kind of sync to be performed is specified by the sync_type argument. If the
625 * sync is optional, we only sync if the queues are empty. If the sync is
626 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The
627 * third possible state is a "cached" sync. This is done in response to:
628 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
629 *	destroyed, so we wouldn't be able to restart scanning from it.
630 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
631 *	superseded by a newer snapshot.
632 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
633 *	swapped with its clone.
634 * In all cases, a cached sync simply rewrites the last record we've written,
635 * just slightly modified. For the modifications that are performed to the
636 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
637 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
638 */
639static void
640dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
641{
642	int i;
643	spa_t *spa = scn->scn_dp->dp_spa;
644
645	ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
646	if (scn->scn_bytes_pending == 0) {
647		for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
648			vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
649			dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
650
651			if (q == NULL)
652				continue;
653
654			mutex_enter(&vd->vdev_scan_io_queue_lock);
655			ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
656			ASSERT3P(avl_first(&q->q_exts_by_size), ==, NULL);
657			ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
658			mutex_exit(&vd->vdev_scan_io_queue_lock);
659		}
660
661		if (scn->scn_phys.scn_queue_obj != 0)
662			scan_ds_queue_sync(scn, tx);
663		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
664		    DMU_POOL_DIRECTORY_OBJECT,
665		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
666		    &scn->scn_phys, tx));
667		bcopy(&scn->scn_phys, &scn->scn_phys_cached,
668		    sizeof (scn->scn_phys));
669
670		if (scn->scn_checkpointing)
671			zfs_dbgmsg("finish scan checkpoint");
672
673		scn->scn_checkpointing = B_FALSE;
674		scn->scn_last_checkpoint = ddi_get_lbolt();
675	} else if (sync_type == SYNC_CACHED) {
676		VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
677		    DMU_POOL_DIRECTORY_OBJECT,
678		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
679		    &scn->scn_phys_cached, tx));
680	}
681}
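/*
 * Illustrative note on the callers elsewhere in this file: the scan setup
 * and cancel paths sync with SYNC_MANDATORY (the sorting queues must be
 * empty at those points), while the scrub pause/resume path syncs with
 * SYNC_CACHED because sorted I/O may still be outstanding.
 */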
682
683/* ARGSUSED */
684static int
685dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
686{
687	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
688
689	if (dsl_scan_is_running(scn))
690		return (SET_ERROR(EBUSY));
691
692	return (0);
693}
694
695static void
696dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
697{
698	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
699	pool_scan_func_t *funcp = arg;
700	dmu_object_type_t ot = 0;
701	dsl_pool_t *dp = scn->scn_dp;
702	spa_t *spa = dp->dp_spa;
703
704	ASSERT(!dsl_scan_is_running(scn));
705	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
706	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
707	scn->scn_phys.scn_func = *funcp;
708	scn->scn_phys.scn_state = DSS_SCANNING;
709	scn->scn_phys.scn_min_txg = 0;
710	scn->scn_phys.scn_max_txg = tx->tx_txg;
711	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
712	scn->scn_phys.scn_start_time = gethrestime_sec();
713	scn->scn_phys.scn_errors = 0;
714	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
715	scn->scn_issued_before_pass = 0;
716	scn->scn_restart_txg = 0;
717	scn->scn_done_txg = 0;
718	scn->scn_last_checkpoint = 0;
719	scn->scn_checkpointing = B_FALSE;
720	spa_scan_stat_init(spa);
721
722	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
723		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
724
725		/* rewrite all disk labels */
726		vdev_config_dirty(spa->spa_root_vdev);
727
728		if (vdev_resilver_needed(spa->spa_root_vdev,
729		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
730			spa_event_notify(spa, NULL, NULL,
731			    ESC_ZFS_RESILVER_START);
732		} else {
733			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
734		}
735
736		spa->spa_scrub_started = B_TRUE;
737		/*
738		 * If this is an incremental scrub, limit the DDT scrub phase
739		 * to just the auto-ditto class (for correctness); the rest
740		 * of the scrub should go faster using top-down pruning.
741		 */
742		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
743			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
744
745	}
746
747	/* back to the generic stuff */
748
749	if (dp->dp_blkstats == NULL) {
750		dp->dp_blkstats =
751		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
752		mutex_init(&dp->dp_blkstats->zab_lock, NULL,
753		    MUTEX_DEFAULT, NULL);
754	}
755	bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));
756
757	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
758		ot = DMU_OT_ZAP_OTHER;
759
760	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
761	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
762
763	bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
764
765	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
766
767	spa_history_log_internal(spa, "scan setup", tx,
768	    "func=%u mintxg=%llu maxtxg=%llu",
769	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
770}
771
772/*
773 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
774 * Can also be called to resume a paused scrub.
775 */
776int
777dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
778{
779	spa_t *spa = dp->dp_spa;
780	dsl_scan_t *scn = dp->dp_scan;
781
782	/*
783	 * Purge all vdev caches and probe all devices.  We do this here
784	 * rather than in sync context because this requires a writer lock
785	 * on the spa_config lock, which we can't do from sync context.  The
786	 * spa_scrub_reopen flag indicates that vdev_open() should not
787	 * attempt to start another scrub.
788	 */
789	spa_vdev_state_enter(spa, SCL_NONE);
790	spa->spa_scrub_reopen = B_TRUE;
791	vdev_reopen(spa->spa_root_vdev);
792	spa->spa_scrub_reopen = B_FALSE;
793	(void) spa_vdev_state_exit(spa, NULL, 0);
794
795	if (func == POOL_SCAN_RESILVER) {
796		dsl_resilver_restart(spa->spa_dsl_pool, 0);
797		return (0);
798	}
799
800	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
801		/* got scrub start cmd, resume paused scrub */
802		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
803		    POOL_SCRUB_NORMAL);
804		if (err == 0) {
805			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
806			return (ECANCELED);
807		}
808		return (SET_ERROR(err));
809	}
810
811	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
812	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
813}
814
815/*
816 * Sets the resilver defer flag to B_FALSE on all leaf devs under vd. Returns
817 * B_TRUE if we have devices that need to be resilvered and are available to
818 * accept resilver I/Os.
819 */
820static boolean_t
821dsl_scan_clear_deferred(vdev_t *vd, dmu_tx_t *tx)
822{
823	boolean_t resilver_needed = B_FALSE;
824	spa_t *spa = vd->vdev_spa;
825
826	for (int c = 0; c < vd->vdev_children; c++) {
827		resilver_needed |=
828		    dsl_scan_clear_deferred(vd->vdev_child[c], tx);
829	}
830
831	if (vd == spa->spa_root_vdev &&
832	    spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
833		spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
834		vdev_config_dirty(vd);
835		spa->spa_resilver_deferred = B_FALSE;
836		return (resilver_needed);
837	}
838
839	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
840	    !vd->vdev_ops->vdev_op_leaf)
841		return (resilver_needed);
842
843	if (vd->vdev_resilver_deferred)
844		vd->vdev_resilver_deferred = B_FALSE;
845
846	return (!vdev_is_dead(vd) && !vd->vdev_offline &&
847	    vdev_resilver_needed(vd, NULL, NULL));
848}
849
850/* ARGSUSED */
851static void
852dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
853{
854	static const char *old_names[] = {
855		"scrub_bookmark",
856		"scrub_ddt_bookmark",
857		"scrub_ddt_class_max",
858		"scrub_queue",
859		"scrub_min_txg",
860		"scrub_max_txg",
861		"scrub_func",
862		"scrub_errors",
863		NULL
864	};
865
866	dsl_pool_t *dp = scn->scn_dp;
867	spa_t *spa = dp->dp_spa;
868	int i;
869
870	/* Remove any remnants of an old-style scrub. */
871	for (i = 0; old_names[i]; i++) {
872		(void) zap_remove(dp->dp_meta_objset,
873		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
874	}
875
876	if (scn->scn_phys.scn_queue_obj != 0) {
877		VERIFY0(dmu_object_free(dp->dp_meta_objset,
878		    scn->scn_phys.scn_queue_obj, tx));
879		scn->scn_phys.scn_queue_obj = 0;
880	}
881	scan_ds_queue_clear(scn);
882
883	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
884
885	/*
886	 * If we were "restarted" from a stopped state, don't bother
887	 * with anything else.
888	 */
889	if (!dsl_scan_is_running(scn)) {
890		ASSERT(!scn->scn_is_sorted);
891		return;
892	}
893
894	if (scn->scn_is_sorted) {
895		scan_io_queues_destroy(scn);
896		scn->scn_is_sorted = B_FALSE;
897
898		if (scn->scn_taskq != NULL) {
899			taskq_destroy(scn->scn_taskq);
900			scn->scn_taskq = NULL;
901		}
902	}
903
904	scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
905
906	if (dsl_scan_restarting(scn, tx))
907		spa_history_log_internal(spa, "scan aborted, restarting", tx,
908		    "errors=%llu", spa_get_errlog_size(spa));
909	else if (!complete)
910		spa_history_log_internal(spa, "scan cancelled", tx,
911		    "errors=%llu", spa_get_errlog_size(spa));
912	else
913		spa_history_log_internal(spa, "scan done", tx,
914		    "errors=%llu", spa_get_errlog_size(spa));
915
916	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
917		spa->spa_scrub_started = B_FALSE;
918		spa->spa_scrub_active = B_FALSE;
919
920		/*
921		 * If the scrub/resilver completed, update all DTLs to
922		 * reflect this.  Whether it succeeded or not, vacate
923		 * all temporary scrub DTLs.
924		 *
925		 * As the scrub does not currently support traversing
926		 * data that have been freed but are part of a checkpoint,
927		 * we don't mark the scrub as done in the DTLs, since faults
928		 * may still exist in those vdevs.
929		 */
930		if (complete &&
931		    !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
932			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
933			    scn->scn_phys.scn_max_txg, B_TRUE);
934
935			spa_event_notify(spa, NULL, NULL,
936			    scn->scn_phys.scn_min_txg ?
937			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
938		} else {
939			vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
940			    0, B_TRUE);
941		}
942		spa_errlog_rotate(spa);
943
944		/*
945		 * We may have finished replacing a device.
946		 * Let the async thread assess this and handle the detach.
947		 */
948		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
949
950		/*
951		 * Clear any deferred_resilver flags in the config.
952		 * If there are drives that need resilvering, kick
953		 * off an asynchronous request to start resilver.
954		 * dsl_scan_clear_deferred() may update the config
955		 * before the resilver can restart. In the event of
956		 * a crash during this period, the spa loading code
957		 * will find the drives that need to be resilvered
958		 * when the machine reboots and start the resilver then.
959		 */
960		boolean_t resilver_needed =
961		    dsl_scan_clear_deferred(spa->spa_root_vdev, tx);
962		if (resilver_needed) {
963			spa_history_log_internal(spa,
964			    "starting deferred resilver", tx,
965			    "errors=%llu", spa_get_errlog_size(spa));
966			spa_async_request(spa, SPA_ASYNC_RESILVER);
967		}
968	}
969
970	scn->scn_phys.scn_end_time = gethrestime_sec();
971
972	ASSERT(!dsl_scan_is_running(scn));
973}
974
975/* ARGSUSED */
976static int
977dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
978{
979	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
980
981	if (!dsl_scan_is_running(scn))
982		return (SET_ERROR(ENOENT));
983	return (0);
984}
985
986/* ARGSUSED */
987static void
988dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
989{
990	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
991
992	dsl_scan_done(scn, B_FALSE, tx);
993	dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
994	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
995}
996
997int
998dsl_scan_cancel(dsl_pool_t *dp)
999{
1000	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
1001	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
1002}
1003
1004static int
1005dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
1006{
1007	pool_scrub_cmd_t *cmd = arg;
1008	dsl_pool_t *dp = dmu_tx_pool(tx);
1009	dsl_scan_t *scn = dp->dp_scan;
1010
1011	if (*cmd == POOL_SCRUB_PAUSE) {
1012		/* can't pause a scrub when there is no in-progress scrub */
1013		if (!dsl_scan_scrubbing(dp))
1014			return (SET_ERROR(ENOENT));
1015
1016		/* can't pause a paused scrub */
1017		if (dsl_scan_is_paused_scrub(scn))
1018			return (SET_ERROR(EBUSY));
1019	} else if (*cmd != POOL_SCRUB_NORMAL) {
1020		return (SET_ERROR(ENOTSUP));
1021	}
1022
1023	return (0);
1024}
1025
1026static void
1027dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
1028{
1029	pool_scrub_cmd_t *cmd = arg;
1030	dsl_pool_t *dp = dmu_tx_pool(tx);
1031	spa_t *spa = dp->dp_spa;
1032	dsl_scan_t *scn = dp->dp_scan;
1033
1034	if (*cmd == POOL_SCRUB_PAUSE) {
1035		/* pause the in-progress scrub (validated in the check func) */
1036		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
1037		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
1038		scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
1039		dsl_scan_sync_state(scn, tx, SYNC_CACHED);
1040		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
1041	} else {
1042		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
1043		if (dsl_scan_is_paused_scrub(scn)) {
1044			/*
1045			 * We need to keep track of how much time we spend
1046			 * paused per pass so that we can adjust the scrub rate
1047			 * shown in the output of 'zpool status'.
1048			 */
1049			spa->spa_scan_pass_scrub_spent_paused +=
1050			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
1051			spa->spa_scan_pass_scrub_pause = 0;
1052			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
1053			scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
1054			dsl_scan_sync_state(scn, tx, SYNC_CACHED);
1055		}
1056	}
1057}
1058
1059/*
1060 * Set scrub pause/resume state if it makes sense to do so
1061 */
1062int
1063dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
1064{
1065	return (dsl_sync_task(spa_name(dp->dp_spa),
1066	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
1067	    ZFS_SPACE_CHECK_RESERVED));
1068}
1069
1070
1071/* start a new scan, or restart an existing one. */
1072void
1073dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
1074{
1075	if (txg == 0) {
1076		dmu_tx_t *tx;
1077		tx = dmu_tx_create_dd(dp->dp_mos_dir);
1078		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
1079
1080		txg = dmu_tx_get_txg(tx);
1081		dp->dp_scan->scn_restart_txg = txg;
1082		dmu_tx_commit(tx);
1083	} else {
1084		dp->dp_scan->scn_restart_txg = txg;
1085	}
1086	zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t)txg);
1087}
1088
1089void
1090dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
1091{
1092	zio_free(dp->dp_spa, txg, bp);
1093}
1094
1095void
1096dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
1097{
1098	ASSERT(dsl_pool_sync_context(dp));
1099	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
1100}
1101
1102static int
1103scan_ds_queue_compare(const void *a, const void *b)
1104{
1105	const scan_ds_t *sds_a = a, *sds_b = b;
1106
1107	if (sds_a->sds_dsobj < sds_b->sds_dsobj)
1108		return (-1);
1109	if (sds_a->sds_dsobj == sds_b->sds_dsobj)
1110		return (0);
1111	return (1);
1112}
1113
1114static void
1115scan_ds_queue_clear(dsl_scan_t *scn)
1116{
1117	void *cookie = NULL;
1118	scan_ds_t *sds;
1119	while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
1120		kmem_free(sds, sizeof (*sds));
1121	}
1122}
1123
1124static boolean_t
1125scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
1126{
1127	scan_ds_t srch, *sds;
1128
1129	srch.sds_dsobj = dsobj;
1130	sds = avl_find(&scn->scn_queue, &srch, NULL);
1131	if (sds != NULL && txg != NULL)
1132		*txg = sds->sds_txg;
1133	return (sds != NULL);
1134}
1135
1136static void
1137scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
1138{
1139	scan_ds_t *sds;
1140	avl_index_t where;
1141
1142	sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
1143	sds->sds_dsobj = dsobj;
1144	sds->sds_txg = txg;
1145
1146	VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
1147	avl_insert(&scn->scn_queue, sds, where);
1148}
1149
1150static void
1151scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
1152{
1153	scan_ds_t srch, *sds;
1154
1155	srch.sds_dsobj = dsobj;
1156
1157	sds = avl_find(&scn->scn_queue, &srch, NULL);
1158	VERIFY(sds != NULL);
1159	avl_remove(&scn->scn_queue, sds);
1160	kmem_free(sds, sizeof (*sds));
1161}
1162
1163static void
1164scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
1165{
1166	dsl_pool_t *dp = scn->scn_dp;
1167	spa_t *spa = dp->dp_spa;
1168	dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
1169	    DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
1170
1171	ASSERT0(scn->scn_bytes_pending);
1172	ASSERT(scn->scn_phys.scn_queue_obj != 0);
1173
1174	VERIFY0(dmu_object_free(dp->dp_meta_objset,
1175	    scn->scn_phys.scn_queue_obj, tx));
1176	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
1177	    DMU_OT_NONE, 0, tx);
1178	for (scan_ds_t *sds = avl_first(&scn->scn_queue);
1179	    sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
1180		VERIFY0(zap_add_int_key(dp->dp_meta_objset,
1181		    scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
1182		    sds->sds_txg, tx));
1183	}
1184}
1185
1186/*
1187 * Computes the memory limit state that we're currently in. A sorted scan
1188 * needs quite a bit of memory to hold the sorting queue, so we need to
1189 * reasonably constrain the size so it doesn't impact overall system
1190 * performance. We compute two limits:
1191 * 1) Hard memory limit: if the amount of memory used by the sorting
1192 *	queues on a pool gets above this value, we stop the metadata
1193 *	scanning portion and start issuing the queued up and sorted
1194 *	I/Os to reduce memory usage.
1195 *	This limit is calculated as a fraction of physmem (by default 5%).
1196 *	We constrain the lower bound of the hard limit to an absolute
1197 *	minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
1198 *	the upper bound to 5% of the total pool size - no chance we'll
1199 *	ever need that much memory, but just to keep the value in check.
1200 * 2) Soft memory limit: once we hit the hard memory limit, we start
1201 *	issuing I/O to reduce queue memory usage, but we don't want to
1202 *	completely empty out the queues, since we might be able to find I/Os
1203 *	that will fill in the gaps of our non-sequential IOs at some point
1204 *	in the future. So we stop issuing I/Os once the amount of memory
1205 *	used drops below the soft limit, at which point we go back to
1206 *	scanning metadata.
1207 *
1208 *	This limit is calculated by subtracting a fraction of the hard
1209 *	limit from the hard limit. By default this fraction is 5%, so
1210 *	the soft limit is 95% of the hard limit. We cap the size of the
1211 *	difference between the hard and soft limits at an absolute
1212 *	maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
1213 *	sufficient to not cause too frequent switching between the
1214 *	metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
1215 *	worth of queues is about 1.2 GiB of on-pool data, so scanning
1216 *	that should take at least a decent fraction of a second).
1217 */
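/*
 * A worked example with the default tunables (illustration only): on a
 * system with 16 GiB of physmem, and assuming the 5%-of-allocated-space
 * cap does not apply, the hard limit is
 * 16 GiB / zfs_scan_mem_lim_fact (20) ~= 819 MiB and the soft limit is
 * 819 MiB - MIN(819 MiB / 20, 128 MiB) ~= 778 MiB.
 */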
1218static boolean_t
1219dsl_scan_should_clear(dsl_scan_t *scn)
1220{
1221	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
1222	uint64_t mlim_hard, mlim_soft, mused;
1223	uint64_t alloc = metaslab_class_get_alloc(spa_normal_class(
1224	    scn->scn_dp->dp_spa));
1225
1226	mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
1227	    zfs_scan_mem_lim_min);
1228	mlim_hard = MIN(mlim_hard, alloc / 20);
1229	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
1230	    zfs_scan_mem_lim_soft_max);
1231	mused = 0;
1232	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
1233		vdev_t *tvd = rvd->vdev_child[i];
1234		dsl_scan_io_queue_t *queue;
1235
1236		mutex_enter(&tvd->vdev_scan_io_queue_lock);
1237		queue = tvd->vdev_scan_io_queue;
1238		if (queue != NULL) {
1239			/* # extents in exts_by_size = # in exts_by_addr */
1240			mused += avl_numnodes(&queue->q_exts_by_size) *
1241			    sizeof (range_seg_t) + queue->q_sio_memused;
1242		}
1243		mutex_exit(&tvd->vdev_scan_io_queue_lock);
1244	}
1245
1246	dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
1247
1248	if (mused == 0)
1249		ASSERT0(scn->scn_bytes_pending);
1250
1251	/*
1252	 * If we are above our hard limit, we need to clear out memory.
1253	 * If we are below our soft limit, we need to accumulate sequential IOs.
1254	 * Otherwise, we should keep doing whatever we are currently doing.
1255	 */
1256	if (mused >= mlim_hard)
1257		return (B_TRUE);
1258	else if (mused < mlim_soft)
1259		return (B_FALSE);
1260	else
1261		return (scn->scn_clearing);
1262}
1263
1264static boolean_t
1265dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
1266{
1267	/* we never skip user/group accounting objects */
1268	if (zb && (int64_t)zb->zb_object < 0)
1269		return (B_FALSE);
1270
1271	if (scn->scn_suspending)
1272		return (B_TRUE); /* we're already suspending */
1273
1274	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
1275		return (B_FALSE); /* we're resuming */
1276
1277	/* We only know how to resume from level-0 blocks. */
1278	if (zb && zb->zb_level != 0)
1279		return (B_FALSE);
1280
1281	/*
1282	 * We suspend if:
1283	 *  - we have scanned for at least the minimum time (default 1 sec
1284	 *    for scrub, 3 sec for resilver), and either we have sufficient
1285	 *    dirty data that we are starting to write more quickly
1286	 *    (default 30%), or someone is explicitly waiting for this txg
1287	 *    to complete.
1288	 *  or
1289	 *  - the spa is shutting down because this pool is being exported
1290	 *    or the machine is rebooting.
1291	 *  or
1292	 *  - the scan queue has reached its memory use limit
1293	 */
1294	hrtime_t curr_time_ns = gethrtime();
1295	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
1296	uint64_t sync_time_ns = curr_time_ns -
1297	    scn->scn_dp->dp_spa->spa_sync_starttime;
1298
1299	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
1300	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
1301	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
1302
1303	if ((NSEC2MSEC(scan_time_ns) > mintime &&
1304	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
1305	    txg_sync_waiting(scn->scn_dp) ||
1306	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
1307	    spa_shutting_down(scn->scn_dp->dp_spa) ||
1308	    (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
1309		if (zb) {
1310			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
1311			    (longlong_t)zb->zb_objset,
1312			    (longlong_t)zb->zb_object,
1313			    (longlong_t)zb->zb_level,
1314			    (longlong_t)zb->zb_blkid);
1315			scn->scn_phys.scn_bookmark = *zb;
1316		} else {
1317			dsl_scan_phys_t *scnp = &scn->scn_phys;
1318
1319			dprintf("suspending at DDT bookmark "
1320			    "%llx/%llx/%llx/%llx\n",
1321			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
1322			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
1323			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
1324			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
1325		}
1326		scn->scn_suspending = B_TRUE;
1327		return (B_TRUE);
1328	}
1329	return (B_FALSE);
1330}
1331
1332typedef struct zil_scan_arg {
1333	dsl_pool_t	*zsa_dp;
1334	zil_header_t	*zsa_zh;
1335} zil_scan_arg_t;
1336
1337/* ARGSUSED */
1338static int
1339dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
1340{
1341	zil_scan_arg_t *zsa = arg;
1342	dsl_pool_t *dp = zsa->zsa_dp;
1343	dsl_scan_t *scn = dp->dp_scan;
1344	zil_header_t *zh = zsa->zsa_zh;
1345	zbookmark_phys_t zb;
1346
1347	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
1348		return (0);
1349
1350	/*
1351	 * One block (the "stubby") may have been allocated a long time ago; we
1352	 * want to visit that one because it has been allocated
1353	 * (on-disk) even if it hasn't been claimed (even though for
1354	 * scrub there's nothing to do to it).
1355	 */
1356	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
1357		return (0);
1358
1359	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1360	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
1361
1362	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
1363	return (0);
1364}
1365
1366/* ARGSUSED */
1367static int
1368dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
1369{
1370	if (lrc->lrc_txtype == TX_WRITE) {
1371		zil_scan_arg_t *zsa = arg;
1372		dsl_pool_t *dp = zsa->zsa_dp;
1373		dsl_scan_t *scn = dp->dp_scan;
1374		zil_header_t *zh = zsa->zsa_zh;
1375		lr_write_t *lr = (lr_write_t *)lrc;
1376		blkptr_t *bp = &lr->lr_blkptr;
1377		zbookmark_phys_t zb;
1378
1379		if (BP_IS_HOLE(bp) ||
1380		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
1381			return (0);
1382
1383		/*
1384		 * birth can be < claim_txg if this record's txg is
1385		 * already txg sync'ed (but this log block contains
1386		 * other records that are not synced)
1387		 */
1388		if (claim_txg == 0 || bp->blk_birth < claim_txg)
1389			return (0);
1390
1391		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1392		    lr->lr_foid, ZB_ZIL_LEVEL,
1393		    lr->lr_offset / BP_GET_LSIZE(bp));
1394
1395		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
1396	}
1397	return (0);
1398}
1399
1400static void
1401dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
1402{
1403	uint64_t claim_txg = zh->zh_claim_txg;
1404	zil_scan_arg_t zsa = { dp, zh };
1405	zilog_t *zilog;
1406
1407	ASSERT(spa_writeable(dp->dp_spa));
1408
1409	/*
1410	 * We only want to visit blocks that have been claimed
1411	 * but not yet replayed.
1412	 */
1413	if (claim_txg == 0)
1414		return;
1415
1416	zilog = zil_alloc(dp->dp_meta_objset, zh);
1417
1418	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
1419	    claim_txg, B_FALSE);
1420
1421	zil_free(zilog);
1422}
1423
1424/*
1425 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
1426 * here is to sort the AVL tree by the order each block will be needed.
1427 */
1428static int
1429scan_prefetch_queue_compare(const void *a, const void *b)
1430{
1431	const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
1432	const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
1433	const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
1434
1435	return (zbookmark_compare(spc_a->spc_datablkszsec,
1436	    spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
1437	    spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
1438}
1439
1440static void
1441scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
1442{
1443	if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
1444		zfs_refcount_destroy(&spc->spc_refcnt);
1445		kmem_free(spc, sizeof (scan_prefetch_ctx_t));
1446	}
1447}
1448
1449static scan_prefetch_ctx_t *
1450scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
1451{
1452	scan_prefetch_ctx_t *spc;
1453
1454	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
1455	zfs_refcount_create(&spc->spc_refcnt);
1456	zfs_refcount_add(&spc->spc_refcnt, tag);
1457	spc->spc_scn = scn;
1458	if (dnp != NULL) {
1459		spc->spc_datablkszsec = dnp->dn_datablkszsec;
1460		spc->spc_indblkshift = dnp->dn_indblkshift;
1461		spc->spc_root = B_FALSE;
1462	} else {
1463		spc->spc_datablkszsec = 0;
1464		spc->spc_indblkshift = 0;
1465		spc->spc_root = B_TRUE;
1466	}
1467
1468	return (spc);
1469}
1470
1471static void
1472scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
1473{
1474	zfs_refcount_add(&spc->spc_refcnt, tag);
1475}
1476
1477static boolean_t
1478dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
1479    const zbookmark_phys_t *zb)
1480{
1481	zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
1482	dnode_phys_t tmp_dnp;
1483	dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
1484
1485	if (zb->zb_objset != last_zb->zb_objset)
1486		return (B_TRUE);
1487	if ((int64_t)zb->zb_object < 0)
1488		return (B_FALSE);
1489
1490	tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
1491	tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
1492
1493	if (zbookmark_subtree_completed(dnp, zb, last_zb))
1494		return (B_TRUE);
1495
1496	return (B_FALSE);
1497}
1498
1499static void
1500dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
1501{
1502	avl_index_t idx;
1503	dsl_scan_t *scn = spc->spc_scn;
1504	spa_t *spa = scn->scn_dp->dp_spa;
1505	scan_prefetch_issue_ctx_t *spic;
1506
1507	if (zfs_no_scrub_prefetch)
1508		return;
1509
1510	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
1511	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
1512	    BP_GET_TYPE(bp) != DMU_OT_OBJSET))
1513		return;
1514
1515	if (dsl_scan_check_prefetch_resume(spc, zb))
1516		return;
1517
1518	scan_prefetch_ctx_add_ref(spc, scn);
1519	spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
1520	spic->spic_spc = spc;
1521	spic->spic_bp = *bp;
1522	spic->spic_zb = *zb;
1523
1524	/*
1525	 * Add the IO to the queue of blocks to prefetch. This allows us to
1526	 * prioritize the blocks that the main traversal thread will need
1527	 * first.
1528	 */
1529	mutex_enter(&spa->spa_scrub_lock);
1530	if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
1531		/* this block is already queued for prefetch */
1532		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1533		scan_prefetch_ctx_rele(spc, scn);
1534		mutex_exit(&spa->spa_scrub_lock);
1535		return;
1536	}
1537
1538	avl_insert(&scn->scn_prefetch_queue, spic, idx);
1539	cv_broadcast(&spa->spa_scrub_io_cv);
1540	mutex_exit(&spa->spa_scrub_lock);
1541}
1542
1543static void
1544dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
1545    uint64_t objset, uint64_t object)
1546{
1547	int i;
1548	zbookmark_phys_t zb;
1549	scan_prefetch_ctx_t *spc;
1550
1551	if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1552		return;
1553
1554	SET_BOOKMARK(&zb, objset, object, 0, 0);
1555
1556	spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
1557
1558	for (i = 0; i < dnp->dn_nblkptr; i++) {
1559		zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
1560		zb.zb_blkid = i;
1561		dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
1562	}
1563
1564	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1565		zb.zb_level = 0;
1566		zb.zb_blkid = DMU_SPILL_BLKID;
1567		dsl_scan_prefetch(spc, &dnp->dn_spill, &zb);
1568	}
1569
1570	scan_prefetch_ctx_rele(spc, FTAG);
1571}
1572
1573void
1574dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1575    arc_buf_t *buf, void *private)
1576{
1577	scan_prefetch_ctx_t *spc = private;
1578	dsl_scan_t *scn = spc->spc_scn;
1579	spa_t *spa = scn->scn_dp->dp_spa;
1580
1581	/* broadcast that the IO has completed for rate limiting purposes */
1582	mutex_enter(&spa->spa_scrub_lock);
1583	ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
1584	spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
1585	cv_broadcast(&spa->spa_scrub_io_cv);
1586	mutex_exit(&spa->spa_scrub_lock);
1587
1588	/* if there was an error or we are done prefetching, just cleanup */
1589	if (buf == NULL || scn->scn_suspending)
1590		goto out;
1591
1592	if (BP_GET_LEVEL(bp) > 0) {
1593		int i;
1594		blkptr_t *cbp;
1595		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
1596		zbookmark_phys_t czb;
1597
1598		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
1599			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
1600			    zb->zb_level - 1, zb->zb_blkid * epb + i);
1601			dsl_scan_prefetch(spc, cbp, &czb);
1602		}
1603	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
1604		dnode_phys_t *cdnp;
1605		int i;
1606		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
1607
1608		for (i = 0, cdnp = buf->b_data; i < epb;
1609		    i += cdnp->dn_extra_slots + 1,
1610		    cdnp += cdnp->dn_extra_slots + 1) {
1611			dsl_scan_prefetch_dnode(scn, cdnp,
1612			    zb->zb_objset, zb->zb_blkid * epb + i);
1613		}
1614	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
1615		objset_phys_t *osp = buf->b_data;
1616
1617		dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
1618		    zb->zb_objset, DMU_META_DNODE_OBJECT);
1619
1620		if (OBJSET_BUF_HAS_USERUSED(buf)) {
1621			dsl_scan_prefetch_dnode(scn,
1622			    &osp->os_groupused_dnode, zb->zb_objset,
1623			    DMU_GROUPUSED_OBJECT);
1624			dsl_scan_prefetch_dnode(scn,
1625			    &osp->os_userused_dnode, zb->zb_objset,
1626			    DMU_USERUSED_OBJECT);
1627		}
1628	}
1629
1630out:
1631	if (buf != NULL)
1632		arc_buf_destroy(buf, private);
1633	scan_prefetch_ctx_rele(spc, scn);
1634}
1635
1636/* ARGSUSED */
1637static void
1638dsl_scan_prefetch_thread(void *arg)
1639{
1640	dsl_scan_t *scn = arg;
1641	spa_t *spa = scn->scn_dp->dp_spa;
1642	vdev_t *rvd = spa->spa_root_vdev;
1643	uint64_t maxinflight = rvd->vdev_children * zfs_top_maxinflight;
1644	scan_prefetch_issue_ctx_t *spic;
1645
1646	/* loop until we are told to stop */
1647	while (!scn->scn_prefetch_stop) {
1648		arc_flags_t flags = ARC_FLAG_NOWAIT |
1649		    ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
1650		int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
1651
1652		mutex_enter(&spa->spa_scrub_lock);
1653
1654		/*
1655		 * Wait until we have an IO to issue and are not above our
1656		 * maximum in flight limit.
1657		 */
1658		while (!scn->scn_prefetch_stop &&
1659		    (avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
1660		    spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
1661			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
1662		}
1663
1664		/* recheck if we should stop since we waited for the cv */
1665		if (scn->scn_prefetch_stop) {
1666			mutex_exit(&spa->spa_scrub_lock);
1667			break;
1668		}
1669
1670		/* remove the prefetch IO from the tree */
1671		spic = avl_first(&scn->scn_prefetch_queue);
1672		spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
1673		avl_remove(&scn->scn_prefetch_queue, spic);
1674
1675		mutex_exit(&spa->spa_scrub_lock);
1676
1677		if (BP_IS_PROTECTED(&spic->spic_bp)) {
1678			ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
1679			    BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
1680			ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
1681			zio_flags |= ZIO_FLAG_RAW;
1682		}
1683
1684		/* issue the prefetch asynchronously */
1685		(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
1686		    &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
1687		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
1688
1689		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1690	}
1691
1692	ASSERT(scn->scn_prefetch_stop);
1693
1694	/* free any prefetches we didn't get to complete */
1695	mutex_enter(&spa->spa_scrub_lock);
1696	while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
1697		avl_remove(&scn->scn_prefetch_queue, spic);
1698		scan_prefetch_ctx_rele(spic->spic_spc, scn);
1699		kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
1700	}
1701	ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
1702	mutex_exit(&spa->spa_scrub_lock);
1703}
1704
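/*
 * When resuming from a saved bookmark, returns B_TRUE if the block at zb
 * was already fully visited in a prior txg sync and can be skipped.  Once
 * we reach (or pass) the saved bookmark, it is zeroed so that suspend
 * checks take effect again.
 */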
1705static boolean_t
1706dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
1707    const zbookmark_phys_t *zb)
1708{
1709	/*
1710	 * We never skip over user/group accounting objects (obj<0)
1711	 */
1712	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
1713	    (int64_t)zb->zb_object >= 0) {
1714		/*
1715		 * If we already visited this bp & everything below (in
1716		 * a prior txg sync), don't bother doing it again.
1717		 */
1718		if (zbookmark_subtree_completed(dnp, zb,
1719		    &scn->scn_phys.scn_bookmark))
1720			return (B_TRUE);
1721
1722		/*
1723		 * If we found the block we're trying to resume from, or
1724		 * we went past it to a different object, zero it out to
1725		 * indicate that it's OK to start checking for suspending
1726		 * again.
1727		 */
1728		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
1729		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
1730			dprintf("resuming at %llx/%llx/%llx/%llx\n",
1731			    (longlong_t)zb->zb_objset,
1732			    (longlong_t)zb->zb_object,
1733			    (longlong_t)zb->zb_level,
1734			    (longlong_t)zb->zb_blkid);
1735			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
1736		}
1737	}
1738	return (B_FALSE);
1739}
1740
1741static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
1742    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
1743    dmu_objset_type_t ostype, dmu_tx_t *tx);
1744static void dsl_scan_visitdnode(
1745    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
1746    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
1747
1748/*
1749 * Recursively visit the blocks below bp (indirect blocks, dnode blocks
1750 * and objsets).  Return nonzero on i/o error.
1751 */
1752static int
1753dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
1754    dnode_phys_t *dnp, const blkptr_t *bp,
1755    const zbookmark_phys_t *zb, dmu_tx_t *tx)
1756{
1757	dsl_pool_t *dp = scn->scn_dp;
1758	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
1759	int err;
1760
1761	if (BP_GET_LEVEL(bp) > 0) {
1762		arc_flags_t flags = ARC_FLAG_WAIT;
1763		int i;
1764		blkptr_t *cbp;
1765		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
1766		arc_buf_t *buf;
1767
1768		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1769		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1770		if (err) {
1771			scn->scn_phys.scn_errors++;
1772			return (err);
1773		}
1774		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
1775			zbookmark_phys_t czb;
1776
1777			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
1778			    zb->zb_level - 1,
1779			    zb->zb_blkid * epb + i);
1780			dsl_scan_visitbp(cbp, &czb, dnp,
1781			    ds, scn, ostype, tx);
1782		}
1783		arc_buf_destroy(buf, &buf);
1784	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
1785		arc_flags_t flags = ARC_FLAG_WAIT;
1786		dnode_phys_t *cdnp;
1787		int i;
1788		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
1789		arc_buf_t *buf;
1790
1791		if (BP_IS_PROTECTED(bp)) {
1792			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
1793			zio_flags |= ZIO_FLAG_RAW;
1794		}
1795
1796		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1797		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1798		if (err) {
1799			scn->scn_phys.scn_errors++;
1800			return (err);
1801		}
1802		for (i = 0, cdnp = buf->b_data; i < epb;
1803		    i += cdnp->dn_extra_slots + 1,
1804		    cdnp += cdnp->dn_extra_slots + 1) {
1805			dsl_scan_visitdnode(scn, ds, ostype,
1806			    cdnp, zb->zb_blkid * epb + i, tx);
1807		}
1808
1809		arc_buf_destroy(buf, &buf);
1810	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
1811		arc_flags_t flags = ARC_FLAG_WAIT;
1812		objset_phys_t *osp;
1813		arc_buf_t *buf;
1814
1815		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
1816		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
1817		if (err) {
1818			scn->scn_phys.scn_errors++;
1819			return (err);
1820		}
1821
1822		osp = buf->b_data;
1823
1824		dsl_scan_visitdnode(scn, ds, osp->os_type,
1825		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
1826
1827		if (OBJSET_BUF_HAS_USERUSED(buf)) {
1828			/*
1829			 * We also always visit user/group accounting
1830			 * objects, and never skip them, even if we are
1831			 * suspending.  This is necessary so that the space
1832			 * deltas from this txg get integrated.
1833			 */
1834			dsl_scan_visitdnode(scn, ds, osp->os_type,
1835			    &osp->os_groupused_dnode,
1836			    DMU_GROUPUSED_OBJECT, tx);
1837			dsl_scan_visitdnode(scn, ds, osp->os_type,
1838			    &osp->os_userused_dnode,
1839			    DMU_USERUSED_OBJECT, tx);
1840		}
1841		arc_buf_destroy(buf, &buf);
1842	}
1843
1844	return (0);
1845}
1846
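/*
 * Visit each of a dnode's block pointers (and its spill block, if
 * present), continuing the traversal through dsl_scan_visitbp().
 */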
1847static void
1848dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
1849    dmu_objset_type_t ostype, dnode_phys_t *dnp,
1850    uint64_t object, dmu_tx_t *tx)
1851{
1852	int j;
1853
1854	for (j = 0; j < dnp->dn_nblkptr; j++) {
1855		zbookmark_phys_t czb;
1856
1857		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
1858		    dnp->dn_nlevels - 1, j);
1859		dsl_scan_visitbp(&dnp->dn_blkptr[j],
1860		    &czb, dnp, ds, scn, ostype, tx);
1861	}
1862
1863	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1864		zbookmark_phys_t czb;
1865		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
1866		    0, DMU_SPILL_BLKID);
1867		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
1868		    &czb, dnp, ds, scn, ostype, tx);
1869	}
1870}
1871
1872/*
1873 * The arguments are in this order because mdb can only print the
1874 * first 5; we want them to be useful.
1875 */
1876static void
1877dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
1878    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
1879    dmu_objset_type_t ostype, dmu_tx_t *tx)
1880{
1881	dsl_pool_t *dp = scn->scn_dp;
1882	blkptr_t *bp_toread = NULL;
1883
1884	if (dsl_scan_check_suspend(scn, zb))
1885		return;
1886
1887	if (dsl_scan_check_resume(scn, dnp, zb))
1888		return;
1889
1890	scn->scn_visited_this_txg++;
1891
1892	/*
1893	 * This debugging is commented out to conserve stack space.  This
1894	 * function is called recursively and the debugging adds several
1895	 * bytes to the stack for each call.  It can be commented back in
1896	 * if required to debug an issue in dsl_scan_visitbp().
1897	 *
1898	 * dprintf_bp(bp,
1899	 *	"visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
1900	 *	ds, ds ? ds->ds_object : 0,
1901	 *	zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
1902	 *	bp);
1903	 */
1904
1905	if (BP_IS_HOLE(bp)) {
1906		scn->scn_holes_this_txg++;
1907		return;
1908	}
1909
1910	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
1911		scn->scn_lt_min_this_txg++;
1912		return;
1913	}
1914
1915	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
1916	*bp_toread = *bp;
1917
1918	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
1919		goto out;
1920
1921	/*
1922	 * If dsl_scan_ddt() has already visited this block, it will have
1923	 * already done any translations or scrubbing, so don't call the
1924	 * callback again.
1925	 */
1926	if (ddt_class_contains(dp->dp_spa,
1927	    scn->scn_phys.scn_ddt_class_max, bp)) {
1928		scn->scn_ddt_contained_this_txg++;
1929		goto out;
1930	}
1931
1932	/*
1933	 * If this block is from the future (after cur_max_txg), then we
1934	 * are doing this on behalf of a deleted snapshot, and we will
1935	 * revisit the future block on the next pass of this dataset.
1936	 * Don't scan it now unless we need to because something
1937	 * under it was modified.
1938	 */
1939	if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
1940		scn->scn_gt_max_this_txg++;
1941		goto out;
1942	}
1943
1944	scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
1945
1946out:
1947	kmem_free(bp_toread, sizeof (blkptr_t));
1948}
1949
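/*
 * Begin traversal of a dataset (or of the MOS when ds is NULL) at its
 * root block pointer: set up the prefetch bookmark, queue a prefetch of
 * the root bp, and then visit it.
 */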
1950static void
1951dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
1952    dmu_tx_t *tx)
1953{
1954	zbookmark_phys_t zb;
1955	scan_prefetch_ctx_t *spc;
1956
1957	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1958	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1959
1960	if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
1961		SET_BOOKMARK(&scn->scn_prefetch_bookmark,
1962		    zb.zb_objset, 0, 0, 0);
1963	} else {
1964		scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
1965	}
1966
1967	scn->scn_objsets_visited_this_txg++;
1968
1969	spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
1970	dsl_scan_prefetch(spc, bp, &zb);
1971	scan_prefetch_ctx_rele(spc, FTAG);
1972
1973	dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
1974
1975	dprintf_ds(ds, "finished scan%s", "");
1976}
1977
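/*
 * Helper for dsl_scan_ds_destroyed(): if the given bookmark points at the
 * dataset being destroyed, redirect it to the next snapshot (for
 * snapshots) or mark the objset as destroyed.
 */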
1978static void
1979ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
1980{
1981	if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
1982		if (ds->ds_is_snapshot) {
1983			/*
1984			 * Note:
1985			 *  - scn_cur_{min,max}_txg stays the same.
1986			 *  - Setting the flag is not really necessary if
1987			 *    scn_cur_max_txg == scn_max_txg, because there
1988			 *    is nothing after this snapshot that we care
1989			 *    about.  However, we set it anyway and then
1990			 *    ignore it when we retraverse it in
1991			 *    dsl_scan_visitds().
1992			 */
1993			scn_phys->scn_bookmark.zb_objset =
1994			    dsl_dataset_phys(ds)->ds_next_snap_obj;
1995			zfs_dbgmsg("destroying ds %llu; currently traversing; "
1996			    "reset zb_objset to %llu",
1997			    (u_longlong_t)ds->ds_object,
1998			    (u_longlong_t)dsl_dataset_phys(ds)->
1999			    ds_next_snap_obj);
2000			scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
2001		} else {
2002			SET_BOOKMARK(&scn_phys->scn_bookmark,
2003			    ZB_DESTROYED_OBJSET, 0, 0, 0);
2004			zfs_dbgmsg("destroying ds %llu; currently traversing; "
2005			    "reset bookmark to -1,0,0,0",
2006			    (u_longlong_t)ds->ds_object);
2007		}
2008	}
2009}
2010
2011/*
2012 * Invoked when a dataset is destroyed. We need to make sure that:
2013 *
2014 * 1) If it is the dataset currently being scanned, we write a new
2015 *	dsl_scan_phys_t and mark the objset reference in it as
2016 *	destroyed.
2017 * 2) Remove it from the work queue, if it was present.
2018 *
2019 * If the dataset was actually a snapshot, instead of marking the dataset
2020 * as destroyed, we instead substitute the next snapshot in line.
2021 */
2022void
2023dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
2024{
2025	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2026	dsl_scan_t *scn = dp->dp_scan;
2027	uint64_t mintxg;
2028
2029	if (!dsl_scan_is_running(scn))
2030		return;
2031
2032	ds_destroyed_scn_phys(ds, &scn->scn_phys);
2033	ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
2034
2035	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
2036		scan_ds_queue_remove(scn, ds->ds_object);
2037		if (ds->ds_is_snapshot)
2038			scan_ds_queue_insert(scn,
2039			    dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
2040	}
2041
2042	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2043	    ds->ds_object, &mintxg) == 0) {
2044		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
2045		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2046		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
2047		if (ds->ds_is_snapshot) {
2048			/*
2049			 * We keep the same mintxg; it could be >
2050			 * ds_creation_txg if the previous snapshot was
2051			 * deleted too.
2052			 */
2053			VERIFY(zap_add_int_key(dp->dp_meta_objset,
2054			    scn->scn_phys.scn_queue_obj,
2055			    dsl_dataset_phys(ds)->ds_next_snap_obj,
2056			    mintxg, tx) == 0);
2057			zfs_dbgmsg("destroying ds %llu; in queue; "
2058			    "replacing with %llu",
2059			    (u_longlong_t)ds->ds_object,
2060			    (u_longlong_t)dsl_dataset_phys(ds)->
2061			    ds_next_snap_obj);
2062		} else {
2063			zfs_dbgmsg("destroying ds %llu; in queue; removing",
2064			    (u_longlong_t)ds->ds_object);
2065		}
2066	}
2067
2068	/*
2069	 * dsl_scan_sync() should be called after this, and should sync
2070	 * out our changed state, but just to be safe, do it here.
2071	 */
2072	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2073}
2074
2075static void
2076ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
2077{
2078	if (scn_bookmark->zb_objset == ds->ds_object) {
2079		scn_bookmark->zb_objset =
2080		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
2081		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
2082		    "reset zb_objset to %llu",
2083		    (u_longlong_t)ds->ds_object,
2084		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
2085	}
2086}
2087
2088/*
2089 * Called when a dataset is snapshotted. If we were currently traversing
2090 * this snapshot, we reset our bookmark to point at the newly created
2091 * snapshot. We also modify our work queue to remove the old snapshot and
2092 * replace with the new one.
2093 */
2094void
2095dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
2096{
2097	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2098	dsl_scan_t *scn = dp->dp_scan;
2099	uint64_t mintxg;
2100
2101	if (!dsl_scan_is_running(scn))
2102		return;
2103
2104	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
2105
2106	ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
2107	ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
2108
2109	if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
2110		scan_ds_queue_remove(scn, ds->ds_object);
2111		scan_ds_queue_insert(scn,
2112		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
2113	}
2114
2115	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2116	    ds->ds_object, &mintxg) == 0) {
2117		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2118		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
2119		VERIFY(zap_add_int_key(dp->dp_meta_objset,
2120		    scn->scn_phys.scn_queue_obj,
2121		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
2122		zfs_dbgmsg("snapshotting ds %llu; in queue; "
2123		    "replacing with %llu",
2124		    (u_longlong_t)ds->ds_object,
2125		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
2126	}
2127
2128	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2129}
2130
2131static void
2132ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
2133    zbookmark_phys_t *scn_bookmark)
2134{
2135	if (scn_bookmark->zb_objset == ds1->ds_object) {
2136		scn_bookmark->zb_objset = ds2->ds_object;
2137		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
2138		    "reset zb_objset to %llu",
2139		    (u_longlong_t)ds1->ds_object,
2140		    (u_longlong_t)ds2->ds_object);
2141	} else if (scn_bookmark->zb_objset == ds2->ds_object) {
2142		scn_bookmark->zb_objset = ds1->ds_object;
2143		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
2144		    "reset zb_objset to %llu",
2145		    (u_longlong_t)ds2->ds_object,
2146		    (u_longlong_t)ds1->ds_object);
2147	}
2148}
2149
2150/*
2151 * Called when a parent dataset and its clone are swapped. If we were
2152 * currently traversing the dataset, we need to switch to traversing the
2153 * newly promoted parent.
2154 */
2155void
2156dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
2157{
2158	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
2159	dsl_scan_t *scn = dp->dp_scan;
2160	uint64_t mintxg;
2161
2162	if (!dsl_scan_is_running(scn))
2163		return;
2164
2165	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
2166	ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
2167
2168	if (scan_ds_queue_contains(scn, ds1->ds_object, &mintxg)) {
2169		scan_ds_queue_remove(scn, ds1->ds_object);
2170		scan_ds_queue_insert(scn, ds2->ds_object, mintxg);
2171	}
2172	if (scan_ds_queue_contains(scn, ds2->ds_object, &mintxg)) {
2173		scan_ds_queue_remove(scn, ds2->ds_object);
2174		scan_ds_queue_insert(scn, ds1->ds_object, mintxg);
2175	}
2176
2177	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2178	    ds1->ds_object, &mintxg) == 0) {
2179		int err;
2180		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2181		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2182		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2183		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
2184		err = zap_add_int_key(dp->dp_meta_objset,
2185		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
2186		VERIFY(err == 0 || err == EEXIST);
2187		if (err == EEXIST) {
2188			/* Both were there to begin with */
2189			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
2190			    scn->scn_phys.scn_queue_obj,
2191			    ds1->ds_object, mintxg, tx));
2192		}
2193		zfs_dbgmsg("clone_swap ds %llu; in queue; "
2194		    "replacing with %llu",
2195		    (u_longlong_t)ds1->ds_object,
2196		    (u_longlong_t)ds2->ds_object);
2197	}
2198	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
2199	    ds2->ds_object, &mintxg) == 0) {
2200		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
2201		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
2202		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2203		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
2204		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
2205		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
2206		zfs_dbgmsg("clone_swap ds %llu; in queue; "
2207		    "replacing with %llu",
2208		    (u_longlong_t)ds2->ds_object,
2209		    (u_longlong_t)ds1->ds_object);
2210	}
2211
2212	dsl_scan_sync_state(scn, tx, SYNC_CACHED);
2213}
2214
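/*
 * dmu_objset_find_dp() callback used by dsl_scan_visitds(): for each
 * clone of the origin snapshot passed in arg, walk back to the earliest
 * dataset whose previous snapshot is that origin and add it to the scan
 * queue.
 */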
2215/* ARGSUSED */
2216static int
2217enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
2218{
2219	uint64_t originobj = *(uint64_t *)arg;
2220	dsl_dataset_t *ds;
2221	int err;
2222	dsl_scan_t *scn = dp->dp_scan;
2223
2224	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
2225		return (0);
2226
2227	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
2228	if (err)
2229		return (err);
2230
2231	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
2232		dsl_dataset_t *prev;
2233		err = dsl_dataset_hold_obj(dp,
2234		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2235
2236		dsl_dataset_rele(ds, FTAG);
2237		if (err)
2238			return (err);
2239		ds = prev;
2240	}
2241	scan_ds_queue_insert(scn, ds->ds_object,
2242	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2243	dsl_dataset_rele(ds, FTAG);
2244	return (0);
2245}
2246
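/*
 * Scan a single dataset: traverse its ZIL (for scrubs/resilvers on head
 * datasets), visit its root block pointer, and then add its next snapshot
 * and any clones to the work queue.
 */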
2247static void
2248dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
2249{
2250	dsl_pool_t *dp = scn->scn_dp;
2251	dsl_dataset_t *ds;
2252
2253	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
2254
2255	if (scn->scn_phys.scn_cur_min_txg >=
2256	    scn->scn_phys.scn_max_txg) {
2257		/*
2258		 * This can happen if this snapshot was created after the
2259		 * scan started, and we already completed a previous snapshot
2260		 * that was created after the scan started.  This snapshot
2261		 * only references blocks with:
2262		 *
2263		 *	birth < our ds_creation_txg
2264		 *	cur_min_txg is no less than ds_creation_txg.
2265		 *	We have already visited these blocks.
2266		 * or
2267		 *	birth > scn_max_txg
2268		 *	The scan requested not to visit these blocks.
2269		 *
2270		 * Subsequent snapshots (and clones) can reference our
2271		 * blocks, or blocks with even higher birth times.
2272		 * Therefore we do not need to visit them either,
2273		 * so we do not add them to the work queue.
2274		 *
2275		 * Note that checking for cur_min_txg >= cur_max_txg
2276		 * is not sufficient, because in that case we may need to
2277		 * visit subsequent snapshots.  This happens when min_txg > 0,
2278		 * which raises cur_min_txg.  In this case we will visit
2279		 * this dataset but skip all of its blocks, because the
2280		 * rootbp's birth time is < cur_min_txg.  Then we will
2281		 * add the next snapshots/clones to the work queue.
2282		 */
2283		char *dsname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
2284		dsl_dataset_name(ds, dsname);
2285		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
2286		    "cur_min_txg (%llu) >= max_txg (%llu)",
2287		    (longlong_t)dsobj, dsname,
2288		    (longlong_t)scn->scn_phys.scn_cur_min_txg,
2289		    (longlong_t)scn->scn_phys.scn_max_txg);
2290		kmem_free(dsname, MAXNAMELEN);
2291
2292		goto out;
2293	}
2294
2295	/*
2296	 * Only the ZIL in the head (non-snapshot) is valid. Even though
2297	 * snapshots can have ZIL block pointers (which may be the same
2298	 * BP as in the head), they must be ignored. In addition, $ORIGIN
2299 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
2300	 * need to look for a ZIL in it either. So we traverse the ZIL here,
2301	 * rather than in scan_recurse(), because the regular snapshot
2302	 * block-sharing rules don't apply to it.
2303	 */
2304	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds) &&
2305	    (dp->dp_origin_snap == NULL ||
2306	    ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
2307		objset_t *os;
2308		if (dmu_objset_from_ds(ds, &os) != 0) {
2309			goto out;
2310		}
2311		dsl_scan_zil(dp, &os->os_zil_header);
2312	}
2313
2314	/*
2315	 * Iterate over the bps in this ds.
2316	 */
2317	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2318	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2319	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
2320	rrw_exit(&ds->ds_bp_rwlock, FTAG);
2321
2322	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
2323	dsl_dataset_name(ds, dsname);
2324	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
2325	    "suspending=%u",
2326	    (longlong_t)dsobj, dsname,
2327	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
2328	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
2329	    (int)scn->scn_suspending);
2330	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
2331
2332	if (scn->scn_suspending)
2333		goto out;
2334
2335	/*
2336	 * We've finished this pass over this dataset.
2337	 */
2338
2339	/*
2340	 * If we did not completely visit this dataset, do another pass.
2341	 */
2342	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
2343		zfs_dbgmsg("incomplete pass; visiting again");
2344		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
2345		scan_ds_queue_insert(scn, ds->ds_object,
2346		    scn->scn_phys.scn_cur_max_txg);
2347		goto out;
2348	}
2349
2350	/*
2351	 * Add descendent datasets to work queue.
2352	 */
2353	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
2354		scan_ds_queue_insert(scn,
2355		    dsl_dataset_phys(ds)->ds_next_snap_obj,
2356		    dsl_dataset_phys(ds)->ds_creation_txg);
2357	}
2358	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
2359		boolean_t usenext = B_FALSE;
2360		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
2361			uint64_t count;
2362			/*
2363			 * A bug in a previous version of the code could
2364			 * cause upgrade_clones_cb() to not set
2365			 * ds_next_snap_obj when it should, leading to a
2366			 * missing entry.  Therefore we can only use the
2367			 * next_clones_obj when its count is correct.
2368			 */
2369			int err = zap_count(dp->dp_meta_objset,
2370			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
2371			if (err == 0 &&
2372			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
2373				usenext = B_TRUE;
2374		}
2375
2376		if (usenext) {
2377			zap_cursor_t zc;
2378			zap_attribute_t za;
2379			for (zap_cursor_init(&zc, dp->dp_meta_objset,
2380			    dsl_dataset_phys(ds)->ds_next_clones_obj);
2381			    zap_cursor_retrieve(&zc, &za) == 0;
2382			    (void) zap_cursor_advance(&zc)) {
2383				scan_ds_queue_insert(scn,
2384				    zfs_strtonum(za.za_name, NULL),
2385				    dsl_dataset_phys(ds)->ds_creation_txg);
2386			}
2387			zap_cursor_fini(&zc);
2388		} else {
2389			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2390			    enqueue_clones_cb, &ds->ds_object,
2391			    DS_FIND_CHILDREN));
2392		}
2393	}
2394
2395out:
2396	dsl_dataset_rele(ds, FTAG);
2397}
2398
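/*
 * dmu_objset_find_dp() callback: for each filesystem that is not a clone,
 * walk back to its oldest snapshot and add it to the scan queue (clones
 * are reached later through their origin).
 */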
2399/* ARGSUSED */
2400static int
2401enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
2402{
2403	dsl_dataset_t *ds;
2404	int err;
2405	dsl_scan_t *scn = dp->dp_scan;
2406
2407	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
2408	if (err)
2409		return (err);
2410
2411	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
2412		dsl_dataset_t *prev;
2413		err = dsl_dataset_hold_obj(dp,
2414		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2415		if (err) {
2416			dsl_dataset_rele(ds, FTAG);
2417			return (err);
2418		}
2419
2420		/*
2421		 * If this is a clone, we don't need to worry about it for now.
2422		 */
2423		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
2424			dsl_dataset_rele(ds, FTAG);
2425			dsl_dataset_rele(prev, FTAG);
2426			return (0);
2427		}
2428		dsl_dataset_rele(ds, FTAG);
2429		ds = prev;
2430	}
2431
2432	scan_ds_queue_insert(scn, ds->ds_object,
2433	    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2434	dsl_dataset_rele(ds, FTAG);
2435	return (0);
2436}
2437
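/*
 * Scan the block pointers of a single DDT entry that fall within the
 * scan's txg range.  Besides being called from dsl_scan_ddt() below, this
 * is invoked from DDT syncing when a block's replication class rises
 * while a scan is in progress (see the scrub/dedup comment below).
 */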
2438/* ARGSUSED */
2439void
2440dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
2441    ddt_entry_t *dde, dmu_tx_t *tx)
2442{
2443	const ddt_key_t *ddk = &dde->dde_key;
2444	ddt_phys_t *ddp = dde->dde_phys;
2445	blkptr_t bp;
2446	zbookmark_phys_t zb = { 0 };
2447	int p;
2448
2449	if (scn->scn_phys.scn_state != DSS_SCANNING)
2450		return;
2451
2452	/*
2453	 * This function is special because it is the only thing
2454	 * that can add scan_io_t's to the vdev scan queues from
2455	 * outside dsl_scan_sync(). For the most part this is ok
2456	 * as long as it is called from within syncing context.
2457	 * However, dsl_scan_sync() expects that no new sio's will
2458	 * be added between when all the work for a scan is done
2459	 * and the next txg when the scan is actually marked as
2460	 * completed. This check ensures we do not issue new sio's
2461	 * during this period.
2462	 */
2463	if (scn->scn_done_txg != 0)
2464		return;
2465
2466	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
2467		if (ddp->ddp_phys_birth == 0 ||
2468		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
2469			continue;
2470		ddt_bp_create(checksum, ddk, ddp, &bp);
2471
2472		scn->scn_visited_this_txg++;
2473		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
2474	}
2475}
2476
2477/*
2478 * Scrub/dedup interaction.
2479 *
2480 * If there are N references to a deduped block, we don't want to scrub it
2481 * N times -- ideally, we should scrub it exactly once.
2482 *
2483 * We leverage the fact that the dde's replication class (enum ddt_class)
2484 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
2485 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
2486 *
2487 * To prevent excess scrubbing, the scrub begins by walking the DDT
2488 * to find all blocks with refcnt > 1, and scrubs each of these once.
2489 * Since there are two replication classes which contain blocks with
2490 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
2491 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
2492 *
2493 * There would be nothing more to say if a block's refcnt couldn't change
2494 * during a scrub, but of course it can so we must account for changes
2495 * in a block's replication class.
2496 *
2497 * Here's an example of what can occur:
2498 *
2499 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
2500 * when visited during the top-down scrub phase, it will be scrubbed twice.
2501 * This negates our scrub optimization, but is otherwise harmless.
2502 *
2503 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
2504 * on each visit during the top-down scrub phase, it will never be scrubbed.
2505 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
2506 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
2507 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
2508 * while a scrub is in progress, it scrubs the block right then.
2509 */
2510static void
2511dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
2512{
2513	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
2514	ddt_entry_t dde = { 0 };
2515	int error;
2516	uint64_t n = 0;
2517
2518	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
2519		ddt_t *ddt;
2520
2521		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
2522			break;
2523		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
2524		    (longlong_t)ddb->ddb_class,
2525		    (longlong_t)ddb->ddb_type,
2526		    (longlong_t)ddb->ddb_checksum,
2527		    (longlong_t)ddb->ddb_cursor);
2528
2529		/* There should be no pending changes to the dedup table */
2530		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
2531		ASSERT(avl_first(&ddt->ddt_tree) == NULL);
2532
2533		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
2534		n++;
2535
2536		if (dsl_scan_check_suspend(scn, NULL))
2537			break;
2538	}
2539
2540	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; "
2541	    "suspending=%u", (longlong_t)n,
2542	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);
2543
2544	ASSERT(error == 0 || error == ENOENT);
2545	ASSERT(error != ENOENT ||
2546	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
2547}
2548
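/*
 * Highest txg we need to visit for this dataset: the scan's max txg,
 * clamped to the creation txg for snapshots.
 */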
2549static uint64_t
2550dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
2551{
2552	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
2553	if (ds->ds_is_snapshot)
2554		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
2555	return (smt);
2556}
2557
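/*
 * One pass of metadata traversal for this sync: scan the DDT classes that
 * can contain duplicated blocks first, then the MOS and $ORIGIN (or
 * resume whatever dataset we were suspended in), and finally keep pulling
 * datasets off the in-memory queue until we run out of work or need to
 * suspend.
 */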
2558static void
2559dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
2560{
2561	scan_ds_t *sds;
2562	dsl_pool_t *dp = scn->scn_dp;
2563
2564	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
2565	    scn->scn_phys.scn_ddt_class_max) {
2566		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
2567		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
2568		dsl_scan_ddt(scn, tx);
2569		if (scn->scn_suspending)
2570			return;
2571	}
2572
2573	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
2574		/* First do the MOS & ORIGIN */
2575
2576		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
2577		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
2578		dsl_scan_visit_rootbp(scn, NULL,
2579		    &dp->dp_meta_rootbp, tx);
2580		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
2581		if (scn->scn_suspending)
2582			return;
2583
2584		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
2585			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2586			    enqueue_cb, NULL, DS_FIND_CHILDREN));
2587		} else {
2588			dsl_scan_visitds(scn,
2589			    dp->dp_origin_snap->ds_object, tx);
2590		}
2591		ASSERT(!scn->scn_suspending);
2592	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
2593	    ZB_DESTROYED_OBJSET) {
2594		uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
2595		/*
2596		 * If we were suspended, continue from here. Note if the
2597		 * ds we were suspended on was deleted, the zb_objset may
2598		 * be -1, so we will skip this and find a new objset
2599		 * below.
2600		 */
2601		dsl_scan_visitds(scn, dsobj, tx);
2602		if (scn->scn_suspending)
2603			return;
2604	}
2605
2606	/*
2607	 * In case we suspended right at the end of the ds, zero the
2608	 * bookmark so we don't think that we're still trying to resume.
2609	 */
2610	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
2611
2612	/*
2613	 * Keep pulling things out of the dataset avl queue. Updates to the
2614	 * persistent zap-object-as-queue happen only at checkpoints.
2615	 */
2616	while ((sds = avl_first(&scn->scn_queue)) != NULL) {
2617		dsl_dataset_t *ds;
2618		uint64_t dsobj = sds->sds_dsobj;
2619		uint64_t txg = sds->sds_txg;
2620
2621		/* dequeue and free the ds from the queue */
2622		scan_ds_queue_remove(scn, dsobj);
2623		sds = NULL;	/* must not be touched after removal */
2624
2625		/* Set up min / max txg */
2626		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
2627		if (txg != 0) {
2628			scn->scn_phys.scn_cur_min_txg =
2629			    MAX(scn->scn_phys.scn_min_txg, txg);
2630		} else {
2631			scn->scn_phys.scn_cur_min_txg =
2632			    MAX(scn->scn_phys.scn_min_txg,
2633			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
2634		}
2635		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
2636		dsl_dataset_rele(ds, FTAG);
2637
2638		dsl_scan_visitds(scn, dsobj, tx);
2639		if (scn->scn_suspending)
2640			return;
2641	}
2642	/* No more objsets to fetch, we're done */
2643	scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
2644	ASSERT0(scn->scn_suspending);
2645}
2646
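/*
 * Count the readable leaf vdevs below vd that belong to the main pool
 * (log, spare and l2cache devices are excluded).
 */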
2647static uint64_t
2648dsl_scan_count_leaves(vdev_t *vd)
2649{
2650	uint64_t i, leaves = 0;
2651
2652	/* we only count leaves that belong to the main pool and are readable */
2653	if (vd->vdev_islog || vd->vdev_isspare ||
2654	    vd->vdev_isl2cache || !vdev_readable(vd))
2655		return (0);
2656
2657	if (vd->vdev_ops->vdev_op_leaf)
2658		return (1);
2659
2660	for (i = 0; i < vd->vdev_children; i++) {
2661		leaves += dsl_scan_count_leaves(vd->vdev_child[i]);
2662	}
2663
2664	return (leaves);
2665}
2666
2667
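/*
 * Per-txg accounting helpers: track the total size and count of zios and
 * extent segments issued from a queue during this txg, for the statistics
 * rolled up by dsl_scan_update_stats().
 */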
2668static void
2669scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
2670{
2671	int i;
2672	uint64_t cur_size = 0;
2673
2674	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
2675		cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
2676	}
2677
2678	q->q_total_zio_size_this_txg += cur_size;
2679	q->q_zios_this_txg++;
2680}
2681
2682static void
2683scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
2684    uint64_t end)
2685{
2686	q->q_total_seg_size_this_txg += end - start;
2687	q->q_segs_this_txg++;
2688}
2689
2690static boolean_t
2691scan_io_queue_check_suspend(dsl_scan_t *scn)
2692{
2693	/* See comment in dsl_scan_check_suspend() */
2694	uint64_t curr_time_ns = gethrtime();
2695	uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
2696	uint64_t sync_time_ns = curr_time_ns -
2697	    scn->scn_dp->dp_spa->spa_sync_starttime;
2698	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
2699	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
2700	    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
2701
2702	return ((NSEC2MSEC(scan_time_ns) > mintime &&
2703	    (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
2704	    txg_sync_waiting(scn->scn_dp) ||
2705	    NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
2706	    spa_shutting_down(scn->scn_dp->dp_spa));
2707}
2708
2709/*
2710 * Given a list of scan_io_t's in io_list, this issues the io's out to
2711 * disk. This consumes the io_list and frees the scan_io_t's. This is
2712 * called when emptying queues, either when we're up against the memory
2713 * limit or when we have finished scanning. Returns B_TRUE if we stopped
2714 * processing the list before we finished. Any zios that were not issued
2715 * will remain in the io_list.
2716 */
2717static boolean_t
2718scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
2719{
2720	dsl_scan_t *scn = queue->q_scn;
2721	scan_io_t *sio;
2722	int64_t bytes_issued = 0;
2723	boolean_t suspended = B_FALSE;
2724
2725	while ((sio = list_head(io_list)) != NULL) {
2726		blkptr_t bp;
2727
2728		if (scan_io_queue_check_suspend(scn)) {
2729			suspended = B_TRUE;
2730			break;
2731		}
2732
2733		sio2bp(sio, &bp);
2734		bytes_issued += SIO_GET_ASIZE(sio);
2735		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
2736		    &sio->sio_zb, queue);
2737		(void) list_remove_head(io_list);
2738		scan_io_queues_update_zio_stats(queue, &bp);
2739		sio_free(sio);
2740	}
2741
2742	atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);
2743
2744	return (suspended);
2745}
2746
2747/*
2748 * Given a range_seg_t (extent) and a list, this function passes over a
2749 * scan queue and gathers up the appropriate ios which fit into that
2750 * scan seg (starting from lowest LBA). At the end, we remove the segment
2751 * from the q_exts_by_addr range tree.
2752 */
2753static boolean_t
2754scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
2755{
2756	scan_io_t *srch_sio, *sio, *next_sio;
2757	avl_index_t idx;
2758	uint_t num_sios = 0;
2759	int64_t bytes_issued = 0;
2760
2761	ASSERT(rs != NULL);
2762	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
2763
2764	srch_sio = sio_alloc(1);
2765	srch_sio->sio_nr_dvas = 1;
2766	SIO_SET_OFFSET(srch_sio, rs->rs_start);
2767
2768	/*
2769	 * The exact start of the extent might not contain any matching zios,
2770	 * so if that's the case, examine the next one in the tree.
2771	 */
2772	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
2773	sio_free(srch_sio);
2774
2775	if (sio == NULL)
2776		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
2777
2778	while (sio != NULL &&
2779	    SIO_GET_OFFSET(sio) < rs->rs_end && num_sios <= 32) {
2780		ASSERT3U(SIO_GET_OFFSET(sio), >=, rs->rs_start);
2781		ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs->rs_end);
2782
2783		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
2784		avl_remove(&queue->q_sios_by_addr, sio);
2785		queue->q_sio_memused -= SIO_GET_MUSED(sio);
2786
2787		bytes_issued += SIO_GET_ASIZE(sio);
2788		num_sios++;
2789		list_insert_tail(list, sio);
2790		sio = next_sio;
2791	}
2792
2793	/*
2794	 * We limit the number of sios we process at once to 32 to avoid
2795	 * biting off more than we can chew. If we didn't take everything
2796	 * in the segment we update it to reflect the work we were able to
2797	 * complete. Otherwise, we remove it from the range tree entirely.
2798	 */
2799	if (sio != NULL && SIO_GET_OFFSET(sio) < rs->rs_end) {
2800		range_tree_adjust_fill(queue->q_exts_by_addr, rs,
2801		    -bytes_issued);
2802		range_tree_resize_segment(queue->q_exts_by_addr, rs,
2803		    SIO_GET_OFFSET(sio), rs->rs_end - SIO_GET_OFFSET(sio));
2804
2805		return (B_TRUE);
2806	} else {
2807		range_tree_remove(queue->q_exts_by_addr, rs->rs_start,
2808		    rs->rs_end - rs->rs_start);
2809		return (B_FALSE);
2810	}
2811}
2812
2814/*
2815 * This is called from the queue emptying thread and selects the next
2816 * extent from which we are to issue io's. The behavior of this function
2817 * depends on the state of the scan, the current memory consumption and
2818 * whether or not we are performing a scan shutdown.
2819 * 1) We select extents in an elevator algorithm (LBA-order) if the scan
2820 *	needs to perform a checkpoint
2821 * 2) We select the largest available extent if we are up against the
2822 *	memory limit.
2823 * 3) Otherwise we don't select any extents.
2824 */
2825static const range_seg_t *
2826scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
2827{
2828	dsl_scan_t *scn = queue->q_scn;
2829
2830	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
2831	ASSERT(scn->scn_is_sorted);
2832
2833	/* handle tunable overrides */
2834	if (scn->scn_checkpointing || scn->scn_clearing) {
2835		if (zfs_scan_issue_strategy == 1) {
2836			return (range_tree_first(queue->q_exts_by_addr));
2837		} else if (zfs_scan_issue_strategy == 2) {
2838			return (avl_first(&queue->q_exts_by_size));
2839		}
2840	}
2841
2842	/*
2843	 * During normal clearing, we want to issue our largest segments
2844	 * first, keeping IO as sequential as possible, and leaving the
2845	 * smaller extents for later with the hope that they might eventually
2846	 * grow to larger sequential segments. However, when the scan is
2847	 * checkpointing, no new extents will be added to the sorting queue,
2848	 * so the way we are sorted now is as good as it will ever get.
2849	 * In this case, we instead switch to issuing extents in LBA order.
2850	 */
2851	if (scn->scn_checkpointing) {
2852		return (range_tree_first(queue->q_exts_by_addr));
2853	} else if (scn->scn_clearing) {
2854		return (avl_first(&queue->q_exts_by_size));
2855	} else {
2856		return (NULL);
2857	}
2858}
2859
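/*
 * Taskq callback that empties a single top-level vdev's scan queue:
 * repeatedly select the next extent, gather its sios in small batches,
 * and issue them until the queue is drained or we need to suspend for
 * this txg.  Any gathered but unissued sios are requeued before
 * returning.
 */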
2860static void
2861scan_io_queues_run_one(void *arg)
2862{
2863	dsl_scan_io_queue_t *queue = arg;
2864	kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
2865	boolean_t suspended = B_FALSE;
2866	range_seg_t *rs = NULL;
2867	scan_io_t *sio = NULL;
2868	list_t sio_list;
2869	uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
2870	uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd);
2871
2872	ASSERT(queue->q_scn->scn_is_sorted);
2873
2874	list_create(&sio_list, sizeof (scan_io_t),
2875	    offsetof(scan_io_t, sio_nodes.sio_list_node));
2876	mutex_enter(q_lock);
2877
2878	/* calculate maximum in-flight bytes for this txg (min 1MB) */
2879	queue->q_maxinflight_bytes =
2880	    MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);
2881
2882	/* reset per-queue scan statistics for this txg */
2883	queue->q_total_seg_size_this_txg = 0;
2884	queue->q_segs_this_txg = 0;
2885	queue->q_total_zio_size_this_txg = 0;
2886	queue->q_zios_this_txg = 0;
2887
2888	/* loop until we have run out of time or sios */
2889	while ((rs = (range_seg_t *)scan_io_queue_fetch_ext(queue)) != NULL) {
2890		uint64_t seg_start = 0, seg_end = 0;
2891		boolean_t more_left = B_TRUE;
2892
2893		ASSERT(list_is_empty(&sio_list));
2894
2895		/* loop while we still have sios left to process in this rs */
2896		while (more_left) {
2897			scan_io_t *first_sio, *last_sio;
2898
2899			/*
2900			 * We have selected which extent needs to be
2901			 * processed next. Gather up the corresponding sios.
2902			 */
2903			more_left = scan_io_queue_gather(queue, rs, &sio_list);
2904			ASSERT(!list_is_empty(&sio_list));
2905			first_sio = list_head(&sio_list);
2906			last_sio = list_tail(&sio_list);
2907
2908			seg_end = SIO_GET_END_OFFSET(last_sio);
2909			if (seg_start == 0)
2910				seg_start = SIO_GET_OFFSET(first_sio);
2911
2912			/*
2913			 * Issuing sios can take a long time so drop the
2914			 * queue lock. The sio queue won't be updated by
2915			 * other threads since we're in syncing context so
2916			 * we can be sure that our trees will remain exactly
2917			 * as we left them.
2918			 */
2919			mutex_exit(q_lock);
2920			suspended = scan_io_queue_issue(queue, &sio_list);
2921			mutex_enter(q_lock);
2922
2923			if (suspended)
2924				break;
2925		}
2926		/* update statistics for debugging purposes */
2927		scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
2928
2929		if (suspended)
2930			break;
2931	}
2932
2934	/*
2935	 * If we were suspended in the middle of processing,
2936	 * requeue any unfinished sios and exit.
2937	 */
2938	while ((sio = list_head(&sio_list)) != NULL) {
2939		list_remove(&sio_list, sio);
2940		scan_io_queue_insert_impl(queue, sio);
2941	}
2942
2943	mutex_exit(q_lock);
2944	list_destroy(&sio_list);
2945}
2946
2947/*
2948 * Performs an emptying run on all scan queues in the pool. This just
2949 * punches out one thread per top-level vdev, each of which processes
2950 * only that vdev's scan queue. We can parallelize the I/O here because
2951 * we know that each queue's io's only affect its own top-level vdev.
2952 *
2953 * This function waits for the queue runs to complete, and must be
2954 * called from dsl_scan_sync (or in general, syncing context).
2955 */
2956static void
2957scan_io_queues_run(dsl_scan_t *scn)
2958{
2959	spa_t *spa = scn->scn_dp->dp_spa;
2960
2961	ASSERT(scn->scn_is_sorted);
2962	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
2963
2964	if (scn->scn_bytes_pending == 0)
2965		return;
2966
2967	if (scn->scn_taskq == NULL) {
2968		char *tq_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN + 16,
2969		    KM_SLEEP);
2970		int nthreads = spa->spa_root_vdev->vdev_children;
2971
2972		/*
2973		 * We need to make this taskq *always* execute as many
2974		 * threads in parallel as we have top-level vdevs and no
2975		 * less, otherwise strange serialization of the calls to
2976		 * scan_io_queues_run_one can occur during spa_sync runs
2977		 * and that significantly impacts performance.
2978		 */
2979		(void) snprintf(tq_name, ZFS_MAX_DATASET_NAME_LEN + 16,
2980		    "dsl_scan_tq_%s", spa->spa_name);
2981		scn->scn_taskq = taskq_create(tq_name, nthreads, minclsyspri,
2982		    nthreads, nthreads, TASKQ_PREPOPULATE);
2983		kmem_free(tq_name, ZFS_MAX_DATASET_NAME_LEN + 16);
2984	}
2985
2986	for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
2987		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
2988
2989		mutex_enter(&vd->vdev_scan_io_queue_lock);
2990		if (vd->vdev_scan_io_queue != NULL) {
2991			VERIFY(taskq_dispatch(scn->scn_taskq,
2992			    scan_io_queues_run_one, vd->vdev_scan_io_queue,
2993			    TQ_SLEEP) != TASKQID_INVALID);
2994		}
2995		mutex_exit(&vd->vdev_scan_io_queue_lock);
2996	}
2997
2998	/*
2999	 * Wait for the queues to finish issuing their IOs for this run
3000	 * before we return. There may still be IOs in flight at this
3001	 * point.
3002	 */
3003	taskq_wait(scn->scn_taskq);
3004}
3005
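/*
 * Decide whether background freeing (async destroy / obsolete processing)
 * has done enough work in this txg, based on the number of blocks visited
 * and the elapsed time.
 */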
3006static boolean_t
3007dsl_scan_async_block_should_pause(dsl_scan_t *scn)
3008{
3009	uint64_t elapsed_nanosecs;
3010
3011	if (zfs_recover)
3012		return (B_FALSE);
3013
3014	if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks)
3015		return (B_TRUE);
3016
3017	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
3018	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
3019	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
3020	    txg_sync_waiting(scn->scn_dp)) ||
3021	    spa_shutting_down(scn->scn_dp->dp_spa));
3022}
3023
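/*
 * bpobj/bptree iteration callback: free a single block pointer and charge
 * the reclaimed space against dp_free_dir.  Returns ERESTART when the
 * work should be resumed in a later txg.
 */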
3024static int
3025dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
3026{
3027	dsl_scan_t *scn = arg;
3028
3029	if (!scn->scn_is_bptree ||
3030	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
3031		if (dsl_scan_async_block_should_pause(scn))
3032			return (SET_ERROR(ERESTART));
3033	}
3034
3035	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
3036	    dmu_tx_get_txg(tx), bp, 0));
3037	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
3038	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
3039	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
3040	scn->scn_visited_this_txg++;
3041	return (0);
3042}
3043
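/*
 * Roll the per-queue segment and zio statistics from all top-level vdevs
 * up into the scan-wide per-txg averages (used for debugging).
 */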
3044static void
3045dsl_scan_update_stats(dsl_scan_t *scn)
3046{
3047	spa_t *spa = scn->scn_dp->dp_spa;
3048	uint64_t i;
3049	uint64_t seg_size_total = 0, zio_size_total = 0;
3050	uint64_t seg_count_total = 0, zio_count_total = 0;
3051
3052	for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
3053		vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
3054		dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
3055
3056		if (queue == NULL)
3057			continue;
3058
3059		seg_size_total += queue->q_total_seg_size_this_txg;
3060		zio_size_total += queue->q_total_zio_size_this_txg;
3061		seg_count_total += queue->q_segs_this_txg;
3062		zio_count_total += queue->q_zios_this_txg;
3063	}
3064
3065	if (seg_count_total == 0 || zio_count_total == 0) {
3066		scn->scn_avg_seg_size_this_txg = 0;
3067		scn->scn_avg_zio_size_this_txg = 0;
3068		scn->scn_segs_this_txg = 0;
3069		scn->scn_zios_this_txg = 0;
3070		return;
3071	}
3072
3073	scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
3074	scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
3075	scn->scn_segs_this_txg = seg_count_total;
3076	scn->scn_zios_this_txg = zio_count_total;
3077}
3078
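/*
 * Obsolete bpobj iteration callback: mark the space referenced by the
 * block's first DVA as obsolete on its indirect vdev.
 */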
3079static int
3080dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
3081{
3082	dsl_scan_t *scn = arg;
3083	const dva_t *dva = &bp->blk_dva[0];
3084
3085	if (dsl_scan_async_block_should_pause(scn))
3086		return (SET_ERROR(ERESTART));
3087
3088	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
3089	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
3090	    DVA_GET_ASIZE(dva), tx);
3091	scn->scn_visited_this_txg++;
3092	return (0);
3093}
3094
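/*
 * Returns B_TRUE if there is scan or async-destroy work that may dirty
 * the pool this txg; returns B_FALSE while the pool is loading or
 * shutting down.
 */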
3095boolean_t
3096dsl_scan_active(dsl_scan_t *scn)
3097{
3098	spa_t *spa = scn->scn_dp->dp_spa;
3099	uint64_t used = 0, comp, uncomp;
3100
3101	if (spa->spa_load_state != SPA_LOAD_NONE)
3102		return (B_FALSE);
3103	if (spa_shutting_down(spa))
3104		return (B_FALSE);
3105	if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
3106	    (scn->scn_async_destroying && !scn->scn_async_stalled))
3107		return (B_TRUE);
3108
3109	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
3110		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
3111		    &used, &comp, &uncomp);
3112	}
3113	return (used != 0);
3114}
3115
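/*
 * Returns B_TRUE if at least one leaf vdev under vd is being resilvered
 * and its resilver has not been deferred.
 */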
3116static boolean_t
3117dsl_scan_check_deferred(vdev_t *vd)
3118{
3119	boolean_t need_resilver = B_FALSE;
3120
3121	for (int c = 0; c < vd->vdev_children; c++) {
3122		need_resilver |=
3123		    dsl_scan_check_deferred(vd->vdev_child[c]);
3124	}
3125
3126	if (!vdev_is_concrete(vd) || vd->vdev_aux ||
3127	    !vd->vdev_ops->vdev_op_leaf)
3128		return (need_resilver);
3129
3130	if (!vd->vdev_resilver_deferred)
3131		need_resilver = B_TRUE;
3132
3133	return (need_resilver);
3134}
3135
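/*
 * Determine whether the block referenced by this DVA must be resilvered
 * now; the individual checks are explained inline below.
 */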
3136static boolean_t
3137dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
3138    uint64_t phys_birth)
3139{
3140	vdev_t *vd;
3141
3142	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
3143
3144	if (vd->vdev_ops == &vdev_indirect_ops) {
3145		/*
3146		 * The indirect vdev can point to multiple
3147		 * vdevs.  For simplicity, always create
3148		 * the resilver zio_t. zio_vdev_io_start()
3149		 * will bypass the child resilver i/o's if
3150		 * they are on vdevs that don't have DTL's.
3151		 */
3152		return (B_TRUE);
3153	}
3154
3155	if (DVA_GET_GANG(dva)) {
3156		/*
3157		 * Gang members may be spread across multiple
3158		 * vdevs, so the best estimate we have is the
3159		 * scrub range, which has already been checked.
3160		 * XXX -- it would be better to change our
3161		 * allocation policy to ensure that all
3162		 * gang members reside on the same vdev.
3163		 */
3164		return (B_TRUE);
3165	}
3166
3167	/*
3168	 * Check if the txg falls within the range which must be
3169	 * resilvered.  DVAs outside this range can always be skipped.
3170	 */
3171	if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
3172		return (B_FALSE);
3173
3174	/*
3175	 * Check if the top-level vdev must resilver this offset.
3176	 * When the offset does not intersect with a dirty leaf DTL
3177	 * then it may be possible to skip the resilver IO.  The psize
3178	 * is provided instead of asize to simplify the check for RAIDZ.
3179	 */
3180	if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize))
3181		return (B_FALSE);
3182
3183	/*
3184	 * Check that this top-level vdev has a device under it which
3185	 * is resilvering and is not deferred.
3186	 */
3187	if (!dsl_scan_check_deferred(vd))
3188		return (B_FALSE);
3189
3190	return (B_TRUE);
3191}
3192
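/*
 * Process pending background frees for this txg: blocks on the free
 * bpobj, the async-destroy bptree (if that feature is active), and the
 * obsolete-counts bpobj.  Returns nonzero (e.g. ERESTART) if freeing
 * should continue in a later txg.
 */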
3193static int
3194dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
3195{
3196	int err = 0;
3197	dsl_scan_t *scn = dp->dp_scan;
3198	spa_t *spa = dp->dp_spa;
3199
3200	if (spa_suspend_async_destroy(spa))
3201		return (0);
3202
3203	if (zfs_free_bpobj_enabled &&
3204	    spa_version(spa) >= SPA_VERSION_DEADLISTS) {
3205		scn->scn_is_bptree = B_FALSE;
3206		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
3207		scn->scn_zio_root = zio_root(spa, NULL,
3208		    NULL, ZIO_FLAG_MUSTSUCCEED);
3209		err = bpobj_iterate(&dp->dp_free_bpobj,
3210		    dsl_scan_free_block_cb, scn, tx);
3211		VERIFY0(zio_wait(scn->scn_zio_root));
3212		scn->scn_zio_root = NULL;
3213
3214		if (err != 0 && err != ERESTART)
3215			zfs_panic_recover("error %u from bpobj_iterate()", err);
3216	}
3217
3218	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
3219		ASSERT(scn->scn_async_destroying);
3220		scn->scn_is_bptree = B_TRUE;
3221		scn->scn_zio_root = zio_root(spa, NULL,
3222		    NULL, ZIO_FLAG_MUSTSUCCEED);
3223		err = bptree_iterate(dp->dp_meta_objset,
3224		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
3225		VERIFY0(zio_wait(scn->scn_zio_root));
3226		scn->scn_zio_root = NULL;
3227
3228		if (err == EIO || err == ECKSUM) {
3229			err = 0;
3230		} else if (err != 0 && err != ERESTART) {
3231			zfs_panic_recover("error %u from "
3232			    "traverse_dataset_destroyed()", err);
3233		}
3234
3235		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
3236			/* finished; deactivate async destroy feature */
3237			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
3238			ASSERT(!spa_feature_is_active(spa,
3239			    SPA_FEATURE_ASYNC_DESTROY));
3240			VERIFY0(zap_remove(dp->dp_meta_objset,
3241			    DMU_POOL_DIRECTORY_OBJECT,
3242			    DMU_POOL_BPTREE_OBJ, tx));
3243			VERIFY0(bptree_free(dp->dp_meta_objset,
3244			    dp->dp_bptree_obj, tx));
3245			dp->dp_bptree_obj = 0;
3246			scn->scn_async_destroying = B_FALSE;
3247			scn->scn_async_stalled = B_FALSE;
3248		} else {
3249			/*
3250			 * If we didn't make progress, mark the async
3251			 * destroy as stalled, so that we will not initiate
3252			 * a spa_sync() on its behalf.  Note that we only
3253			 * check this if we are not finished, because if the
3254			 * bptree had no blocks for us to visit, we can
3255			 * finish without "making progress".
3256			 */
3257			scn->scn_async_stalled =
3258			    (scn->scn_visited_this_txg == 0);
3259		}
3260	}
3261	if (scn->scn_visited_this_txg) {
3262		zfs_dbgmsg("freed %llu blocks in %llums from "
3263		    "free_bpobj/bptree txg %llu; err=%d",
3264		    (longlong_t)scn->scn_visited_this_txg,
3265		    (longlong_t)
3266		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
3267		    (longlong_t)tx->tx_txg, err);
3268		scn->scn_visited_this_txg = 0;
3269
3270		/*
3271		 * Write out changes to the DDT that may be required as a
3272		 * result of the blocks freed.  This ensures that the DDT
3273		 * is clean when a scrub/resilver runs.
3274		 */
3275		ddt_sync(spa, tx->tx_txg);
3276	}
3277	if (err != 0)
3278		return (err);
3279	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
3280	    zfs_free_leak_on_eio &&
3281	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
3282	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
3283	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
3284		/*
3285		 * We have finished background destroying, but there is still
3286		 * some space left in the dp_free_dir. Transfer this leaked
3287		 * space to the dp_leak_dir.
3288		 */
3289		if (dp->dp_leak_dir == NULL) {
3290			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
3291			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
3292			    LEAK_DIR_NAME, tx);
3293			VERIFY0(dsl_pool_open_special_dir(dp,
3294			    LEAK_DIR_NAME, &dp->dp_leak_dir));
3295			rrw_exit(&dp->dp_config_rwlock, FTAG);
3296		}
3297		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
3298		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3299		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3300		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3301		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
3302		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
3303		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
3304		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
3305	}
3306
3307	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
3308		/* finished; verify that space accounting went to zero */
3309		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
3310		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
3311		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
3312	}
3313
3314	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
3315	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
3316	    DMU_POOL_OBSOLETE_BPOBJ));
3317	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
3318		ASSERT(spa_feature_is_active(dp->dp_spa,
3319		    SPA_FEATURE_OBSOLETE_COUNTS));
3320
3321		scn->scn_is_bptree = B_FALSE;
3322		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
3323		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
3324		    dsl_scan_obsolete_block_cb, scn, tx);
3325		if (err != 0 && err != ERESTART)
3326			zfs_panic_recover("error %u from bpobj_iterate()", err);
3327
3328		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
3329			dsl_pool_destroy_obsolete_bpobj(dp, tx);
3330	}
3331
3332	return (0);
3333}
3334
3335/*
3336 * This is the primary entry point for scans; it is called from syncing
3337 * context. Scans must happen entirely during syncing context so that we
3338 * can guarantee that blocks we are currently scanning will not change out
3339 * from under us. While a scan is active, this function controls how quickly
3340 * transaction groups proceed, instead of the normal handling provided by
3341 * txg_sync_thread().
3342 */
3343void
3344dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
3345{
3346	dsl_scan_t *scn = dp->dp_scan;
3347	spa_t *spa = dp->dp_spa;
3348	int err = 0;
3349	state_sync_type_t sync_type = SYNC_OPTIONAL;
3350
3351	if (spa->spa_resilver_deferred &&
3352	    !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
3353		spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
3354
3355	/*
3356	 * Check for scn_restart_txg before checking spa_load_state, so
3357	 * that we can restart an old-style scan while the pool is being
3358	 * imported (see dsl_scan_init). We also restart scans if there
3359	 * is a deferred resilver and the user has manually disabled
3360	 * deferred resilvers via the tunable.
3361	 */
3362	if (dsl_scan_restarting(scn, tx) ||
3363	    (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
3364		pool_scan_func_t func = POOL_SCAN_SCRUB;
3365		dsl_scan_done(scn, B_FALSE, tx);
3366		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
3367			func = POOL_SCAN_RESILVER;
3368		zfs_dbgmsg("restarting scan func=%u txg=%llu",
3369		    func, (longlong_t)tx->tx_txg);
3370		dsl_scan_setup_sync(&func, tx);
3371	}
3372
3373	/*
3374	 * Only process scans in sync pass 1.
3375	 */
3376	if (spa_sync_pass(dp->dp_spa) > 1)
3377		return;
3378
3379	/*
3380	 * If the spa is shutting down, then stop scanning. This will
3381	 * ensure that the scan does not dirty any new data during the
3382	 * shutdown phase.
3383	 */
3384	if (spa_shutting_down(spa))
3385		return;
3386
3387	/*
3388	 * Proceed only if the scan is active or an async destroy has stalled.
3389	 */
3390	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
3391		return;
3392
3393	/* reset scan statistics */
3394	scn->scn_visited_this_txg = 0;
3395	scn->scn_holes_this_txg = 0;
3396	scn->scn_lt_min_this_txg = 0;
3397	scn->scn_gt_max_this_txg = 0;
3398	scn->scn_ddt_contained_this_txg = 0;
3399	scn->scn_objsets_visited_this_txg = 0;
3400	scn->scn_avg_seg_size_this_txg = 0;
3401	scn->scn_segs_this_txg = 0;
3402	scn->scn_avg_zio_size_this_txg = 0;
3403	scn->scn_zios_this_txg = 0;
3404	scn->scn_suspending = B_FALSE;
3405	scn->scn_sync_start_time = gethrtime();
3406	spa->spa_scrub_active = B_TRUE;
3407
3408	/*
3409	 * First process the async destroys.  If we pause, don't do
3410	 * any scrubbing or resilvering.  This ensures that there are no
3411	 * async destroys while we are scanning, so the scan code doesn't
3412	 * have to worry about traversing it.  It is also faster to free the
3413	 * blocks than to scrub them.
3414	 */
3415	err = dsl_process_async_destroys(dp, tx);
3416	if (err != 0)
3417		return;
3418
3419	if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
3420		return;
3421
3422	/*
3423	 * Wait a few txgs after importing to begin scanning so that
3424	 * we can get the pool imported quickly.
3425	 */
3426	if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
3427		return;
3428
3429	/*
3430	 * zfs_scan_suspend_progress can be set to disable scan progress.
3431	 * We don't want to spin the txg_sync thread, so we add a delay
3432	 * here to simulate the time spent doing a scan. This is mostly
3433	 * useful for testing and debugging.
3434	 */
3435	if (zfs_scan_suspend_progress) {
3436		uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
3437		int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
3438		    zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
3439
3440		while (zfs_scan_suspend_progress &&
3441		    !txg_sync_waiting(scn->scn_dp) &&
3442		    !spa_shutting_down(scn->scn_dp->dp_spa) &&
3443		    NSEC2MSEC(scan_time_ns) < mintime) {
3444			delay(hz);
3445			scan_time_ns = gethrtime() - scn->scn_sync_start_time;
3446		}
3447		return;
3448	}
3449
3450	/*
3451	 * It is possible to switch from unsorted to sorted at any time,
3452	 * but afterwards the scan will remain sorted unless reloaded from
3453	 * a checkpoint after a reboot.
3454	 */
3455	if (!zfs_scan_legacy) {
3456		scn->scn_is_sorted = B_TRUE;
3457		if (scn->scn_last_checkpoint == 0)
3458			scn->scn_last_checkpoint = ddi_get_lbolt();
3459	}
3460
3461	/*
3462	 * For sorted scans, determine what kind of work we will be doing
3463	 * this txg based on our memory limitations and whether or not we
3464	 * need to perform a checkpoint.
3465	 */
3466	if (scn->scn_is_sorted) {
3467		/*
3468		 * If we are over our checkpoint interval, set scn_clearing
3469		 * so that we can begin checkpointing immediately. The
3470		 * checkpoint allows us to save a consistent bookmark
3471		 * representing how much data we have scrubbed so far.
3472		 * Otherwise, use the memory limit to determine if we should
3473		 * scan for metadata or start issuing scrub IOs. We accumulate
3474		 * metadata until we hit our hard memory limit at which point
3475		 * we issue scrub IOs until we are at our soft memory limit.
3476		 */
3477		if (scn->scn_checkpointing ||
3478		    ddi_get_lbolt() - scn->scn_last_checkpoint >
3479		    SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
3480			if (!scn->scn_checkpointing)
3481				zfs_dbgmsg("begin scan checkpoint");
3482
3483			scn->scn_checkpointing = B_TRUE;
3484			scn->scn_clearing = B_TRUE;
3485		} else {
3486			boolean_t should_clear = dsl_scan_should_clear(scn);
3487			if (should_clear && !scn->scn_clearing) {
3488				zfs_dbgmsg("begin scan clearing");
3489				scn->scn_clearing = B_TRUE;
3490			} else if (!should_clear && scn->scn_clearing) {
3491				zfs_dbgmsg("finish scan clearing");
3492				scn->scn_clearing = B_FALSE;
3493			}
3494		}
3495	} else {
3496		ASSERT0(scn->scn_checkpointing);
3497		ASSERT0(scn->scn_clearing);
3498	}
3499
3500	if (!scn->scn_clearing && scn->scn_done_txg == 0) {
3501		/* Need to scan metadata for more blocks to scrub */
3502		dsl_scan_phys_t *scnp = &scn->scn_phys;
3503		taskqid_t prefetch_tqid;
3504		uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
3505		uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev);
3506
3507		/*
3508		 * Calculate the max number of in-flight bytes for pool-wide
3509		 * scanning operations (minimum 1MB). Limits for the issuing
3510		 * phase are done per top-level vdev and are handled separately.
3511		 */
3512		scn->scn_maxinflight_bytes =
3513		    MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);
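		/*
		 * For example, assuming the default 4 MiB zfs_scan_vdev_limit,
		 * a pool with 8 leaf vdevs would allow up to 32 MiB of
		 * metadata-scanning I/O to be in flight at once.
		 */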
3514
3515		if (scnp->scn_ddt_bookmark.ddb_class <=
3516		    scnp->scn_ddt_class_max) {
3517			ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
3518			zfs_dbgmsg("doing scan sync txg %llu; "
3519			    "ddt bm=%llu/%llu/%llu/%llx",
3520			    (longlong_t)tx->tx_txg,
3521			    (longlong_t)scnp->scn_ddt_bookmark.ddb_class,
3522			    (longlong_t)scnp->scn_ddt_bookmark.ddb_type,
3523			    (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
3524			    (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
3525		} else {
3526			zfs_dbgmsg("doing scan sync txg %llu; "
3527			    "bm=%llu/%llu/%llu/%llu",
3528			    (longlong_t)tx->tx_txg,
3529			    (longlong_t)scnp->scn_bookmark.zb_objset,
3530			    (longlong_t)scnp->scn_bookmark.zb_object,
3531			    (longlong_t)scnp->scn_bookmark.zb_level,
3532			    (longlong_t)scnp->scn_bookmark.zb_blkid);
3533		}
3534
3535		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
3536		    NULL, ZIO_FLAG_CANFAIL);
3537
3538		scn->scn_prefetch_stop = B_FALSE;
3539		prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
3540		    dsl_scan_prefetch_thread, scn, TQ_SLEEP);
3541		ASSERT(prefetch_tqid != TASKQID_INVALID);
3542
3543		dsl_pool_config_enter(dp, FTAG);
3544		dsl_scan_visit(scn, tx);
3545		dsl_pool_config_exit(dp, FTAG);
3546
3547		mutex_enter(&dp->dp_spa->spa_scrub_lock);
3548		scn->scn_prefetch_stop = B_TRUE;
3549		cv_broadcast(&spa->spa_scrub_io_cv);
3550		mutex_exit(&dp->dp_spa->spa_scrub_lock);
3551
3552		taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
3553		(void) zio_wait(scn->scn_zio_root);
3554		scn->scn_zio_root = NULL;
3555
3556		zfs_dbgmsg("scan visited %llu blocks in %llums "
3557		    "(%llu os's, %llu holes, %llu < mintxg, "
3558		    "%llu in ddt, %llu > maxtxg)",
3559		    (longlong_t)scn->scn_visited_this_txg,
3560		    (longlong_t)NSEC2MSEC(gethrtime() -
3561		    scn->scn_sync_start_time),
3562		    (longlong_t)scn->scn_objsets_visited_this_txg,
3563		    (longlong_t)scn->scn_holes_this_txg,
3564		    (longlong_t)scn->scn_lt_min_this_txg,
3565		    (longlong_t)scn->scn_ddt_contained_this_txg,
3566		    (longlong_t)scn->scn_gt_max_this_txg);
3567
3568		if (!scn->scn_suspending) {
3569			ASSERT0(avl_numnodes(&scn->scn_queue));
3570			scn->scn_done_txg = tx->tx_txg + 1;
3571			if (scn->scn_is_sorted) {
3572				scn->scn_checkpointing = B_TRUE;
3573				scn->scn_clearing = B_TRUE;
3574			}
3575			zfs_dbgmsg("scan complete txg %llu",
3576			    (longlong_t)tx->tx_txg);
3577		}
3578	} else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
3579		ASSERT(scn->scn_clearing);
3580
3581		/* need to issue scrubbing IOs from per-vdev queues */
3582		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
3583		    NULL, ZIO_FLAG_CANFAIL);
3584		scan_io_queues_run(scn);
3585		(void) zio_wait(scn->scn_zio_root);
3586		scn->scn_zio_root = NULL;
3587
3588		/* calculate and dprintf the current memory usage */
3589		(void) dsl_scan_should_clear(scn);
3590		dsl_scan_update_stats(scn);
3591
3592		zfs_dbgmsg("scrubbed %llu blocks (%llu segs) in %llums "
3593		    "(avg_block_size = %llu, avg_seg_size = %llu)",
3594		    (longlong_t)scn->scn_zios_this_txg,
3595		    (longlong_t)scn->scn_segs_this_txg,
3596		    (longlong_t)NSEC2MSEC(gethrtime() -
3597		    scn->scn_sync_start_time),
3598		    (longlong_t)scn->scn_avg_zio_size_this_txg,
3599		    (longlong_t)scn->scn_avg_seg_size_this_txg);
3600	} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
3601		/* Finished with everything. Mark the scrub as complete */
3602		zfs_dbgmsg("scan issuing complete txg %llu",
3603		    (longlong_t)tx->tx_txg);
3604		ASSERT3U(scn->scn_done_txg, !=, 0);
3605		ASSERT0(spa->spa_scrub_inflight);
3606		ASSERT0(scn->scn_bytes_pending);
3607		dsl_scan_done(scn, B_TRUE, tx);
3608		sync_type = SYNC_MANDATORY;
3609	}
3610
3611	dsl_scan_sync_state(scn, tx, sync_type);
3612}
3613
3614static void
3615count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
3616{
3617	int i;
3618
3619	/*
3620	 * Don't count embedded bp's, since we already did the work of
3621	 * scanning these when we scanned the containing block.
3622	 */
3623	if (BP_IS_EMBEDDED(bp))
3624		return;
3625
3626	/*
3627	 * Update the spa's stats on how many bytes we have issued.
3628	 * Sequential scrubs create a zio for each DVA of the bp. Each
3629	 * of these will include all DVAs for repair purposes, but the
3630	 * zio code will only try the first one unless there is an issue.
3631	 * Therefore, we should only count the first DVA for these IOs.
3632	 */
3633	if (scn->scn_is_sorted) {
3634		atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
3635		    DVA_GET_ASIZE(&bp->blk_dva[0]));
3636	} else {
3637		spa_t *spa = scn->scn_dp->dp_spa;
3638
3639		for (i = 0; i < BP_GET_NDVAS(bp); i++) {
3640			atomic_add_64(&spa->spa_scan_pass_issued,
3641			    DVA_GET_ASIZE(&bp->blk_dva[i]));
3642		}
3643	}
3644
3645	/*
3646	 * If we resume after a reboot, zab will be NULL; don't record
3647	 * incomplete stats in that case.
3648	 */
3649	if (zab == NULL)
3650		return;
3651
3652	mutex_enter(&zab->zab_lock);
3653
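	/*
	 * Each block updates four stat buckets: (level, type),
	 * (level, DMU_OT_TOTAL), (DN_MAX_LEVELS, type) and
	 * (DN_MAX_LEVELS, DMU_OT_TOTAL), where the DN_MAX_LEVELS row and
	 * DMU_OT_TOTAL column serve as "all levels" and "all types"
	 * aggregates.
	 */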
3654	for (i = 0; i < 4; i++) {
3655		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
3656		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
3657		if (t & DMU_OT_NEWTYPE)
3658			t = DMU_OT_OTHER;
3659		zfs_blkstat_t *zb = &zab->zab_type[l][t];
3660		int equal;
3661
3662		zb->zb_count++;
3663		zb->zb_asize += BP_GET_ASIZE(bp);
3664		zb->zb_lsize += BP_GET_LSIZE(bp);
3665		zb->zb_psize += BP_GET_PSIZE(bp);
3666		zb->zb_gangs += BP_COUNT_GANG(bp);
3667
3668		switch (BP_GET_NDVAS(bp)) {
3669		case 2:
3670			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3671			    DVA_GET_VDEV(&bp->blk_dva[1]))
3672				zb->zb_ditto_2_of_2_samevdev++;
3673			break;
3674		case 3:
3675			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3676			    DVA_GET_VDEV(&bp->blk_dva[1])) +
3677			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
3678			    DVA_GET_VDEV(&bp->blk_dva[2])) +
3679			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
3680			    DVA_GET_VDEV(&bp->blk_dva[2]));
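			/*
			 * equal counts the pairwise vdev matches among the
			 * three DVAs; it can only be 0, 1 or 3 (2 is
			 * impossible because vdev equality is transitive).
			 */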
3681			if (equal == 1)
3682				zb->zb_ditto_2_of_3_samevdev++;
3683			else if (equal == 3)
3684				zb->zb_ditto_3_of_3_samevdev++;
3685			break;
3686		}
3687	}
3688
3689	mutex_exit(&zab->zab_lock);
3690}
3691
3692static void
3693scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
3694{
3695	avl_index_t idx;
3696	int64_t asize = SIO_GET_ASIZE(sio);
3697	dsl_scan_t *scn = queue->q_scn;
3698
3699	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3700
3701	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
3702		/* block is already scheduled for reading */
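		/*
		 * Undo the scn_bytes_pending charge taken by our caller,
		 * since this duplicate sio is being dropped.
		 */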
3703		atomic_add_64(&scn->scn_bytes_pending, -asize);
3704		sio_free(sio);
3705		return;
3706	}
3707	avl_insert(&queue->q_sios_by_addr, sio, idx);
3708	queue->q_sio_memused += SIO_GET_MUSED(sio);
3709	range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), asize);
3710}
3711
3712/*
3713 * Given all the info we got from our metadata scanning process, we
3714 * construct a scan_io_t and insert it into the scan sorting queue. The
3715 * I/O must already be suitable for us to process. This is controlled
3716 * by dsl_scan_enqueue().
3717 */
3718static void
3719scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
3720    int zio_flags, const zbookmark_phys_t *zb)
3721{
3722	dsl_scan_t *scn = queue->q_scn;
3723	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
3724
3725	ASSERT0(BP_IS_GANG(bp));
3726	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
3727
3728	bp2sio(bp, sio, dva_i);
3729	sio->sio_flags = zio_flags;
3730	sio->sio_zb = *zb;
3731
3732	/*
3733	 * Increment the bytes pending counter now so that we can't
3734	 * get an integer underflow in case the worker processes the
3735	 * zio before we get to incrementing this counter.
3736	 */
3737	atomic_add_64(&scn->scn_bytes_pending, SIO_GET_ASIZE(sio));
3738
3739	scan_io_queue_insert_impl(queue, sio);
3740}
3741
3742/*
3743 * Given a set of I/O parameters as discovered by the metadata traversal
3744 * process, attempts to place the I/O into the sorted queues (if allowed),
3745 * or immediately executes the I/O.
3746 */
3747static void
3748dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
3749    const zbookmark_phys_t *zb)
3750{
3751	spa_t *spa = dp->dp_spa;
3752
3753	ASSERT(!BP_IS_EMBEDDED(bp));
3754
3755	/*
3756	 * Gang blocks are hard to issue sequentially, so we just issue them
3757	 * here immediately instead of queuing them.
3758	 */
3759	if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
3760		scan_exec_io(dp, bp, zio_flags, zb, NULL);
3761		return;
3762	}
3763	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
3764		dva_t dva;
3765		vdev_t *vdev;
3766
3767		dva = bp->blk_dva[i];
3768		vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
3769		ASSERT(vdev != NULL);
3770
3771		mutex_enter(&vdev->vdev_scan_io_queue_lock);
3772		if (vdev->vdev_scan_io_queue == NULL)
3773			vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
3774		ASSERT(dp->dp_scan != NULL);
3775		scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
3776		    i, zio_flags, zb);
3777		mutex_exit(&vdev->vdev_scan_io_queue_lock);
3778	}
3779}
3780
3781static int
3782dsl_scan_scrub_cb(dsl_pool_t *dp,
3783    const blkptr_t *bp, const zbookmark_phys_t *zb)
3784{
3785	dsl_scan_t *scn = dp->dp_scan;
3786	spa_t *spa = dp->dp_spa;
3787	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
3788	size_t psize = BP_GET_PSIZE(bp);
3789	boolean_t needs_io;
3790	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
3791	int d;
3792
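	/*
	 * Blocks born outside the txg range being scanned need no scrub or
	 * resilver I/O; just account for them and return.
	 */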
3793	if (phys_birth <= scn->scn_phys.scn_min_txg ||
3794	    phys_birth >= scn->scn_phys.scn_max_txg) {
3795		count_block(scn, dp->dp_blkstats, bp);
3796		return (0);
3797	}
3798
3799	/* Embedded BP's have phys_birth==0, so we reject them above. */
3800	ASSERT(!BP_IS_EMBEDDED(bp));
3801
3802	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
3803	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
3804		zio_flags |= ZIO_FLAG_SCRUB;
3805		needs_io = B_TRUE;
3806	} else {
3807		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
3808		zio_flags |= ZIO_FLAG_RESILVER;
3809		needs_io = B_FALSE;
3810	}
3811
3812	/* If it's an intent log block, failure is expected. */
3813	if (zb->zb_level == ZB_ZIL_LEVEL)
3814		zio_flags |= ZIO_FLAG_SPECULATIVE;
3815
3816	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
3817		const dva_t *dva = &bp->blk_dva[d];
3818
3819		/*
3820		 * Keep track of how much data we've examined so that
3821		 * zpool(1M) status can make useful progress reports.
3822		 */
3823		scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
3824		spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);
3825
3826		/* if it's a resilver, this may not be in the target range */
3827		if (!needs_io)
3828			needs_io = dsl_scan_need_resilver(spa, dva, psize,
3829			    phys_birth);
3830	}
3831
3832	if (needs_io && !zfs_no_scrub_io) {
3833		dsl_scan_enqueue(dp, bp, zio_flags, zb);
3834	} else {
3835		count_block(scn, dp->dp_blkstats, bp);
3836	}
3837
3838	/* do not relocate this block */
3839	return (0);
3840}
3841
3842static void
3843dsl_scan_scrub_done(zio_t *zio)
3844{
3845	spa_t *spa = zio->io_spa;
3846	blkptr_t *bp = zio->io_bp;
3847	dsl_scan_io_queue_t *queue = zio->io_private;
3848
3849	abd_free(zio->io_abd);
3850
3851	if (queue == NULL) {
3852		mutex_enter(&spa->spa_scrub_lock);
3853		ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
3854		spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
3855		cv_broadcast(&spa->spa_scrub_io_cv);
3856		mutex_exit(&spa->spa_scrub_lock);
3857	} else {
3858		mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
3859		ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
3860		queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
3861		cv_broadcast(&queue->q_zio_cv);
3862		mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
3863	}
3864
3865	if (zio->io_error && (zio->io_error != ECKSUM ||
3866	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
3867		atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
3868	}
3869}
3870
3871/*
3872 * Given a scanning zio's information, executes the zio. The zio need
3873 * not be sortable; this function simply executes the zio, no matter
3874 * what it is. The optional queue argument allows the caller to request
3875 * per top-level vdev I/O rate limiting instead of the legacy pool-wide
3876 * limiting.
3877 */
3878static void
3879scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
3880    const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
3881{
3882	spa_t *spa = dp->dp_spa;
3883	dsl_scan_t *scn = dp->dp_scan;
3884	size_t size = BP_GET_PSIZE(bp);
3885	abd_t *data = abd_alloc_for_io(size, B_FALSE);
3886
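	/*
	 * Throttle in-flight scrub I/O: against the pool-wide limit when no
	 * queue is given (legacy path), or against the per-vdev queue's
	 * limit otherwise.
	 */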
3887	if (queue == NULL) {
3888		mutex_enter(&spa->spa_scrub_lock);
3889		while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
3890			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
3891		spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
3892		mutex_exit(&spa->spa_scrub_lock);
3893	} else {
3894		kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
3895
3896		mutex_enter(q_lock);
3897		while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
3898			cv_wait(&queue->q_zio_cv, q_lock);
3899		queue->q_inflight_bytes += BP_GET_PSIZE(bp);
3900		mutex_exit(q_lock);
3901	}
3902
3903	count_block(dp->dp_scan, dp->dp_blkstats, bp);
3904	zio_nowait(zio_read(dp->dp_scan->scn_zio_root, spa, bp, data, size,
3905	    dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
3906}
3907
3908/*
3909 * This is the primary extent sorting algorithm. We balance two parameters:
3910 * 1) how many bytes of I/O are in an extent
3911 * 2) how well the extent is filled with I/O (as a fraction of its total size)
3912 * Since we allow extents to have gaps between their constituent I/Os, it's
3913 * possible to have a fairly large extent that contains the same amount of
3914 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
3915 * The algorithm sorts based on a score calculated from the extent's size,
3916 * the relative fill volume (in %) and a "fill weight" parameter that controls
3917 * the split between whether we prefer larger extents or more well populated
3918 * extents:
3919 *
3920 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
3921 *
3922 * Example:
3923 * 1) assume extsz = 64 MiB
3924 * 2) assume fill = 32 MiB (extent is half full)
3925 * 3) assume fill_weight = 3
3926 * 4)	SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
3927 *	SCORE = 32M + (50 * 3 * 32M) / 100
3928 *	SCORE = 32M + (4800M / 100)
3929 *	SCORE = 32M + 48M
3930 *		^	^
3931 *		|	+--- final total relative fill-based score
3932 *		+--------- final total fill-based score
3933 *	SCORE = 80M
3934 *
3935 * As can be seen, at fill_weight=3, the algorithm is slightly biased towards
3936 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
3937 * Note that as an optimization, we replace multiplication and division by
3938 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
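 *
 * For instance, redoing the example above with the 128-based arithmetic:
 *	(32M << 7) / 64M = 64 (i.e. 50% expressed out of 128), and
 *	(64 * 3 * 32M) >> 7 = 48M, giving the same SCORE = 32M + 48M = 80M.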
3939 */
3940static int
3941ext_size_compare(const void *x, const void *y)
3942{
3943	const range_seg_t *rsa = x, *rsb = y;
3944	uint64_t sa = rsa->rs_end - rsa->rs_start,
3945	    sb = rsb->rs_end - rsb->rs_start;
3946	uint64_t score_a, score_b;
3947
3948	score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
3949	    fill_weight * rsa->rs_fill) >> 7);
3950	score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
3951	    fill_weight * rsb->rs_fill) >> 7);
3952
3953	if (score_a > score_b)
3954		return (-1);
3955	if (score_a == score_b) {
3956		if (rsa->rs_start < rsb->rs_start)
3957			return (-1);
3958		if (rsa->rs_start == rsb->rs_start)
3959			return (0);
3960		return (1);
3961	}
3962	return (1);
3963}
3964
3965/*
3966 * Comparator for the q_sios_by_addr tree. Sorting is simply performed
3967 * based on LBA-order (from lowest to highest).
3968 */
3969static int
3970sio_addr_compare(const void *x, const void *y)
3971{
3972	const scan_io_t *a = x, *b = y;
3973
3974	return (AVL_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
3975}
3976
3977/* IO queues are created on demand when they are needed. */
3978static dsl_scan_io_queue_t *
3979scan_io_queue_create(vdev_t *vd)
3980{
3981	dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
3982	dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
3983
3984	q->q_scn = scn;
3985	q->q_vd = vd;
3986	q->q_sio_memused = 0;
3987	cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
3988	q->q_exts_by_addr = range_tree_create_impl(&rt_avl_ops,
3989	    &q->q_exts_by_size, ext_size_compare, zfs_scan_max_ext_gap);
3990	avl_create(&q->q_sios_by_addr, sio_addr_compare,
3991	    sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
3992
3993	return (q);
3994}
3995
3996/*
3997 * Destroys a scan queue and all segments and scan_io_t's contained in it.
3998 * No further execution of I/O occurs; anything pending in the queue is
3999 * simply freed without being executed.
4000 */
4001void
4002dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
4003{
4004	dsl_scan_t *scn = queue->q_scn;
4005	scan_io_t *sio;
4006	void *cookie = NULL;
4007	int64_t bytes_dequeued = 0;
4008
4009	ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
4010
4011	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
4012	    NULL) {
4013		ASSERT(range_tree_contains(queue->q_exts_by_addr,
4014		    SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
4015		bytes_dequeued += SIO_GET_ASIZE(sio);
4016		queue->q_sio_memused -= SIO_GET_MUSED(sio);
4017		sio_free(sio);
4018	}
4019
4020	ASSERT0(queue->q_sio_memused);
4021	atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
4022	range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
4023	range_tree_destroy(queue->q_exts_by_addr);
4024	avl_destroy(&queue->q_sios_by_addr);
4025	cv_destroy(&queue->q_zio_cv);
4026
4027	kmem_free(queue, sizeof (*queue));
4028}
4029
4030/*
4031 * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
4032 * called on behalf of vdev_top_transfer when creating or destroying
4033 * a mirror vdev due to zpool attach/detach.
4034 */
4035void
4036dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
4037{
4038	mutex_enter(&svd->vdev_scan_io_queue_lock);
4039	mutex_enter(&tvd->vdev_scan_io_queue_lock);
4040
4041	VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
4042	tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
4043	svd->vdev_scan_io_queue = NULL;
4044	if (tvd->vdev_scan_io_queue != NULL)
4045		tvd->vdev_scan_io_queue->q_vd = tvd;
4046
4047	mutex_exit(&tvd->vdev_scan_io_queue_lock);
4048	mutex_exit(&svd->vdev_scan_io_queue_lock);
4049}
4050
4051static void
4052scan_io_queues_destroy(dsl_scan_t *scn)
4053{
4054	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
4055
4056	for (uint64_t i = 0; i < rvd->vdev_children; i++) {
4057		vdev_t *tvd = rvd->vdev_child[i];
4058
4059		mutex_enter(&tvd->vdev_scan_io_queue_lock);
4060		if (tvd->vdev_scan_io_queue != NULL)
4061			dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
4062		tvd->vdev_scan_io_queue = NULL;
4063		mutex_exit(&tvd->vdev_scan_io_queue_lock);
4064	}
4065}
4066
4067static void
4068dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
4069{
4070	dsl_pool_t *dp = spa->spa_dsl_pool;
4071	dsl_scan_t *scn = dp->dp_scan;
4072	vdev_t *vdev;
4073	kmutex_t *q_lock;
4074	dsl_scan_io_queue_t *queue;
4075	scan_io_t *srch_sio, *sio;
4076	avl_index_t idx;
4077	uint64_t start, size;
4078
4079	vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
4080	ASSERT(vdev != NULL);
4081	q_lock = &vdev->vdev_scan_io_queue_lock;
4082	queue = vdev->vdev_scan_io_queue;
4083
4084	mutex_enter(q_lock);
4085	if (queue == NULL) {
4086		mutex_exit(q_lock);
4087		return;
4088	}
4089
4090	srch_sio = sio_alloc(BP_GET_NDVAS(bp));
4091	bp2sio(bp, srch_sio, dva_i);
4092	start = SIO_GET_OFFSET(srch_sio);
4093	size = SIO_GET_ASIZE(srch_sio);
4094
4095	/*
4096	 * We can find the zio in two states:
4097	 * 1) Cold, just sitting in the queue of zio's to be issued at
4098	 *	some point in the future. In this case, all we do is
4099	 *	remove the zio from the q_sios_by_addr tree, decrement
4100	 *	its data volume from the containing range_seg_t and
4101	 *	resort the q_exts_by_size tree to reflect that the
4102	 *	range_seg_t has lost some of its 'fill'. We don't shorten
4103	 *	the range_seg_t - this is usually rare enough not to be
4104 *	worth the extra hassle of trying to keep track of precise
4105	 *	extent boundaries.
4106	 * 2) Hot, where the zio is currently in-flight in
4107	 *	dsl_scan_issue_ios. In this case, we can't simply
4108	 *	reach in and stop the in-flight zio's, so we instead
4109	 *	block the caller. Eventually, dsl_scan_issue_ios will
4110	 *	be done with issuing the zio's it gathered and will
4111	 *	signal us.
4112	 */
4113	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
4114	sio_free(srch_sio);
4115
4116	if (sio != NULL) {
4117		int64_t asize = SIO_GET_ASIZE(sio);
4118		blkptr_t tmpbp;
4119
4120		/* Got it while it was cold in the queue */
4121		ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
4122		ASSERT3U(size, ==, asize);
4123		avl_remove(&queue->q_sios_by_addr, sio);
4124		queue->q_sio_memused -= SIO_GET_MUSED(sio);
4125
4126		ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
4127		range_tree_remove_fill(queue->q_exts_by_addr, start, size);
4128
4129		/*
4130		 * We only update scn_bytes_pending in the cold path,
4131		 * otherwise it will already have been accounted for as
4132		 * part of the zio's execution.
4133		 */
4134		atomic_add_64(&scn->scn_bytes_pending, -asize);
4135
4136		/* count the block as though we issued it */
4137		sio2bp(sio, &tmpbp);
4138		count_block(scn, dp->dp_blkstats, &tmpbp);
4139
4140		sio_free(sio);
4141	}
4142	mutex_exit(q_lock);
4143}
4144
4145/*
4146 * Callback invoked when a zio_free() zio is executing. This needs to be
4147 * intercepted to prevent a portion of disk space from being freed and
4148 * then reallocated and overwritten while we still have the original
4149 * block queued up for processing.
4150 */
4151void
4152dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
4153{
4154	dsl_pool_t *dp = spa->spa_dsl_pool;
4155	dsl_scan_t *scn = dp->dp_scan;
4156
4157	ASSERT(!BP_IS_EMBEDDED(bp));
4158	ASSERT(scn != NULL);
4159	if (!dsl_scan_is_running(scn))
4160		return;
4161
4162	for (int i = 0; i < BP_GET_NDVAS(bp); i++)
4163		dsl_scan_freed_dva(spa, bp, i);
4164}
4165