xref: /illumos-gate/usr/src/uts/common/fs/zfs/zio.c (revision 94c2d0eb22e9624151ee84a7edbf7178e1bf4087)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
24  * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  */
27 
28 #include <sys/sysmacros.h>
29 #include <sys/zfs_context.h>
30 #include <sys/fm/fs/zfs.h>
31 #include <sys/spa.h>
32 #include <sys/txg.h>
33 #include <sys/spa_impl.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/zio_impl.h>
36 #include <sys/zio_compress.h>
37 #include <sys/zio_checksum.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/arc.h>
40 #include <sys/ddt.h>
41 #include <sys/blkptr.h>
42 #include <sys/zfeature.h>
43 #include <sys/metaslab_impl.h>
44 
45 /*
46  * ==========================================================================
47  * I/O type descriptions
48  * ==========================================================================
49  */
50 const char *zio_type_name[ZIO_TYPES] = {
51 	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
52 	"zio_ioctl"
53 };
54 
55 boolean_t zio_dva_throttle_enabled = B_TRUE;
56 
57 /*
58  * ==========================================================================
59  * I/O kmem caches
60  * ==========================================================================
61  */
62 kmem_cache_t *zio_cache;
63 kmem_cache_t *zio_link_cache;
64 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
65 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
66 
67 #ifdef _KERNEL
68 extern vmem_t *zio_alloc_arena;
69 #endif
70 
71 #define	ZIO_PIPELINE_CONTINUE		0x100
72 #define	ZIO_PIPELINE_STOP		0x101
73 
74 #define	BP_SPANB(indblkshift, level) \
75 	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
76 #define	COMPARE_META_LEVEL	0x80000000ul
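
/*
 * For example, with 128K indirect blocks (indblkshift of 17) and
 * SPA_BLKPTRSHIFT of 7, a level-1 block pointer spans
 * BP_SPANB(17, 1) = 1 << 10 = 1024 level-0 blocks.
 */
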
77 /*
78  * The following actions directly affect the spa's sync-to-convergence logic.
79  * The values below define the sync pass when we start performing the action.
80  * Care should be taken when changing these values as they directly impact
81  * spa_sync() performance. Tuning these values may introduce subtle performance
82  * pathologies and should only be done in the context of performance analysis.
83  * These tunables will eventually be removed and replaced with #defines once
84  * enough analysis has been done to determine optimal values.
85  *
86  * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
87  * regular blocks are not deferred.
88  */
89 int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
90 int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
91 int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
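
/*
 * For example, zio_write_compress() below consults the current sync pass
 * and stops compressing once zfs_sync_pass_dont_compress is reached:
 *
 *	pass = spa_sync_pass(spa);
 *	if (pass >= zfs_sync_pass_dont_compress)
 *		compress = ZIO_COMPRESS_OFF;
 */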
92 
93 /*
94  * An allocating zio is one that either currently has the DVA allocate
95  * stage set or will have it later in its lifetime.
96  */
97 #define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
98 
99 boolean_t	zio_requeue_io_start_cut_in_line = B_TRUE;
100 
101 #ifdef ZFS_DEBUG
102 int zio_buf_debug_limit = 16384;
103 #else
104 int zio_buf_debug_limit = 0;
105 #endif
106 
107 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
108 
109 void
110 zio_init(void)
111 {
112 	size_t c;
113 	vmem_t *data_alloc_arena = NULL;
114 
115 #ifdef _KERNEL
116 	data_alloc_arena = zio_alloc_arena;
117 #endif
118 	zio_cache = kmem_cache_create("zio_cache",
119 	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
120 	zio_link_cache = kmem_cache_create("zio_link_cache",
121 	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
122 
123 	/*
124 	 * For small buffers, we want a cache for each multiple of
125 	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
126 	 * for each quarter-power of 2.
127 	 */
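	/*
	 * For example, with the usual 512-byte SPA_MINBLOCKSIZE this creates
	 * a cache for every multiple of 512 up to 2048, but above that only
	 * for sizes aligned to a quarter-power-of-2; a 4608-byte request is
	 * then satisfied from the next larger cache (zio_buf_5120) via the
	 * fall-through fixup loop at the end of this function.
	 */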
128 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
129 		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
130 		size_t p2 = size;
131 		size_t align = 0;
132 		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;
133 
134 		while (!ISP2(p2))
135 			p2 &= p2 - 1;
136 
137 #ifndef _KERNEL
138 		/*
139 		 * If we are using watchpoints, put each buffer on its own page,
140 		 * to eliminate the performance overhead of trapping to the
141 		 * kernel when modifying a non-watched buffer that shares the
142 		 * page with a watched buffer.
143 		 */
144 		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
145 			continue;
146 #endif
147 		if (size <= 4 * SPA_MINBLOCKSIZE) {
148 			align = SPA_MINBLOCKSIZE;
149 		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
150 			align = MIN(p2 >> 2, PAGESIZE);
151 		}
152 
153 		if (align != 0) {
154 			char name[36];
155 			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
156 			zio_buf_cache[c] = kmem_cache_create(name, size,
157 			    align, NULL, NULL, NULL, NULL, NULL, cflags);
158 
159 			/*
160 			 * Since zio_data bufs do not appear in crash dumps, we
161 			 * pass KMC_NOTOUCH so that no allocator metadata is
162 			 * stored with the buffers.
163 			 */
164 			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
165 			zio_data_buf_cache[c] = kmem_cache_create(name, size,
166 			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
167 			    cflags | KMC_NOTOUCH);
168 		}
169 	}
170 
171 	while (--c != 0) {
172 		ASSERT(zio_buf_cache[c] != NULL);
173 		if (zio_buf_cache[c - 1] == NULL)
174 			zio_buf_cache[c - 1] = zio_buf_cache[c];
175 
176 		ASSERT(zio_data_buf_cache[c] != NULL);
177 		if (zio_data_buf_cache[c - 1] == NULL)
178 			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
179 	}
180 
181 	zio_inject_init();
182 }
183 
184 void
185 zio_fini(void)
186 {
187 	size_t c;
188 	kmem_cache_t *last_cache = NULL;
189 	kmem_cache_t *last_data_cache = NULL;
190 
191 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
192 		if (zio_buf_cache[c] != last_cache) {
193 			last_cache = zio_buf_cache[c];
194 			kmem_cache_destroy(zio_buf_cache[c]);
195 		}
196 		zio_buf_cache[c] = NULL;
197 
198 		if (zio_data_buf_cache[c] != last_data_cache) {
199 			last_data_cache = zio_data_buf_cache[c];
200 			kmem_cache_destroy(zio_data_buf_cache[c]);
201 		}
202 		zio_data_buf_cache[c] = NULL;
203 	}
204 
205 	kmem_cache_destroy(zio_link_cache);
206 	kmem_cache_destroy(zio_cache);
207 
208 	zio_inject_fini();
209 }
210 
211 /*
212  * ==========================================================================
213  * Allocate and free I/O buffers
214  * ==========================================================================
215  */
216 
217 /*
218  * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
219  * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
220  * useful to inspect ZFS metadata, but if possible, we should avoid keeping
221  * excess / transient data in-core during a crashdump.
222  */
223 void *
224 zio_buf_alloc(size_t size)
225 {
226 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
227 
228 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
229 
230 	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
231 }
232 
233 /*
234  * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
235  * crashdump if the kernel panics.  This exists to limit the amount of
236  * ZFS data that shows up in a kernel crashdump, and thus the amount of
237  * kernel heap dumped to disk when the kernel panics.
238  */
239 void *
240 zio_data_buf_alloc(size_t size)
241 {
242 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
243 
244 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
245 
246 	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
247 }
248 
249 void
250 zio_buf_free(void *buf, size_t size)
251 {
252 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
253 
254 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
255 
256 	kmem_cache_free(zio_buf_cache[c], buf);
257 }
258 
259 void
260 zio_data_buf_free(void *buf, size_t size)
261 {
262 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
263 
264 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
265 
266 	kmem_cache_free(zio_data_buf_cache[c], buf);
267 }
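
/*
 * Example (a minimal sketch): callers must pass the same size to the free
 * routine that they passed to the matching alloc:
 *
 *	void *cbuf = zio_buf_alloc(lsize);
 *	...
 *	zio_buf_free(cbuf, lsize);
 *
 * The cache index is (size - 1) >> SPA_MINBLOCKSHIFT, so with 512-byte
 * SPA_MINBLOCKSIZE a 4K request maps to index 7 (the zio_buf_4096 cache).
 */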
268 
269 /*
270  * ==========================================================================
271  * Push and pop I/O transform buffers
272  * ==========================================================================
273  */
274 void
275 zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
276     zio_transform_func_t *transform)
277 {
278 	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
279 
280 	zt->zt_orig_data = zio->io_data;
281 	zt->zt_orig_size = zio->io_size;
282 	zt->zt_bufsize = bufsize;
283 	zt->zt_transform = transform;
284 
285 	zt->zt_next = zio->io_transform_stack;
286 	zio->io_transform_stack = zt;
287 
288 	zio->io_data = data;
289 	zio->io_size = size;
290 }
291 
292 void
293 zio_pop_transforms(zio_t *zio)
294 {
295 	zio_transform_t *zt;
296 
297 	while ((zt = zio->io_transform_stack) != NULL) {
298 		if (zt->zt_transform != NULL)
299 			zt->zt_transform(zio,
300 			    zt->zt_orig_data, zt->zt_orig_size);
301 
302 		if (zt->zt_bufsize != 0)
303 			zio_buf_free(zio->io_data, zt->zt_bufsize);
304 
305 		zio->io_data = zt->zt_orig_data;
306 		zio->io_size = zt->zt_orig_size;
307 		zio->io_transform_stack = zt->zt_next;
308 
309 		kmem_free(zt, sizeof (zio_transform_t));
310 	}
311 }
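
/*
 * For example, zio_read_bp_init() below pushes a decompression transform
 * for compressed logical reads:
 *
 *	void *cbuf = zio_buf_alloc(psize);
 *	zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
 *
 * zio_pop_transforms() then applies zio_decompress() to restore the
 * caller's view of the data once the physical read completes.
 */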
312 
313 /*
314  * ==========================================================================
315  * I/O transform callbacks for subblocks and decompression
316  * ==========================================================================
317  */
318 static void
319 zio_subblock(zio_t *zio, void *data, uint64_t size)
320 {
321 	ASSERT(zio->io_size > size);
322 
323 	if (zio->io_type == ZIO_TYPE_READ)
324 		bcopy(zio->io_data, data, size);
325 }
326 
327 static void
328 zio_decompress(zio_t *zio, void *data, uint64_t size)
329 {
330 	if (zio->io_error == 0 &&
331 	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
332 	    zio->io_data, data, zio->io_size, size) != 0)
333 		zio->io_error = SET_ERROR(EIO);
334 }
335 
336 /*
337  * ==========================================================================
338  * I/O parent/child relationships and pipeline interlocks
339  * ==========================================================================
340  */
341 zio_t *
342 zio_walk_parents(zio_t *cio, zio_link_t **zl)
343 {
344 	list_t *pl = &cio->io_parent_list;
345 
346 	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
347 	if (*zl == NULL)
348 		return (NULL);
349 
350 	ASSERT((*zl)->zl_child == cio);
351 	return ((*zl)->zl_parent);
352 }
353 
354 zio_t *
355 zio_walk_children(zio_t *pio, zio_link_t **zl)
356 {
357 	list_t *cl = &pio->io_child_list;
358 
359 	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
360 	if (*zl == NULL)
361 		return (NULL);
362 
363 	ASSERT((*zl)->zl_parent == pio);
364 	return ((*zl)->zl_child);
365 }
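
/*
 * A typical traversal, as in zio_reexecute() below, starts with a NULL
 * cursor and calls the walker until it returns NULL:
 *
 *	zio_link_t *zl = NULL;
 *	for (zio_t *cio = zio_walk_children(pio, &zl); cio != NULL;
 *	    cio = zio_walk_children(pio, &zl))
 *		...
 */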
366 
367 zio_t *
368 zio_unique_parent(zio_t *cio)
369 {
370 	zio_link_t *zl = NULL;
371 	zio_t *pio = zio_walk_parents(cio, &zl);
372 
373 	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
374 	return (pio);
375 }
376 
377 void
378 zio_add_child(zio_t *pio, zio_t *cio)
379 {
380 	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
381 
382 	/*
383 	 * Logical I/Os can have logical, gang, or vdev children.
384 	 * Gang I/Os can have gang or vdev children.
385 	 * Vdev I/Os can only have vdev children.
386 	 * The following ASSERT captures all of these constraints.
387 	 */
388 	ASSERT(cio->io_child_type <= pio->io_child_type);
389 
390 	zl->zl_parent = pio;
391 	zl->zl_child = cio;
392 
393 	mutex_enter(&cio->io_lock);
394 	mutex_enter(&pio->io_lock);
395 
396 	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
397 
398 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
399 		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
400 
401 	list_insert_head(&pio->io_child_list, zl);
402 	list_insert_head(&cio->io_parent_list, zl);
403 
404 	pio->io_child_count++;
405 	cio->io_parent_count++;
406 
407 	mutex_exit(&pio->io_lock);
408 	mutex_exit(&cio->io_lock);
409 }
410 
411 static void
412 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
413 {
414 	ASSERT(zl->zl_parent == pio);
415 	ASSERT(zl->zl_child == cio);
416 
417 	mutex_enter(&cio->io_lock);
418 	mutex_enter(&pio->io_lock);
419 
420 	list_remove(&pio->io_child_list, zl);
421 	list_remove(&cio->io_parent_list, zl);
422 
423 	pio->io_child_count--;
424 	cio->io_parent_count--;
425 
426 	mutex_exit(&pio->io_lock);
427 	mutex_exit(&cio->io_lock);
428 
429 	kmem_cache_free(zio_link_cache, zl);
430 }
431 
432 static boolean_t
433 zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
434 {
435 	uint64_t *countp = &zio->io_children[child][wait];
436 	boolean_t waiting = B_FALSE;
437 
438 	mutex_enter(&zio->io_lock);
439 	ASSERT(zio->io_stall == NULL);
440 	if (*countp != 0) {
441 		zio->io_stage >>= 1;
442 		ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
443 		zio->io_stall = countp;
444 		waiting = B_TRUE;
445 	}
446 	mutex_exit(&zio->io_lock);
447 
448 	return (waiting);
449 }
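
/*
 * Note the io_stage >>= 1 above: when the last child completes,
 * zio_notify_parent() redispatches the parent, and zio_execute()
 * advances io_stage by one bit again, so the stalled stage is simply
 * re-entered from the top.
 */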
450 
451 static void
452 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
453 {
454 	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
455 	int *errorp = &pio->io_child_error[zio->io_child_type];
456 
457 	mutex_enter(&pio->io_lock);
458 	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
459 		*errorp = zio_worst_error(*errorp, zio->io_error);
460 	pio->io_reexecute |= zio->io_reexecute;
461 	ASSERT3U(*countp, >, 0);
462 
463 	(*countp)--;
464 
465 	if (*countp == 0 && pio->io_stall == countp) {
466 		zio_taskq_type_t type =
467 		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
468 		    ZIO_TASKQ_INTERRUPT;
469 		pio->io_stall = NULL;
470 		mutex_exit(&pio->io_lock);
471 		/*
472 		 * Dispatch the parent zio in its own taskq so that
473 		 * the child can continue to make progress. This also
474 		 * prevents overflowing the stack when we have deeply nested
475 		 * parent-child relationships.
476 		 */
477 		zio_taskq_dispatch(pio, type, B_FALSE);
478 	} else {
479 		mutex_exit(&pio->io_lock);
480 	}
481 }
482 
483 static void
484 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
485 {
486 	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
487 		zio->io_error = zio->io_child_error[c];
488 }
489 
490 int
491 zio_bookmark_compare(const void *x1, const void *x2)
492 {
493 	const zio_t *z1 = x1;
494 	const zio_t *z2 = x2;
495 
496 	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
497 		return (-1);
498 	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
499 		return (1);
500 
501 	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
502 		return (-1);
503 	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
504 		return (1);
505 
506 	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
507 		return (-1);
508 	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
509 		return (1);
510 
511 	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
512 		return (-1);
513 	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
514 		return (1);
515 
516 	if (z1 < z2)
517 		return (-1);
518 	if (z1 > z2)
519 		return (1);
520 
521 	return (0);
522 }
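
/*
 * Because ties are broken by the zio pointers themselves, this comparator
 * yields a total order, which makes it usable as an AVL tree comparator
 * (a sketch; "alloc_tree" is a hypothetical tree used for illustration):
 *
 *	avl_create(&alloc_tree, zio_bookmark_compare,
 *	    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
 */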
523 
524 /*
525  * ==========================================================================
526  * Create the various types of I/O (read, write, free, etc)
527  * ==========================================================================
528  */
529 static zio_t *
530 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
531     void *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
532     void *private, zio_type_t type, zio_priority_t priority,
533     enum zio_flag flags, vdev_t *vd, uint64_t offset,
534     const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
535 {
536 	zio_t *zio;
537 
538 	ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE);
539 	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
540 	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
541 
542 	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
543 	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
544 	ASSERT(vd || stage == ZIO_STAGE_OPEN);
545 
546 	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW) != 0);
547 
548 	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
549 	bzero(zio, sizeof (zio_t));
550 
551 	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
552 	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
553 
554 	list_create(&zio->io_parent_list, sizeof (zio_link_t),
555 	    offsetof(zio_link_t, zl_parent_node));
556 	list_create(&zio->io_child_list, sizeof (zio_link_t),
557 	    offsetof(zio_link_t, zl_child_node));
558 	metaslab_trace_init(&zio->io_alloc_list);
559 
560 	if (vd != NULL)
561 		zio->io_child_type = ZIO_CHILD_VDEV;
562 	else if (flags & ZIO_FLAG_GANG_CHILD)
563 		zio->io_child_type = ZIO_CHILD_GANG;
564 	else if (flags & ZIO_FLAG_DDT_CHILD)
565 		zio->io_child_type = ZIO_CHILD_DDT;
566 	else
567 		zio->io_child_type = ZIO_CHILD_LOGICAL;
568 
569 	if (bp != NULL) {
570 		zio->io_bp = (blkptr_t *)bp;
571 		zio->io_bp_copy = *bp;
572 		zio->io_bp_orig = *bp;
573 		if (type != ZIO_TYPE_WRITE ||
574 		    zio->io_child_type == ZIO_CHILD_DDT)
575 			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
576 		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
577 			zio->io_logical = zio;
578 		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
579 			pipeline |= ZIO_GANG_STAGES;
580 	}
581 
582 	zio->io_spa = spa;
583 	zio->io_txg = txg;
584 	zio->io_done = done;
585 	zio->io_private = private;
586 	zio->io_type = type;
587 	zio->io_priority = priority;
588 	zio->io_vd = vd;
589 	zio->io_offset = offset;
590 	zio->io_orig_data = zio->io_data = data;
591 	zio->io_orig_size = zio->io_size = psize;
592 	zio->io_lsize = lsize;
593 	zio->io_orig_flags = zio->io_flags = flags;
594 	zio->io_orig_stage = zio->io_stage = stage;
595 	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
596 	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
597 
598 	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
599 	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
600 
601 	if (zb != NULL)
602 		zio->io_bookmark = *zb;
603 
604 	if (pio != NULL) {
605 		if (zio->io_logical == NULL)
606 			zio->io_logical = pio->io_logical;
607 		if (zio->io_child_type == ZIO_CHILD_GANG)
608 			zio->io_gang_leader = pio->io_gang_leader;
609 		zio_add_child(pio, zio);
610 	}
611 
612 	return (zio);
613 }
614 
615 static void
616 zio_destroy(zio_t *zio)
617 {
618 	metaslab_trace_fini(&zio->io_alloc_list);
619 	list_destroy(&zio->io_parent_list);
620 	list_destroy(&zio->io_child_list);
621 	mutex_destroy(&zio->io_lock);
622 	cv_destroy(&zio->io_cv);
623 	kmem_cache_free(zio_cache, zio);
624 }
625 
626 zio_t *
627 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
628     void *private, enum zio_flag flags)
629 {
630 	zio_t *zio;
631 
632 	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
633 	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
634 	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
635 
636 	return (zio);
637 }
638 
639 zio_t *
640 zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
641 {
642 	return (zio_null(NULL, spa, NULL, done, private, flags));
643 }
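
/*
 * Example (a minimal sketch): a root zio is the usual way to gather a
 * group of asynchronous child I/Os and wait for them collectively:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, buf, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *	error = zio_wait(rio);
 */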
644 
645 void
646 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
647 {
648 	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
649 		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
650 		    bp, (longlong_t)BP_GET_TYPE(bp));
651 	}
652 	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
653 	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
654 		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
655 		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
656 	}
657 	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
658 	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
659 		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
660 		    bp, (longlong_t)BP_GET_COMPRESS(bp));
661 	}
662 	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
663 		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
664 		    bp, (longlong_t)BP_GET_LSIZE(bp));
665 	}
666 	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
667 		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
668 		    bp, (longlong_t)BP_GET_PSIZE(bp));
669 	}
670 
671 	if (BP_IS_EMBEDDED(bp)) {
672 		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
673 			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
674 			    bp, (longlong_t)BPE_GET_ETYPE(bp));
675 		}
676 	}
677 
678 	/*
679 	 * Pool-specific checks.
680 	 *
681 	 * Note: it would be nice to verify that the blk_birth and
682 	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
683 	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
684 	 * that are in the log) to be arbitrarily large.
685 	 */
686 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
687 		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
688 		if (vdevid >= spa->spa_root_vdev->vdev_children) {
689 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
690 			    "VDEV %llu",
691 			    bp, i, (longlong_t)vdevid);
692 			continue;
693 		}
694 		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
695 		if (vd == NULL) {
696 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
697 			    "VDEV %llu",
698 			    bp, i, (longlong_t)vdevid);
699 			continue;
700 		}
701 		if (vd->vdev_ops == &vdev_hole_ops) {
702 			zfs_panic_recover("blkptr at %p DVA %u has hole "
703 			    "VDEV %llu",
704 			    bp, i, (longlong_t)vdevid);
705 			continue;
706 		}
707 		if (vd->vdev_ops == &vdev_missing_ops) {
708 			/*
709 			 * "missing" vdevs are valid during import, but we
710 			 * don't have their detailed info (e.g. asize), so
711 			 * we can't perform any more checks on them.
712 			 */
713 			continue;
714 		}
715 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
716 		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
717 		if (BP_IS_GANG(bp))
718 			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
719 		if (offset + asize > vd->vdev_asize) {
720 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
721 			    "OFFSET %llu",
722 			    bp, i, (longlong_t)offset);
723 		}
724 	}
725 }
726 
727 zio_t *
728 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
729     void *data, uint64_t size, zio_done_func_t *done, void *private,
730     zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
731 {
732 	zio_t *zio;
733 
734 	zfs_blkptr_verify(spa, bp);
735 
736 	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
737 	    data, size, size, done, private,
738 	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
739 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
740 	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
741 
742 	return (zio);
743 }
744 
745 zio_t *
746 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
747     void *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
748     zio_done_func_t *ready, zio_done_func_t *children_ready,
749     zio_done_func_t *physdone, zio_done_func_t *done,
750     void *private, zio_priority_t priority, enum zio_flag flags,
751     const zbookmark_phys_t *zb)
752 {
753 	zio_t *zio;
754 
755 	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
756 	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
757 	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
758 	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
759 	    DMU_OT_IS_VALID(zp->zp_type) &&
760 	    zp->zp_level < 32 &&
761 	    zp->zp_copies > 0 &&
762 	    zp->zp_copies <= spa_max_replication(spa));
763 
764 	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
765 	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
766 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
767 	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
768 
769 	zio->io_ready = ready;
770 	zio->io_children_ready = children_ready;
771 	zio->io_physdone = physdone;
772 	zio->io_prop = *zp;
773 
774 	/*
775 	 * Data can be NULL if we are going to call zio_write_override() to
776 	 * provide the already-allocated BP.  But we may need the data to
777 	 * verify a dedup hit (if requested).  In this case, don't try to
778 	 * dedup (just take the already-allocated BP verbatim).
779 	 */
780 	if (data == NULL && zio->io_prop.zp_dedup_verify) {
781 		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
782 	}
783 
784 	return (zio);
785 }
786 
787 zio_t *
788 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
789     uint64_t size, zio_done_func_t *done, void *private,
790     zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
791 {
792 	zio_t *zio;
793 
794 	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
795 	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
796 	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
797 
798 	return (zio);
799 }
800 
801 void
802 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
803 {
804 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
805 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
806 	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
807 	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
808 
809 	/*
810 	 * We must reset the io_prop to match the values that existed
811 	 * when the bp was first written by dmu_sync(), keeping in mind
812 	 * that nopwrite and dedup are mutually exclusive.
813 	 */
814 	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
815 	zio->io_prop.zp_nopwrite = nopwrite;
816 	zio->io_prop.zp_copies = copies;
817 	zio->io_bp_override = bp;
818 }
819 
820 void
821 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
822 {
823 
824 	/*
825 	 * The check for EMBEDDED is a performance optimization.  Embedded
826 	 * BPs have no allocated space, so we process the free here (by
827 	 * ignoring it) rather than putting it on the list for zio_free_sync().
828 	 */
829 	if (BP_IS_EMBEDDED(bp))
830 		return;
831 	metaslab_check_free(spa, bp);
832 
833 	/*
834 	 * Frees that are for the currently-syncing txg, that are not going to
835 	 * be deferred, and that will not need to do a read (i.e. not GANG or
836 	 * DEDUP), can be processed immediately.  Otherwise, put them on the
837 	 * in-memory list for later processing.
838 	 */
839 	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
840 	    txg != spa->spa_syncing_txg ||
841 	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
842 		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
843 	} else {
844 		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
845 	}
846 }
847 
848 zio_t *
849 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
850     enum zio_flag flags)
851 {
852 	zio_t *zio;
853 	enum zio_stage stage = ZIO_FREE_PIPELINE;
854 
855 	ASSERT(!BP_IS_HOLE(bp));
856 	ASSERT(spa_syncing_txg(spa) == txg);
857 	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);
858 
859 	if (BP_IS_EMBEDDED(bp))
860 		return (zio_null(pio, spa, NULL, NULL, NULL, 0));
861 
862 	metaslab_check_free(spa, bp);
863 	arc_freed(spa, bp);
864 
865 	/*
866 	 * GANG and DEDUP blocks can induce a read (for the gang block header,
867 	 * or the DDT), so issue them asynchronously so that this thread is
868 	 * not tied up.
869 	 */
870 	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
871 		stage |= ZIO_STAGE_ISSUE_ASYNC;
872 
873 	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
874 	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
875 	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);
876 
877 	return (zio);
878 }
879 
880 zio_t *
881 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
882     zio_done_func_t *done, void *private, enum zio_flag flags)
883 {
884 	zio_t *zio;
885 
886 	dprintf_bp(bp, "claiming in txg %llu", txg);
887 
888 	if (BP_IS_EMBEDDED(bp))
889 		return (zio_null(pio, spa, NULL, NULL, NULL, 0));
890 
891 	/*
892 	 * A claim is an allocation of a specific block.  Claims are needed
893 	 * to support immediate writes in the intent log.  The issue is that
894 	 * immediate writes contain committed data, but in a txg that was
895 	 * *not* committed.  Upon opening the pool after an unclean shutdown,
896 	 * the intent log claims all blocks that contain immediate write data
897 	 * so that the SPA knows they're in use.
898 	 *
899 	 * All claims *must* be resolved in the first txg -- before the SPA
900 	 * starts allocating blocks -- so that nothing is allocated twice.
901 	 * If txg == 0 we just verify that the block is claimable.
902 	 */
903 	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
904 	ASSERT(txg == spa_first_txg(spa) || txg == 0);
905 	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */
906 
907 	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
908 	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
909 	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
910 	ASSERT0(zio->io_queued_timestamp);
911 
912 	return (zio);
913 }
914 
915 zio_t *
916 zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
917     zio_done_func_t *done, void *private, enum zio_flag flags)
918 {
919 	zio_t *zio;
920 	int c;
921 
922 	if (vd->vdev_children == 0) {
923 		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
924 		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
925 		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
926 
927 		zio->io_cmd = cmd;
928 	} else {
929 		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
930 
931 		for (c = 0; c < vd->vdev_children; c++)
932 			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
933 			    done, private, flags));
934 	}
935 
936 	return (zio);
937 }
938 
939 zio_t *
940 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
941     void *data, int checksum, zio_done_func_t *done, void *private,
942     zio_priority_t priority, enum zio_flag flags, boolean_t labels)
943 {
944 	zio_t *zio;
945 
946 	ASSERT(vd->vdev_children == 0);
947 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
948 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
949 	ASSERT3U(offset + size, <=, vd->vdev_psize);
950 
951 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
952 	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
953 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
954 
955 	zio->io_prop.zp_checksum = checksum;
956 
957 	return (zio);
958 }
959 
960 zio_t *
961 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
962     void *data, int checksum, zio_done_func_t *done, void *private,
963     zio_priority_t priority, enum zio_flag flags, boolean_t labels)
964 {
965 	zio_t *zio;
966 
967 	ASSERT(vd->vdev_children == 0);
968 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
969 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
970 	ASSERT3U(offset + size, <=, vd->vdev_psize);
971 
972 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
973 	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
974 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
975 
976 	zio->io_prop.zp_checksum = checksum;
977 
978 	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
979 		/*
980 		 * zec checksums are necessarily destructive -- they modify
981 		 * the end of the write buffer to hold the verifier/checksum.
982 		 * Therefore, we must make a local copy in case the data is
983 		 * being written to multiple places in parallel.
984 		 */
985 		void *wbuf = zio_buf_alloc(size);
986 		bcopy(data, wbuf, size);
987 		zio_push_transform(zio, wbuf, size, size, NULL);
988 	}
989 
990 	return (zio);
991 }
992 
993 /*
994  * Create a child I/O to do some work for us.
995  */
996 zio_t *
997 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
998     void *data, uint64_t size, int type, zio_priority_t priority,
999     enum zio_flag flags, zio_done_func_t *done, void *private)
1000 {
1001 	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1002 	zio_t *zio;
1003 
1004 	ASSERT(vd->vdev_parent ==
1005 	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));
1006 
1007 	if (type == ZIO_TYPE_READ && bp != NULL) {
1008 		/*
1009 		 * If we have the bp, then the child should perform the
1010 		 * checksum and the parent need not.  This pushes error
1011 		 * detection as close to the leaves as possible and
1012 		 * eliminates redundant checksums in the interior nodes.
1013 		 */
1014 		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1015 		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
1016 	}
1017 
1018 	if (vd->vdev_children == 0)
1019 		offset += VDEV_LABEL_START_SIZE;
1020 
1021 	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;
1022 
1023 	/*
1024 	 * If we've decided to do a repair, the write is not speculative --
1025 	 * even if the original read was.
1026 	 */
1027 	if (flags & ZIO_FLAG_IO_REPAIR)
1028 		flags &= ~ZIO_FLAG_SPECULATIVE;
1029 
1030 	/*
1031 	 * If we're creating a child I/O that is not associated with a
1032 	 * top-level vdev, then the child zio is not an allocating I/O.
1033 	 * If this is a retried I/O then we ignore it since we will
1034 	 * have already processed the original allocating I/O.
1035 	 */
1036 	if (flags & ZIO_FLAG_IO_ALLOCATING &&
1037 	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
1038 		metaslab_class_t *mc = spa_normal_class(pio->io_spa);
1039 
1040 		ASSERT(mc->mc_alloc_throttle_enabled);
1041 		ASSERT(type == ZIO_TYPE_WRITE);
1042 		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1043 		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1044 		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1045 		    pio->io_child_type == ZIO_CHILD_GANG);
1046 
1047 		flags &= ~ZIO_FLAG_IO_ALLOCATING;
1048 	}
1049 
1050 	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1051 	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1052 	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1053 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1054 
1055 	zio->io_physdone = pio->io_physdone;
1056 	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
1057 		zio->io_logical->io_phys_children++;
1058 
1059 	return (zio);
1060 }
1061 
1062 zio_t *
1063 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
1064     int type, zio_priority_t priority, enum zio_flag flags,
1065     zio_done_func_t *done, void *private)
1066 {
1067 	zio_t *zio;
1068 
1069 	ASSERT(vd->vdev_ops->vdev_op_leaf);
1070 
1071 	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1072 	    data, size, size, done, private, type, priority,
1073 	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1074 	    vd, offset, NULL,
1075 	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1076 
1077 	return (zio);
1078 }
1079 
1080 void
1081 zio_flush(zio_t *zio, vdev_t *vd)
1082 {
1083 	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
1084 	    NULL, NULL,
1085 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
1086 }
1087 
1088 void
1089 zio_shrink(zio_t *zio, uint64_t size)
1090 {
1091 	ASSERT(zio->io_executor == NULL);
1092 	ASSERT(zio->io_orig_size == zio->io_size);
1093 	ASSERT(size <= zio->io_size);
1094 
1095 	/*
1096 	 * We don't shrink for raidz because of problems with the
1097 	 * reconstruction when reading back less than the block size.
1098 	 * Note, BP_IS_RAIDZ() assumes no compression.
1099 	 */
1100 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1101 	if (!BP_IS_RAIDZ(zio->io_bp)) {
1102 		/* we are not doing a raw write */
1103 		ASSERT3U(zio->io_size, ==, zio->io_lsize);
1104 		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1105 	}
1106 }
1107 
1108 /*
1109  * ==========================================================================
1110  * Prepare to read and write logical blocks
1111  * ==========================================================================
1112  */
1113 
1114 static int
1115 zio_read_bp_init(zio_t *zio)
1116 {
1117 	blkptr_t *bp = zio->io_bp;
1118 
1119 	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1120 	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
1121 	    !(zio->io_flags & ZIO_FLAG_RAW)) {
1122 		uint64_t psize =
1123 		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1124 		void *cbuf = zio_buf_alloc(psize);
1125 
1126 		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
1127 	}
1128 
1129 	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1130 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1131 		decode_embedded_bp_compressed(bp, zio->io_data);
1132 	} else {
1133 		ASSERT(!BP_IS_EMBEDDED(bp));
1134 	}
1135 
1136 	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
1137 		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
1138 
1139 	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
1140 		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
1141 
1142 	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1143 		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1144 
1145 	return (ZIO_PIPELINE_CONTINUE);
1146 }
1147 
1148 static int
1149 zio_write_bp_init(zio_t *zio)
1150 {
1151 	if (!IO_IS_ALLOCATING(zio))
1152 		return (ZIO_PIPELINE_CONTINUE);
1153 
1154 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1155 
1156 	if (zio->io_bp_override) {
1157 		blkptr_t *bp = zio->io_bp;
1158 		zio_prop_t *zp = &zio->io_prop;
1159 
1160 		ASSERT(bp->blk_birth != zio->io_txg);
1161 		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);
1162 
1163 		*bp = *zio->io_bp_override;
1164 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1165 
1166 		if (BP_IS_EMBEDDED(bp))
1167 			return (ZIO_PIPELINE_CONTINUE);
1168 
1169 		/*
1170 		 * If we've been overridden and nopwrite is set then
1171 		 * set the flag accordingly to indicate that a nopwrite
1172 		 * has already occurred.
1173 		 */
1174 		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1175 			ASSERT(!zp->zp_dedup);
1176 			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1177 			zio->io_flags |= ZIO_FLAG_NOPWRITE;
1178 			return (ZIO_PIPELINE_CONTINUE);
1179 		}
1180 
1181 		ASSERT(!zp->zp_nopwrite);
1182 
1183 		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1184 			return (ZIO_PIPELINE_CONTINUE);
1185 
1186 		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1187 		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1188 
1189 		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
1190 			BP_SET_DEDUP(bp, 1);
1191 			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1192 			return (ZIO_PIPELINE_CONTINUE);
1193 		}
1194 
1195 		/*
1196 		 * We were unable to handle this as an override bp, treat
1197 		 * it as a regular write I/O.
1198 		 */
1199 		zio->io_bp_override = NULL;
1200 		*bp = zio->io_bp_orig;
1201 		zio->io_pipeline = zio->io_orig_pipeline;
1202 	}
1203 
1204 	return (ZIO_PIPELINE_CONTINUE);
1205 }
1206 
1207 static int
1208 zio_write_compress(zio_t *zio)
1209 {
1210 	spa_t *spa = zio->io_spa;
1211 	zio_prop_t *zp = &zio->io_prop;
1212 	enum zio_compress compress = zp->zp_compress;
1213 	blkptr_t *bp = zio->io_bp;
1214 	uint64_t lsize = zio->io_lsize;
1215 	uint64_t psize = zio->io_size;
1216 	int pass = 1;
1217 
1218 	EQUIV(lsize != psize, (zio->io_flags & ZIO_FLAG_RAW) != 0);
1219 
1220 	/*
1221 	 * If our children haven't all reached the ready stage,
1222 	 * wait for them and then repeat this pipeline stage.
1223 	 */
1224 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
1225 	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
1226 		return (ZIO_PIPELINE_STOP);
1227 
1228 	if (!IO_IS_ALLOCATING(zio))
1229 		return (ZIO_PIPELINE_CONTINUE);
1230 
1231 	if (zio->io_children_ready != NULL) {
1232 		/*
1233 		 * Now that all our children are ready, run the callback
1234 		 * associated with this zio in case it wants to modify the
1235 		 * data to be written.
1236 		 */
1237 		ASSERT3U(zp->zp_level, >, 0);
1238 		zio->io_children_ready(zio);
1239 	}
1240 
1241 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1242 	ASSERT(zio->io_bp_override == NULL);
1243 
1244 	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
1245 		/*
1246 		 * We're rewriting an existing block, which means we're
1247 		 * working on behalf of spa_sync().  For spa_sync() to
1248 		 * converge, it must eventually be the case that we don't
1249 		 * have to allocate new blocks.  But compression changes
1250 		 * the blocksize, which forces a reallocate, and makes
1251 		 * convergence take longer.  Therefore, after the first
1252 		 * few passes, stop compressing to ensure convergence.
1253 		 */
1254 		pass = spa_sync_pass(spa);
1255 
1256 		ASSERT(zio->io_txg == spa_syncing_txg(spa));
1257 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1258 		ASSERT(!BP_GET_DEDUP(bp));
1259 
1260 		if (pass >= zfs_sync_pass_dont_compress)
1261 			compress = ZIO_COMPRESS_OFF;
1262 
1263 		/* Make sure someone doesn't change their mind on overwrites */
1264 		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
1265 		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
1266 	}
1267 
1268 	/* If it's a compressed write that is not raw, compress the buffer. */
1269 	if (compress != ZIO_COMPRESS_OFF && psize == lsize) {
1270 		void *cbuf = zio_buf_alloc(lsize);
1271 		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
1272 		if (psize == 0 || psize == lsize) {
1273 			compress = ZIO_COMPRESS_OFF;
1274 			zio_buf_free(cbuf, lsize);
1275 		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
1276 		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1277 		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1278 			encode_embedded_bp_compressed(bp,
1279 			    cbuf, compress, lsize, psize);
1280 			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1281 			BP_SET_TYPE(bp, zio->io_prop.zp_type);
1282 			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1283 			zio_buf_free(cbuf, lsize);
1284 			bp->blk_birth = zio->io_txg;
1285 			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1286 			ASSERT(spa_feature_is_active(spa,
1287 			    SPA_FEATURE_EMBEDDED_DATA));
1288 			return (ZIO_PIPELINE_CONTINUE);
1289 		} else {
1290 			/*
1291 			 * Round the compressed size up to the ashift
1292 			 * of the smallest-ashift device, and zero the tail.
1293 			 * This ensures that the compressed size of the BP
1294 			 * (and thus the compressratio property) is correct,
1295 			 * in that we charge for the padding used to fill out
1296 			 * the last sector.
1297 			 */
1298 			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
1299 			size_t rounded = (size_t)P2ROUNDUP(psize,
1300 			    1ULL << spa->spa_min_ashift);
1301 			if (rounded >= lsize) {
1302 				compress = ZIO_COMPRESS_OFF;
1303 				zio_buf_free(cbuf, lsize);
1304 				psize = lsize;
1305 			} else {
1306 				bzero((char *)cbuf + psize, rounded - psize);
1307 				psize = rounded;
1308 				zio_push_transform(zio, cbuf,
1309 				    psize, lsize, NULL);
1310 			}
1311 		}
1312 
1313 		/*
1314 		 * We were unable to handle this as an override bp, treat
1315 		 * it as a regular write I/O.
1316 		 */
1317 		zio->io_bp_override = NULL;
1318 		*bp = zio->io_bp_orig;
1319 		zio->io_pipeline = zio->io_orig_pipeline;
1320 	} else {
1321 		ASSERT3U(psize, !=, 0);
1322 	}
1323 
1324 	/*
1325 	 * The final pass of spa_sync() must be all rewrites, but the first
1326 	 * few passes offer a trade-off: allocating blocks defers convergence,
1327 	 * but newly allocated blocks are sequential, so they can be written
1328 	 * to disk faster.  Therefore, we allow the first few passes of
1329 	 * spa_sync() to allocate new blocks, but force rewrites after that.
1330 	 * There should only be a handful of blocks after pass 1 in any case.
1331 	 */
1332 	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
1333 	    BP_GET_PSIZE(bp) == psize &&
1334 	    pass >= zfs_sync_pass_rewrite) {
1335 		ASSERT(psize != 0);
1336 		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
1337 		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
1338 		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
1339 	} else {
1340 		BP_ZERO(bp);
1341 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
1342 	}
1343 
1344 	if (psize == 0) {
1345 		if (zio->io_bp_orig.blk_birth != 0 &&
1346 		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
1347 			BP_SET_LSIZE(bp, lsize);
1348 			BP_SET_TYPE(bp, zp->zp_type);
1349 			BP_SET_LEVEL(bp, zp->zp_level);
1350 			BP_SET_BIRTH(bp, zio->io_txg, 0);
1351 		}
1352 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1353 	} else {
1354 		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
1355 		BP_SET_LSIZE(bp, lsize);
1356 		BP_SET_TYPE(bp, zp->zp_type);
1357 		BP_SET_LEVEL(bp, zp->zp_level);
1358 		BP_SET_PSIZE(bp, psize);
1359 		BP_SET_COMPRESS(bp, compress);
1360 		BP_SET_CHECKSUM(bp, zp->zp_checksum);
1361 		BP_SET_DEDUP(bp, zp->zp_dedup);
1362 		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
1363 		if (zp->zp_dedup) {
1364 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1365 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1366 			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
1367 		}
1368 		if (zp->zp_nopwrite) {
1369 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1370 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1371 			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
1372 		}
1373 	}
1374 	return (ZIO_PIPELINE_CONTINUE);
1375 }
1376 
1377 static int
1378 zio_free_bp_init(zio_t *zio)
1379 {
1380 	blkptr_t *bp = zio->io_bp;
1381 
1382 	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
1383 		if (BP_GET_DEDUP(bp))
1384 			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
1385 	}
1386 
1387 	return (ZIO_PIPELINE_CONTINUE);
1388 }
1389 
1390 /*
1391  * ==========================================================================
1392  * Execute the I/O pipeline
1393  * ==========================================================================
1394  */
1395 
1396 static void
1397 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
1398 {
1399 	spa_t *spa = zio->io_spa;
1400 	zio_type_t t = zio->io_type;
1401 	int flags = (cutinline ? TQ_FRONT : 0);
1402 
1403 	/*
1404 	 * If we're a config writer or a probe, the normal issue and
1405 	 * interrupt threads may all be blocked waiting for the config lock.
1406 	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
1407 	 */
1408 	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
1409 		t = ZIO_TYPE_NULL;
1410 
1411 	/*
1412 	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
1413 	 */
1414 	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
1415 		t = ZIO_TYPE_NULL;
1416 
1417 	/*
1418 	 * If this is a high priority I/O, then use the high priority taskq if
1419 	 * available.
1420 	 */
1421 	if (zio->io_priority == ZIO_PRIORITY_NOW &&
1422 	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
1423 		q++;
1424 
1425 	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
1426 
1427 	/*
1428 	 * NB: We are assuming that the zio can only be dispatched
1429 	 * to a single taskq at a time.  It would be a grievous error
1430 	 * to dispatch the zio to another taskq at the same time.
1431 	 */
1432 	ASSERT(zio->io_tqent.tqent_next == NULL);
1433 	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
1434 	    flags, &zio->io_tqent);
1435 }
1436 
1437 static boolean_t
1438 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
1439 {
1440 	kthread_t *executor = zio->io_executor;
1441 	spa_t *spa = zio->io_spa;
1442 
1443 	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
1444 		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1445 		uint_t i;
1446 		for (i = 0; i < tqs->stqs_count; i++) {
1447 			if (taskq_member(tqs->stqs_taskq[i], executor))
1448 				return (B_TRUE);
1449 		}
1450 	}
1451 
1452 	return (B_FALSE);
1453 }
1454 
1455 static int
1456 zio_issue_async(zio_t *zio)
1457 {
1458 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
1459 
1460 	return (ZIO_PIPELINE_STOP);
1461 }
1462 
1463 void
1464 zio_interrupt(zio_t *zio)
1465 {
1466 	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
1467 }
1468 
1469 void
1470 zio_delay_interrupt(zio_t *zio)
1471 {
1472 	/*
1473 	 * The timeout_generic() function isn't defined in userspace, so
1474 	 * rather than trying to implement the function, the zio delay
1475 	 * functionality has been disabled for userspace builds.
1476 	 */
1477 
1478 #ifdef _KERNEL
1479 	/*
1480 	 * If io_target_timestamp is zero, then no delay has been registered
1481 	 * for this IO, so we skip the delay and issue it directly to the
1482 	 * zio layer.
1483 	 */
1484 	if (zio->io_target_timestamp != 0) {
1485 		hrtime_t now = gethrtime();
1486 
1487 		if (now >= zio->io_target_timestamp) {
1488 			/*
1489 			 * This IO has already taken longer than the target
1490 			 * delay to complete, so we don't want to delay it
1491 			 * any longer; we "miss" the delay and issue it
1492 			 * directly to the zio layer. This is likely due to
1493 			 * the target latency being set to a value less than
1494 			 * the underlying hardware can satisfy (e.g. delay
1495 			 * set to 1ms, but the disks take 10ms to complete an
1496 			 * IO request).
1497 			 */
1498 
1499 			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
1500 			    hrtime_t, now);
1501 
1502 			zio_interrupt(zio);
1503 		} else {
1504 			hrtime_t diff = zio->io_target_timestamp - now;
1505 
1506 			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
1507 			    hrtime_t, now, hrtime_t, diff);
1508 
1509 			(void) timeout_generic(CALLOUT_NORMAL,
1510 			    (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
1511 		}
1512 
1513 		return;
1514 	}
1515 #endif
1516 
1517 	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
1518 	zio_interrupt(zio);
1519 }
1520 
1521 /*
1522  * Execute the I/O pipeline until one of the following occurs:
1523  *
1524  *	(1) the I/O completes
1525  *	(2) the pipeline stalls waiting for dependent child I/Os
1526  *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
1527  *	(4) the I/O is delegated by vdev-level caching or aggregation
1528  *	(5) the I/O is deferred due to vdev-level queueing
1529  *	(6) the I/O is handed off to another thread.
1530  *
1531  * In all cases, the pipeline stops whenever there's no CPU work; it never
1532  * burns a thread in cv_wait().
1533  *
1534  * There's no locking on io_stage because there's no legitimate way
1535  * for multiple threads to be attempting to process the same I/O.
1536  */
1537 static zio_pipe_stage_t *zio_pipeline[];
1538 
1539 void
1540 zio_execute(zio_t *zio)
1541 {
1542 	zio->io_executor = curthread;
1543 
1544 	ASSERT3U(zio->io_queued_timestamp, >, 0);
1545 
1546 	while (zio->io_stage < ZIO_STAGE_DONE) {
1547 		enum zio_stage pipeline = zio->io_pipeline;
1548 		enum zio_stage stage = zio->io_stage;
1549 		int rv;
1550 
1551 		ASSERT(!MUTEX_HELD(&zio->io_lock));
1552 		ASSERT(ISP2(stage));
1553 		ASSERT(zio->io_stall == NULL);
1554 
1555 		do {
1556 			stage <<= 1;
1557 		} while ((stage & pipeline) == 0);
1558 
1559 		ASSERT(stage <= ZIO_STAGE_DONE);
1560 
1561 		/*
1562 		 * If we are in interrupt context and this pipeline stage
1563 		 * will grab a config lock that is held across I/O,
1564 		 * or may wait for an I/O that needs an interrupt thread
1565 		 * to complete, issue async to avoid deadlock.
1566 		 *
1567 		 * For VDEV_IO_START, we cut in line so that the io will
1568 		 * be sent to disk promptly.
1569 		 */
1570 		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
1571 		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
1572 			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
1573 			    zio_requeue_io_start_cut_in_line : B_FALSE;
1574 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
1575 			return;
1576 		}
1577 
1578 		zio->io_stage = stage;
1579 		zio->io_pipeline_trace |= zio->io_stage;
1580 		rv = zio_pipeline[highbit64(stage) - 1](zio);
1581 
1582 		if (rv == ZIO_PIPELINE_STOP)
1583 			return;
1584 
1585 		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
1586 	}
1587 }
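
/*
 * The stage-advance loop above depends on every pipeline stage being a
 * distinct bit: starting from the current io_stage, the candidate stage
 * is shifted left until it lands on the next bit set in io_pipeline,
 * and highbit64(stage) - 1 indexes the handler in zio_pipeline[].
 */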
1588 
1589 /*
1590  * ==========================================================================
1591  * Initiate I/O, either sync or async
1592  * ==========================================================================
1593  */
1594 int
1595 zio_wait(zio_t *zio)
1596 {
1597 	int error;
1598 
1599 	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
1600 	ASSERT(zio->io_executor == NULL);
1601 
1602 	zio->io_waiter = curthread;
1603 	ASSERT0(zio->io_queued_timestamp);
1604 	zio->io_queued_timestamp = gethrtime();
1605 
1606 	zio_execute(zio);
1607 
1608 	mutex_enter(&zio->io_lock);
1609 	while (zio->io_executor != NULL)
1610 		cv_wait(&zio->io_cv, &zio->io_lock);
1611 	mutex_exit(&zio->io_lock);
1612 
1613 	error = zio->io_error;
1614 	zio_destroy(zio);
1615 
1616 	return (error);
1617 }
1618 
1619 void
1620 zio_nowait(zio_t *zio)
1621 {
1622 	ASSERT(zio->io_executor == NULL);
1623 
1624 	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
1625 	    zio_unique_parent(zio) == NULL) {
1626 		/*
1627 		 * This is a logical async I/O with no parent to wait for it.
1628 		 * We add it to the spa_async_zio_root "Godfather" I/O, which
1629 		 * will ensure it completes prior to unloading the pool.
1630 		 */
1631 		spa_t *spa = zio->io_spa;
1632 
1633 		zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio);
1634 	}
1635 
1636 	ASSERT0(zio->io_queued_timestamp);
1637 	zio->io_queued_timestamp = gethrtime();
1638 	zio_execute(zio);
1639 }
1640 
1641 /*
1642  * ==========================================================================
1643  * Reexecute or suspend/resume failed I/O
1644  * ==========================================================================
1645  */
1646 
1647 static void
1648 zio_reexecute(zio_t *pio)
1649 {
1650 	zio_t *cio, *cio_next;
1651 
1652 	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
1653 	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
1654 	ASSERT(pio->io_gang_leader == NULL);
1655 	ASSERT(pio->io_gang_tree == NULL);
1656 
1657 	pio->io_flags = pio->io_orig_flags;
1658 	pio->io_stage = pio->io_orig_stage;
1659 	pio->io_pipeline = pio->io_orig_pipeline;
1660 	pio->io_reexecute = 0;
1661 	pio->io_flags |= ZIO_FLAG_REEXECUTED;
1662 	pio->io_pipeline_trace = 0;
1663 	pio->io_error = 0;
1664 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1665 		pio->io_state[w] = 0;
1666 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
1667 		pio->io_child_error[c] = 0;
1668 
1669 	if (IO_IS_ALLOCATING(pio))
1670 		BP_ZERO(pio->io_bp);
1671 
1672 	/*
1673 	 * As we reexecute pio's children, new children could be created.
1674 	 * New children go to the head of pio's io_child_list, however,
1675 	 * so we will (correctly) not reexecute them.  The key is that
1676 	 * the remainder of pio's io_child_list, from 'cio_next' onward,
1677 	 * cannot be affected by any side effects of reexecuting 'cio'.
1678 	 */
1679 	zio_link_t *zl = NULL;
1680 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
1681 		cio_next = zio_walk_children(pio, &zl);
1682 		mutex_enter(&pio->io_lock);
1683 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1684 			pio->io_children[cio->io_child_type][w]++;
1685 		mutex_exit(&pio->io_lock);
1686 		zio_reexecute(cio);
1687 	}
1688 
1689 	/*
1690 	 * Now that all children have been reexecuted, execute the parent.
1691 	 * We don't reexecute "The Godfather" I/O here as it's the
1692 	 * responsibility of the caller to wait on it.
1693 	 */
1694 	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
1695 		pio->io_queued_timestamp = gethrtime();
1696 		zio_execute(pio);
1697 	}
1698 }
1699 
1700 void
1701 zio_suspend(spa_t *spa, zio_t *zio)
1702 {
1703 	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
1704 		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
1705 		    "failure and the failure mode property for this pool "
1706 		    "is set to panic.", spa_name(spa));
1707 
1708 	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);
1709 
1710 	mutex_enter(&spa->spa_suspend_lock);
1711 
1712 	if (spa->spa_suspend_zio_root == NULL)
1713 		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
1714 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
1715 		    ZIO_FLAG_GODFATHER);
1716 
1717 	spa->spa_suspended = B_TRUE;
1718 
1719 	if (zio != NULL) {
1720 		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
1721 		ASSERT(zio != spa->spa_suspend_zio_root);
1722 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1723 		ASSERT(zio_unique_parent(zio) == NULL);
1724 		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
1725 		zio_add_child(spa->spa_suspend_zio_root, zio);
1726 	}
1727 
1728 	mutex_exit(&spa->spa_suspend_lock);
1729 }
1730 
1731 int
1732 zio_resume(spa_t *spa)
1733 {
1734 	zio_t *pio;
1735 
1736 	/*
1737 	 * Reexecute all previously suspended i/o.
1738 	 */
1739 	mutex_enter(&spa->spa_suspend_lock);
1740 	spa->spa_suspended = B_FALSE;
1741 	cv_broadcast(&spa->spa_suspend_cv);
1742 	pio = spa->spa_suspend_zio_root;
1743 	spa->spa_suspend_zio_root = NULL;
1744 	mutex_exit(&spa->spa_suspend_lock);
1745 
1746 	if (pio == NULL)
1747 		return (0);
1748 
1749 	zio_reexecute(pio);
1750 	return (zio_wait(pio));
1751 }
1752 
1753 void
1754 zio_resume_wait(spa_t *spa)
1755 {
1756 	mutex_enter(&spa->spa_suspend_lock);
1757 	while (spa_suspended(spa))
1758 		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
1759 	mutex_exit(&spa->spa_suspend_lock);
1760 }
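
/*
 * Sketch (hypothetical caller, not from this file): clearing a pool
 * suspension.  zio_resume() reexecutes everything parked under the
 * suspend "Godfather" zio and returns the collective error;
 * zio_resume_wait() is what issuers use to block while the pool is
 * still suspended.
 */
static int
zio_example_clear_suspension(spa_t *spa)
{
	int error = zio_resume(spa);	/* reexecute all suspended I/O */

	if (error == 0)
		zio_resume_wait(spa);	/* returns once no longer suspended */
	return (error);
}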
1761 
1762 /*
1763  * ==========================================================================
1764  * Gang blocks.
1765  *
1766  * A gang block is a collection of small blocks that looks to the DMU
1767  * like one large block.  When zio_dva_allocate() cannot find a block
1768  * of the requested size, due to either severe fragmentation or the pool
1769  * being nearly full, it calls zio_write_gang_block() to construct the
1770  * block from smaller fragments.
1771  *
1772  * A gang block consists of a gang header (zio_gbh_phys_t) and up to
1773  * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
1774  * an indirect block: it's an array of block pointers.  It consumes
1775  * only one sector and hence is allocatable regardless of fragmentation.
1776  * The gang header's bps point to its gang members, which hold the data.
1777  *
1778  * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
1779  * as the verifier to ensure uniqueness of the SHA256 checksum.
1780  * Critically, the gang block bp's blk_cksum is the checksum of the data,
1781  * not the gang header.  This ensures that data block signatures (needed for
1782  * deduplication) are independent of how the block is physically stored.
1783  *
1784  * Gang blocks can be nested: a gang member may itself be a gang block.
1785  * Thus every gang block is a tree in which the root and all interior
1786  * nodes are gang headers, and the leaves are normal blocks with user data.
1787  * The root of the gang tree is called the gang leader.
1788  *
1789  * To perform any operation (read, rewrite, free, claim) on a gang block,
1790  * zio_gang_assemble() first assembles the gang tree (minus data leaves)
1791  * in the io_gang_tree field of the original logical i/o by recursively
1792  * reading the gang leader and all gang headers below it.  This yields
1793  * an in-core tree containing the contents of every gang header and the
1794  * bps for every constituent of the gang block.
1795  *
1796  * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
1797  * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
1798  * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
1799  * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
1800  * zio_read_gang() is a wrapper around zio_read() that omits reading gang
1801  * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
1802  * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
1803  * of the gang header plus zio_checksum_compute() of the data to update the
1804  * gang header's blk_cksum as described above.
1805  *
1806  * The two-phase assemble/issue model solves the problem of partial failure --
1807  * what if you'd freed part of a gang block but then couldn't read the
1808  * gang header for another part?  Assembling the entire gang tree first
1809  * ensures that all the necessary gang header I/O has succeeded before
1810  * starting the actual work of free, claim, or write.  Once the gang tree
1811  * is assembled, free and claim are in-memory operations that cannot fail.
1812  *
1813  * In the event that a gang write fails, zio_dva_unallocate() walks the
1814  * gang tree to immediately free (i.e. insert back into the space map)
1815  * everything we've allocated.  This ensures that we don't get ENOSPC
1816  * errors during repeated suspend/resume cycles due to a flaky device.
1817  *
1818  * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
1819  * the gang tree, we won't modify the block, so we can safely defer the free
1820  * (knowing that the block is still intact).  If we *can* assemble the gang
1821  * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
1822  * each constituent bp and we can allocate a new block on the next sync pass.
1823  *
1824  * In all cases, the gang tree allows complete recovery from partial failure.
1825  * ==========================================================================
1826  */
1827 
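/*
 * Hypothetical helper (not in the original file) illustrating the tree
 * shape described above: interior nodes are gang headers with up to
 * SPA_GBH_NBLKPTRS children each, and the leaves are ordinary bps that
 * hold the user data.
 */
static uint64_t
zio_example_gang_leaf_count(zio_gang_node_t *gn)
{
	uint64_t leaves = 0;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];

		if (BP_IS_HOLE(gbp))
			continue;			/* unused slot */
		if (gn->gn_child[g] != NULL)
			leaves += zio_example_gang_leaf_count(gn->gn_child[g]);
		else
			leaves++;			/* data leaf */
	}
	return (leaves);
}
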
1828 static zio_t *
1829 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
1830 {
1831 	if (gn != NULL)
1832 		return (pio);
1833 
1834 	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
1835 	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
1836 	    &pio->io_bookmark));
1837 }
1838 
1839 zio_t *
1840 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
1841 {
1842 	zio_t *zio;
1843 
1844 	if (gn != NULL) {
1845 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
1846 		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
1847 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
1848 		/*
1849 		 * As we rewrite each gang header, the pipeline will compute
1850 		 * a new gang block header checksum for it; but no one will
1851 		 * compute a new data checksum, so we do that here.  The one
1852 		 * exception is the gang leader: the pipeline already computed
1853 		 * its data checksum because that stage precedes gang assembly.
1854 		 * (Presently, nothing actually uses interior data checksums;
1855 		 * this is just good hygiene.)
1856 		 */
1857 		if (gn != pio->io_gang_leader->io_gang_tree) {
1858 			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
1859 			    data, BP_GET_PSIZE(bp));
1860 		}
1861 		/*
1862 		 * If we are here to damage data for testing purposes,
1863 		 * leave the GBH alone so that we can detect the damage.
1864 		 */
1865 		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
1866 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
1867 	} else {
1868 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
1869 		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
1870 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
1871 	}
1872 
1873 	return (zio);
1874 }
1875 
1876 /* ARGSUSED */
1877 zio_t *
1878 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
1879 {
1880 	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
1881 	    ZIO_GANG_CHILD_FLAGS(pio)));
1882 }
1883 
1884 /* ARGSUSED */
1885 zio_t *
1886 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
1887 {
1888 	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
1889 	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
1890 }
1891 
1892 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
1893 	NULL,
1894 	zio_read_gang,
1895 	zio_rewrite_gang,
1896 	zio_free_gang,
1897 	zio_claim_gang,
1898 	NULL
1899 };
1900 
1901 static void zio_gang_tree_assemble_done(zio_t *zio);
1902 
1903 static zio_gang_node_t *
1904 zio_gang_node_alloc(zio_gang_node_t **gnpp)
1905 {
1906 	zio_gang_node_t *gn;
1907 
1908 	ASSERT(*gnpp == NULL);
1909 
1910 	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
1911 	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
1912 	*gnpp = gn;
1913 
1914 	return (gn);
1915 }
1916 
1917 static void
1918 zio_gang_node_free(zio_gang_node_t **gnpp)
1919 {
1920 	zio_gang_node_t *gn = *gnpp;
1921 
1922 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
1923 		ASSERT(gn->gn_child[g] == NULL);
1924 
1925 	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
1926 	kmem_free(gn, sizeof (*gn));
1927 	*gnpp = NULL;
1928 }
1929 
1930 static void
1931 zio_gang_tree_free(zio_gang_node_t **gnpp)
1932 {
1933 	zio_gang_node_t *gn = *gnpp;
1934 
1935 	if (gn == NULL)
1936 		return;
1937 
1938 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
1939 		zio_gang_tree_free(&gn->gn_child[g]);
1940 
1941 	zio_gang_node_free(gnpp);
1942 }
1943 
1944 static void
1945 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
1946 {
1947 	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
1948 
1949 	ASSERT(gio->io_gang_leader == gio);
1950 	ASSERT(BP_IS_GANG(bp));
1951 
1952 	zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
1953 	    SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
1954 	    gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
1955 }
1956 
1957 static void
1958 zio_gang_tree_assemble_done(zio_t *zio)
1959 {
1960 	zio_t *gio = zio->io_gang_leader;
1961 	zio_gang_node_t *gn = zio->io_private;
1962 	blkptr_t *bp = zio->io_bp;
1963 
1964 	ASSERT(gio == zio_unique_parent(zio));
1965 	ASSERT(zio->io_child_count == 0);
1966 
1967 	if (zio->io_error)
1968 		return;
1969 
1970 	if (BP_SHOULD_BYTESWAP(bp))
1971 		byteswap_uint64_array(zio->io_data, zio->io_size);
1972 
1973 	ASSERT(zio->io_data == gn->gn_gbh);
1974 	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
1975 	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
1976 
1977 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
1978 		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
1979 		if (!BP_IS_GANG(gbp))
1980 			continue;
1981 		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
1982 	}
1983 }
1984 
1985 static void
1986 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
1987 {
1988 	zio_t *gio = pio->io_gang_leader;
1989 	zio_t *zio;
1990 
1991 	ASSERT(BP_IS_GANG(bp) == !!gn);
1992 	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
1993 	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
1994 
1995 	/*
1996 	 * If you're a gang header, your data is in gn->gn_gbh.
1997 	 * If you're a gang member, your data is in 'data' and gn == NULL.
1998 	 */
1999 	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);
2000 
2001 	if (gn != NULL) {
2002 		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2003 
2004 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2005 			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2006 			if (BP_IS_HOLE(gbp))
2007 				continue;
2008 			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
2009 			data = (char *)data + BP_GET_PSIZE(gbp);
2010 		}
2011 	}
2012 
2013 	if (gn == gio->io_gang_tree)
2014 		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);
2015 
2016 	if (zio != pio)
2017 		zio_nowait(zio);
2018 }
2019 
2020 static int
2021 zio_gang_assemble(zio_t *zio)
2022 {
2023 	blkptr_t *bp = zio->io_bp;
2024 
2025 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
2026 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2027 
2028 	zio->io_gang_leader = zio;
2029 
2030 	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
2031 
2032 	return (ZIO_PIPELINE_CONTINUE);
2033 }
2034 
2035 static int
2036 zio_gang_issue(zio_t *zio)
2037 {
2038 	blkptr_t *bp = zio->io_bp;
2039 
2040 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
2041 		return (ZIO_PIPELINE_STOP);
2042 
2043 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
2044 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2045 
2046 	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
2047 		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
2048 	else
2049 		zio_gang_tree_free(&zio->io_gang_tree);
2050 
2051 	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2052 
2053 	return (ZIO_PIPELINE_CONTINUE);
2054 }
2055 
2056 static void
2057 zio_write_gang_member_ready(zio_t *zio)
2058 {
2059 	zio_t *pio = zio_unique_parent(zio);
2060 	zio_t *gio = zio->io_gang_leader;
2061 	dva_t *cdva = zio->io_bp->blk_dva;
2062 	dva_t *pdva = pio->io_bp->blk_dva;
2063 	uint64_t asize;
2064 
2065 	if (BP_IS_HOLE(zio->io_bp))
2066 		return;
2067 
2068 	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
2069 
2070 	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
2071 	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
2072 	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
2073 	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
2074 	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
2075 
2076 	mutex_enter(&pio->io_lock);
2077 	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
2078 		ASSERT(DVA_GET_GANG(&pdva[d]));
2079 		asize = DVA_GET_ASIZE(&pdva[d]);
2080 		asize += DVA_GET_ASIZE(&cdva[d]);
2081 		DVA_SET_ASIZE(&pdva[d], asize);
2082 	}
2083 	mutex_exit(&pio->io_lock);
2084 }
2085 
2086 static int
2087 zio_write_gang_block(zio_t *pio)
2088 {
2089 	spa_t *spa = pio->io_spa;
2090 	metaslab_class_t *mc = spa_normal_class(spa);
2091 	blkptr_t *bp = pio->io_bp;
2092 	zio_t *gio = pio->io_gang_leader;
2093 	zio_t *zio;
2094 	zio_gang_node_t *gn, **gnpp;
2095 	zio_gbh_phys_t *gbh;
2096 	uint64_t txg = pio->io_txg;
2097 	uint64_t resid = pio->io_size;
2098 	uint64_t lsize;
2099 	int copies = gio->io_prop.zp_copies;
2100 	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
2101 	zio_prop_t zp;
2102 	int error;
2103 
2104 	int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
2105 	if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2106 		ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2107 		ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
2108 
2109 		flags |= METASLAB_ASYNC_ALLOC;
2110 		VERIFY(refcount_held(&mc->mc_alloc_slots, pio));
2111 
2112 		/*
2113 		 * The logical zio has already placed a reservation for
2114 		 * 'copies' allocation slots but gang blocks may require
2115 		 * additional copies. These additional copies
2116 		 * (i.e. gbh_copies - copies) are guaranteed to succeed
2117 		 * since metaslab_class_throttle_reserve() always allows
2118 		 * additional reservations for gang blocks.
2119 		 */
2120 		VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
2121 		    pio, flags));
2122 	}
2123 
2124 	error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
2125 	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
2126 	    &pio->io_alloc_list, pio);
2127 	if (error) {
2128 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2129 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2130 			ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
2131 
2132 			/*
2133 			 * If we failed to allocate the gang block header then
2134 			 * we remove any additional allocation reservations that
2135 			 * we placed here. The original reservation will
2136 			 * be removed when the logical I/O goes to the ready
2137 			 * stage.
2138 			 */
2139 			metaslab_class_throttle_unreserve(mc,
2140 			    gbh_copies - copies, pio);
2141 		}
2142 		pio->io_error = error;
2143 		return (ZIO_PIPELINE_CONTINUE);
2144 	}
2145 
2146 	if (pio == gio) {
2147 		gnpp = &gio->io_gang_tree;
2148 	} else {
2149 		gnpp = pio->io_private;
2150 		ASSERT(pio->io_ready == zio_write_gang_member_ready);
2151 	}
2152 
2153 	gn = zio_gang_node_alloc(gnpp);
2154 	gbh = gn->gn_gbh;
2155 	bzero(gbh, SPA_GANGBLOCKSIZE);
2156 
2157 	/*
2158 	 * Create the gang header.
2159 	 */
2160 	zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
2161 	    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2162 
2163 	/*
2164 	 * Create and nowait the gang children.
2165 	 */
2166 	for (int g = 0; resid != 0; resid -= lsize, g++) {
2167 		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
2168 		    SPA_MINBLOCKSIZE);
2169 		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
2170 
2171 		zp.zp_checksum = gio->io_prop.zp_checksum;
2172 		zp.zp_compress = ZIO_COMPRESS_OFF;
2173 		zp.zp_type = DMU_OT_NONE;
2174 		zp.zp_level = 0;
2175 		zp.zp_copies = gio->io_prop.zp_copies;
2176 		zp.zp_dedup = B_FALSE;
2177 		zp.zp_dedup_verify = B_FALSE;
2178 		zp.zp_nopwrite = B_FALSE;
2179 
2180 		zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
2181 		    (char *)pio->io_data + (pio->io_size - resid), lsize, lsize,
2182 		    &zp, zio_write_gang_member_ready, NULL, NULL, NULL,
2183 		    &gn->gn_child[g], pio->io_priority,
2184 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2185 
2186 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2187 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2188 			ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
2189 
2190 			/*
2191 			 * Gang children won't throttle but we should
2192 			 * account for their work, so reserve an allocation
2193 			 * slot for them here.
2194 			 */
2195 			VERIFY(metaslab_class_throttle_reserve(mc,
2196 			    zp.zp_copies, cio, flags));
2197 		}
2198 		zio_nowait(cio);
2199 	}
2200 
2201 	/*
2202 	 * Set pio's pipeline to just wait for zio to finish.
2203 	 */
2204 	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2205 
2206 	zio_nowait(zio);
2207 
2208 	return (ZIO_PIPELINE_CONTINUE);
2209 }
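
/*
 * Worked example (hypothetical sizes) of the member-sizing loop above,
 * with SPA_GBH_NBLKPTRS == 3 and a 96K (98304-byte) write:
 *
 *	g == 0: lsize = P2ROUNDUP(98304 / 3, 512) = 32K, resid 96K -> 64K
 *	g == 1: lsize = P2ROUNDUP(65536 / 2, 512) = 32K, resid 64K -> 32K
 *	g == 2: lsize = P2ROUNDUP(32768 / 1, 512) = 32K, resid 32K -> 0
 *
 * Sizes that don't divide evenly round up to a SPA_MINBLOCKSIZE
 * multiple, so later members may come out smaller than earlier ones;
 * any member that still cannot be allocated simply gangs again, one
 * level deeper.
 */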
2210 
2211 /*
2212  * The zio_nop_write stage in the pipeline determines if allocating a
2213  * new bp is necessary.  The nopwrite feature can handle writes in
2214  * either syncing or open context (i.e. zil writes) and as a result is
2215  * mutually exclusive with dedup.
2216  *
2217  * By leveraging a cryptographically secure checksum, such as SHA256, we
2218  * can compare the checksums of the new data and the old to determine if
2219  * allocating a new block is required.  Note that our requirements for
2220  * cryptographic strength are fairly weak: there can't be any accidental
2221  * hash collisions, but we don't need to be secure against intentional
2222  * (malicious) collisions.  To trigger a nopwrite, you have to be able
2223  * to write the file to begin with, and triggering an incorrect (hash
2224  * collision) nopwrite is no worse than simply writing to the file.
2225  * That said, there are no known attacks against the checksum algorithms
2226  * used for nopwrite, assuming that the salt and the checksums
2227  * themselves remain secret.
2228  */
2229 static int
2230 zio_nop_write(zio_t *zio)
2231 {
2232 	blkptr_t *bp = zio->io_bp;
2233 	blkptr_t *bp_orig = &zio->io_bp_orig;
2234 	zio_prop_t *zp = &zio->io_prop;
2235 
2236 	ASSERT(BP_GET_LEVEL(bp) == 0);
2237 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2238 	ASSERT(zp->zp_nopwrite);
2239 	ASSERT(!zp->zp_dedup);
2240 	ASSERT(zio->io_bp_override == NULL);
2241 	ASSERT(IO_IS_ALLOCATING(zio));
2242 
2243 	/*
2244 	 * Check to see if the original bp and the new bp have matching
2245 	 * characteristics (i.e. same checksum, compression algorithms, etc).
2246 	 * If they don't then just continue with the pipeline which will
2247 	 * allocate a new bp.
2248 	 */
2249 	if (BP_IS_HOLE(bp_orig) ||
2250 	    !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
2251 	    ZCHECKSUM_FLAG_NOPWRITE) ||
2252 	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
2253 	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
2254 	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
2255 	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
2256 		return (ZIO_PIPELINE_CONTINUE);
2257 
2258 	/*
2259 	 * If the checksums match then reset the pipeline so that we
2260 	 * avoid allocating a new bp and issuing any I/O.
2261 	 */
2262 	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
2263 		ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
2264 		    ZCHECKSUM_FLAG_NOPWRITE);
2265 		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
2266 		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
2267 		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
2268 		ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
2269 		    sizeof (uint64_t)) == 0);
2270 
2271 		*bp = *bp_orig;
2272 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2273 		zio->io_flags |= ZIO_FLAG_NOPWRITE;
2274 	}
2275 
2276 	return (ZIO_PIPELINE_CONTINUE);
2277 }
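
/*
 * Condensed sketch (hypothetical helper, not in the original file) of
 * the eligibility test above: nopwrite requires a nopwrite-capable,
 * matching checksum plus identical compression, dedup, and copies
 * settings between the old and new bps, and finally equal checksums.
 */
static boolean_t
zio_example_nopwrite_match(blkptr_t *bp, blkptr_t *bp_orig, zio_prop_t *zp)
{
	return (!BP_IS_HOLE(bp_orig) &&
	    (zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
	    ZCHECKSUM_FLAG_NOPWRITE) != 0 &&
	    BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(bp_orig) &&
	    BP_GET_COMPRESS(bp) == BP_GET_COMPRESS(bp_orig) &&
	    BP_GET_DEDUP(bp) == BP_GET_DEDUP(bp_orig) &&
	    zp->zp_copies == BP_GET_NDVAS(bp_orig) &&
	    ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum));
}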
2278 
2279 /*
2280  * ==========================================================================
2281  * Dedup
2282  * ==========================================================================
2283  */
2284 static void
2285 zio_ddt_child_read_done(zio_t *zio)
2286 {
2287 	blkptr_t *bp = zio->io_bp;
2288 	ddt_entry_t *dde = zio->io_private;
2289 	ddt_phys_t *ddp;
2290 	zio_t *pio = zio_unique_parent(zio);
2291 
2292 	mutex_enter(&pio->io_lock);
2293 	ddp = ddt_phys_select(dde, bp);
2294 	if (zio->io_error == 0)
2295 		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
2296 	if (zio->io_error == 0 && dde->dde_repair_data == NULL)
2297 		dde->dde_repair_data = zio->io_data;
2298 	else
2299 		zio_buf_free(zio->io_data, zio->io_size);
2300 	mutex_exit(&pio->io_lock);
2301 }
2302 
2303 static int
2304 zio_ddt_read_start(zio_t *zio)
2305 {
2306 	blkptr_t *bp = zio->io_bp;
2307 
2308 	ASSERT(BP_GET_DEDUP(bp));
2309 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
2310 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2311 
2312 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
2313 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
2314 		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
2315 		ddt_phys_t *ddp = dde->dde_phys;
2316 		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
2317 		blkptr_t blk;
2318 
2319 		ASSERT(zio->io_vsd == NULL);
2320 		zio->io_vsd = dde;
2321 
2322 		if (ddp_self == NULL)
2323 			return (ZIO_PIPELINE_CONTINUE);
2324 
2325 		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
2326 			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
2327 				continue;
2328 			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
2329 			    &blk);
2330 			zio_nowait(zio_read(zio, zio->io_spa, &blk,
2331 			    zio_buf_alloc(zio->io_size), zio->io_size,
2332 			    zio_ddt_child_read_done, dde, zio->io_priority,
2333 			    ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE,
2334 			    &zio->io_bookmark));
2335 		}
2336 		return (ZIO_PIPELINE_CONTINUE);
2337 	}
2338 
2339 	zio_nowait(zio_read(zio, zio->io_spa, bp,
2340 	    zio->io_data, zio->io_size, NULL, NULL, zio->io_priority,
2341 	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
2342 
2343 	return (ZIO_PIPELINE_CONTINUE);
2344 }
2345 
2346 static int
2347 zio_ddt_read_done(zio_t *zio)
2348 {
2349 	blkptr_t *bp = zio->io_bp;
2350 
2351 	if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE))
2352 		return (ZIO_PIPELINE_STOP);
2353 
2354 	ASSERT(BP_GET_DEDUP(bp));
2355 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
2356 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2357 
2358 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
2359 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
2360 		ddt_entry_t *dde = zio->io_vsd;
2361 		if (ddt == NULL) {
2362 			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
2363 			return (ZIO_PIPELINE_CONTINUE);
2364 		}
2365 		if (dde == NULL) {
2366 			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
2367 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2368 			return (ZIO_PIPELINE_STOP);
2369 		}
2370 		if (dde->dde_repair_data != NULL) {
2371 			bcopy(dde->dde_repair_data, zio->io_data, zio->io_size);
2372 			zio->io_child_error[ZIO_CHILD_DDT] = 0;
2373 		}
2374 		ddt_repair_done(ddt, dde);
2375 		zio->io_vsd = NULL;
2376 	}
2377 
2378 	ASSERT(zio->io_vsd == NULL);
2379 
2380 	return (ZIO_PIPELINE_CONTINUE);
2381 }
2382 
2383 static boolean_t
2384 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
2385 {
2386 	spa_t *spa = zio->io_spa;
2387 	boolean_t do_raw = (zio->io_flags & ZIO_FLAG_RAW);
2388 
2389 	/* We should never get a raw, override zio */
2390 	ASSERT(!(zio->io_bp_override && do_raw));
2391 
2392 	/*
2393 	 * Note: we compare the original data, not the transformed data,
2394 	 * because when zio->io_bp is an override bp, we will not have
2395 	 * pushed the I/O transforms.  That's an important optimization
2396 	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
2397 	 */
2398 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
2399 		zio_t *lio = dde->dde_lead_zio[p];
2400 
2401 		if (lio != NULL) {
2402 			return (lio->io_orig_size != zio->io_orig_size ||
2403 			    bcmp(zio->io_orig_data, lio->io_orig_data,
2404 			    zio->io_orig_size) != 0);
2405 		}
2406 	}
2407 
2408 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
2409 		ddt_phys_t *ddp = &dde->dde_phys[p];
2410 
2411 		if (ddp->ddp_phys_birth != 0) {
2412 			arc_buf_t *abuf = NULL;
2413 			arc_flags_t aflags = ARC_FLAG_WAIT;
2414 			int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
2415 			blkptr_t blk = *zio->io_bp;
2416 			int error;
2417 
2418 			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
2419 
2420 			ddt_exit(ddt);
2421 
2422 			/*
2423 			 * Intuitively, it would make more sense to compare
2424 			 * io_data than io_orig_data in the raw case since you
2425 			 * don't want to look at any transformations that have
2426 			 * happened to the data. However, for raw I/Os the
2427 			 * data will actually be the same in io_data and
2428 			 * io_orig_data, so all we have to do is issue this as
2429 			 * a raw ARC read.
2430 			 */
2431 			if (do_raw) {
2432 				zio_flags |= ZIO_FLAG_RAW;
2433 				ASSERT3U(zio->io_size, ==, zio->io_orig_size);
2434 				ASSERT0(bcmp(zio->io_data, zio->io_orig_data,
2435 				    zio->io_size));
2436 				ASSERT3P(zio->io_transform_stack, ==, NULL);
2437 			}
2438 
2439 			error = arc_read(NULL, spa, &blk,
2440 			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
2441 			    zio_flags, &aflags, &zio->io_bookmark);
2442 
2443 			if (error == 0) {
2444 				if (arc_buf_size(abuf) != zio->io_orig_size ||
2445 				    bcmp(abuf->b_data, zio->io_orig_data,
2446 				    zio->io_orig_size) != 0)
2447 					error = SET_ERROR(EEXIST);
2448 				arc_buf_destroy(abuf, &abuf);
2449 			}
2450 
2451 			ddt_enter(ddt);
2452 			return (error != 0);
2453 		}
2454 	}
2455 
2456 	return (B_FALSE);
2457 }
2458 
2459 static void
2460 zio_ddt_child_write_ready(zio_t *zio)
2461 {
2462 	int p = zio->io_prop.zp_copies;
2463 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
2464 	ddt_entry_t *dde = zio->io_private;
2465 	ddt_phys_t *ddp = &dde->dde_phys[p];
2466 	zio_t *pio;
2467 
2468 	if (zio->io_error)
2469 		return;
2470 
2471 	ddt_enter(ddt);
2472 
2473 	ASSERT(dde->dde_lead_zio[p] == zio);
2474 
2475 	ddt_phys_fill(ddp, zio->io_bp);
2476 
2477 	zio_link_t *zl = NULL;
2478 	while ((pio = zio_walk_parents(zio, &zl)) != NULL)
2479 		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
2480 
2481 	ddt_exit(ddt);
2482 }
2483 
2484 static void
2485 zio_ddt_child_write_done(zio_t *zio)
2486 {
2487 	int p = zio->io_prop.zp_copies;
2488 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
2489 	ddt_entry_t *dde = zio->io_private;
2490 	ddt_phys_t *ddp = &dde->dde_phys[p];
2491 
2492 	ddt_enter(ddt);
2493 
2494 	ASSERT(ddp->ddp_refcnt == 0);
2495 	ASSERT(dde->dde_lead_zio[p] == zio);
2496 	dde->dde_lead_zio[p] = NULL;
2497 
2498 	if (zio->io_error == 0) {
2499 		zio_link_t *zl = NULL;
2500 		while (zio_walk_parents(zio, &zl) != NULL)
2501 			ddt_phys_addref(ddp);
2502 	} else {
2503 		ddt_phys_clear(ddp);
2504 	}
2505 
2506 	ddt_exit(ddt);
2507 }
2508 
2509 static void
2510 zio_ddt_ditto_write_done(zio_t *zio)
2511 {
2512 	int p = DDT_PHYS_DITTO;
2513 	zio_prop_t *zp = &zio->io_prop;
2514 	blkptr_t *bp = zio->io_bp;
2515 	ddt_t *ddt = ddt_select(zio->io_spa, bp);
2516 	ddt_entry_t *dde = zio->io_private;
2517 	ddt_phys_t *ddp = &dde->dde_phys[p];
2518 	ddt_key_t *ddk = &dde->dde_key;
2519 
2520 	ddt_enter(ddt);
2521 
2522 	ASSERT(ddp->ddp_refcnt == 0);
2523 	ASSERT(dde->dde_lead_zio[p] == zio);
2524 	dde->dde_lead_zio[p] = NULL;
2525 
2526 	if (zio->io_error == 0) {
2527 		ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum));
2528 		ASSERT(zp->zp_copies < SPA_DVAS_PER_BP);
2529 		ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp));
2530 		if (ddp->ddp_phys_birth != 0)
2531 			ddt_phys_free(ddt, ddk, ddp, zio->io_txg);
2532 		ddt_phys_fill(ddp, bp);
2533 	}
2534 
2535 	ddt_exit(ddt);
2536 }
2537 
2538 static int
2539 zio_ddt_write(zio_t *zio)
2540 {
2541 	spa_t *spa = zio->io_spa;
2542 	blkptr_t *bp = zio->io_bp;
2543 	uint64_t txg = zio->io_txg;
2544 	zio_prop_t *zp = &zio->io_prop;
2545 	int p = zp->zp_copies;
2546 	int ditto_copies;
2547 	zio_t *cio = NULL;
2548 	zio_t *dio = NULL;
2549 	ddt_t *ddt = ddt_select(spa, bp);
2550 	ddt_entry_t *dde;
2551 	ddt_phys_t *ddp;
2552 
2553 	ASSERT(BP_GET_DEDUP(bp));
2554 	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
2555 	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
2556 	ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
2557 
2558 	ddt_enter(ddt);
2559 	dde = ddt_lookup(ddt, bp, B_TRUE);
2560 	ddp = &dde->dde_phys[p];
2561 
2562 	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
2563 		/*
2564 		 * If we're using a weak checksum, upgrade to a strong checksum
2565 		 * and try again.  If we're already using a strong checksum,
2566 		 * we can't resolve it, so just convert to an ordinary write.
2567 		 * (And automatically e-mail a paper to Nature?)
2568 		 */
2569 		if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
2570 		    ZCHECKSUM_FLAG_DEDUP)) {
2571 			zp->zp_checksum = spa_dedup_checksum(spa);
2572 			zio_pop_transforms(zio);
2573 			zio->io_stage = ZIO_STAGE_OPEN;
2574 			BP_ZERO(bp);
2575 		} else {
2576 			zp->zp_dedup = B_FALSE;
2577 			BP_SET_DEDUP(bp, B_FALSE);
2578 		}
2579 		ASSERT(!BP_GET_DEDUP(bp));
2580 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
2581 		ddt_exit(ddt);
2582 		return (ZIO_PIPELINE_CONTINUE);
2583 	}
2584 
2585 	ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
2586 	ASSERT(ditto_copies < SPA_DVAS_PER_BP);
2587 
2588 	if (ditto_copies > ddt_ditto_copies_present(dde) &&
2589 	    dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) {
2590 		zio_prop_t czp = *zp;
2591 
2592 		czp.zp_copies = ditto_copies;
2593 
2594 		/*
2595 		 * If we arrived here with an override bp, we won't have run
2596 		 * the transform stack, so we won't have the data we need to
2597 		 * generate a child i/o.  So, toss the override bp and restart.
2598 		 * This is safe, because using the override bp is just an
2599 		 * optimization, and it's rare, so the cost doesn't matter.
2600 		 */
2601 		if (zio->io_bp_override) {
2602 			zio_pop_transforms(zio);
2603 			zio->io_stage = ZIO_STAGE_OPEN;
2604 			zio->io_pipeline = ZIO_WRITE_PIPELINE;
2605 			zio->io_bp_override = NULL;
2606 			BP_ZERO(bp);
2607 			ddt_exit(ddt);
2608 			return (ZIO_PIPELINE_CONTINUE);
2609 		}
2610 
2611 		dio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
2612 		    zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL,
2613 		    NULL, zio_ddt_ditto_write_done, dde, zio->io_priority,
2614 		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
2615 
2616 		zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL);
2617 		dde->dde_lead_zio[DDT_PHYS_DITTO] = dio;
2618 	}
2619 
2620 	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
2621 		if (ddp->ddp_phys_birth != 0)
2622 			ddt_bp_fill(ddp, bp, txg);
2623 		if (dde->dde_lead_zio[p] != NULL)
2624 			zio_add_child(zio, dde->dde_lead_zio[p]);
2625 		else
2626 			ddt_phys_addref(ddp);
2627 	} else if (zio->io_bp_override) {
2628 		ASSERT(bp->blk_birth == txg);
2629 		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
2630 		ddt_phys_fill(ddp, bp);
2631 		ddt_phys_addref(ddp);
2632 	} else {
2633 		cio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
2634 		    zio->io_orig_size, zio->io_orig_size, zp,
2635 		    zio_ddt_child_write_ready, NULL, NULL,
2636 		    zio_ddt_child_write_done, dde, zio->io_priority,
2637 		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
2638 
2639 		zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL);
2640 		dde->dde_lead_zio[p] = cio;
2641 	}
2642 
2643 	ddt_exit(ddt);
2644 
2645 	if (cio)
2646 		zio_nowait(cio);
2647 	if (dio)
2648 		zio_nowait(dio);
2649 
2650 	return (ZIO_PIPELINE_CONTINUE);
2651 }
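
/*
 * To summarize the outcomes above for a dedup write: a verified
 * collision demotes the zio to an ordinary write (possibly after a
 * checksum upgrade); an entry already on disk just fills the bp and
 * takes a reference; if a lead write of the same block is in flight,
 * this zio becomes its parent and shares the result; otherwise this
 * zio issues the first physical copy ('cio') and records itself as
 * the lead.
 */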
2652 
2653 ddt_entry_t *freedde; /* for debugging */
2654 
2655 static int
2656 zio_ddt_free(zio_t *zio)
2657 {
2658 	spa_t *spa = zio->io_spa;
2659 	blkptr_t *bp = zio->io_bp;
2660 	ddt_t *ddt = ddt_select(spa, bp);
2661 	ddt_entry_t *dde;
2662 	ddt_phys_t *ddp;
2663 
2664 	ASSERT(BP_GET_DEDUP(bp));
2665 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2666 
2667 	ddt_enter(ddt);
2668 	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
2669 	ddp = ddt_phys_select(dde, bp);
2670 	ddt_phys_decref(ddp);
2671 	ddt_exit(ddt);
2672 
2673 	return (ZIO_PIPELINE_CONTINUE);
2674 }
2675 
2676 /*
2677  * ==========================================================================
2678  * Allocate and free blocks
2679  * ==========================================================================
2680  */
2681 
2682 static zio_t *
2683 zio_io_to_allocate(spa_t *spa)
2684 {
2685 	zio_t *zio;
2686 
2687 	ASSERT(MUTEX_HELD(&spa->spa_alloc_lock));
2688 
2689 	zio = avl_first(&spa->spa_alloc_tree);
2690 	if (zio == NULL)
2691 		return (NULL);
2692 
2693 	ASSERT(IO_IS_ALLOCATING(zio));
2694 
2695 	/*
2696 	 * Try to place a reservation for this zio. If we're unable to
2697 	 * reserve then we throttle.
2698 	 */
2699 	if (!metaslab_class_throttle_reserve(spa_normal_class(spa),
2700 	    zio->io_prop.zp_copies, zio, 0)) {
2701 		return (NULL);
2702 	}
2703 
2704 	avl_remove(&spa->spa_alloc_tree, zio);
2705 	ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
2706 
2707 	return (zio);
2708 }
2709 
2710 static int
2711 zio_dva_throttle(zio_t *zio)
2712 {
2713 	spa_t *spa = zio->io_spa;
2714 	zio_t *nio;
2715 
2716 	if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
2717 	    !spa_normal_class(zio->io_spa)->mc_alloc_throttle_enabled ||
2718 	    zio->io_child_type == ZIO_CHILD_GANG ||
2719 	    zio->io_flags & ZIO_FLAG_NODATA) {
2720 		return (ZIO_PIPELINE_CONTINUE);
2721 	}
2722 
2723 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2724 
2725 	ASSERT3U(zio->io_queued_timestamp, >, 0);
2726 	ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
2727 
2728 	mutex_enter(&spa->spa_alloc_lock);
2729 
2730 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
2731 	avl_add(&spa->spa_alloc_tree, zio);
2732 
2733 	nio = zio_io_to_allocate(zio->io_spa);
2734 	mutex_exit(&spa->spa_alloc_lock);
2735 
2736 	if (nio == zio)
2737 		return (ZIO_PIPELINE_CONTINUE);
2738 
2739 	if (nio != NULL) {
2740 		ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE);
2741 		/*
2742 		 * We are passing control to a new zio so make sure that
2743 		 * it is processed by a different thread. We do this to
2744 		 * avoid stack overflows that can occur when parents are
2745 		 * throttled and children are making progress. We allow
2746 		 * it to go to the head of the taskq since it's already
2747 		 * been waiting.
2748 		 */
2749 		zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE);
2750 	}
2751 	return (ZIO_PIPELINE_STOP);
2752 }
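
/*
 * Lifecycle note for the allocation throttle: a qualifying write parks
 * itself in spa_alloc_tree above; zio_io_to_allocate() releases queued
 * zios only as metaslab_class_throttle_reserve() admits them; and when
 * an allocating write finishes, zio_dva_throttle_done() returns the
 * reservation and calls zio_allocate_dispatch() below to wake the next
 * waiter.
 */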
2753 
2754 void
2755 zio_allocate_dispatch(spa_t *spa)
2756 {
2757 	zio_t *zio;
2758 
2759 	mutex_enter(&spa->spa_alloc_lock);
2760 	zio = zio_io_to_allocate(spa);
2761 	mutex_exit(&spa->spa_alloc_lock);
2762 	if (zio == NULL)
2763 		return;
2764 
2765 	ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
2766 	ASSERT0(zio->io_error);
2767 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
2768 }
2769 
2770 static int
2771 zio_dva_allocate(zio_t *zio)
2772 {
2773 	spa_t *spa = zio->io_spa;
2774 	metaslab_class_t *mc = spa_normal_class(spa);
2775 	blkptr_t *bp = zio->io_bp;
2776 	int error;
2777 	int flags = 0;
2778 
2779 	if (zio->io_gang_leader == NULL) {
2780 		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2781 		zio->io_gang_leader = zio;
2782 	}
2783 
2784 	ASSERT(BP_IS_HOLE(bp));
2785 	ASSERT0(BP_GET_NDVAS(bp));
2786 	ASSERT3U(zio->io_prop.zp_copies, >, 0);
2787 	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
2788 	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
2789 
2790 	if (zio->io_flags & ZIO_FLAG_NODATA) {
2791 		flags |= METASLAB_DONT_THROTTLE;
2792 	}
2793 	if (zio->io_flags & ZIO_FLAG_GANG_CHILD) {
2794 		flags |= METASLAB_GANG_CHILD;
2795 	}
2796 	if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) {
2797 		flags |= METASLAB_ASYNC_ALLOC;
2798 	}
2799 
2800 	error = metaslab_alloc(spa, mc, zio->io_size, bp,
2801 	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
2802 	    &zio->io_alloc_list, zio);
2803 
2804 	if (error != 0) {
2805 		spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, "
2806 		    "size %llu, error %d", spa_name(spa), zio, zio->io_size,
2807 		    error);
2808 		if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
2809 			return (zio_write_gang_block(zio));
2810 		zio->io_error = error;
2811 	}
2812 
2813 	return (ZIO_PIPELINE_CONTINUE);
2814 }
2815 
2816 static int
2817 zio_dva_free(zio_t *zio)
2818 {
2819 	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
2820 
2821 	return (ZIO_PIPELINE_CONTINUE);
2822 }
2823 
2824 static int
2825 zio_dva_claim(zio_t *zio)
2826 {
2827 	int error;
2828 
2829 	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
2830 	if (error)
2831 		zio->io_error = error;
2832 
2833 	return (ZIO_PIPELINE_CONTINUE);
2834 }
2835 
2836 /*
2837  * Undo an allocation.  This is used by zio_done() when an I/O fails
2838  * and we want to give back the block we just allocated.
2839  * This handles both normal blocks and gang blocks.
2840  */
2841 static void
2842 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
2843 {
2844 	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
2845 	ASSERT(zio->io_bp_override == NULL);
2846 
2847 	if (!BP_IS_HOLE(bp))
2848 		metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
2849 
2850 	if (gn != NULL) {
2851 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2852 			zio_dva_unallocate(zio, gn->gn_child[g],
2853 			    &gn->gn_gbh->zg_blkptr[g]);
2854 		}
2855 	}
2856 }
2857 
2858 /*
2859  * Try to allocate an intent log block.  Return 0 on success, errno on failure.
2860  */
2861 int
2862 zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
2863     uint64_t size, boolean_t use_slog)
2864 {
2865 	int error = 1;
2866 	zio_alloc_list_t io_alloc_list;
2867 
2868 	ASSERT(txg > spa_syncing_txg(spa));
2869 
2870 	metaslab_trace_init(&io_alloc_list);
2871 
2872 	if (use_slog) {
2873 		error = metaslab_alloc(spa, spa_log_class(spa), size,
2874 		    new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID,
2875 		    &io_alloc_list, NULL);
2876 	}
2877 
2878 	if (error) {
2879 		error = metaslab_alloc(spa, spa_normal_class(spa), size,
2880 		    new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID,
2881 		    &io_alloc_list, NULL);
2882 	}
2883 	metaslab_trace_fini(&io_alloc_list);
2884 
2885 	if (error == 0) {
2886 		BP_SET_LSIZE(new_bp, size);
2887 		BP_SET_PSIZE(new_bp, size);
2888 		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
2889 		BP_SET_CHECKSUM(new_bp,
2890 		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
2891 		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
2892 		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
2893 		BP_SET_LEVEL(new_bp, 0);
2894 		BP_SET_DEDUP(new_bp, 0);
2895 		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
2896 	}
2897 
2898 	return (error);
2899 }
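
/*
 * The real callers live in zil.c; they pass the previous log block as
 * old_bp, which METASLAB_HINTBP_AVOID uses to steer successive log
 * blocks away from the same vdev, and set use_slog to try a dedicated
 * log device before falling back to the normal class.
 */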
2900 
2901 /*
2902  * Free an intent log block.
2903  */
2904 void
2905 zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
2906 {
2907 	ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG);
2908 	ASSERT(!BP_IS_GANG(bp));
2909 
2910 	zio_free(spa, txg, bp);
2911 }
2912 
2913 /*
2914  * ==========================================================================
2915  * Read and write to physical devices
2916  * ==========================================================================
2917  */
2918 
2919 
2920 /*
2921  * Issue an I/O to the underlying vdev. Typically the issue pipeline
2922  * stops after this stage and will resume upon I/O completion.
2923  * However, there are instances where the vdev layer may need to
2924  * continue the pipeline when an I/O was not issued. Since the I/O
2925  * that was sent to the vdev layer might be different from the one
2926  * currently active in the pipeline (see vdev_queue_io()), we explicitly
2927  * force the underlying vdev layers to call either zio_execute() or
2928  * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
2929  */
2930 static int
2931 zio_vdev_io_start(zio_t *zio)
2932 {
2933 	vdev_t *vd = zio->io_vd;
2934 	uint64_t align;
2935 	spa_t *spa = zio->io_spa;
2936 
2937 	ASSERT(zio->io_error == 0);
2938 	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
2939 
2940 	if (vd == NULL) {
2941 		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
2942 			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
2943 
2944 		/*
2945 		 * The mirror_ops handle multiple DVAs in a single BP.
2946 		 */
2947 		vdev_mirror_ops.vdev_op_io_start(zio);
2948 		return (ZIO_PIPELINE_STOP);
2949 	}
2950 
2951 	ASSERT3P(zio->io_logical, !=, zio);
2952 
2953 	/*
2954 	 * We keep track of time-sensitive I/Os so that the scan thread
2955 	 * can quickly react to certain workloads.  In particular, we care
2956 	 * about non-scrubbing, top-level reads and writes with the following
2957 	 * characteristics:
2958 	 *	- synchronous writes of user data to non-slog devices
2959 	 *	- any reads of user data
2960 	 * When these conditions are met, adjust the timestamp of spa_last_io,
2961 	 * which allows the scan thread to adjust its workload accordingly.
2962 	 */
2963 	if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL &&
2964 	    vd == vd->vdev_top && !vd->vdev_islog &&
2965 	    zio->io_bookmark.zb_objset != DMU_META_OBJSET &&
2966 	    zio->io_txg != spa_syncing_txg(spa)) {
2967 		uint64_t old = spa->spa_last_io;
2968 		uint64_t new = ddi_get_lbolt64();
2969 		if (old != new)
2970 			(void) atomic_cas_64(&spa->spa_last_io, old, new);
2971 	}
2972 
2973 	align = 1ULL << vd->vdev_top->vdev_ashift;
2974 
2975 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
2976 	    P2PHASE(zio->io_size, align) != 0) {
2977 		/* Transform logical writes to span a full physical block. */
2978 		uint64_t asize = P2ROUNDUP(zio->io_size, align);
2979 		char *abuf = zio_buf_alloc(asize);
2980 		ASSERT(vd == vd->vdev_top);
2981 		if (zio->io_type == ZIO_TYPE_WRITE) {
2982 			bcopy(zio->io_data, abuf, zio->io_size);
2983 			bzero(abuf + zio->io_size, asize - zio->io_size);
2984 		}
2985 		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
2986 	}
2987 
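
	/*
	 * Worked example (hypothetical sizes): with a top-level ashift of
	 * 12 (4K sectors), a 1K logical write has P2PHASE(1024, 4096) != 0,
	 * so asize = P2ROUNDUP(1024, 4096) = 4096: the 1K of data is copied
	 * into a 4K buffer and the trailing 3K is zeroed before issue.
	 */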
2988 	/*
2989 	 * If this is not a physical io, make sure that it is properly aligned
2990 	 * before proceeding.
2991 	 */
2992 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
2993 		ASSERT0(P2PHASE(zio->io_offset, align));
2994 		ASSERT0(P2PHASE(zio->io_size, align));
2995 	} else {
2996 		/*
2997 		 * For physical writes, we allow 512b aligned writes and assume
2998 		 * the device will perform a read-modify-write as necessary.
2999 		 */
3000 		ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
3001 		ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
3002 	}
3003 
3004 	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
3005 
3006 	/*
3007 	 * If this is a repair I/O, and there's no self-healing involved --
3008 	 * that is, we're just resilvering what we expect to resilver --
3009 	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
3010 	 * This prevents spurious resilvering with nested replication.
3011 	 * For example, given a mirror of mirrors, (A+B)+(C+D), if only
3012 	 * A is out of date, we'll read from C+D, then use the data to
3013 	 * resilver A+B -- but we don't actually want to resilver B, just A.
3014 	 * The top-level mirror has no way to know this, so instead we just
3015 	 * discard unnecessary repairs as we work our way down the vdev tree.
3016 	 * The same logic applies to any form of nested replication:
3017 	 * ditto + mirror, RAID-Z + replacing, etc.  This covers them all.
3018 	 */
3019 	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
3020 	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
3021 	    zio->io_txg != 0 &&	/* not a delegated i/o */
3022 	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
3023 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3024 		zio_vdev_io_bypass(zio);
3025 		return (ZIO_PIPELINE_CONTINUE);
3026 	}
3027 
3028 	if (vd->vdev_ops->vdev_op_leaf &&
3029 	    (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {
3030 
3031 		if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
3032 			return (ZIO_PIPELINE_CONTINUE);
3033 
3034 		if ((zio = vdev_queue_io(zio)) == NULL)
3035 			return (ZIO_PIPELINE_STOP);
3036 
3037 		if (!vdev_accessible(vd, zio)) {
3038 			zio->io_error = SET_ERROR(ENXIO);
3039 			zio_interrupt(zio);
3040 			return (ZIO_PIPELINE_STOP);
3041 		}
3042 	}
3043 
3044 	vd->vdev_ops->vdev_op_io_start(zio);
3045 	return (ZIO_PIPELINE_STOP);
3046 }
3047 
3048 static int
3049 zio_vdev_io_done(zio_t *zio)
3050 {
3051 	vdev_t *vd = zio->io_vd;
3052 	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
3053 	boolean_t unexpected_error = B_FALSE;
3054 
3055 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
3056 		return (ZIO_PIPELINE_STOP);
3057 
3058 	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
3059 
3060 	if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {
3061 
3062 		vdev_queue_io_done(zio);
3063 
3064 		if (zio->io_type == ZIO_TYPE_WRITE)
3065 			vdev_cache_write(zio);
3066 
3067 		if (zio_injection_enabled && zio->io_error == 0)
3068 			zio->io_error = zio_handle_device_injection(vd,
3069 			    zio, EIO);
3070 
3071 		if (zio_injection_enabled && zio->io_error == 0)
3072 			zio->io_error = zio_handle_label_injection(zio, EIO);
3073 
3074 		if (zio->io_error) {
3075 			if (!vdev_accessible(vd, zio)) {
3076 				zio->io_error = SET_ERROR(ENXIO);
3077 			} else {
3078 				unexpected_error = B_TRUE;
3079 			}
3080 		}
3081 	}
3082 
3083 	ops->vdev_op_io_done(zio);
3084 
3085 	if (unexpected_error)
3086 		VERIFY(vdev_probe(vd, zio) == NULL);
3087 
3088 	return (ZIO_PIPELINE_CONTINUE);
3089 }
3090 
3091 /*
3092  * For non-raidz ZIOs, we can just copy aside the bad data read from the
3093  * disk, and use that to finish the checksum ereport later.
3094  */
3095 static void
3096 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
3097     const void *good_buf)
3098 {
3099 	/* no processing needed */
3100 	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
3101 }
3102 
3103 /*ARGSUSED*/
3104 void
3105 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
3106 {
3107 	void *buf = zio_buf_alloc(zio->io_size);
3108 
3109 	bcopy(zio->io_data, buf, zio->io_size);
3110 
3111 	zcr->zcr_cbinfo = zio->io_size;
3112 	zcr->zcr_cbdata = buf;
3113 	zcr->zcr_finish = zio_vsd_default_cksum_finish;
3114 	zcr->zcr_free = zio_buf_free;
3115 }
3116 
3117 static int
3118 zio_vdev_io_assess(zio_t *zio)
3119 {
3120 	vdev_t *vd = zio->io_vd;
3121 
3122 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
3123 		return (ZIO_PIPELINE_STOP);
3124 
3125 	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
3126 		spa_config_exit(zio->io_spa, SCL_ZIO, zio);
3127 
3128 	if (zio->io_vsd != NULL) {
3129 		zio->io_vsd_ops->vsd_free(zio);
3130 		zio->io_vsd = NULL;
3131 	}
3132 
3133 	if (zio_injection_enabled && zio->io_error == 0)
3134 		zio->io_error = zio_handle_fault_injection(zio, EIO);
3135 
3136 	/*
3137 	 * If the I/O failed, determine whether we should attempt to retry it.
3138 	 *
3139 	 * On retry, we cut in line in the issue queue, since we don't want
3140 	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
3141 	 */
3142 	if (zio->io_error && vd == NULL &&
3143 	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
3144 		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
3145 		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
3146 		zio->io_error = 0;
3147 		zio->io_flags |= ZIO_FLAG_IO_RETRY |
3148 		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
3149 		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
3150 		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
3151 		    zio_requeue_io_start_cut_in_line);
3152 		return (ZIO_PIPELINE_STOP);
3153 	}
3154 
3155 	/*
3156 	 * If we got an error on a leaf device, convert it to ENXIO
3157 	 * if the device is not accessible at all.
3158 	 */
3159 	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
3160 	    !vdev_accessible(vd, zio))
3161 		zio->io_error = SET_ERROR(ENXIO);
3162 
3163 	/*
3164 	 * If we can't write to an interior vdev (mirror or RAID-Z),
3165 	 * set vdev_cant_write so that we stop trying to allocate from it.
3166 	 */
3167 	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
3168 	    vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
3169 		vd->vdev_cant_write = B_TRUE;
3170 	}
3171 
3172 	/*
3173 	 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
3174 	 * attempts will ever succeed. In this case we set a persistent bit so
3175 	 * that we don't bother with it in the future.
3176 	 */
3177 	if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
3178 	    zio->io_type == ZIO_TYPE_IOCTL &&
3179 	    zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
3180 		vd->vdev_nowritecache = B_TRUE;
3181 
3182 	if (zio->io_error)
3183 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3184 
3185 	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
3186 	    zio->io_physdone != NULL) {
3187 		ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
3188 		ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
3189 		zio->io_physdone(zio->io_logical);
3190 	}
3191 
3192 	return (ZIO_PIPELINE_CONTINUE);
3193 }
3194 
3195 void
3196 zio_vdev_io_reissue(zio_t *zio)
3197 {
3198 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
3199 	ASSERT(zio->io_error == 0);
3200 
3201 	zio->io_stage >>= 1;
3202 }
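
/*
 * A note on the ">>= 1" in these helpers: pipeline stages are one-hot
 * bits, so backing io_stage up by one bit makes zio_execute()'s
 * advance-to-the-next-stage step land on the same stage again, causing
 * it to be reissued or redone.
 */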
3203 
3204 void
3205 zio_vdev_io_redone(zio_t *zio)
3206 {
3207 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
3208 
3209 	zio->io_stage >>= 1;
3210 }
3211 
3212 void
3213 zio_vdev_io_bypass(zio_t *zio)
3214 {
3215 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
3216 	ASSERT(zio->io_error == 0);
3217 
3218 	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
3219 	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
3220 }
3221 
3222 /*
3223  * ==========================================================================
3224  * Generate and verify checksums
3225  * ==========================================================================
3226  */
3227 static int
3228 zio_checksum_generate(zio_t *zio)
3229 {
3230 	blkptr_t *bp = zio->io_bp;
3231 	enum zio_checksum checksum;
3232 
3233 	if (bp == NULL) {
3234 		/*
3235 		 * This is zio_write_phys().
3236 		 * We're either generating a label checksum, or none at all.
3237 		 */
3238 		checksum = zio->io_prop.zp_checksum;
3239 
3240 		if (checksum == ZIO_CHECKSUM_OFF)
3241 			return (ZIO_PIPELINE_CONTINUE);
3242 
3243 		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
3244 	} else {
3245 		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
3246 			ASSERT(!IO_IS_ALLOCATING(zio));
3247 			checksum = ZIO_CHECKSUM_GANG_HEADER;
3248 		} else {
3249 			checksum = BP_GET_CHECKSUM(bp);
3250 		}
3251 	}
3252 
3253 	zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size);
3254 
3255 	return (ZIO_PIPELINE_CONTINUE);
3256 }
3257 
3258 static int
3259 zio_checksum_verify(zio_t *zio)
3260 {
3261 	zio_bad_cksum_t info;
3262 	blkptr_t *bp = zio->io_bp;
3263 	int error;
3264 
3265 	ASSERT(zio->io_vd != NULL);
3266 
3267 	if (bp == NULL) {
3268 		/*
3269 		 * This is zio_read_phys().
3270 		 * We're either verifying a label checksum, or nothing at all.
3271 		 */
3272 		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
3273 			return (ZIO_PIPELINE_CONTINUE);
3274 
3275 		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
3276 	}
3277 
3278 	if ((error = zio_checksum_error(zio, &info)) != 0) {
3279 		zio->io_error = error;
3280 		if (error == ECKSUM &&
3281 		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
3282 			zfs_ereport_start_checksum(zio->io_spa,
3283 			    zio->io_vd, zio, zio->io_offset,
3284 			    zio->io_size, NULL, &info);
3285 		}
3286 	}
3287 
3288 	return (ZIO_PIPELINE_CONTINUE);
3289 }
3290 
3291 /*
3292  * Called by RAID-Z to ensure we don't compute the checksum twice.
3293  */
3294 void
3295 zio_checksum_verified(zio_t *zio)
3296 {
3297 	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
3298 }
3299 
3300 /*
3301  * ==========================================================================
3302  * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
3303  * An error of 0 indicates success.  ENXIO indicates whole-device failure,
3304  * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
3305  * indicate errors that are specific to one I/O, and most likely permanent.
3306  * Any other error is presumed to be worse because we weren't expecting it.
3307  * ==========================================================================
3308  */
3309 int
3310 zio_worst_error(int e1, int e2)
3311 {
3312 	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
3313 	int r1, r2;
3314 
3315 	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
3316 		if (e1 == zio_error_rank[r1])
3317 			break;
3318 
3319 	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
3320 		if (e2 == zio_error_rank[r2])
3321 			break;
3322 
3323 	return (r1 > r2 ? e1 : e2);
3324 }
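
/*
 * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and an
 * errno outside the table (say, EINVAL) outranks everything listed.
 */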
3325 
3326 /*
3327  * ==========================================================================
3328  * I/O completion
3329  * ==========================================================================
3330  */
3331 static int
3332 zio_ready(zio_t *zio)
3333 {
3334 	blkptr_t *bp = zio->io_bp;
3335 	zio_t *pio, *pio_next;
3336 	zio_link_t *zl = NULL;
3337 
3338 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
3339 	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY))
3340 		return (ZIO_PIPELINE_STOP);
3341 
3342 	if (zio->io_ready) {
3343 		ASSERT(IO_IS_ALLOCATING(zio));
3344 		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
3345 		    (zio->io_flags & ZIO_FLAG_NOPWRITE));
3346 		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
3347 
3348 		zio->io_ready(zio);
3349 	}
3350 
3351 	if (bp != NULL && bp != &zio->io_bp_copy)
3352 		zio->io_bp_copy = *bp;
3353 
3354 	if (zio->io_error != 0) {
3355 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3356 
3357 		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3358 			ASSERT(IO_IS_ALLOCATING(zio));
3359 			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3360 			/*
3361 			 * We were unable to allocate anything; unreserve and
3362 			 * issue the next I/O to allocate.
3363 			 */
3364 			metaslab_class_throttle_unreserve(
3365 			    spa_normal_class(zio->io_spa),
3366 			    zio->io_prop.zp_copies, zio);
3367 			zio_allocate_dispatch(zio->io_spa);
3368 		}
3369 	}
3370 
3371 	mutex_enter(&zio->io_lock);
3372 	zio->io_state[ZIO_WAIT_READY] = 1;
3373 	pio = zio_walk_parents(zio, &zl);
3374 	mutex_exit(&zio->io_lock);
3375 
3376 	/*
3377 	 * As we notify zio's parents, new parents could be added.
3378 	 * New parents go to the head of zio's io_parent_list, however,
3379 	 * so we will (correctly) not notify them.  The remainder of zio's
3380 	 * io_parent_list, from 'pio_next' onward, cannot change because
3381 	 * all parents must wait for us to be done before they can be done.
3382 	 */
3383 	for (; pio != NULL; pio = pio_next) {
3384 		pio_next = zio_walk_parents(zio, &zl);
3385 		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
3386 	}
3387 
3388 	if (zio->io_flags & ZIO_FLAG_NODATA) {
3389 		if (BP_IS_GANG(bp)) {
3390 			zio->io_flags &= ~ZIO_FLAG_NODATA;
3391 		} else {
3392 			ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE);
3393 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
3394 		}
3395 	}
3396 
3397 	if (zio_injection_enabled &&
3398 	    zio->io_spa->spa_syncing_txg == zio->io_txg)
3399 		zio_handle_ignored_writes(zio);
3400 
3401 	return (ZIO_PIPELINE_CONTINUE);
3402 }
3403 
3404 /*
3405  * Update the allocation throttle accounting.
3406  */
3407 static void
3408 zio_dva_throttle_done(zio_t *zio)
3409 {
3410 	zio_t *lio = zio->io_logical;
3411 	zio_t *pio = zio_unique_parent(zio);
3412 	vdev_t *vd = zio->io_vd;
3413 	int flags = METASLAB_ASYNC_ALLOC;
3414 
3415 	ASSERT3P(zio->io_bp, !=, NULL);
3416 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
3417 	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
3418 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
3419 	ASSERT(vd != NULL);
3420 	ASSERT3P(vd, ==, vd->vdev_top);
3421 	ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY)));
3422 	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
3423 	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
3424 	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
3425 
3426 	/*
3427 	 * Parents of gang children can have two flavors -- ones that
3428 	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
3429 	 * and ones that allocated the constituent blocks. The allocation
3430 	 * throttle needs to know the allocating parent zio so we must find
3431 	 * it here.
3432 	 */
3433 	if (pio->io_child_type == ZIO_CHILD_GANG) {
3434 		/*
3435 		 * If our parent is a rewrite gang child then our grandparent
3436 		 * would have been the one that performed the allocation.
3437 		 */
3438 		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
3439 			pio = zio_unique_parent(pio);
3440 		flags |= METASLAB_GANG_CHILD;
3441 	}
3442 
3443 	ASSERT(IO_IS_ALLOCATING(pio));
3444 	ASSERT3P(zio, !=, zio->io_logical);
3445 	ASSERT(zio->io_logical != NULL);
3446 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
3447 	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
3448 
3449 	mutex_enter(&pio->io_lock);
3450 	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags);
3451 	mutex_exit(&pio->io_lock);
3452 
3453 	metaslab_class_throttle_unreserve(spa_normal_class(zio->io_spa),
3454 	    1, pio);
3455 
3456 	/*
3457 	 * Call into the pipeline to see if there is more work that
3458 	 * needs to be done. If there is work to be done it will be
3459 	 * dispatched to another taskq thread.
3460 	 */
3461 	zio_allocate_dispatch(zio->io_spa);
3462 }
3463 
3464 static int
3465 zio_done(zio_t *zio)
3466 {
3467 	spa_t *spa = zio->io_spa;
3468 	zio_t *lio = zio->io_logical;
3469 	blkptr_t *bp = zio->io_bp;
3470 	vdev_t *vd = zio->io_vd;
3471 	uint64_t psize = zio->io_size;
3472 	zio_t *pio, *pio_next;
3473 	metaslab_class_t *mc = spa_normal_class(spa);
3474 	zio_link_t *zl = NULL;
3475 
3476 	/*
3477 	 * If our children haven't all completed,
3478 	 * wait for them and then repeat this pipeline stage.
3479 	 */
3480 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
3481 	    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
3482 	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) ||
3483 	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
3484 		return (ZIO_PIPELINE_STOP);
3485 
3486 	/*
3487 	 * If the allocation throttle is enabled, then update the accounting.
3488 	 * We only track child I/Os that are part of an allocating async
3489 	 * write. We must do this since the allocation is performed
3490 	 * by the logical I/O but the actual write is done by child I/Os.
3491 	 */
3492 	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
3493 	    zio->io_child_type == ZIO_CHILD_VDEV) {
3494 		ASSERT(mc->mc_alloc_throttle_enabled);
3495 		zio_dva_throttle_done(zio);
3496 	}
3497 
3498 	/*
3499 	 * If the allocation throttle is enabled, verify that
3500 	 * we have decremented the refcounts for every I/O that was throttled.
3501 	 */
3502 	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3503 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3504 		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3505 		ASSERT(bp != NULL);
3506 		metaslab_group_alloc_verify(spa, zio->io_bp, zio);
3507 		VERIFY(refcount_not_held(&mc->mc_alloc_slots, zio));
3508 	}
3509 
3510 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
3511 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
3512 			ASSERT(zio->io_children[c][w] == 0);
3513 
3514 	if (bp != NULL && !BP_IS_EMBEDDED(bp)) {
3515 		ASSERT(bp->blk_pad[0] == 0);
3516 		ASSERT(bp->blk_pad[1] == 0);
3517 		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
3518 		    (bp == zio_unique_parent(zio)->io_bp));
3519 		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
3520 		    zio->io_bp_override == NULL &&
3521 		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
3522 			ASSERT(!BP_SHOULD_BYTESWAP(bp));
3523 			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
3524 			ASSERT(BP_COUNT_GANG(bp) == 0 ||
3525 			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
3526 		}
3527 		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
3528 			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
3529 	}
3530 
3531 	/*
3532 	 * If there were child vdev/gang/ddt errors, they apply to us now.
3533 	 */
3534 	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
3535 	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
3536 	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
3537 
3538 	/*
3539 	 * If the I/O on the transformed data was successful, generate any
3540 	 * checksum reports now while we still have the transformed data.
3541 	 */
3542 	if (zio->io_error == 0) {
3543 		while (zio->io_cksum_report != NULL) {
3544 			zio_cksum_report_t *zcr = zio->io_cksum_report;
3545 			uint64_t align = zcr->zcr_align;
3546 			uint64_t asize = P2ROUNDUP(psize, align);
3547 			char *abuf = zio->io_data;
3548 
3549 			if (asize != psize) {
3550 				abuf = zio_buf_alloc(asize);
3551 				bcopy(zio->io_data, abuf, psize);
3552 				bzero(abuf + psize, asize - psize);
3553 			}
3554 
3555 			zio->io_cksum_report = zcr->zcr_next;
3556 			zcr->zcr_next = NULL;
3557 			zcr->zcr_finish(zcr, abuf);
3558 			zfs_ereport_free_checksum(zcr);
3559 
3560 			if (asize != psize)
3561 				zio_buf_free(abuf, asize);
3562 		}
3563 	}
3564 
3565 	zio_pop_transforms(zio);	/* note: may set zio->io_error */
3566 
3567 	vdev_stat_update(zio, psize);
3568 
3569 	if (zio->io_error) {
3570 		/*
3571 		 * If this I/O is attached to a particular vdev,
3572 		 * generate an error message describing the I/O failure
3573 		 * at the block level.  We ignore these errors if the
3574 		 * device is currently unavailable.
3575 		 */
3576 		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
3577 			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);
3578 
3579 		if ((zio->io_error == EIO || !(zio->io_flags &
3580 		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
3581 		    zio == lio) {
3582 			/*
3583 			 * For logical I/O requests, tell the SPA to log the
3584 			 * error and generate a logical data ereport.
3585 			 */
3586 			spa_log_error(spa, zio);
3587 			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
3588 			    0, 0);
3589 		}
3590 	}
3591 
3592 	if (zio->io_error && zio == lio) {
3593 		/*
3594 		 * Determine whether zio should be reexecuted.  This will
3595 		 * propagate all the way to the root via zio_notify_parent().
3596 		 */
3597 		ASSERT(vd == NULL && bp != NULL);
3598 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3599 
3600 		if (IO_IS_ALLOCATING(zio) &&
3601 		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
3602 			if (zio->io_error != ENOSPC)
3603 				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
3604 			else
3605 				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3606 		}
3607 
3608 		if ((zio->io_type == ZIO_TYPE_READ ||
3609 		    zio->io_type == ZIO_TYPE_FREE) &&
3610 		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
3611 		    zio->io_error == ENXIO &&
3612 		    spa_load_state(spa) == SPA_LOAD_NONE &&
3613 		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
3614 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3615 
3616 		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
3617 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3618 
3619 		/*
3620 		 * Here is a possibly good place to attempt to do
3621 		 * either combinatorial reconstruction or error correction
3622 		 * based on checksums.  It also might be a good place
3623 		 * to send out preliminary ereports before we suspend
3624 		 * processing.
3625 		 */
3626 	}
3627 
3628 	/*
3629 	 * If there were logical child errors, they apply to us now.
3630 	 * We defer this until now to avoid conflating logical child
3631 	 * errors with errors that happened to the zio itself when
3632 	 * updating vdev stats and reporting FMA events above.
3633 	 */
3634 	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
3635 
3636 	if ((zio->io_error || zio->io_reexecute) &&
3637 	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
3638 	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
3639 		zio_dva_unallocate(zio, zio->io_gang_tree, bp);
3640 
3641 	zio_gang_tree_free(&zio->io_gang_tree);
3642 
3643 	/*
3644 	 * Godfather I/Os should never suspend.
3645 	 */
3646 	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
3647 	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
3648 		zio->io_reexecute = 0;
3649 
3650 	if (zio->io_reexecute) {
3651 		/*
3652 		 * This is a logical I/O that wants to reexecute.
3653 		 *
3654 		 * Reexecute is top-down.  When an i/o fails, if it's not
3655 		 * the root, it simply notifies its parent and sticks around.
3656 		 * The parent, seeing that it still has children in zio_done(),
3657 		 * does the same.  This percolates all the way up to the root.
3658 		 * The root i/o will reexecute or suspend the entire tree.
3659 		 *
3660 		 * This approach ensures that zio_reexecute() honors
3661 		 * all the original i/o dependency relationships, e.g.
3662 		 * parents not executing until children are ready.
3663 		 */
3664 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3665 
3666 		zio->io_gang_leader = NULL;
3667 
3668 		mutex_enter(&zio->io_lock);
3669 		zio->io_state[ZIO_WAIT_DONE] = 1;
3670 		mutex_exit(&zio->io_lock);
3671 
3672 		/*
3673 		 * "The Godfather" I/O monitors its children but is
3674 		 * not a true parent to them. It will track them through
3675 		 * the pipeline but severs its ties whenever they get into
3676 		 * trouble (e.g. suspended). This allows "The Godfather"
3677 		 * I/O to return status without blocking.
3678 		 */
3679 		zl = NULL;
3680 		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
3681 		    pio = pio_next) {
3682 			zio_link_t *remove_zl = zl;
3683 			pio_next = zio_walk_parents(zio, &zl);
3684 
3685 			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
3686 			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
3687 				zio_remove_child(pio, zio, remove_zl);
3688 				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3689 			}
3690 		}
3691 
3692 		if ((pio = zio_unique_parent(zio)) != NULL) {
3693 			/*
3694 			 * We're not a root i/o, so there's nothing to do
3695 			 * but notify our parent.  Don't propagate errors
3696 			 * upward since we haven't permanently failed yet.
3697 			 */
3698 			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
3699 			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
3700 			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3701 		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
3702 			/*
3703 			 * We'd fail again if we reexecuted now, so suspend
3704 			 * until conditions improve (e.g. device comes online).
3705 			 */
3706 			zio_suspend(spa, zio);
3707 		} else {
3708 			/*
3709 			 * Reexecution is potentially a huge amount of work.
3710 			 * Hand it off to the otherwise-unused claim taskq.
3711 			 */
3712 			ASSERT(zio->io_tqent.tqent_next == NULL);
3713 			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
3714 			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
3715 			    0, &zio->io_tqent);
3716 		}
3717 		return (ZIO_PIPELINE_STOP);
3718 	}
3719 
3720 	ASSERT(zio->io_child_count == 0);
3721 	ASSERT(zio->io_reexecute == 0);
3722 	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
3723 
3724 	/*
3725 	 * Report any checksum errors, since the I/O is complete.
3726 	 */
3727 	while (zio->io_cksum_report != NULL) {
3728 		zio_cksum_report_t *zcr = zio->io_cksum_report;
3729 		zio->io_cksum_report = zcr->zcr_next;
3730 		zcr->zcr_next = NULL;
3731 		zcr->zcr_finish(zcr, NULL);
3732 		zfs_ereport_free_checksum(zcr);
3733 	}
3734 
3735 	/*
3736 	 * It is the responsibility of the done callback to ensure that this
3737 	 * particular zio is no longer discoverable for adoption, and as
3738 	 * such, cannot acquire any new parents.
3739 	 */
3740 	if (zio->io_done)
3741 		zio->io_done(zio);
3742 
3743 	mutex_enter(&zio->io_lock);
3744 	zio->io_state[ZIO_WAIT_DONE] = 1;
3745 	mutex_exit(&zio->io_lock);
3746 
3747 	zl = NULL;
3748 	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
3749 		zio_link_t *remove_zl = zl;
3750 		pio_next = zio_walk_parents(zio, &zl);
3751 		zio_remove_child(pio, zio, remove_zl);
3752 		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3753 	}
3754 
3755 	if (zio->io_waiter != NULL) {
3756 		mutex_enter(&zio->io_lock);
3757 		zio->io_executor = NULL;
3758 		cv_broadcast(&zio->io_cv);
3759 		mutex_exit(&zio->io_lock);
3760 	} else {
3761 		zio_destroy(zio);
3762 	}
3763 
3764 	return (ZIO_PIPELINE_STOP);
3765 }
3766 
3767 /*
3768  * ==========================================================================
3769  * I/O pipeline definition
3770  * ==========================================================================
3771  */
3772 static zio_pipe_stage_t *zio_pipeline[] = {
3773 	NULL,
3774 	zio_read_bp_init,
3775 	zio_write_bp_init,
3776 	zio_free_bp_init,
3777 	zio_issue_async,
3778 	zio_write_compress,
3779 	zio_checksum_generate,
3780 	zio_nop_write,
3781 	zio_ddt_read_start,
3782 	zio_ddt_read_done,
3783 	zio_ddt_write,
3784 	zio_ddt_free,
3785 	zio_gang_assemble,
3786 	zio_gang_issue,
3787 	zio_dva_throttle,
3788 	zio_dva_allocate,
3789 	zio_dva_free,
3790 	zio_dva_claim,
3791 	zio_ready,
3792 	zio_vdev_io_start,
3793 	zio_vdev_io_done,
3794 	zio_vdev_io_assess,
3795 	zio_checksum_verify,
3796 	zio_done
3797 };
3798 
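/*
 * Each zio selects a subset of these stages via its io_pipeline bitmask,
 * and zio_execute() runs the selected stages in the order listed above.
 * A stage returns ZIO_PIPELINE_CONTINUE to advance, or ZIO_PIPELINE_STOP
 * to yield the pipeline (e.g. while waiting for children, as zio_done()
 * does above).
 */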
3799 
3800 
3801 
3802 /*
3803  * Compare two zbookmark_phys_t's to see which we would reach first in a
3804  * pre-order traversal of the object tree.
3805  *
3806  * This is simple in every case aside from the meta-dnode object. For all other
3807  * objects, we traverse them in order (object 1 before object 2, and so on).
3808  * However, all of these objects are traversed while traversing object 0, since
3809  * the data it points to is the list of objects.  Thus, we need to convert to a
3810  * canonical representation so we can compare meta-dnode bookmarks to
3811  * non-meta-dnode bookmarks.
3812  *
3813  * We do this by calculating "equivalents" for each field of the zbookmark.
3814  * zbookmarks outside of the meta-dnode use their own object and level, and
3815  * calculate the level 0 equivalent (the first L0 blkid that is contained in the
3816  * blocks this bookmark refers to) by multiplying their blkid by their span
3817  * (the number of L0 blocks contained within one block at their level).
3818  * zbookmarks inside the meta-dnode calculate their object equivalent
3819  * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
3820  * level + 1<<31 (any value larger than a level could ever be) for their level.
3821  * This causes them to always compare before a bookmark in their object
3822  * equivalent, compare appropriately to bookmarks in other objects, and to
3823  * compare appropriately to other bookmarks in the meta-dnode.
3824  */
3825 int
3826 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
3827     const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
3828 {
3829 	/*
3830 	 * These variables represent the "equivalent" values for the zbookmark,
3831 	 * after converting zbookmarks inside the meta dnode to their
3832 	 * normal-object equivalents.
3833 	 */
3834 	uint64_t zb1obj, zb2obj;
3835 	uint64_t zb1L0, zb2L0;
3836 	uint64_t zb1level, zb2level;
3837 
3838 	if (zb1->zb_object == zb2->zb_object &&
3839 	    zb1->zb_level == zb2->zb_level &&
3840 	    zb1->zb_blkid == zb2->zb_blkid)
3841 		return (0);
3842 
3843 	/*
3844 	 * BP_SPANB calculates the span at this level, in L0 blocks.
3845 	 */
3846 	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
3847 	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
3848 
3849 	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
3850 		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
3851 		zb1L0 = 0;
3852 		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
3853 	} else {
3854 		zb1obj = zb1->zb_object;
3855 		zb1level = zb1->zb_level;
3856 	}
3857 
3858 	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
3859 		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
3860 		zb2L0 = 0;
3861 		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
3862 	} else {
3863 		zb2obj = zb2->zb_object;
3864 		zb2level = zb2->zb_level;
3865 	}
3866 
3867 	/* Now that we have a canonical representation, do the comparison. */
3868 	if (zb1obj != zb2obj)
3869 		return (zb1obj < zb2obj ? -1 : 1);
3870 	else if (zb1L0 != zb2L0)
3871 		return (zb1L0 < zb2L0 ? -1 : 1);
3872 	else if (zb1level != zb2level)
3873 		return (zb1level > zb2level ? -1 : 1);
3874 	/*
3875 	 * This can (theoretically) happen if the bookmarks have the same object
3876 	 * and level but different blkids, and the block sizes are not the same.
3877 	 * There is presently no way to change the indirect block sizes.
3878 	 */
3879 	return (0);
3880 }
3881 
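/*
 * A worked example of the canonicalization above, with hypothetical values:
 * given 128K indirect blocks (indblkshift == 17) and SPA_BLKPTRSHIFT == 7,
 * a level-1 block spans BP_SPANB(17, 1) == 1024 L0 blocks, so two bookmarks
 * in object 5 canonicalize as:
 *
 *	{object 5, level 1, blkid 2}	-> L0equiv 2048, level 1
 *	{object 5, level 0, blkid 2047}	-> L0equiv 2047, level 0
 *
 * The second sorts first (smaller L0equiv).  Were its blkid 2048 instead,
 * the L0equivs would tie and the level-1 bookmark would sort first, since
 * parents precede their children in a pre-order traversal.
 */
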
3882 /*
3883  *  This function checks the following: given that last_block is the place that
3884  *  our traversal stopped last time, does that guarantee that we've visited
3885  *  every node under subtree_root?  The raw output of zbookmark_compare
3886  *  alone can't answer this, so we pass in a modified version of
3887  *  subtree_root; by incrementing the block id, and then checking whether
3888  *  last_block is before or equal to that, we can tell whether or not having
3889  *  visited last_block implies that all of subtree_root's children have been
3890  *  visited.
3891  */
3892 boolean_t
3893 zbookmark_subtree_completed(const dnode_phys_t *dnp,
3894     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
3895 {
3896 	zbookmark_phys_t mod_zb = *subtree_root;
3897 	mod_zb.zb_blkid++;
3898 	ASSERT(last_block->zb_level == 0);
3899 
3900 	/* The objset_phys_t isn't before anything. */
3901 	if (dnp == NULL)
3902 		return (B_FALSE);
3903 
3904 	/*
3905 	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
3906 	 * data block size in sectors, because that variable is only used if
3907 	 * the bookmark refers to a block in the meta-dnode.  Since we don't
3908 	 * know without examining it what object it refers to, and there's no
3909 	 * harm in passing in this value in other cases, we always pass it in.
3910 	 *
3911 	 * We pass in 0 for the indirect block size shift because zb2 must be
3912 	 * level 0.  The indirect block size is only used to calculate the span
3913 	 * of the bookmark, but since the bookmark must be level 0, the span is
3914 	 * always 1, so the math works out.
3915 	 *
3916 	 * If you make changes to how the zbookmark_compare code works, be sure
3917 	 * to make sure that this code still works afterwards.
3918 	 */
3919 	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
3920 	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
3921 	    last_block) <= 0);
3922 }
3923
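/*
 * Continuing the hypothetical example above: with a level-1 span of 1024 L0
 * blocks, a subtree_root of {level 1, blkid 2} covers L0 blkids [2048, 3072).
 * mod_zb becomes {level 1, blkid 3}, whose L0 equivalent (3072) is the first
 * block past the subtree, so we return B_TRUE exactly when last_block has
 * reached L0 blkid 3072 or beyond within the same object.
 */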