xref: /illumos-gate/usr/src/uts/common/fs/zfs/zio.c (revision e914ace2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
24  * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  */
27 
28 #include <sys/sysmacros.h>
29 #include <sys/zfs_context.h>
30 #include <sys/fm/fs/zfs.h>
31 #include <sys/spa.h>
32 #include <sys/txg.h>
33 #include <sys/spa_impl.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/zio_impl.h>
36 #include <sys/zio_compress.h>
37 #include <sys/zio_checksum.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/arc.h>
40 #include <sys/ddt.h>
41 #include <sys/blkptr.h>
42 #include <sys/zfeature.h>
43 #include <sys/metaslab_impl.h>
44 #include <sys/abd.h>
45 #include <sys/cityhash.h>
46 
47 /*
48  * ==========================================================================
49  * I/O type descriptions
50  * ==========================================================================
51  */
52 const char *zio_type_name[ZIO_TYPES] = {
53 	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
54 	"zio_ioctl"
55 };
56 
57 boolean_t zio_dva_throttle_enabled = B_TRUE;
58 
59 /*
60  * ==========================================================================
61  * I/O kmem caches
62  * ==========================================================================
63  */
64 kmem_cache_t *zio_cache;
65 kmem_cache_t *zio_link_cache;
66 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
67 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
68 
69 #ifdef _KERNEL
70 extern vmem_t *zio_alloc_arena;
71 #endif
72 
73 #define	ZIO_PIPELINE_CONTINUE		0x100
74 #define	ZIO_PIPELINE_STOP		0x101
75 
76 #define	BP_SPANB(indblkshift, level) \
77 	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
78 #define	COMPARE_META_LEVEL	0x80000000ul
79 /*
80  * The following actions directly effect the spa's sync-to-convergence logic.
81  * The values below define the sync pass when we start performing the action.
82  * Care should be taken when changing these values as they directly impact
83  * spa_sync() performance. Tuning these values may introduce subtle performance
84  * pathologies and should only be done in the context of performance analysis.
85  * These tunables will eventually be removed and replaced with #defines once
86  * enough analysis has been done to determine optimal values.
87  *
88  * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
89  * regular blocks are not deferred.
90  */
91 int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
92 int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
93 int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
94 
95 /*
96  * An allocating zio is one that either currently has the DVA allocate
97  * stage set or will have it later in its lifetime.
98  */
99 #define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
100 
101 boolean_t	zio_requeue_io_start_cut_in_line = B_TRUE;
102 
103 #ifdef ZFS_DEBUG
104 int zio_buf_debug_limit = 16384;
105 #else
106 int zio_buf_debug_limit = 0;
107 #endif
108 
109 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
110 
111 void
112 zio_init(void)
113 {
114 	size_t c;
115 	vmem_t *data_alloc_arena = NULL;
116 
117 #ifdef _KERNEL
118 	data_alloc_arena = zio_alloc_arena;
119 #endif
120 	zio_cache = kmem_cache_create("zio_cache",
121 	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
122 	zio_link_cache = kmem_cache_create("zio_link_cache",
123 	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
124 
125 	/*
126 	 * For small buffers, we want a cache for each multiple of
127 	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
128 	 * for each quarter-power of 2.
129 	 */
130 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
131 		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
132 		size_t p2 = size;
133 		size_t align = 0;
134 		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;
135 
136 		while (!ISP2(p2))
137 			p2 &= p2 - 1;
138 
139 #ifndef _KERNEL
140 		/*
141 		 * If we are using watchpoints, put each buffer on its own page,
142 		 * to eliminate the performance overhead of trapping to the
143 		 * kernel when modifying a non-watched buffer that shares the
144 		 * page with a watched buffer.
145 		 */
146 		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
147 			continue;
148 #endif
149 		if (size <= 4 * SPA_MINBLOCKSIZE) {
150 			align = SPA_MINBLOCKSIZE;
151 		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
152 			align = MIN(p2 >> 2, PAGESIZE);
153 		}
154 
155 		if (align != 0) {
156 			char name[36];
157 			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
158 			zio_buf_cache[c] = kmem_cache_create(name, size,
159 			    align, NULL, NULL, NULL, NULL, NULL, cflags);
160 
161 			/*
162 			 * Since zio_data bufs do not appear in crash dumps, we
163 			 * pass KMC_NOTOUCH so that no allocator metadata is
164 			 * stored with the buffers.
165 			 */
166 			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
167 			zio_data_buf_cache[c] = kmem_cache_create(name, size,
168 			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
169 			    cflags | KMC_NOTOUCH);
170 		}
171 	}
172 
173 	while (--c != 0) {
174 		ASSERT(zio_buf_cache[c] != NULL);
175 		if (zio_buf_cache[c - 1] == NULL)
176 			zio_buf_cache[c - 1] = zio_buf_cache[c];
177 
178 		ASSERT(zio_data_buf_cache[c] != NULL);
179 		if (zio_data_buf_cache[c - 1] == NULL)
180 			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
181 	}
182 
183 	zio_inject_init();
184 }
185 
186 void
187 zio_fini(void)
188 {
189 	size_t c;
190 	kmem_cache_t *last_cache = NULL;
191 	kmem_cache_t *last_data_cache = NULL;
192 
193 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
194 		if (zio_buf_cache[c] != last_cache) {
195 			last_cache = zio_buf_cache[c];
196 			kmem_cache_destroy(zio_buf_cache[c]);
197 		}
198 		zio_buf_cache[c] = NULL;
199 
200 		if (zio_data_buf_cache[c] != last_data_cache) {
201 			last_data_cache = zio_data_buf_cache[c];
202 			kmem_cache_destroy(zio_data_buf_cache[c]);
203 		}
204 		zio_data_buf_cache[c] = NULL;
205 	}
206 
207 	kmem_cache_destroy(zio_link_cache);
208 	kmem_cache_destroy(zio_cache);
209 
210 	zio_inject_fini();
211 }
212 
213 /*
214  * ==========================================================================
215  * Allocate and free I/O buffers
216  * ==========================================================================
217  */
218 
219 /*
220  * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
221  * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
222  * useful to inspect ZFS metadata, but if possible, we should avoid keeping
223  * excess / transient data in-core during a crashdump.
224  */
225 void *
226 zio_buf_alloc(size_t size)
227 {
228 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
229 
230 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
231 
232 	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
233 }
234 
235 /*
236  * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
237  * crashdump if the kernel panics.  This exists so that we will limit the amount
238  * of ZFS data that shows up in a kernel crashdump.  (Thus reducing the amount
239  * of kernel heap dumped to disk when the kernel panics)
240  */
241 void *
242 zio_data_buf_alloc(size_t size)
243 {
244 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
245 
246 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
247 
248 	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
249 }
250 
251 void
252 zio_buf_free(void *buf, size_t size)
253 {
254 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
255 
256 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
257 
258 	kmem_cache_free(zio_buf_cache[c], buf);
259 }
260 
261 void
262 zio_data_buf_free(void *buf, size_t size)
263 {
264 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
265 
266 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
267 
268 	kmem_cache_free(zio_data_buf_cache[c], buf);
269 }
270 
271 /*
272  * ==========================================================================
273  * Push and pop I/O transform buffers
274  * ==========================================================================
275  */
276 void
277 zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
278     zio_transform_func_t *transform)
279 {
280 	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
281 
282 	/*
283 	 * Ensure that anyone expecting this zio to contain a linear ABD isn't
284 	 * going to get a nasty surprise when they try to access the data.
285 	 */
286 	IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));
287 
288 	zt->zt_orig_abd = zio->io_abd;
289 	zt->zt_orig_size = zio->io_size;
290 	zt->zt_bufsize = bufsize;
291 	zt->zt_transform = transform;
292 
293 	zt->zt_next = zio->io_transform_stack;
294 	zio->io_transform_stack = zt;
295 
296 	zio->io_abd = data;
297 	zio->io_size = size;
298 }
299 
300 void
301 zio_pop_transforms(zio_t *zio)
302 {
303 	zio_transform_t *zt;
304 
305 	while ((zt = zio->io_transform_stack) != NULL) {
306 		if (zt->zt_transform != NULL)
307 			zt->zt_transform(zio,
308 			    zt->zt_orig_abd, zt->zt_orig_size);
309 
310 		if (zt->zt_bufsize != 0)
311 			abd_free(zio->io_abd);
312 
313 		zio->io_abd = zt->zt_orig_abd;
314 		zio->io_size = zt->zt_orig_size;
315 		zio->io_transform_stack = zt->zt_next;
316 
317 		kmem_free(zt, sizeof (zio_transform_t));
318 	}
319 }
320 
321 /*
322  * ==========================================================================
323  * I/O transform callbacks for subblocks and decompression
324  * ==========================================================================
325  */
326 static void
327 zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
328 {
329 	ASSERT(zio->io_size > size);
330 
331 	if (zio->io_type == ZIO_TYPE_READ)
332 		abd_copy(data, zio->io_abd, size);
333 }
334 
335 static void
336 zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
337 {
338 	if (zio->io_error == 0) {
339 		void *tmp = abd_borrow_buf(data, size);
340 		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
341 		    zio->io_abd, tmp, zio->io_size, size);
342 		abd_return_buf_copy(data, tmp, size);
343 
344 		if (ret != 0)
345 			zio->io_error = SET_ERROR(EIO);
346 	}
347 }
348 
349 /*
350  * ==========================================================================
351  * I/O parent/child relationships and pipeline interlocks
352  * ==========================================================================
353  */
354 zio_t *
355 zio_walk_parents(zio_t *cio, zio_link_t **zl)
356 {
357 	list_t *pl = &cio->io_parent_list;
358 
359 	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
360 	if (*zl == NULL)
361 		return (NULL);
362 
363 	ASSERT((*zl)->zl_child == cio);
364 	return ((*zl)->zl_parent);
365 }
366 
367 zio_t *
368 zio_walk_children(zio_t *pio, zio_link_t **zl)
369 {
370 	list_t *cl = &pio->io_child_list;
371 
372 	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
373 	if (*zl == NULL)
374 		return (NULL);
375 
376 	ASSERT((*zl)->zl_parent == pio);
377 	return ((*zl)->zl_child);
378 }
379 
380 zio_t *
381 zio_unique_parent(zio_t *cio)
382 {
383 	zio_link_t *zl = NULL;
384 	zio_t *pio = zio_walk_parents(cio, &zl);
385 
386 	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
387 	return (pio);
388 }
389 
390 void
391 zio_add_child(zio_t *pio, zio_t *cio)
392 {
393 	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
394 
395 	/*
396 	 * Logical I/Os can have logical, gang, or vdev children.
397 	 * Gang I/Os can have gang or vdev children.
398 	 * Vdev I/Os can only have vdev children.
399 	 * The following ASSERT captures all of these constraints.
400 	 */
401 	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
402 
403 	zl->zl_parent = pio;
404 	zl->zl_child = cio;
405 
406 	mutex_enter(&cio->io_lock);
407 	mutex_enter(&pio->io_lock);
408 
409 	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
410 
411 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
412 		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
413 
414 	list_insert_head(&pio->io_child_list, zl);
415 	list_insert_head(&cio->io_parent_list, zl);
416 
417 	pio->io_child_count++;
418 	cio->io_parent_count++;
419 
420 	mutex_exit(&pio->io_lock);
421 	mutex_exit(&cio->io_lock);
422 }
423 
424 static void
425 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
426 {
427 	ASSERT(zl->zl_parent == pio);
428 	ASSERT(zl->zl_child == cio);
429 
430 	mutex_enter(&cio->io_lock);
431 	mutex_enter(&pio->io_lock);
432 
433 	list_remove(&pio->io_child_list, zl);
434 	list_remove(&cio->io_parent_list, zl);
435 
436 	pio->io_child_count--;
437 	cio->io_parent_count--;
438 
439 	mutex_exit(&pio->io_lock);
440 	mutex_exit(&cio->io_lock);
441 
442 	kmem_cache_free(zio_link_cache, zl);
443 }
444 
445 static boolean_t
446 zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
447 {
448 	boolean_t waiting = B_FALSE;
449 
450 	mutex_enter(&zio->io_lock);
451 	ASSERT(zio->io_stall == NULL);
452 	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
453 		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
454 			continue;
455 
456 		uint64_t *countp = &zio->io_children[c][wait];
457 		if (*countp != 0) {
458 			zio->io_stage >>= 1;
459 			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
460 			zio->io_stall = countp;
461 			waiting = B_TRUE;
462 			break;
463 		}
464 	}
465 	mutex_exit(&zio->io_lock);
466 	return (waiting);
467 }
468 
469 static void
470 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
471 {
472 	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
473 	int *errorp = &pio->io_child_error[zio->io_child_type];
474 
475 	mutex_enter(&pio->io_lock);
476 	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
477 		*errorp = zio_worst_error(*errorp, zio->io_error);
478 	pio->io_reexecute |= zio->io_reexecute;
479 	ASSERT3U(*countp, >, 0);
480 
481 	(*countp)--;
482 
483 	if (*countp == 0 && pio->io_stall == countp) {
484 		zio_taskq_type_t type =
485 		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
486 		    ZIO_TASKQ_INTERRUPT;
487 		pio->io_stall = NULL;
488 		mutex_exit(&pio->io_lock);
489 		/*
490 		 * Dispatch the parent zio in its own taskq so that
491 		 * the child can continue to make progress. This also
492 		 * prevents overflowing the stack when we have deeply nested
493 		 * parent-child relationships.
494 		 */
495 		zio_taskq_dispatch(pio, type, B_FALSE);
496 	} else {
497 		mutex_exit(&pio->io_lock);
498 	}
499 }
500 
501 static void
502 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
503 {
504 	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
505 		zio->io_error = zio->io_child_error[c];
506 }
507 
508 int
509 zio_bookmark_compare(const void *x1, const void *x2)
510 {
511 	const zio_t *z1 = x1;
512 	const zio_t *z2 = x2;
513 
514 	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
515 		return (-1);
516 	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
517 		return (1);
518 
519 	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
520 		return (-1);
521 	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
522 		return (1);
523 
524 	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
525 		return (-1);
526 	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
527 		return (1);
528 
529 	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
530 		return (-1);
531 	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
532 		return (1);
533 
534 	if (z1 < z2)
535 		return (-1);
536 	if (z1 > z2)
537 		return (1);
538 
539 	return (0);
540 }
541 
542 /*
543  * ==========================================================================
544  * Create the various types of I/O (read, write, free, etc)
545  * ==========================================================================
546  */
547 static zio_t *
548 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
549     abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
550     void *private, zio_type_t type, zio_priority_t priority,
551     enum zio_flag flags, vdev_t *vd, uint64_t offset,
552     const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
553 {
554 	zio_t *zio;
555 
556 	ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE);
557 	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
558 	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
559 
560 	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
561 	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
562 	ASSERT(vd || stage == ZIO_STAGE_OPEN);
563 
564 	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW) != 0);
565 
566 	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
567 	bzero(zio, sizeof (zio_t));
568 
569 	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
570 	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
571 
572 	list_create(&zio->io_parent_list, sizeof (zio_link_t),
573 	    offsetof(zio_link_t, zl_parent_node));
574 	list_create(&zio->io_child_list, sizeof (zio_link_t),
575 	    offsetof(zio_link_t, zl_child_node));
576 	metaslab_trace_init(&zio->io_alloc_list);
577 
578 	if (vd != NULL)
579 		zio->io_child_type = ZIO_CHILD_VDEV;
580 	else if (flags & ZIO_FLAG_GANG_CHILD)
581 		zio->io_child_type = ZIO_CHILD_GANG;
582 	else if (flags & ZIO_FLAG_DDT_CHILD)
583 		zio->io_child_type = ZIO_CHILD_DDT;
584 	else
585 		zio->io_child_type = ZIO_CHILD_LOGICAL;
586 
587 	if (bp != NULL) {
588 		zio->io_bp = (blkptr_t *)bp;
589 		zio->io_bp_copy = *bp;
590 		zio->io_bp_orig = *bp;
591 		if (type != ZIO_TYPE_WRITE ||
592 		    zio->io_child_type == ZIO_CHILD_DDT)
593 			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
594 		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
595 			zio->io_logical = zio;
596 		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
597 			pipeline |= ZIO_GANG_STAGES;
598 	}
599 
600 	zio->io_spa = spa;
601 	zio->io_txg = txg;
602 	zio->io_done = done;
603 	zio->io_private = private;
604 	zio->io_type = type;
605 	zio->io_priority = priority;
606 	zio->io_vd = vd;
607 	zio->io_offset = offset;
608 	zio->io_orig_abd = zio->io_abd = data;
609 	zio->io_orig_size = zio->io_size = psize;
610 	zio->io_lsize = lsize;
611 	zio->io_orig_flags = zio->io_flags = flags;
612 	zio->io_orig_stage = zio->io_stage = stage;
613 	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
614 	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
615 
616 	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
617 	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
618 
619 	if (zb != NULL)
620 		zio->io_bookmark = *zb;
621 
622 	if (pio != NULL) {
623 		if (zio->io_logical == NULL)
624 			zio->io_logical = pio->io_logical;
625 		if (zio->io_child_type == ZIO_CHILD_GANG)
626 			zio->io_gang_leader = pio->io_gang_leader;
627 		zio_add_child(pio, zio);
628 	}
629 
630 	return (zio);
631 }
632 
633 static void
634 zio_destroy(zio_t *zio)
635 {
636 	metaslab_trace_fini(&zio->io_alloc_list);
637 	list_destroy(&zio->io_parent_list);
638 	list_destroy(&zio->io_child_list);
639 	mutex_destroy(&zio->io_lock);
640 	cv_destroy(&zio->io_cv);
641 	kmem_cache_free(zio_cache, zio);
642 }
643 
644 zio_t *
645 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
646     void *private, enum zio_flag flags)
647 {
648 	zio_t *zio;
649 
650 	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
651 	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
652 	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
653 
654 	return (zio);
655 }
656 
657 zio_t *
658 zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
659 {
660 	return (zio_null(NULL, spa, NULL, done, private, flags));
661 }
662 
663 void
664 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
665 {
666 	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
667 		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
668 		    bp, (longlong_t)BP_GET_TYPE(bp));
669 	}
670 	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
671 	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
672 		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
673 		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
674 	}
675 	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
676 	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
677 		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
678 		    bp, (longlong_t)BP_GET_COMPRESS(bp));
679 	}
680 	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
681 		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
682 		    bp, (longlong_t)BP_GET_LSIZE(bp));
683 	}
684 	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
685 		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
686 		    bp, (longlong_t)BP_GET_PSIZE(bp));
687 	}
688 
689 	if (BP_IS_EMBEDDED(bp)) {
690 		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
691 			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
692 			    bp, (longlong_t)BPE_GET_ETYPE(bp));
693 		}
694 	}
695 
696 	/*
697 	 * Do not verify individual DVAs if the config is not trusted. This
698 	 * will be done once the zio is executed in vdev_mirror_map_alloc.
699 	 */
700 	if (!spa->spa_trust_config)
701 		return;
702 
703 	/*
704 	 * Pool-specific checks.
705 	 *
706 	 * Note: it would be nice to verify that the blk_birth and
707 	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
708 	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
709 	 * that are in the log) to be arbitrarily large.
710 	 */
711 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
712 		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
713 		if (vdevid >= spa->spa_root_vdev->vdev_children) {
714 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
715 			    "VDEV %llu",
716 			    bp, i, (longlong_t)vdevid);
717 			continue;
718 		}
719 		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
720 		if (vd == NULL) {
721 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
722 			    "VDEV %llu",
723 			    bp, i, (longlong_t)vdevid);
724 			continue;
725 		}
726 		if (vd->vdev_ops == &vdev_hole_ops) {
727 			zfs_panic_recover("blkptr at %p DVA %u has hole "
728 			    "VDEV %llu",
729 			    bp, i, (longlong_t)vdevid);
730 			continue;
731 		}
732 		if (vd->vdev_ops == &vdev_missing_ops) {
733 			/*
734 			 * "missing" vdevs are valid during import, but we
735 			 * don't have their detailed info (e.g. asize), so
736 			 * we can't perform any more checks on them.
737 			 */
738 			continue;
739 		}
740 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
741 		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
742 		if (BP_IS_GANG(bp))
743 			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
744 		if (offset + asize > vd->vdev_asize) {
745 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
746 			    "OFFSET %llu",
747 			    bp, i, (longlong_t)offset);
748 		}
749 	}
750 }
751 
752 boolean_t
753 zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
754 {
755 	uint64_t vdevid = DVA_GET_VDEV(dva);
756 
757 	if (vdevid >= spa->spa_root_vdev->vdev_children)
758 		return (B_FALSE);
759 
760 	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
761 	if (vd == NULL)
762 		return (B_FALSE);
763 
764 	if (vd->vdev_ops == &vdev_hole_ops)
765 		return (B_FALSE);
766 
767 	if (vd->vdev_ops == &vdev_missing_ops) {
768 		return (B_FALSE);
769 	}
770 
771 	uint64_t offset = DVA_GET_OFFSET(dva);
772 	uint64_t asize = DVA_GET_ASIZE(dva);
773 
774 	if (BP_IS_GANG(bp))
775 		asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
776 	if (offset + asize > vd->vdev_asize)
777 		return (B_FALSE);
778 
779 	return (B_TRUE);
780 }
781 
782 zio_t *
783 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
784     abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
785     zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
786 {
787 	zio_t *zio;
788 
789 	zfs_blkptr_verify(spa, bp);
790 
791 	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
792 	    data, size, size, done, private,
793 	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
794 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
795 	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
796 
797 	return (zio);
798 }
799 
800 zio_t *
801 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
802     abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
803     zio_done_func_t *ready, zio_done_func_t *children_ready,
804     zio_done_func_t *physdone, zio_done_func_t *done,
805     void *private, zio_priority_t priority, enum zio_flag flags,
806     const zbookmark_phys_t *zb)
807 {
808 	zio_t *zio;
809 
810 	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
811 	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
812 	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
813 	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
814 	    DMU_OT_IS_VALID(zp->zp_type) &&
815 	    zp->zp_level < 32 &&
816 	    zp->zp_copies > 0 &&
817 	    zp->zp_copies <= spa_max_replication(spa));
818 
819 	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
820 	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
821 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
822 	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
823 
824 	zio->io_ready = ready;
825 	zio->io_children_ready = children_ready;
826 	zio->io_physdone = physdone;
827 	zio->io_prop = *zp;
828 
829 	/*
830 	 * Data can be NULL if we are going to call zio_write_override() to
831 	 * provide the already-allocated BP.  But we may need the data to
832 	 * verify a dedup hit (if requested).  In this case, don't try to
833 	 * dedup (just take the already-allocated BP verbatim).
834 	 */
835 	if (data == NULL && zio->io_prop.zp_dedup_verify) {
836 		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
837 	}
838 
839 	return (zio);
840 }
841 
842 zio_t *
843 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
844     uint64_t size, zio_done_func_t *done, void *private,
845     zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
846 {
847 	zio_t *zio;
848 
849 	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
850 	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
851 	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
852 
853 	return (zio);
854 }
855 
856 void
857 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
858 {
859 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
860 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
861 	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
862 	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
863 
864 	/*
865 	 * We must reset the io_prop to match the values that existed
866 	 * when the bp was first written by dmu_sync() keeping in mind
867 	 * that nopwrite and dedup are mutually exclusive.
868 	 */
869 	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
870 	zio->io_prop.zp_nopwrite = nopwrite;
871 	zio->io_prop.zp_copies = copies;
872 	zio->io_bp_override = bp;
873 }
874 
875 void
876 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
877 {
878 
879 	zfs_blkptr_verify(spa, bp);
880 
881 	/*
882 	 * The check for EMBEDDED is a performance optimization.  We
883 	 * process the free here (by ignoring it) rather than
884 	 * putting it on the list and then processing it in zio_free_sync().
885 	 */
886 	if (BP_IS_EMBEDDED(bp))
887 		return;
888 	metaslab_check_free(spa, bp);
889 
890 	/*
891 	 * Frees that are for the currently-syncing txg, are not going to be
892 	 * deferred, and which will not need to do a read (i.e. not GANG or
893 	 * DEDUP), can be processed immediately.  Otherwise, put them on the
894 	 * in-memory list for later processing.
895 	 */
896 	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
897 	    txg != spa->spa_syncing_txg ||
898 	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
899 		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
900 	} else {
901 		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
902 	}
903 }
904 
905 zio_t *
906 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
907     enum zio_flag flags)
908 {
909 	zio_t *zio;
910 	enum zio_stage stage = ZIO_FREE_PIPELINE;
911 
912 	ASSERT(!BP_IS_HOLE(bp));
913 	ASSERT(spa_syncing_txg(spa) == txg);
914 	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);
915 
916 	if (BP_IS_EMBEDDED(bp))
917 		return (zio_null(pio, spa, NULL, NULL, NULL, 0));
918 
919 	metaslab_check_free(spa, bp);
920 	arc_freed(spa, bp);
921 
922 	/*
923 	 * GANG and DEDUP blocks can induce a read (for the gang block header,
924 	 * or the DDT), so issue them asynchronously so that this thread is
925 	 * not tied up.
926 	 */
927 	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
928 		stage |= ZIO_STAGE_ISSUE_ASYNC;
929 
930 	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
931 	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
932 	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);
933 
934 	return (zio);
935 }
936 
937 zio_t *
938 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
939     zio_done_func_t *done, void *private, enum zio_flag flags)
940 {
941 	zio_t *zio;
942 
943 	zfs_blkptr_verify(spa, bp);
944 
945 	if (BP_IS_EMBEDDED(bp))
946 		return (zio_null(pio, spa, NULL, NULL, NULL, 0));
947 
948 	/*
949 	 * A claim is an allocation of a specific block.  Claims are needed
950 	 * to support immediate writes in the intent log.  The issue is that
951 	 * immediate writes contain committed data, but in a txg that was
952 	 * *not* committed.  Upon opening the pool after an unclean shutdown,
953 	 * the intent log claims all blocks that contain immediate write data
954 	 * so that the SPA knows they're in use.
955 	 *
956 	 * All claims *must* be resolved in the first txg -- before the SPA
957 	 * starts allocating blocks -- so that nothing is allocated twice.
958 	 * If txg == 0 we just verify that the block is claimable.
959 	 */
960 	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
961 	    spa_min_claim_txg(spa));
962 	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
963 	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */
964 
965 	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
966 	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
967 	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
968 	ASSERT0(zio->io_queued_timestamp);
969 
970 	return (zio);
971 }
972 
973 zio_t *
974 zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
975     zio_done_func_t *done, void *private, enum zio_flag flags)
976 {
977 	zio_t *zio;
978 	int c;
979 
980 	if (vd->vdev_children == 0) {
981 		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
982 		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
983 		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
984 
985 		zio->io_cmd = cmd;
986 	} else {
987 		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
988 
989 		for (c = 0; c < vd->vdev_children; c++)
990 			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
991 			    done, private, flags));
992 	}
993 
994 	return (zio);
995 }
996 
997 zio_t *
998 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
999     abd_t *data, int checksum, zio_done_func_t *done, void *private,
1000     zio_priority_t priority, enum zio_flag flags, boolean_t labels)
1001 {
1002 	zio_t *zio;
1003 
1004 	ASSERT(vd->vdev_children == 0);
1005 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1006 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1007 	ASSERT3U(offset + size, <=, vd->vdev_psize);
1008 
1009 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1010 	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1011 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
1012 
1013 	zio->io_prop.zp_checksum = checksum;
1014 
1015 	return (zio);
1016 }
1017 
1018 zio_t *
1019 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1020     abd_t *data, int checksum, zio_done_func_t *done, void *private,
1021     zio_priority_t priority, enum zio_flag flags, boolean_t labels)
1022 {
1023 	zio_t *zio;
1024 
1025 	ASSERT(vd->vdev_children == 0);
1026 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1027 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1028 	ASSERT3U(offset + size, <=, vd->vdev_psize);
1029 
1030 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1031 	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1032 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
1033 
1034 	zio->io_prop.zp_checksum = checksum;
1035 
1036 	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
1037 		/*
1038 		 * zec checksums are necessarily destructive -- they modify
1039 		 * the end of the write buffer to hold the verifier/checksum.
1040 		 * Therefore, we must make a local copy in case the data is
1041 		 * being written to multiple places in parallel.
1042 		 */
1043 		abd_t *wbuf = abd_alloc_sametype(data, size);
1044 		abd_copy(wbuf, data, size);
1045 
1046 		zio_push_transform(zio, wbuf, size, size, NULL);
1047 	}
1048 
1049 	return (zio);
1050 }
1051 
1052 /*
1053  * Create a child I/O to do some work for us.
1054  */
1055 zio_t *
1056 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
1057     abd_t *data, uint64_t size, int type, zio_priority_t priority,
1058     enum zio_flag flags, zio_done_func_t *done, void *private)
1059 {
1060 	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1061 	zio_t *zio;
1062 
1063 	/*
1064 	 * vdev child I/Os do not propagate their error to the parent.
1065 	 * Therefore, for correct operation the caller *must* check for
1066 	 * and handle the error in the child i/o's done callback.
1067 	 * The only exceptions are i/os that we don't care about
1068 	 * (OPTIONAL or REPAIR).
1069 	 */
1070 	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
1071 	    done != NULL);
1072 
1073 	if (type == ZIO_TYPE_READ && bp != NULL) {
1074 		/*
1075 		 * If we have the bp, then the child should perform the
1076 		 * checksum and the parent need not.  This pushes error
1077 		 * detection as close to the leaves as possible and
1078 		 * eliminates redundant checksums in the interior nodes.
1079 		 */
1080 		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1081 		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
1082 	}
1083 
1084 	if (vd->vdev_ops->vdev_op_leaf) {
1085 		ASSERT0(vd->vdev_children);
1086 		offset += VDEV_LABEL_START_SIZE;
1087 	}
1088 
1089 	flags |= ZIO_VDEV_CHILD_FLAGS(pio);
1090 
1091 	/*
1092 	 * If we've decided to do a repair, the write is not speculative --
1093 	 * even if the original read was.
1094 	 */
1095 	if (flags & ZIO_FLAG_IO_REPAIR)
1096 		flags &= ~ZIO_FLAG_SPECULATIVE;
1097 
1098 	/*
1099 	 * If we're creating a child I/O that is not associated with a
1100 	 * top-level vdev, then the child zio is not an allocating I/O.
1101 	 * If this is a retried I/O then we ignore it since we will
1102 	 * have already processed the original allocating I/O.
1103 	 */
1104 	if (flags & ZIO_FLAG_IO_ALLOCATING &&
1105 	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
1106 		metaslab_class_t *mc = spa_normal_class(pio->io_spa);
1107 
1108 		ASSERT(mc->mc_alloc_throttle_enabled);
1109 		ASSERT(type == ZIO_TYPE_WRITE);
1110 		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1111 		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1112 		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1113 		    pio->io_child_type == ZIO_CHILD_GANG);
1114 
1115 		flags &= ~ZIO_FLAG_IO_ALLOCATING;
1116 	}
1117 
1118 	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1119 	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1120 	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1121 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1122 
1123 	zio->io_physdone = pio->io_physdone;
1124 	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
1125 		zio->io_logical->io_phys_children++;
1126 
1127 	return (zio);
1128 }
1129 
1130 zio_t *
1131 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
1132     zio_type_t type, zio_priority_t priority, enum zio_flag flags,
1133     zio_done_func_t *done, void *private)
1134 {
1135 	zio_t *zio;
1136 
1137 	ASSERT(vd->vdev_ops->vdev_op_leaf);
1138 
1139 	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1140 	    data, size, size, done, private, type, priority,
1141 	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1142 	    vd, offset, NULL,
1143 	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1144 
1145 	return (zio);
1146 }
1147 
1148 void
1149 zio_flush(zio_t *zio, vdev_t *vd)
1150 {
1151 	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
1152 	    NULL, NULL,
1153 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
1154 }
1155 
1156 void
1157 zio_shrink(zio_t *zio, uint64_t size)
1158 {
1159 	ASSERT3P(zio->io_executor, ==, NULL);
1160 	ASSERT3P(zio->io_orig_size, ==, zio->io_size);
1161 	ASSERT3U(size, <=, zio->io_size);
1162 
1163 	/*
1164 	 * We don't shrink for raidz because of problems with the
1165 	 * reconstruction when reading back less than the block size.
1166 	 * Note, BP_IS_RAIDZ() assumes no compression.
1167 	 */
1168 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1169 	if (!BP_IS_RAIDZ(zio->io_bp)) {
1170 		/* we are not doing a raw write */
1171 		ASSERT3U(zio->io_size, ==, zio->io_lsize);
1172 		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1173 	}
1174 }
1175 
1176 /*
1177  * ==========================================================================
1178  * Prepare to read and write logical blocks
1179  * ==========================================================================
1180  */
1181 
1182 static int
1183 zio_read_bp_init(zio_t *zio)
1184 {
1185 	blkptr_t *bp = zio->io_bp;
1186 
1187 	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1188 
1189 	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1190 	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
1191 	    !(zio->io_flags & ZIO_FLAG_RAW)) {
1192 		uint64_t psize =
1193 		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1194 		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1195 		    psize, psize, zio_decompress);
1196 	}
1197 
1198 	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1199 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1200 
1201 		int psize = BPE_GET_PSIZE(bp);
1202 		void *data = abd_borrow_buf(zio->io_abd, psize);
1203 		decode_embedded_bp_compressed(bp, data);
1204 		abd_return_buf_copy(zio->io_abd, data, psize);
1205 	} else {
1206 		ASSERT(!BP_IS_EMBEDDED(bp));
1207 		ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1208 	}
1209 
1210 	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
1211 		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
1212 
1213 	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
1214 		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
1215 
1216 	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1217 		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1218 
1219 	return (ZIO_PIPELINE_CONTINUE);
1220 }
1221 
1222 static int
1223 zio_write_bp_init(zio_t *zio)
1224 {
1225 	if (!IO_IS_ALLOCATING(zio))
1226 		return (ZIO_PIPELINE_CONTINUE);
1227 
1228 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1229 
1230 	if (zio->io_bp_override) {
1231 		blkptr_t *bp = zio->io_bp;
1232 		zio_prop_t *zp = &zio->io_prop;
1233 
1234 		ASSERT(bp->blk_birth != zio->io_txg);
1235 		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);
1236 
1237 		*bp = *zio->io_bp_override;
1238 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1239 
1240 		if (BP_IS_EMBEDDED(bp))
1241 			return (ZIO_PIPELINE_CONTINUE);
1242 
1243 		/*
1244 		 * If we've been overridden and nopwrite is set then
1245 		 * set the flag accordingly to indicate that a nopwrite
1246 		 * has already occurred.
1247 		 */
1248 		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1249 			ASSERT(!zp->zp_dedup);
1250 			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1251 			zio->io_flags |= ZIO_FLAG_NOPWRITE;
1252 			return (ZIO_PIPELINE_CONTINUE);
1253 		}
1254 
1255 		ASSERT(!zp->zp_nopwrite);
1256 
1257 		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1258 			return (ZIO_PIPELINE_CONTINUE);
1259 
1260 		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1261 		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1262 
1263 		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
1264 			BP_SET_DEDUP(bp, 1);
1265 			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1266 			return (ZIO_PIPELINE_CONTINUE);
1267 		}
1268 
1269 		/*
1270 		 * We were unable to handle this as an override bp, treat
1271 		 * it as a regular write I/O.
1272 		 */
1273 		zio->io_bp_override = NULL;
1274 		*bp = zio->io_bp_orig;
1275 		zio->io_pipeline = zio->io_orig_pipeline;
1276 	}
1277 
1278 	return (ZIO_PIPELINE_CONTINUE);
1279 }
1280 
1281 static int
1282 zio_write_compress(zio_t *zio)
1283 {
1284 	spa_t *spa = zio->io_spa;
1285 	zio_prop_t *zp = &zio->io_prop;
1286 	enum zio_compress compress = zp->zp_compress;
1287 	blkptr_t *bp = zio->io_bp;
1288 	uint64_t lsize = zio->io_lsize;
1289 	uint64_t psize = zio->io_size;
1290 	int pass = 1;
1291 
1292 	EQUIV(lsize != psize, (zio->io_flags & ZIO_FLAG_RAW) != 0);
1293 
1294 	/*
1295 	 * If our children haven't all reached the ready stage,
1296 	 * wait for them and then repeat this pipeline stage.
1297 	 */
1298 	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1299 	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
1300 		return (ZIO_PIPELINE_STOP);
1301 	}
1302 
1303 	if (!IO_IS_ALLOCATING(zio))
1304 		return (ZIO_PIPELINE_CONTINUE);
1305 
1306 	if (zio->io_children_ready != NULL) {
1307 		/*
1308 		 * Now that all our children are ready, run the callback
1309 		 * associated with this zio in case it wants to modify the
1310 		 * data to be written.
1311 		 */
1312 		ASSERT3U(zp->zp_level, >, 0);
1313 		zio->io_children_ready(zio);
1314 	}
1315 
1316 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1317 	ASSERT(zio->io_bp_override == NULL);
1318 
1319 	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
1320 		/*
1321 		 * We're rewriting an existing block, which means we're
1322 		 * working on behalf of spa_sync().  For spa_sync() to
1323 		 * converge, it must eventually be the case that we don't
1324 		 * have to allocate new blocks.  But compression changes
1325 		 * the blocksize, which forces a reallocate, and makes
1326 		 * convergence take longer.  Therefore, after the first
1327 		 * few passes, stop compressing to ensure convergence.
1328 		 */
1329 		pass = spa_sync_pass(spa);
1330 
1331 		ASSERT(zio->io_txg == spa_syncing_txg(spa));
1332 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1333 		ASSERT(!BP_GET_DEDUP(bp));
1334 
1335 		if (pass >= zfs_sync_pass_dont_compress)
1336 			compress = ZIO_COMPRESS_OFF;
1337 
1338 		/* Make sure someone doesn't change their mind on overwrites */
1339 		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
1340 		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
1341 	}
1342 
1343 	/* If it's a compressed write that is not raw, compress the buffer. */
1344 	if (compress != ZIO_COMPRESS_OFF && psize == lsize) {
1345 		void *cbuf = zio_buf_alloc(lsize);
1346 		psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
1347 		if (psize == 0 || psize == lsize) {
1348 			compress = ZIO_COMPRESS_OFF;
1349 			zio_buf_free(cbuf, lsize);
1350 		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
1351 		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1352 		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1353 			encode_embedded_bp_compressed(bp,
1354 			    cbuf, compress, lsize, psize);
1355 			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1356 			BP_SET_TYPE(bp, zio->io_prop.zp_type);
1357 			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1358 			zio_buf_free(cbuf, lsize);
1359 			bp->blk_birth = zio->io_txg;
1360 			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1361 			ASSERT(spa_feature_is_active(spa,
1362 			    SPA_FEATURE_EMBEDDED_DATA));
1363 			return (ZIO_PIPELINE_CONTINUE);
1364 		} else {
1365 			/*
1366 			 * Round up compressed size up to the ashift
1367 			 * of the smallest-ashift device, and zero the tail.
1368 			 * This ensures that the compressed size of the BP
1369 			 * (and thus compressratio property) are correct,
1370 			 * in that we charge for the padding used to fill out
1371 			 * the last sector.
1372 			 */
1373 			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
1374 			size_t rounded = (size_t)P2ROUNDUP(psize,
1375 			    1ULL << spa->spa_min_ashift);
1376 			if (rounded >= lsize) {
1377 				compress = ZIO_COMPRESS_OFF;
1378 				zio_buf_free(cbuf, lsize);
1379 				psize = lsize;
1380 			} else {
1381 				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
1382 				abd_take_ownership_of_buf(cdata, B_TRUE);
1383 				abd_zero_off(cdata, psize, rounded - psize);
1384 				psize = rounded;
1385 				zio_push_transform(zio, cdata,
1386 				    psize, lsize, NULL);
1387 			}
1388 		}
1389 
1390 		/*
1391 		 * We were unable to handle this as an override bp, treat
1392 		 * it as a regular write I/O.
1393 		 */
1394 		zio->io_bp_override = NULL;
1395 		*bp = zio->io_bp_orig;
1396 		zio->io_pipeline = zio->io_orig_pipeline;
1397 	} else {
1398 		ASSERT3U(psize, !=, 0);
1399 	}
1400 
1401 	/*
1402 	 * The final pass of spa_sync() must be all rewrites, but the first
1403 	 * few passes offer a trade-off: allocating blocks defers convergence,
1404 	 * but newly allocated blocks are sequential, so they can be written
1405 	 * to disk faster.  Therefore, we allow the first few passes of
1406 	 * spa_sync() to allocate new blocks, but force rewrites after that.
1407 	 * There should only be a handful of blocks after pass 1 in any case.
1408 	 */
1409 	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
1410 	    BP_GET_PSIZE(bp) == psize &&
1411 	    pass >= zfs_sync_pass_rewrite) {
1412 		ASSERT(psize != 0);
1413 		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
1414 		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
1415 		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
1416 	} else {
1417 		BP_ZERO(bp);
1418 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
1419 	}
1420 
1421 	if (psize == 0) {
1422 		if (zio->io_bp_orig.blk_birth != 0 &&
1423 		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
1424 			BP_SET_LSIZE(bp, lsize);
1425 			BP_SET_TYPE(bp, zp->zp_type);
1426 			BP_SET_LEVEL(bp, zp->zp_level);
1427 			BP_SET_BIRTH(bp, zio->io_txg, 0);
1428 		}
1429 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1430 	} else {
1431 		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
1432 		BP_SET_LSIZE(bp, lsize);
1433 		BP_SET_TYPE(bp, zp->zp_type);
1434 		BP_SET_LEVEL(bp, zp->zp_level);
1435 		BP_SET_PSIZE(bp, psize);
1436 		BP_SET_COMPRESS(bp, compress);
1437 		BP_SET_CHECKSUM(bp, zp->zp_checksum);
1438 		BP_SET_DEDUP(bp, zp->zp_dedup);
1439 		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
1440 		if (zp->zp_dedup) {
1441 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1442 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1443 			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
1444 		}
1445 		if (zp->zp_nopwrite) {
1446 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1447 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1448 			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
1449 		}
1450 	}
1451 	return (ZIO_PIPELINE_CONTINUE);
1452 }
1453 
1454 static int
1455 zio_free_bp_init(zio_t *zio)
1456 {
1457 	blkptr_t *bp = zio->io_bp;
1458 
1459 	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
1460 		if (BP_GET_DEDUP(bp))
1461 			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
1462 	}
1463 
1464 	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1465 
1466 	return (ZIO_PIPELINE_CONTINUE);
1467 }
1468 
1469 /*
1470  * ==========================================================================
1471  * Execute the I/O pipeline
1472  * ==========================================================================
1473  */
1474 
1475 static void
1476 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
1477 {
1478 	spa_t *spa = zio->io_spa;
1479 	zio_type_t t = zio->io_type;
1480 	int flags = (cutinline ? TQ_FRONT : 0);
1481 
1482 	/*
1483 	 * If we're a config writer or a probe, the normal issue and
1484 	 * interrupt threads may all be blocked waiting for the config lock.
1485 	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
1486 	 */
1487 	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
1488 		t = ZIO_TYPE_NULL;
1489 
1490 	/*
1491 	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
1492 	 */
1493 	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
1494 		t = ZIO_TYPE_NULL;
1495 
1496 	/*
1497 	 * If this is a high priority I/O, then use the high priority taskq if
1498 	 * available.
1499 	 */
1500 	if ((zio->io_priority == ZIO_PRIORITY_NOW ||
1501 	    zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
1502 	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
1503 		q++;
1504 
1505 	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
1506 
1507 	/*
1508 	 * NB: We are assuming that the zio can only be dispatched
1509 	 * to a single taskq at a time.  It would be a grievous error
1510 	 * to dispatch the zio to another taskq at the same time.
1511 	 */
1512 	ASSERT(zio->io_tqent.tqent_next == NULL);
1513 	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
1514 	    flags, &zio->io_tqent);
1515 }
1516 
1517 static boolean_t
1518 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
1519 {
1520 	kthread_t *executor = zio->io_executor;
1521 	spa_t *spa = zio->io_spa;
1522 
1523 	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
1524 		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1525 		uint_t i;
1526 		for (i = 0; i < tqs->stqs_count; i++) {
1527 			if (taskq_member(tqs->stqs_taskq[i], executor))
1528 				return (B_TRUE);
1529 		}
1530 	}
1531 
1532 	return (B_FALSE);
1533 }
1534 
1535 static int
1536 zio_issue_async(zio_t *zio)
1537 {
1538 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
1539 
1540 	return (ZIO_PIPELINE_STOP);
1541 }
1542 
1543 void
1544 zio_interrupt(zio_t *zio)
1545 {
1546 	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
1547 }
1548 
1549 void
1550 zio_delay_interrupt(zio_t *zio)
1551 {
1552 	/*
1553 	 * The timeout_generic() function isn't defined in userspace, so
1554 	 * rather than trying to implement the function, the zio delay
1555 	 * functionality has been disabled for userspace builds.
1556 	 */
1557 
1558 #ifdef _KERNEL
1559 	/*
1560 	 * If io_target_timestamp is zero, then no delay has been registered
1561 	 * for this IO, thus jump to the end of this function and "skip" the
1562 	 * delay; issuing it directly to the zio layer.
1563 	 */
1564 	if (zio->io_target_timestamp != 0) {
1565 		hrtime_t now = gethrtime();
1566 
1567 		if (now >= zio->io_target_timestamp) {
1568 			/*
1569 			 * This IO has already taken longer than the target
1570 			 * delay to complete, so we don't want to delay it
1571 			 * any longer; we "miss" the delay and issue it
1572 			 * directly to the zio layer. This is likely due to
1573 			 * the target latency being set to a value less than
1574 			 * the underlying hardware can satisfy (e.g. delay
1575 			 * set to 1ms, but the disks take 10ms to complete an
1576 			 * IO request).
1577 			 */
1578 
1579 			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
1580 			    hrtime_t, now);
1581 
1582 			zio_interrupt(zio);
1583 		} else {
1584 			hrtime_t diff = zio->io_target_timestamp - now;
1585 
1586 			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
1587 			    hrtime_t, now, hrtime_t, diff);
1588 
1589 			(void) timeout_generic(CALLOUT_NORMAL,
1590 			    (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
1591 		}
1592 
1593 		return;
1594 	}
1595 #endif
1596 
1597 	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
1598 	zio_interrupt(zio);
1599 }
1600 
1601 /*
1602  * Execute the I/O pipeline until one of the following occurs:
1603  *
1604  *	(1) the I/O completes
1605  *	(2) the pipeline stalls waiting for dependent child I/Os
1606  *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
1607  *	(4) the I/O is delegated by vdev-level caching or aggregation
1608  *	(5) the I/O is deferred due to vdev-level queueing
1609  *	(6) the I/O is handed off to another thread.
1610  *
1611  * In all cases, the pipeline stops whenever there's no CPU work; it never
1612  * burns a thread in cv_wait().
1613  *
1614  * There's no locking on io_stage because there's no legitimate way
1615  * for multiple threads to be attempting to process the same I/O.
1616  */
1617 static zio_pipe_stage_t *zio_pipeline[];
1618 
1619 void
1620 zio_execute(zio_t *zio)
1621 {
1622 	zio->io_executor = curthread;
1623 
1624 	ASSERT3U(zio->io_queued_timestamp, >, 0);
1625 
1626 	while (zio->io_stage < ZIO_STAGE_DONE) {
1627 		enum zio_stage pipeline = zio->io_pipeline;
1628 		enum zio_stage stage = zio->io_stage;
1629 		int rv;
1630 
1631 		ASSERT(!MUTEX_HELD(&zio->io_lock));
1632 		ASSERT(ISP2(stage));
1633 		ASSERT(zio->io_stall == NULL);
1634 
1635 		do {
1636 			stage <<= 1;
1637 		} while ((stage & pipeline) == 0);
1638 
1639 		ASSERT(stage <= ZIO_STAGE_DONE);
1640 
1641 		/*
1642 		 * If we are in interrupt context and this pipeline stage
1643 		 * will grab a config lock that is held across I/O,
1644 		 * or may wait for an I/O that needs an interrupt thread
1645 		 * to complete, issue async to avoid deadlock.
1646 		 *
1647 		 * For VDEV_IO_START, we cut in line so that the io will
1648 		 * be sent to disk promptly.
1649 		 */
1650 		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
1651 		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
1652 			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
1653 			    zio_requeue_io_start_cut_in_line : B_FALSE;
1654 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
1655 			return;
1656 		}
1657 
1658 		zio->io_stage = stage;
1659 		zio->io_pipeline_trace |= zio->io_stage;
1660 		rv = zio_pipeline[highbit64(stage) - 1](zio);
1661 
1662 		if (rv == ZIO_PIPELINE_STOP)
1663 			return;
1664 
1665 		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
1666 	}
1667 }
1668 
1669 /*
1670  * ==========================================================================
1671  * Initiate I/O, either sync or async
1672  * ==========================================================================
1673  */
1674 int
1675 zio_wait(zio_t *zio)
1676 {
1677 	int error;
1678 
1679 	ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN);
1680 	ASSERT3P(zio->io_executor, ==, NULL);
1681 
1682 	zio->io_waiter = curthread;
1683 	ASSERT0(zio->io_queued_timestamp);
1684 	zio->io_queued_timestamp = gethrtime();
1685 
1686 	zio_execute(zio);
1687 
1688 	mutex_enter(&zio->io_lock);
1689 	while (zio->io_executor != NULL)
1690 		cv_wait(&zio->io_cv, &zio->io_lock);
1691 	mutex_exit(&zio->io_lock);
1692 
1693 	error = zio->io_error;
1694 	zio_destroy(zio);
1695 
1696 	return (error);
1697 }
1698 
1699 void
1700 zio_nowait(zio_t *zio)
1701 {
1702 	ASSERT3P(zio->io_executor, ==, NULL);
1703 
1704 	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
1705 	    zio_unique_parent(zio) == NULL) {
1706 		/*
1707 		 * This is a logical async I/O with no parent to wait for it.
1708 		 * We add it to the spa_async_root_zio "Godfather" I/O which
1709 		 * will ensure they complete prior to unloading the pool.
1710 		 */
1711 		spa_t *spa = zio->io_spa;
1712 
1713 		zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio);
1714 	}
1715 
1716 	ASSERT0(zio->io_queued_timestamp);
1717 	zio->io_queued_timestamp = gethrtime();
1718 	zio_execute(zio);
1719 }
1720 
1721 /*
1722  * ==========================================================================
1723  * Reexecute, cancel, or suspend/resume failed I/O
1724  * ==========================================================================
1725  */
1726 
1727 static void
1728 zio_reexecute(zio_t *pio)
1729 {
1730 	zio_t *cio, *cio_next;
1731 
1732 	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
1733 	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
1734 	ASSERT(pio->io_gang_leader == NULL);
1735 	ASSERT(pio->io_gang_tree == NULL);
1736 
1737 	pio->io_flags = pio->io_orig_flags;
1738 	pio->io_stage = pio->io_orig_stage;
1739 	pio->io_pipeline = pio->io_orig_pipeline;
1740 	pio->io_reexecute = 0;
1741 	pio->io_flags |= ZIO_FLAG_REEXECUTED;
1742 	pio->io_pipeline_trace = 0;
1743 	pio->io_error = 0;
1744 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1745 		pio->io_state[w] = 0;
1746 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
1747 		pio->io_child_error[c] = 0;
1748 
1749 	if (IO_IS_ALLOCATING(pio))
1750 		BP_ZERO(pio->io_bp);
1751 
1752 	/*
1753 	 * As we reexecute pio's children, new children could be created.
1754 	 * New children go to the head of pio's io_child_list, however,
1755 	 * so we will (correctly) not reexecute them.  The key is that
1756 	 * the remainder of pio's io_child_list, from 'cio_next' onward,
1757 	 * cannot be affected by any side effects of reexecuting 'cio'.
1758 	 */
1759 	zio_link_t *zl = NULL;
1760 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
1761 		cio_next = zio_walk_children(pio, &zl);
1762 		mutex_enter(&pio->io_lock);
1763 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1764 			pio->io_children[cio->io_child_type][w]++;
1765 		mutex_exit(&pio->io_lock);
1766 		zio_reexecute(cio);
1767 	}
1768 
1769 	/*
1770 	 * Now that all children have been reexecuted, execute the parent.
1771 	 * We don't reexecute "The Godfather" I/O here as it's the
1772 	 * responsibility of the caller to wait on it.
1773 	 */
1774 	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
1775 		pio->io_queued_timestamp = gethrtime();
1776 		zio_execute(pio);
1777 	}
1778 }
1779 
1780 void
1781 zio_suspend(spa_t *spa, zio_t *zio)
1782 {
1783 	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
1784 		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
1785 		    "failure and the failure mode property for this pool "
1786 		    "is set to panic.", spa_name(spa));
1787 
1788 	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);
1789 
1790 	mutex_enter(&spa->spa_suspend_lock);
1791 
1792 	if (spa->spa_suspend_zio_root == NULL)
1793 		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
1794 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
1795 		    ZIO_FLAG_GODFATHER);
1796 
1797 	spa->spa_suspended = B_TRUE;
1798 
1799 	if (zio != NULL) {
1800 		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
1801 		ASSERT(zio != spa->spa_suspend_zio_root);
1802 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1803 		ASSERT(zio_unique_parent(zio) == NULL);
1804 		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
1805 		zio_add_child(spa->spa_suspend_zio_root, zio);
1806 	}
1807 
1808 	mutex_exit(&spa->spa_suspend_lock);
1809 }
1810 
1811 int
1812 zio_resume(spa_t *spa)
1813 {
1814 	zio_t *pio;
1815 
1816 	/*
1817 	 * Reexecute all previously suspended i/o.
1818 	 */
1819 	mutex_enter(&spa->spa_suspend_lock);
1820 	spa->spa_suspended = B_FALSE;
1821 	cv_broadcast(&spa->spa_suspend_cv);
1822 	pio = spa->spa_suspend_zio_root;
1823 	spa->spa_suspend_zio_root = NULL;
1824 	mutex_exit(&spa->spa_suspend_lock);
1825 
1826 	if (pio == NULL)
1827 		return (0);
1828 
1829 	zio_reexecute(pio);
1830 	return (zio_wait(pio));
1831 }
1832 
1833 void
1834 zio_resume_wait(spa_t *spa)
1835 {
1836 	mutex_enter(&spa->spa_suspend_lock);
1837 	while (spa_suspended(spa))
1838 		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
1839 	mutex_exit(&spa->spa_suspend_lock);
1840 }
1841 
1842 /*
1843  * ==========================================================================
1844  * Gang blocks.
1845  *
1846  * A gang block is a collection of small blocks that looks to the DMU
1847  * like one large block.  When zio_dva_allocate() cannot find a block
1848  * of the requested size, due to either severe fragmentation or the pool
1849  * being nearly full, it calls zio_write_gang_block() to construct the
1850  * block from smaller fragments.
1851  *
1852  * A gang block consists of a gang header (zio_gbh_phys_t) and up to
1853  * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
1854  * an indirect block: it's an array of block pointers.  It consumes
1855  * only one sector and hence is allocatable regardless of fragmentation.
1856  * The gang header's bps point to its gang members, which hold the data.
1857  *
1858  * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
1859  * as the verifier to ensure uniqueness of the SHA256 checksum.
1860  * Critically, the gang block bp's blk_cksum is the checksum of the data,
1861  * not the gang header.  This ensures that data block signatures (needed for
1862  * deduplication) are independent of how the block is physically stored.
1863  *
1864  * Gang blocks can be nested: a gang member may itself be a gang block.
1865  * Thus every gang block is a tree in which root and all interior nodes are
1866  * gang headers, and the leaves are normal blocks that contain user data.
1867  * The root of the gang tree is called the gang leader.
1868  *
1869  * To perform any operation (read, rewrite, free, claim) on a gang block,
1870  * zio_gang_assemble() first assembles the gang tree (minus data leaves)
1871  * in the io_gang_tree field of the original logical i/o by recursively
1872  * reading the gang leader and all gang headers below it.  This yields
1873  * an in-core tree containing the contents of every gang header and the
1874  * bps for every constituent of the gang block.
1875  *
1876  * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
1877  * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
1878  * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
1879  * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
1880  * zio_read_gang() is a wrapper around zio_read() that omits reading gang
1881  * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
1882  * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
1883  * of the gang header plus zio_checksum_compute() of the data to update the
1884  * gang header's blk_cksum as described above.
1885  *
1886  * The two-phase assemble/issue model solves the problem of partial failure --
1887  * what if you'd freed part of a gang block but then couldn't read the
1888  * gang header for another part?  Assembling the entire gang tree first
1889  * ensures that all the necessary gang header I/O has succeeded before
1890  * starting the actual work of free, claim, or write.  Once the gang tree
1891  * is assembled, free and claim are in-memory operations that cannot fail.
1892  *
1893  * In the event that a gang write fails, zio_dva_unallocate() walks the
1894  * gang tree to immediately free (i.e. insert back into the space map)
1895  * everything we've allocated.  This ensures that we don't get ENOSPC
1896  * errors during repeated suspend/resume cycles due to a flaky device.
1897  *
1898  * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
1899  * the gang tree, we won't modify the block, so we can safely defer the free
1900  * (knowing that the block is still intact).  If we *can* assemble the gang
1901  * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
1902  * each constituent bp and we can allocate a new block on the next sync pass.
1903  *
1904  * In all cases, the gang tree allows complete recovery from partial failure.
1905  * ==========================================================================
1906  */
1907 
1908 static void
1909 zio_gang_issue_func_done(zio_t *zio)
1910 {
1911 	abd_put(zio->io_abd);
1912 }
1913 
1914 static zio_t *
1915 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
1916     uint64_t offset)
1917 {
1918 	if (gn != NULL)
1919 		return (pio);
1920 
1921 	return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
1922 	    BP_GET_PSIZE(bp), zio_gang_issue_func_done,
1923 	    NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
1924 	    &pio->io_bookmark));
1925 }
1926 
1927 static zio_t *
1928 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
1929     uint64_t offset)
1930 {
1931 	zio_t *zio;
1932 
1933 	if (gn != NULL) {
1934 		abd_t *gbh_abd =
1935 		    abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
1936 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
1937 		    gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
1938 		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
1939 		    &pio->io_bookmark);
1940 		/*
1941 		 * As we rewrite each gang header, the pipeline will compute
1942 		 * a new gang block header checksum for it; but no one will
1943 		 * compute a new data checksum, so we do that here.  The one
1944 		 * exception is the gang leader: the pipeline already computed
1945 		 * its data checksum because that stage precedes gang assembly.
1946 		 * (Presently, nothing actually uses interior data checksums;
1947 		 * this is just good hygiene.)
1948 		 */
1949 		if (gn != pio->io_gang_leader->io_gang_tree) {
1950 			abd_t *buf = abd_get_offset(data, offset);
1951 
1952 			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
1953 			    buf, BP_GET_PSIZE(bp));
1954 
1955 			abd_put(buf);
1956 		}
1957 		/*
1958 		 * If we are here to damage data for testing purposes,
1959 		 * leave the GBH alone so that we can detect the damage.
1960 		 */
1961 		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
1962 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
1963 	} else {
1964 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
1965 		    abd_get_offset(data, offset), BP_GET_PSIZE(bp),
1966 		    zio_gang_issue_func_done, NULL, pio->io_priority,
1967 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
1968 	}
1969 
1970 	return (zio);
1971 }
1972 
1973 /* ARGSUSED */
1974 static zio_t *
1975 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
1976     uint64_t offset)
1977 {
1978 	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
1979 	    ZIO_GANG_CHILD_FLAGS(pio)));
1980 }
1981 
1982 /* ARGSUSED */
1983 static zio_t *
1984 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
1985     uint64_t offset)
1986 {
1987 	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
1988 	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
1989 }
1990 
1991 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
1992 	NULL,
1993 	zio_read_gang,
1994 	zio_rewrite_gang,
1995 	zio_free_gang,
1996 	zio_claim_gang,
1997 	NULL
1998 };
1999 
2000 static void zio_gang_tree_assemble_done(zio_t *zio);
2001 
2002 static zio_gang_node_t *
2003 zio_gang_node_alloc(zio_gang_node_t **gnpp)
2004 {
2005 	zio_gang_node_t *gn;
2006 
2007 	ASSERT(*gnpp == NULL);
2008 
2009 	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
2010 	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
2011 	*gnpp = gn;
2012 
2013 	return (gn);
2014 }
2015 
2016 static void
2017 zio_gang_node_free(zio_gang_node_t **gnpp)
2018 {
2019 	zio_gang_node_t *gn = *gnpp;
2020 
2021 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2022 		ASSERT(gn->gn_child[g] == NULL);
2023 
2024 	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2025 	kmem_free(gn, sizeof (*gn));
2026 	*gnpp = NULL;
2027 }
2028 
2029 static void
2030 zio_gang_tree_free(zio_gang_node_t **gnpp)
2031 {
2032 	zio_gang_node_t *gn = *gnpp;
2033 
2034 	if (gn == NULL)
2035 		return;
2036 
2037 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2038 		zio_gang_tree_free(&gn->gn_child[g]);
2039 
2040 	zio_gang_node_free(gnpp);
2041 }
2042 
2043 static void
2044 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
2045 {
2046 	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
2047 	abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2048 
2049 	ASSERT(gio->io_gang_leader == gio);
2050 	ASSERT(BP_IS_GANG(bp));
2051 
2052 	zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2053 	    zio_gang_tree_assemble_done, gn, gio->io_priority,
2054 	    ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
2055 }
2056 
2057 static void
2058 zio_gang_tree_assemble_done(zio_t *zio)
2059 {
2060 	zio_t *gio = zio->io_gang_leader;
2061 	zio_gang_node_t *gn = zio->io_private;
2062 	blkptr_t *bp = zio->io_bp;
2063 
2064 	ASSERT(gio == zio_unique_parent(zio));
2065 	ASSERT(zio->io_child_count == 0);
2066 
2067 	if (zio->io_error)
2068 		return;
2069 
2070 	/* this ABD was created from a linear buf in zio_gang_tree_assemble */
2071 	if (BP_SHOULD_BYTESWAP(bp))
2072 		byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
2073 
2074 	ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
2075 	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
2076 	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2077 
2078 	abd_put(zio->io_abd);
2079 
2080 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2081 		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2082 		if (!BP_IS_GANG(gbp))
2083 			continue;
2084 		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
2085 	}
2086 }
2087 
2088 static void
2089 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
2090     uint64_t offset)
2091 {
2092 	zio_t *gio = pio->io_gang_leader;
2093 	zio_t *zio;
2094 
2095 	ASSERT(BP_IS_GANG(bp) == !!gn);
2096 	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
2097 	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
2098 
2099 	/*
2100 	 * If you're a gang header, your data is in gn->gn_gbh.
2101 	 * If you're a gang member, your data is in 'data' and gn == NULL.
2102 	 */
2103 	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
2104 
2105 	if (gn != NULL) {
2106 		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2107 
2108 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2109 			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2110 			if (BP_IS_HOLE(gbp))
2111 				continue;
2112 			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
2113 			    offset);
2114 			offset += BP_GET_PSIZE(gbp);
2115 		}
2116 	}
2117 
2118 	if (gn == gio->io_gang_tree)
2119 		ASSERT3U(gio->io_size, ==, offset);
2120 
2121 	if (zio != pio)
2122 		zio_nowait(zio);
2123 }
2124 
2125 static int
2126 zio_gang_assemble(zio_t *zio)
2127 {
2128 	blkptr_t *bp = zio->io_bp;
2129 
2130 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
2131 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2132 
2133 	zio->io_gang_leader = zio;
2134 
2135 	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
2136 
2137 	return (ZIO_PIPELINE_CONTINUE);
2138 }
2139 
2140 static int
2141 zio_gang_issue(zio_t *zio)
2142 {
2143 	blkptr_t *bp = zio->io_bp;
2144 
2145 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
2146 		return (ZIO_PIPELINE_STOP);
2147 	}
2148 
2149 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
2150 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2151 
2152 	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
2153 		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
2154 		    0);
2155 	else
2156 		zio_gang_tree_free(&zio->io_gang_tree);
2157 
2158 	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2159 
2160 	return (ZIO_PIPELINE_CONTINUE);
2161 }
2162 
2163 static void
2164 zio_write_gang_member_ready(zio_t *zio)
2165 {
2166 	zio_t *pio = zio_unique_parent(zio);
2167 	zio_t *gio = zio->io_gang_leader;
2168 	dva_t *cdva = zio->io_bp->blk_dva;
2169 	dva_t *pdva = pio->io_bp->blk_dva;
2170 	uint64_t asize;
2171 
2172 	if (BP_IS_HOLE(zio->io_bp))
2173 		return;
2174 
2175 	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
2176 
2177 	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
2178 	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
2179 	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
2180 	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
2181 	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
2182 
2183 	mutex_enter(&pio->io_lock);
2184 	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
2185 		ASSERT(DVA_GET_GANG(&pdva[d]));
2186 		asize = DVA_GET_ASIZE(&pdva[d]);
2187 		asize += DVA_GET_ASIZE(&cdva[d]);
2188 		DVA_SET_ASIZE(&pdva[d], asize);
2189 	}
2190 	mutex_exit(&pio->io_lock);
2191 }
2192 
2193 static void
2194 zio_write_gang_done(zio_t *zio)
2195 {
2196 	/*
2197 	 * The io_abd field will be NULL for a zio with no data.  The io_flags
2198 	 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
2199 	 * check for it here as it is cleared in zio_ready.
2200 	 */
2201 	if (zio->io_abd != NULL)
2202 		abd_put(zio->io_abd);
2203 }
2204 
2205 static int
2206 zio_write_gang_block(zio_t *pio)
2207 {
2208 	spa_t *spa = pio->io_spa;
2209 	metaslab_class_t *mc = spa_normal_class(spa);
2210 	blkptr_t *bp = pio->io_bp;
2211 	zio_t *gio = pio->io_gang_leader;
2212 	zio_t *zio;
2213 	zio_gang_node_t *gn, **gnpp;
2214 	zio_gbh_phys_t *gbh;
2215 	abd_t *gbh_abd;
2216 	uint64_t txg = pio->io_txg;
2217 	uint64_t resid = pio->io_size;
2218 	uint64_t lsize;
2219 	int copies = gio->io_prop.zp_copies;
2220 	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
2221 	zio_prop_t zp;
2222 	int error;
2223 	boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
2224 
2225 	int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
2226 	if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2227 		ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2228 		ASSERT(has_data);
2229 
2230 		flags |= METASLAB_ASYNC_ALLOC;
2231 		VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator],
2232 		    pio));
2233 
2234 		/*
2235 		 * The logical zio has already placed a reservation for
2236 		 * 'copies' allocation slots but gang blocks may require
2237 		 * additional copies. These additional copies
2238 		 * (i.e. gbh_copies - copies) are guaranteed to succeed
2239 		 * since metaslab_class_throttle_reserve() always allows
2240 		 * additional reservations for gang blocks.
2241 		 */
2242 		VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
2243 		    pio->io_allocator, pio, flags));
2244 	}
2245 
2246 	error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
2247 	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
2248 	    &pio->io_alloc_list, pio, pio->io_allocator);
2249 	if (error) {
2250 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2251 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2252 			ASSERT(has_data);
2253 
2254 			/*
2255 			 * If we failed to allocate the gang block header then
2256 			 * we remove any additional allocation reservations that
2257 			 * we placed here. The original reservation will
2258 			 * be removed when the logical I/O goes to the ready
2259 			 * stage.
2260 			 */
2261 			metaslab_class_throttle_unreserve(mc,
2262 			    gbh_copies - copies, pio->io_allocator, pio);
2263 		}
2264 		pio->io_error = error;
2265 		return (ZIO_PIPELINE_CONTINUE);
2266 	}
2267 
2268 	if (pio == gio) {
2269 		gnpp = &gio->io_gang_tree;
2270 	} else {
2271 		gnpp = pio->io_private;
2272 		ASSERT(pio->io_ready == zio_write_gang_member_ready);
2273 	}
2274 
2275 	gn = zio_gang_node_alloc(gnpp);
2276 	gbh = gn->gn_gbh;
2277 	bzero(gbh, SPA_GANGBLOCKSIZE);
2278 	gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
2279 
2280 	/*
2281 	 * Create the gang header.
2282 	 */
2283 	zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2284 	    zio_write_gang_done, NULL, pio->io_priority,
2285 	    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2286 
2287 	/*
2288 	 * Create and nowait the gang children.
2289 	 */
2290 	for (int g = 0; resid != 0; resid -= lsize, g++) {
2291 		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
2292 		    SPA_MINBLOCKSIZE);
2293 		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
2294 
2295 		zp.zp_checksum = gio->io_prop.zp_checksum;
2296 		zp.zp_compress = ZIO_COMPRESS_OFF;
2297 		zp.zp_type = DMU_OT_NONE;
2298 		zp.zp_level = 0;
2299 		zp.zp_copies = gio->io_prop.zp_copies;
2300 		zp.zp_dedup = B_FALSE;
2301 		zp.zp_dedup_verify = B_FALSE;
2302 		zp.zp_nopwrite = B_FALSE;
2303 
2304 		zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
2305 		    has_data ? abd_get_offset(pio->io_abd, pio->io_size -
2306 		    resid) : NULL, lsize, lsize, &zp,
2307 		    zio_write_gang_member_ready, NULL, NULL,
2308 		    zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
2309 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2310 
2311 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2312 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2313 			ASSERT(has_data);
2314 
2315 			/*
2316 			 * Gang children won't throttle but we should
2317 			 * account for their work, so reserve an allocation
2318 			 * slot for them here.
2319 			 */
2320 			VERIFY(metaslab_class_throttle_reserve(mc,
2321 			    zp.zp_copies, cio->io_allocator, cio, flags));
2322 		}
2323 		zio_nowait(cio);
2324 	}
2325 
2326 	/*
2327 	 * Set pio's pipeline to just wait for zio to finish.
2328 	 */
2329 	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2330 
2331 	zio_nowait(zio);
2332 
2333 	return (ZIO_PIPELINE_CONTINUE);
2334 }
2335 
2336 /*
2337  * The zio_nop_write stage in the pipeline determines if allocating a
2338  * new bp is necessary.  The nopwrite feature can handle writes in
2339  * either syncing or open context (i.e. zil writes) and as a result is
2340  * mutually exclusive with dedup.
2341  *
2342  * By leveraging a cryptographically secure checksum, such as SHA256, we
2343  * can compare the checksums of the new data and the old to determine if
2344  * allocating a new block is required.  Note that our requirements for
2345  * cryptographic strength are fairly weak: there can't be any accidental
2346  * hash collisions, but we don't need to be secure against intentional
2347  * (malicious) collisions.  To trigger a nopwrite, you have to be able
2348  * to write the file to begin with, and triggering an incorrect (hash
2349  * collision) nopwrite is no worse than simply writing to the file.
2350  * That said, there are no known attacks against the checksum algorithms
2351  * used for nopwrite, assuming that the salt and the checksums
2352  * themselves remain secret.
2353  */
2354 static int
2355 zio_nop_write(zio_t *zio)
2356 {
2357 	blkptr_t *bp = zio->io_bp;
2358 	blkptr_t *bp_orig = &zio->io_bp_orig;
2359 	zio_prop_t *zp = &zio->io_prop;
2360 
2361 	ASSERT(BP_GET_LEVEL(bp) == 0);
2362 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2363 	ASSERT(zp->zp_nopwrite);
2364 	ASSERT(!zp->zp_dedup);
2365 	ASSERT(zio->io_bp_override == NULL);
2366 	ASSERT(IO_IS_ALLOCATING(zio));
2367 
2368 	/*
2369 	 * Check to see if the original bp and the new bp have matching
2370 	 * characteristics (i.e. same checksum, compression algorithms, etc).
2371 	 * If they don't then just continue with the pipeline which will
2372 	 * allocate a new bp.
2373 	 */
2374 	if (BP_IS_HOLE(bp_orig) ||
2375 	    !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
2376 	    ZCHECKSUM_FLAG_NOPWRITE) ||
2377 	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
2378 	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
2379 	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
2380 	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
2381 		return (ZIO_PIPELINE_CONTINUE);
2382 
2383 	/*
2384 	 * If the checksums match then reset the pipeline so that we
2385 	 * avoid allocating a new bp and issuing any I/O.
2386 	 */
2387 	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
2388 		ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
2389 		    ZCHECKSUM_FLAG_NOPWRITE);
2390 		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
2391 		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
2392 		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
2393 		ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
2394 		    sizeof (uint64_t)) == 0);
2395 
2396 		*bp = *bp_orig;
2397 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2398 		zio->io_flags |= ZIO_FLAG_NOPWRITE;
2399 	}
2400 
2401 	return (ZIO_PIPELINE_CONTINUE);
2402 }
2403 
2404 /*
2405  * ==========================================================================
2406  * Dedup
2407  * ==========================================================================
2408  */
2409 static void
2410 zio_ddt_child_read_done(zio_t *zio)
2411 {
2412 	blkptr_t *bp = zio->io_bp;
2413 	ddt_entry_t *dde = zio->io_private;
2414 	ddt_phys_t *ddp;
2415 	zio_t *pio = zio_unique_parent(zio);
2416 
2417 	mutex_enter(&pio->io_lock);
2418 	ddp = ddt_phys_select(dde, bp);
2419 	if (zio->io_error == 0)
2420 		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
2421 
2422 	if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
2423 		dde->dde_repair_abd = zio->io_abd;
2424 	else
2425 		abd_free(zio->io_abd);
2426 	mutex_exit(&pio->io_lock);
2427 }
2428 
2429 static int
2430 zio_ddt_read_start(zio_t *zio)
2431 {
2432 	blkptr_t *bp = zio->io_bp;
2433 
2434 	ASSERT(BP_GET_DEDUP(bp));
2435 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
2436 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2437 
2438 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
2439 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
2440 		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
2441 		ddt_phys_t *ddp = dde->dde_phys;
2442 		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
2443 		blkptr_t blk;
2444 
2445 		ASSERT(zio->io_vsd == NULL);
2446 		zio->io_vsd = dde;
2447 
2448 		if (ddp_self == NULL)
2449 			return (ZIO_PIPELINE_CONTINUE);
2450 
2451 		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
2452 			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
2453 				continue;
2454 			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
2455 			    &blk);
2456 			zio_nowait(zio_read(zio, zio->io_spa, &blk,
2457 			    abd_alloc_for_io(zio->io_size, B_TRUE),
2458 			    zio->io_size, zio_ddt_child_read_done, dde,
2459 			    zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
2460 			    ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
2461 		}
2462 		return (ZIO_PIPELINE_CONTINUE);
2463 	}
2464 
2465 	zio_nowait(zio_read(zio, zio->io_spa, bp,
2466 	    zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
2467 	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
2468 
2469 	return (ZIO_PIPELINE_CONTINUE);
2470 }
2471 
2472 static int
2473 zio_ddt_read_done(zio_t *zio)
2474 {
2475 	blkptr_t *bp = zio->io_bp;
2476 
2477 	if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
2478 		return (ZIO_PIPELINE_STOP);
2479 	}
2480 
2481 	ASSERT(BP_GET_DEDUP(bp));
2482 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
2483 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2484 
2485 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
2486 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
2487 		ddt_entry_t *dde = zio->io_vsd;
2488 		if (ddt == NULL) {
2489 			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
2490 			return (ZIO_PIPELINE_CONTINUE);
2491 		}
2492 		if (dde == NULL) {
2493 			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
2494 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2495 			return (ZIO_PIPELINE_STOP);
2496 		}
2497 		if (dde->dde_repair_abd != NULL) {
2498 			abd_copy(zio->io_abd, dde->dde_repair_abd,
2499 			    zio->io_size);
2500 			zio->io_child_error[ZIO_CHILD_DDT] = 0;
2501 		}
2502 		ddt_repair_done(ddt, dde);
2503 		zio->io_vsd = NULL;
2504 	}
2505 
2506 	ASSERT(zio->io_vsd == NULL);
2507 
2508 	return (ZIO_PIPELINE_CONTINUE);
2509 }
2510 
2511 static boolean_t
2512 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
2513 {
2514 	spa_t *spa = zio->io_spa;
2515 	boolean_t do_raw = (zio->io_flags & ZIO_FLAG_RAW);
2516 
2517 	/* We should never get a raw, override zio */
2518 	ASSERT(!(zio->io_bp_override && do_raw));
2519 
2520 	/*
2521 	 * Note: we compare the original data, not the transformed data,
2522 	 * because when zio->io_bp is an override bp, we will not have
2523 	 * pushed the I/O transforms.  That's an important optimization
2524 	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
2525 	 */
2526 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
2527 		zio_t *lio = dde->dde_lead_zio[p];
2528 
2529 		if (lio != NULL) {
2530 			return (lio->io_orig_size != zio->io_orig_size ||
2531 			    abd_cmp(zio->io_orig_abd, lio->io_orig_abd,
2532 			    zio->io_orig_size) != 0);
2533 		}
2534 	}
2535 
2536 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
2537 		ddt_phys_t *ddp = &dde->dde_phys[p];
2538 
2539 		if (ddp->ddp_phys_birth != 0) {
2540 			arc_buf_t *abuf = NULL;
2541 			arc_flags_t aflags = ARC_FLAG_WAIT;
2542 			int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
2543 			blkptr_t blk = *zio->io_bp;
2544 			int error;
2545 
2546 			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
2547 
2548 			ddt_exit(ddt);
2549 
2550 			/*
2551 			 * Intuitively, it would make more sense to compare
2552 			 * io_abd than io_orig_abd in the raw case since you
2553 			 * don't want to look at any transformations that have
2554 			 * happened to the data. However, for raw I/Os the
2555 			 * data will actually be the same in io_abd and
2556 			 * io_orig_abd, so all we have to do is issue this as
2557 			 * a raw ARC read.
2558 			 */
2559 			if (do_raw) {
2560 				zio_flags |= ZIO_FLAG_RAW;
2561 				ASSERT3U(zio->io_size, ==, zio->io_orig_size);
2562 				ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd,
2563 				    zio->io_size));
2564 				ASSERT3P(zio->io_transform_stack, ==, NULL);
2565 			}
2566 
2567 			error = arc_read(NULL, spa, &blk,
2568 			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
2569 			    zio_flags, &aflags, &zio->io_bookmark);
2570 
2571 			if (error == 0) {
2572 				if (arc_buf_size(abuf) != zio->io_orig_size ||
2573 				    abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
2574 				    zio->io_orig_size) != 0)
2575 					error = SET_ERROR(EEXIST);
2576 				arc_buf_destroy(abuf, &abuf);
2577 			}
2578 
2579 			ddt_enter(ddt);
2580 			return (error != 0);
2581 		}
2582 	}
2583 
2584 	return (B_FALSE);
2585 }
2586 
2587 static void
2588 zio_ddt_child_write_ready(zio_t *zio)
2589 {
2590 	int p = zio->io_prop.zp_copies;
2591 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
2592 	ddt_entry_t *dde = zio->io_private;
2593 	ddt_phys_t *ddp = &dde->dde_phys[p];
2594 	zio_t *pio;
2595 
2596 	if (zio->io_error)
2597 		return;
2598 
2599 	ddt_enter(ddt);
2600 
2601 	ASSERT(dde->dde_lead_zio[p] == zio);
2602 
2603 	ddt_phys_fill(ddp, zio->io_bp);
2604 
2605 	zio_link_t *zl = NULL;
2606 	while ((pio = zio_walk_parents(zio, &zl)) != NULL)
2607 		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
2608 
2609 	ddt_exit(ddt);
2610 }
2611 
2612 static void
2613 zio_ddt_child_write_done(zio_t *zio)
2614 {
2615 	int p = zio->io_prop.zp_copies;
2616 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
2617 	ddt_entry_t *dde = zio->io_private;
2618 	ddt_phys_t *ddp = &dde->dde_phys[p];
2619 
2620 	ddt_enter(ddt);
2621 
2622 	ASSERT(ddp->ddp_refcnt == 0);
2623 	ASSERT(dde->dde_lead_zio[p] == zio);
2624 	dde->dde_lead_zio[p] = NULL;
2625 
2626 	if (zio->io_error == 0) {
2627 		zio_link_t *zl = NULL;
2628 		while (zio_walk_parents(zio, &zl) != NULL)
2629 			ddt_phys_addref(ddp);
2630 	} else {
2631 		ddt_phys_clear(ddp);
2632 	}
2633 
2634 	ddt_exit(ddt);
2635 }
2636 
2637 static void
2638 zio_ddt_ditto_write_done(zio_t *zio)
2639 {
2640 	int p = DDT_PHYS_DITTO;
2641 	zio_prop_t *zp = &zio->io_prop;
2642 	blkptr_t *bp = zio->io_bp;
2643 	ddt_t *ddt = ddt_select(zio->io_spa, bp);
2644 	ddt_entry_t *dde = zio->io_private;
2645 	ddt_phys_t *ddp = &dde->dde_phys[p];
2646 	ddt_key_t *ddk = &dde->dde_key;
2647 
2648 	ddt_enter(ddt);
2649 
2650 	ASSERT(ddp->ddp_refcnt == 0);
2651 	ASSERT(dde->dde_lead_zio[p] == zio);
2652 	dde->dde_lead_zio[p] = NULL;
2653 
2654 	if (zio->io_error == 0) {
2655 		ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum));
2656 		ASSERT(zp->zp_copies < SPA_DVAS_PER_BP);
2657 		ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp));
2658 		if (ddp->ddp_phys_birth != 0)
2659 			ddt_phys_free(ddt, ddk, ddp, zio->io_txg);
2660 		ddt_phys_fill(ddp, bp);
2661 	}
2662 
2663 	ddt_exit(ddt);
2664 }
2665 
2666 static int
2667 zio_ddt_write(zio_t *zio)
2668 {
2669 	spa_t *spa = zio->io_spa;
2670 	blkptr_t *bp = zio->io_bp;
2671 	uint64_t txg = zio->io_txg;
2672 	zio_prop_t *zp = &zio->io_prop;
2673 	int p = zp->zp_copies;
2674 	int ditto_copies;
2675 	zio_t *cio = NULL;
2676 	zio_t *dio = NULL;
2677 	ddt_t *ddt = ddt_select(spa, bp);
2678 	ddt_entry_t *dde;
2679 	ddt_phys_t *ddp;
2680 
2681 	ASSERT(BP_GET_DEDUP(bp));
2682 	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
2683 	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
2684 	ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
2685 
2686 	ddt_enter(ddt);
2687 	dde = ddt_lookup(ddt, bp, B_TRUE);
2688 	ddp = &dde->dde_phys[p];
2689 
2690 	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
2691 		/*
2692 		 * If we're using a weak checksum, upgrade to a strong checksum
2693 		 * and try again.  If we're already using a strong checksum,
2694 		 * we can't resolve it, so just convert to an ordinary write.
2695 		 * (And automatically e-mail a paper to Nature?)
2696 		 */
2697 		if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
2698 		    ZCHECKSUM_FLAG_DEDUP)) {
2699 			zp->zp_checksum = spa_dedup_checksum(spa);
2700 			zio_pop_transforms(zio);
2701 			zio->io_stage = ZIO_STAGE_OPEN;
2702 			BP_ZERO(bp);
2703 		} else {
2704 			zp->zp_dedup = B_FALSE;
2705 			BP_SET_DEDUP(bp, B_FALSE);
2706 		}
2707 		ASSERT(!BP_GET_DEDUP(bp));
2708 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
2709 		ddt_exit(ddt);
2710 		return (ZIO_PIPELINE_CONTINUE);
2711 	}
2712 
2713 	ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
2714 	ASSERT(ditto_copies < SPA_DVAS_PER_BP);
2715 
2716 	if (ditto_copies > ddt_ditto_copies_present(dde) &&
2717 	    dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) {
2718 		zio_prop_t czp = *zp;
2719 
2720 		czp.zp_copies = ditto_copies;
2721 
2722 		/*
2723 		 * If we arrived here with an override bp, we won't have run
2724 		 * the transform stack, so we won't have the data we need to
2725 		 * generate a child i/o.  So, toss the override bp and restart.
2726 		 * This is safe, because using the override bp is just an
2727 		 * optimization; and it's rare, so the cost doesn't matter.
2728 		 */
2729 		if (zio->io_bp_override) {
2730 			zio_pop_transforms(zio);
2731 			zio->io_stage = ZIO_STAGE_OPEN;
2732 			zio->io_pipeline = ZIO_WRITE_PIPELINE;
2733 			zio->io_bp_override = NULL;
2734 			BP_ZERO(bp);
2735 			ddt_exit(ddt);
2736 			return (ZIO_PIPELINE_CONTINUE);
2737 		}
2738 
2739 		dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
2740 		    zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL,
2741 		    NULL, zio_ddt_ditto_write_done, dde, zio->io_priority,
2742 		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
2743 
2744 		zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL);
2745 		dde->dde_lead_zio[DDT_PHYS_DITTO] = dio;
2746 	}
2747 
2748 	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
2749 		if (ddp->ddp_phys_birth != 0)
2750 			ddt_bp_fill(ddp, bp, txg);
2751 		if (dde->dde_lead_zio[p] != NULL)
2752 			zio_add_child(zio, dde->dde_lead_zio[p]);
2753 		else
2754 			ddt_phys_addref(ddp);
2755 	} else if (zio->io_bp_override) {
2756 		ASSERT(bp->blk_birth == txg);
2757 		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
2758 		ddt_phys_fill(ddp, bp);
2759 		ddt_phys_addref(ddp);
2760 	} else {
2761 		cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
2762 		    zio->io_orig_size, zio->io_orig_size, zp,
2763 		    zio_ddt_child_write_ready, NULL, NULL,
2764 		    zio_ddt_child_write_done, dde, zio->io_priority,
2765 		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
2766 
2767 		zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
2768 		dde->dde_lead_zio[p] = cio;
2769 	}
2770 
2771 	ddt_exit(ddt);
2772 
2773 	if (cio)
2774 		zio_nowait(cio);
2775 	if (dio)
2776 		zio_nowait(dio);
2777 
2778 	return (ZIO_PIPELINE_CONTINUE);
2779 }
2780 
2781 ddt_entry_t *freedde; /* for debugging */
2782 
2783 static int
2784 zio_ddt_free(zio_t *zio)
2785 {
2786 	spa_t *spa = zio->io_spa;
2787 	blkptr_t *bp = zio->io_bp;
2788 	ddt_t *ddt = ddt_select(spa, bp);
2789 	ddt_entry_t *dde;
2790 	ddt_phys_t *ddp;
2791 
2792 	ASSERT(BP_GET_DEDUP(bp));
2793 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2794 
2795 	ddt_enter(ddt);
2796 	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
2797 	ddp = ddt_phys_select(dde, bp);
2798 	ddt_phys_decref(ddp);
2799 	ddt_exit(ddt);
2800 
2801 	return (ZIO_PIPELINE_CONTINUE);
2802 }
2803 
2804 /*
2805  * ==========================================================================
2806  * Allocate and free blocks
2807  * ==========================================================================
2808  */
2809 
2810 static zio_t *
2811 zio_io_to_allocate(spa_t *spa, int allocator)
2812 {
2813 	zio_t *zio;
2814 
2815 	ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator]));
2816 
2817 	zio = avl_first(&spa->spa_alloc_trees[allocator]);
2818 	if (zio == NULL)
2819 		return (NULL);
2820 
2821 	ASSERT(IO_IS_ALLOCATING(zio));
2822 
2823 	/*
2824 	 * Try to place a reservation for this zio. If we're unable to
2825 	 * reserve then we throttle.
2826 	 */
2827 	ASSERT3U(zio->io_allocator, ==, allocator);
2828 	if (!metaslab_class_throttle_reserve(spa_normal_class(spa),
2829 	    zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) {
2830 		return (NULL);
2831 	}
2832 
2833 	avl_remove(&spa->spa_alloc_trees[allocator], zio);
2834 	ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
2835 
2836 	return (zio);
2837 }
2838 
2839 static int
2840 zio_dva_throttle(zio_t *zio)
2841 {
2842 	spa_t *spa = zio->io_spa;
2843 	zio_t *nio;
2844 
2845 	if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
2846 	    !spa_normal_class(zio->io_spa)->mc_alloc_throttle_enabled ||
2847 	    zio->io_child_type == ZIO_CHILD_GANG ||
2848 	    zio->io_flags & ZIO_FLAG_NODATA) {
2849 		return (ZIO_PIPELINE_CONTINUE);
2850 	}
2851 
2852 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2853 
2854 	ASSERT3U(zio->io_queued_timestamp, >, 0);
2855 	ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
2856 
2857 	zbookmark_phys_t *bm = &zio->io_bookmark;
2858 	/*
2859 	 * We want to try to use as many allocators as possible to help improve
2860 	 * performance, but we also want logically adjacent IOs to be physically
2861 	 * adjacent to improve sequential read performance. We chunk each object
2862 	 * into 2^20 block regions, and then hash based on the objset, object,
2863 	 * level, and region to accomplish both of these goals.
2864 	 */
2865 	zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object,
2866 	    bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
2867 	mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]);
2868 
2869 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
2870 	avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio);
2871 
2872 	nio = zio_io_to_allocate(zio->io_spa, zio->io_allocator);
2873 	mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]);
2874 
2875 	if (nio == zio)
2876 		return (ZIO_PIPELINE_CONTINUE);
2877 
2878 	if (nio != NULL) {
2879 		ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE);
2880 		/*
2881 		 * We are passing control to a new zio so make sure that
2882 		 * it is processed by a different thread. We do this to
2883 		 * avoid stack overflows that can occur when parents are
2884 		 * throttled and children are making progress. We allow
2885 		 * it to go to the head of the taskq since it's already
2886 		 * been waiting.
2887 		 */
2888 		zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE);
2889 	}
2890 	return (ZIO_PIPELINE_STOP);
2891 }
2892 
2893 void
2894 zio_allocate_dispatch(spa_t *spa, int allocator)
2895 {
2896 	zio_t *zio;
2897 
2898 	mutex_enter(&spa->spa_alloc_locks[allocator]);
2899 	zio = zio_io_to_allocate(spa, allocator);
2900 	mutex_exit(&spa->spa_alloc_locks[allocator]);
2901 	if (zio == NULL)
2902 		return;
2903 
2904 	ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
2905 	ASSERT0(zio->io_error);
2906 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
2907 }
2908 
2909 static int
2910 zio_dva_allocate(zio_t *zio)
2911 {
2912 	spa_t *spa = zio->io_spa;
2913 	metaslab_class_t *mc = spa_normal_class(spa);
2914 	blkptr_t *bp = zio->io_bp;
2915 	int error;
2916 	int flags = 0;
2917 
2918 	if (zio->io_gang_leader == NULL) {
2919 		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2920 		zio->io_gang_leader = zio;
2921 	}
2922 
2923 	ASSERT(BP_IS_HOLE(bp));
2924 	ASSERT0(BP_GET_NDVAS(bp));
2925 	ASSERT3U(zio->io_prop.zp_copies, >, 0);
2926 	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
2927 	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
2928 
2929 	if (zio->io_flags & ZIO_FLAG_NODATA) {
2930 		flags |= METASLAB_DONT_THROTTLE;
2931 	}
2932 	if (zio->io_flags & ZIO_FLAG_GANG_CHILD) {
2933 		flags |= METASLAB_GANG_CHILD;
2934 	}
2935 	if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) {
2936 		flags |= METASLAB_ASYNC_ALLOC;
2937 	}
2938 
2939 	error = metaslab_alloc(spa, mc, zio->io_size, bp,
2940 	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
2941 	    &zio->io_alloc_list, zio, zio->io_allocator);
2942 
2943 	if (error != 0) {
2944 		zfs_dbgmsg("%s: metaslab allocation failure: zio %p, "
2945 		    "size %llu, error %d", spa_name(spa), zio, zio->io_size,
2946 		    error);
2947 		if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
2948 			return (zio_write_gang_block(zio));
2949 		zio->io_error = error;
2950 	}
2951 
2952 	return (ZIO_PIPELINE_CONTINUE);
2953 }
2954 
2955 static int
2956 zio_dva_free(zio_t *zio)
2957 {
2958 	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
2959 
2960 	return (ZIO_PIPELINE_CONTINUE);
2961 }
2962 
2963 static int
2964 zio_dva_claim(zio_t *zio)
2965 {
2966 	int error;
2967 
2968 	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
2969 	if (error)
2970 		zio->io_error = error;
2971 
2972 	return (ZIO_PIPELINE_CONTINUE);
2973 }
2974 
2975 /*
2976  * Undo an allocation.  This is used by zio_done() when an I/O fails
2977  * and we want to give back the block we just allocated.
2978  * This handles both normal blocks and gang blocks.
2979  */
2980 static void
2981 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
2982 {
2983 	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
2984 	ASSERT(zio->io_bp_override == NULL);
2985 
2986 	if (!BP_IS_HOLE(bp))
2987 		metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
2988 
2989 	if (gn != NULL) {
2990 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2991 			zio_dva_unallocate(zio, gn->gn_child[g],
2992 			    &gn->gn_gbh->zg_blkptr[g]);
2993 		}
2994 	}
2995 }
2996 
2997 /*
2998  * Try to allocate an intent log block.  Return 0 on success, errno on failure.
2999  */
3000 int
3001 zio_alloc_zil(spa_t *spa, uint64_t objset, uint64_t txg, blkptr_t *new_bp,
3002     blkptr_t *old_bp, uint64_t size, boolean_t *slog)
3003 {
3004 	int error = 1;
3005 	zio_alloc_list_t io_alloc_list;
3006 
3007 	ASSERT(txg > spa_syncing_txg(spa));
3008 
3009 	metaslab_trace_init(&io_alloc_list);
3010 	/*
3011 	 * When allocating a zil block, we don't have information about
3012 	 * the final destination of the block except the objset it's part
3013 	 * of, so we just hash the objset ID to pick the allocator to get
3014 	 * some parallelism.
3015 	 */
3016 	error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
3017 	    txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL,
3018 	    cityhash4(0, 0, 0, objset) % spa->spa_alloc_count);
3019 	if (error == 0) {
3020 		*slog = TRUE;
3021 	} else {
3022 		error = metaslab_alloc(spa, spa_normal_class(spa), size,
3023 		    new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID,
3024 		    &io_alloc_list, NULL, cityhash4(0, 0, 0, objset) %
3025 		    spa->spa_alloc_count);
3026 		if (error == 0)
3027 			*slog = FALSE;
3028 	}
3029 	metaslab_trace_fini(&io_alloc_list);
3030 
3031 	if (error == 0) {
3032 		BP_SET_LSIZE(new_bp, size);
3033 		BP_SET_PSIZE(new_bp, size);
3034 		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
3035 		BP_SET_CHECKSUM(new_bp,
3036 		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
3037 		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
3038 		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3039 		BP_SET_LEVEL(new_bp, 0);
3040 		BP_SET_DEDUP(new_bp, 0);
3041 		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
3042 	} else {
3043 		zfs_dbgmsg("%s: zil block allocation failure: "
3044 		    "size %llu, error %d", spa_name(spa), size, error);
3045 	}
3046 
3047 	return (error);
3048 }
3049 
3050 /*
3051  * ==========================================================================
3052  * Read and write to physical devices
3053  * ==========================================================================
3054  */
3055 
3056 
3057 /*
3058  * Issue an I/O to the underlying vdev. Typically the issue pipeline
3059  * stops after this stage and will resume upon I/O completion.
3060  * However, there are instances where the vdev layer may need to
3061  * continue the pipeline when an I/O was not issued. Since the I/O
3062  * that was sent to the vdev layer might be different than the one
3063  * currently active in the pipeline (see vdev_queue_io()), we explicitly
3064  * force the underlying vdev layers to call either zio_execute() or
3065  * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
3066  */
3067 static int
3068 zio_vdev_io_start(zio_t *zio)
3069 {
3070 	vdev_t *vd = zio->io_vd;
3071 	uint64_t align;
3072 	spa_t *spa = zio->io_spa;
3073 
3074 	ASSERT(zio->io_error == 0);
3075 	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
3076 
3077 	if (vd == NULL) {
3078 		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
3079 			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
3080 
3081 		/*
3082 		 * The mirror_ops handle multiple DVAs in a single BP.
3083 		 */
3084 		vdev_mirror_ops.vdev_op_io_start(zio);
3085 		return (ZIO_PIPELINE_STOP);
3086 	}
3087 
3088 	ASSERT3P(zio->io_logical, !=, zio);
3089 	if (zio->io_type == ZIO_TYPE_WRITE) {
3090 		ASSERT(spa->spa_trust_config);
3091 
3092 		if (zio->io_vd->vdev_removing) {
3093 			/*
3094 			 * Note: the code can handle other kinds of writes,
3095 			 * but we don't expect them.
3096 			 */
3097 			ASSERT(zio->io_flags &
3098 			    (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
3099 			    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
3100 		}
3101 	}
3102 
3103 	/*
3104 	 * We keep track of time-sensitive I/Os so that the scan thread
3105 	 * can quickly react to certain workloads.  In particular, we care
3106 	 * about non-scrubbing, top-level reads and writes with the following
3107 	 * characteristics:
3108 	 *	- synchronous writes of user data to non-slog devices
3109 	 *	- any reads of user data
3110 	 * When these conditions are met, adjust the timestamp of spa_last_io
3111 	 * which allows the scan thread to adjust its workload accordingly.
3112 	 */
3113 	if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL &&
3114 	    vd == vd->vdev_top && !vd->vdev_islog &&
3115 	    zio->io_bookmark.zb_objset != DMU_META_OBJSET &&
3116 	    zio->io_txg != spa_syncing_txg(spa)) {
3117 		uint64_t old = spa->spa_last_io;
3118 		uint64_t new = ddi_get_lbolt64();
3119 		if (old != new)
3120 			(void) atomic_cas_64(&spa->spa_last_io, old, new);
3121 	}
3122 
3123 	align = 1ULL << vd->vdev_top->vdev_ashift;
3124 
3125 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
3126 	    P2PHASE(zio->io_size, align) != 0) {
3127 		/* Transform logical writes to be a full physical block size. */
3128 		uint64_t asize = P2ROUNDUP(zio->io_size, align);
3129 		abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
3130 		ASSERT(vd == vd->vdev_top);
3131 		if (zio->io_type == ZIO_TYPE_WRITE) {
3132 			abd_copy(abuf, zio->io_abd, zio->io_size);
3133 			abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
3134 		}
3135 		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
3136 	}
3137 
3138 	/*
3139 	 * If this is not a physical io, make sure that it is properly aligned
3140 	 * before proceeding.
3141 	 */
3142 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
3143 		ASSERT0(P2PHASE(zio->io_offset, align));
3144 		ASSERT0(P2PHASE(zio->io_size, align));
3145 	} else {
3146 		/*
3147 		 * For physical writes, we allow 512b aligned writes and assume
3148 		 * the device will perform a read-modify-write as necessary.
3149 		 */
3150 		ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
3151 		ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
3152 	}
3153 
3154 	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
3155 
3156 	/*
3157 	 * If this is a repair I/O, and there's no self-healing involved --
3158 	 * that is, we're just resilvering what we expect to resilver --
3159 	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
3160 	 * This prevents spurious resilvering.
3161 	 *
3162 	 * There are a few ways that we can end up creating these spurious
3163 	 * resilver i/os:
3164 	 *
3165 	 * 1. A resilver i/o will be issued if any DVA in the BP has a
3166 	 * dirty DTL.  The mirror code will issue resilver writes to
3167 	 * each DVA, including the one(s) that are not on vdevs with dirty
3168 	 * DTLs.
3169 	 *
3170 	 * 2. With nested replication, which happens when we have a
3171 	 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
3172 	 * For example, given mirror(replacing(A+B), C), it's likely that
3173 	 * only A is out of date (it's the new device). In this case, we'll
3174 	 * read from C, then use the data to resilver A+B -- but we don't
3175 	 * actually want to resilver B, just A. The top-level mirror has no
3176 	 * way to know this, so instead we just discard unnecessary repairs
3177 	 * as we work our way down the vdev tree.
3178 	 *
3179 	 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
3180 	 * The same logic applies to any form of nested replication: ditto
3181 	 * + mirror, RAID-Z + replacing, etc.
3182 	 *
3183 	 * However, indirect vdevs point off to other vdevs which may have
3184 	 * DTL's, so we never bypass them.  The child i/os on concrete vdevs
3185 	 * will be properly bypassed instead.
3186 	 */
3187 	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
3188 	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
3189 	    zio->io_txg != 0 &&	/* not a delegated i/o */
3190 	    vd->vdev_ops != &vdev_indirect_ops &&
3191 	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
3192 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3193 		zio_vdev_io_bypass(zio);
3194 		return (ZIO_PIPELINE_CONTINUE);
3195 	}
3196 
3197 	if (vd->vdev_ops->vdev_op_leaf &&
3198 	    (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {
3199 
3200 		if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
3201 			return (ZIO_PIPELINE_CONTINUE);
3202 
3203 		if ((zio = vdev_queue_io(zio)) == NULL)
3204 			return (ZIO_PIPELINE_STOP);
3205 
3206 		if (!vdev_accessible(vd, zio)) {
3207 			zio->io_error = SET_ERROR(ENXIO);
3208 			zio_interrupt(zio);
3209 			return (ZIO_PIPELINE_STOP);
3210 		}
3211 	}
3212 
3213 	vd->vdev_ops->vdev_op_io_start(zio);
3214 	return (ZIO_PIPELINE_STOP);
3215 }
3216 
3217 static int
3218 zio_vdev_io_done(zio_t *zio)
3219 {
3220 	vdev_t *vd = zio->io_vd;
3221 	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
3222 	boolean_t unexpected_error = B_FALSE;
3223 
3224 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
3225 		return (ZIO_PIPELINE_STOP);
3226 	}
3227 
3228 	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
3229 
3230 	if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {
3231 
3232 		vdev_queue_io_done(zio);
3233 
3234 		if (zio->io_type == ZIO_TYPE_WRITE)
3235 			vdev_cache_write(zio);
3236 
3237 		if (zio_injection_enabled && zio->io_error == 0)
3238 			zio->io_error = zio_handle_device_injection(vd,
3239 			    zio, EIO);
3240 
3241 		if (zio_injection_enabled && zio->io_error == 0)
3242 			zio->io_error = zio_handle_label_injection(zio, EIO);
3243 
3244 		if (zio->io_error) {
3245 			if (!vdev_accessible(vd, zio)) {
3246 				zio->io_error = SET_ERROR(ENXIO);
3247 			} else {
3248 				unexpected_error = B_TRUE;
3249 			}
3250 		}
3251 	}
3252 
3253 	ops->vdev_op_io_done(zio);
3254 
3255 	if (unexpected_error)
3256 		VERIFY(vdev_probe(vd, zio) == NULL);
3257 
3258 	return (ZIO_PIPELINE_CONTINUE);
3259 }
3260 
3261 /*
3262  * For non-raidz ZIOs, we can just copy aside the bad data read from the
3263  * disk, and use that to finish the checksum ereport later.
3264  */
3265 static void
3266 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
3267     const void *good_buf)
3268 {
3269 	/* no processing needed */
3270 	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
3271 }
3272 
3273 /*ARGSUSED*/
3274 void
3275 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
3276 {
3277 	void *buf = zio_buf_alloc(zio->io_size);
3278 
3279 	abd_copy_to_buf(buf, zio->io_abd, zio->io_size);
3280 
3281 	zcr->zcr_cbinfo = zio->io_size;
3282 	zcr->zcr_cbdata = buf;
3283 	zcr->zcr_finish = zio_vsd_default_cksum_finish;
3284 	zcr->zcr_free = zio_buf_free;
3285 }
3286 
3287 static int
3288 zio_vdev_io_assess(zio_t *zio)
3289 {
3290 	vdev_t *vd = zio->io_vd;
3291 
3292 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
3293 		return (ZIO_PIPELINE_STOP);
3294 	}
3295 
3296 	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
3297 		spa_config_exit(zio->io_spa, SCL_ZIO, zio);
3298 
3299 	if (zio->io_vsd != NULL) {
3300 		zio->io_vsd_ops->vsd_free(zio);
3301 		zio->io_vsd = NULL;
3302 	}
3303 
3304 	if (zio_injection_enabled && zio->io_error == 0)
3305 		zio->io_error = zio_handle_fault_injection(zio, EIO);
3306 
3307 	/*
3308 	 * If the I/O failed, determine whether we should attempt to retry it.
3309 	 *
3310 	 * On retry, we cut in line in the issue queue, since we don't want
3311 	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
3312 	 */
3313 	if (zio->io_error && vd == NULL &&
3314 	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
3315 		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
3316 		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
3317 		zio->io_error = 0;
3318 		zio->io_flags |= ZIO_FLAG_IO_RETRY |
3319 		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
3320 		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
3321 		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
3322 		    zio_requeue_io_start_cut_in_line);
3323 		return (ZIO_PIPELINE_STOP);
3324 	}
3325 
3326 	/*
3327 	 * If we got an error on a leaf device, convert it to ENXIO
3328 	 * if the device is not accessible at all.
3329 	 */
3330 	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
3331 	    !vdev_accessible(vd, zio))
3332 		zio->io_error = SET_ERROR(ENXIO);
3333 
3334 	/*
3335 	 * If we can't write to an interior vdev (mirror or RAID-Z),
3336 	 * set vdev_cant_write so that we stop trying to allocate from it.
3337 	 */
3338 	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
3339 	    vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
3340 		vd->vdev_cant_write = B_TRUE;
3341 	}
3342 
3343 	/*
3344 	 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
3345 	 * attempts will ever succeed. In this case we set a persistent bit so
3346 	 * that we don't bother with it in the future.
3347 	 */
3348 	if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
3349 	    zio->io_type == ZIO_TYPE_IOCTL &&
3350 	    zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
3351 		vd->vdev_nowritecache = B_TRUE;
3352 
3353 	if (zio->io_error)
3354 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3355 
3356 	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
3357 	    zio->io_physdone != NULL) {
3358 		ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
3359 		ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
3360 		zio->io_physdone(zio->io_logical);
3361 	}
3362 
3363 	return (ZIO_PIPELINE_CONTINUE);
3364 }
3365 
3366 void
3367 zio_vdev_io_reissue(zio_t *zio)
3368 {
3369 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
3370 	ASSERT(zio->io_error == 0);
3371 
3372 	zio->io_stage >>= 1;
3373 }
3374 
3375 void
3376 zio_vdev_io_redone(zio_t *zio)
3377 {
3378 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
3379 
3380 	zio->io_stage >>= 1;
3381 }
3382 
3383 void
3384 zio_vdev_io_bypass(zio_t *zio)
3385 {
3386 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
3387 	ASSERT(zio->io_error == 0);
3388 
3389 	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
3390 	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
3391 }
3392 
3393 /*
3394  * ==========================================================================
3395  * Generate and verify checksums
3396  * ==========================================================================
3397  */
3398 static int
3399 zio_checksum_generate(zio_t *zio)
3400 {
3401 	blkptr_t *bp = zio->io_bp;
3402 	enum zio_checksum checksum;
3403 
3404 	if (bp == NULL) {
3405 		/*
3406 		 * This is zio_write_phys().
3407 		 * We're either generating a label checksum, or none at all.
3408 		 */
3409 		checksum = zio->io_prop.zp_checksum;
3410 
3411 		if (checksum == ZIO_CHECKSUM_OFF)
3412 			return (ZIO_PIPELINE_CONTINUE);
3413 
3414 		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
3415 	} else {
3416 		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
3417 			ASSERT(!IO_IS_ALLOCATING(zio));
3418 			checksum = ZIO_CHECKSUM_GANG_HEADER;
3419 		} else {
3420 			checksum = BP_GET_CHECKSUM(bp);
3421 		}
3422 	}
3423 
3424 	zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
3425 
3426 	return (ZIO_PIPELINE_CONTINUE);
3427 }
3428 
3429 static int
3430 zio_checksum_verify(zio_t *zio)
3431 {
3432 	zio_bad_cksum_t info;
3433 	blkptr_t *bp = zio->io_bp;
3434 	int error;
3435 
3436 	ASSERT(zio->io_vd != NULL);
3437 
3438 	if (bp == NULL) {
3439 		/*
3440 		 * This is zio_read_phys().
3441 		 * We're either verifying a label checksum, or nothing at all.
3442 		 */
3443 		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
3444 			return (ZIO_PIPELINE_CONTINUE);
3445 
3446 		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
3447 	}
3448 
3449 	if ((error = zio_checksum_error(zio, &info)) != 0) {
3450 		zio->io_error = error;
3451 		if (error == ECKSUM &&
3452 		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
3453 			zfs_ereport_start_checksum(zio->io_spa,
3454 			    zio->io_vd, zio, zio->io_offset,
3455 			    zio->io_size, NULL, &info);
3456 		}
3457 	}
3458 
3459 	return (ZIO_PIPELINE_CONTINUE);
3460 }
3461 
3462 /*
3463  * Called by RAID-Z to ensure we don't compute the checksum twice.
3464  */
3465 void
3466 zio_checksum_verified(zio_t *zio)
3467 {
3468 	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
3469 }
3470 
3471 /*
3472  * ==========================================================================
3473  * Error rank.  Error are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
3474  * An error of 0 indicates success.  ENXIO indicates whole-device failure,
3475  * which may be transient (e.g. unplugged) or permament.  ECKSUM and EIO
3476  * indicate errors that are specific to one I/O, and most likely permanent.
3477  * Any other error is presumed to be worse because we weren't expecting it.
3478  * ==========================================================================
3479  */
3480 int
3481 zio_worst_error(int e1, int e2)
3482 {
3483 	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
3484 	int r1, r2;
3485 
3486 	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
3487 		if (e1 == zio_error_rank[r1])
3488 			break;
3489 
3490 	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
3491 		if (e2 == zio_error_rank[r2])
3492 			break;
3493 
3494 	return (r1 > r2 ? e1 : e2);
3495 }
3496 
3497 /*
3498  * ==========================================================================
3499  * I/O completion
3500  * ==========================================================================
3501  */
3502 static int
3503 zio_ready(zio_t *zio)
3504 {
3505 	blkptr_t *bp = zio->io_bp;
3506 	zio_t *pio, *pio_next;
3507 	zio_link_t *zl = NULL;
3508 
3509 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
3510 	    ZIO_WAIT_READY)) {
3511 		return (ZIO_PIPELINE_STOP);
3512 	}
3513 
3514 	if (zio->io_ready) {
3515 		ASSERT(IO_IS_ALLOCATING(zio));
3516 		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
3517 		    (zio->io_flags & ZIO_FLAG_NOPWRITE));
3518 		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
3519 
3520 		zio->io_ready(zio);
3521 	}
3522 
3523 	if (bp != NULL && bp != &zio->io_bp_copy)
3524 		zio->io_bp_copy = *bp;
3525 
3526 	if (zio->io_error != 0) {
3527 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3528 
3529 		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3530 			ASSERT(IO_IS_ALLOCATING(zio));
3531 			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3532 			/*
3533 			 * We were unable to allocate anything, unreserve and
3534 			 * issue the next I/O to allocate.
3535 			 */
3536 			metaslab_class_throttle_unreserve(
3537 			    spa_normal_class(zio->io_spa),
3538 			    zio->io_prop.zp_copies, zio->io_allocator, zio);
3539 			zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
3540 		}
3541 	}
3542 
3543 	mutex_enter(&zio->io_lock);
3544 	zio->io_state[ZIO_WAIT_READY] = 1;
3545 	pio = zio_walk_parents(zio, &zl);
3546 	mutex_exit(&zio->io_lock);
3547 
3548 	/*
3549 	 * As we notify zio's parents, new parents could be added.
3550 	 * New parents go to the head of zio's io_parent_list, however,
3551 	 * so we will (correctly) not notify them.  The remainder of zio's
3552 	 * io_parent_list, from 'pio_next' onward, cannot change because
3553 	 * all parents must wait for us to be done before they can be done.
3554 	 */
3555 	for (; pio != NULL; pio = pio_next) {
3556 		pio_next = zio_walk_parents(zio, &zl);
3557 		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
3558 	}
3559 
3560 	if (zio->io_flags & ZIO_FLAG_NODATA) {
3561 		if (BP_IS_GANG(bp)) {
3562 			zio->io_flags &= ~ZIO_FLAG_NODATA;
3563 		} else {
3564 			ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
3565 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
3566 		}
3567 	}
3568 
3569 	if (zio_injection_enabled &&
3570 	    zio->io_spa->spa_syncing_txg == zio->io_txg)
3571 		zio_handle_ignored_writes(zio);
3572 
3573 	return (ZIO_PIPELINE_CONTINUE);
3574 }
3575 
3576 /*
3577  * Update the allocation throttle accounting.
3578  */
3579 static void
3580 zio_dva_throttle_done(zio_t *zio)
3581 {
3582 	zio_t *lio = zio->io_logical;
3583 	zio_t *pio = zio_unique_parent(zio);
3584 	vdev_t *vd = zio->io_vd;
3585 	int flags = METASLAB_ASYNC_ALLOC;
3586 
3587 	ASSERT3P(zio->io_bp, !=, NULL);
3588 	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
3589 	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
3590 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
3591 	ASSERT(vd != NULL);
3592 	ASSERT3P(vd, ==, vd->vdev_top);
3593 	ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY)));
3594 	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
3595 	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
3596 	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
3597 
3598 	/*
3599 	 * Parents of gang children can have two flavors -- ones that
3600 	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
3601 	 * and ones that allocated the constituent blocks. The allocation
3602 	 * throttle needs to know the allocating parent zio so we must find
3603 	 * it here.
3604 	 */
3605 	if (pio->io_child_type == ZIO_CHILD_GANG) {
3606 		/*
3607 		 * If our parent is a rewrite gang child then our grandparent
3608 		 * would have been the one that performed the allocation.
3609 		 */
3610 		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
3611 			pio = zio_unique_parent(pio);
3612 		flags |= METASLAB_GANG_CHILD;
3613 	}
3614 
3615 	ASSERT(IO_IS_ALLOCATING(pio));
3616 	ASSERT3P(zio, !=, zio->io_logical);
3617 	ASSERT(zio->io_logical != NULL);
3618 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
3619 	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
3620 
3621 	mutex_enter(&pio->io_lock);
3622 	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
3623 	    pio->io_allocator, B_TRUE);
3624 	mutex_exit(&pio->io_lock);
3625 
3626 	metaslab_class_throttle_unreserve(spa_normal_class(zio->io_spa),
3627 	    1, pio->io_allocator, pio);
3628 
3629 	/*
3630 	 * Call into the pipeline to see if there is more work that
3631 	 * needs to be done. If there is work to be done it will be
3632 	 * dispatched to another taskq thread.
3633 	 */
3634 	zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
3635 }
3636 
3637 static int
3638 zio_done(zio_t *zio)
3639 {
3640 	spa_t *spa = zio->io_spa;
3641 	zio_t *lio = zio->io_logical;
3642 	blkptr_t *bp = zio->io_bp;
3643 	vdev_t *vd = zio->io_vd;
3644 	uint64_t psize = zio->io_size;
3645 	zio_t *pio, *pio_next;
3646 	metaslab_class_t *mc = spa_normal_class(spa);
3647 	zio_link_t *zl = NULL;
3648 
3649 	/*
3650 	 * If our children haven't all completed,
3651 	 * wait for them and then repeat this pipeline stage.
3652 	 */
3653 	if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
3654 		return (ZIO_PIPELINE_STOP);
3655 	}
3656 
3657 	/*
3658 	 * If the allocation throttle is enabled, then update the accounting.
3659 	 * We only track child I/Os that are part of an allocating async
3660 	 * write. We must do this since the allocation is performed
3661 	 * by the logical I/O but the actual write is done by child I/Os.
3662 	 */
3663 	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
3664 	    zio->io_child_type == ZIO_CHILD_VDEV) {
3665 		ASSERT(mc->mc_alloc_throttle_enabled);
3666 		zio_dva_throttle_done(zio);
3667 	}
3668 
3669 	/*
3670 	 * If the allocation throttle is enabled, verify that
3671 	 * we have decremented the refcounts for every I/O that was throttled.
3672 	 */
3673 	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3674 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3675 		ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
3676 		ASSERT(bp != NULL);
3677 		metaslab_group_alloc_verify(spa, zio->io_bp, zio,
3678 		    zio->io_allocator);
3679 		VERIFY(zfs_refcount_not_held(
3680 		    &mc->mc_alloc_slots[zio->io_allocator], zio));
3681 	}
3682 
3683 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
3684 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
3685 			ASSERT(zio->io_children[c][w] == 0);
3686 
3687 	if (bp != NULL && !BP_IS_EMBEDDED(bp)) {
3688 		ASSERT(bp->blk_pad[0] == 0);
3689 		ASSERT(bp->blk_pad[1] == 0);
3690 		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
3691 		    (bp == zio_unique_parent(zio)->io_bp));
3692 		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
3693 		    zio->io_bp_override == NULL &&
3694 		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
3695 			ASSERT(!BP_SHOULD_BYTESWAP(bp));
3696 			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
3697 			ASSERT(BP_COUNT_GANG(bp) == 0 ||
3698 			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
3699 		}
3700 		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
3701 			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
3702 	}
3703 
3704 	/*
3705 	 * If there were child vdev/gang/ddt errors, they apply to us now.
3706 	 */
3707 	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
3708 	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
3709 	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
3710 
3711 	/*
3712 	 * If the I/O on the transformed data was successful, generate any
3713 	 * checksum reports now while we still have the transformed data.
3714 	 */
3715 	if (zio->io_error == 0) {
3716 		while (zio->io_cksum_report != NULL) {
3717 			zio_cksum_report_t *zcr = zio->io_cksum_report;
3718 			uint64_t align = zcr->zcr_align;
3719 			uint64_t asize = P2ROUNDUP(psize, align);
3720 			char *abuf = NULL;
3721 			abd_t *adata = zio->io_abd;
3722 
3723 			if (asize != psize) {
3724 				adata = abd_alloc_linear(asize, B_TRUE);
3725 				abd_copy(adata, zio->io_abd, psize);
3726 				abd_zero_off(adata, psize, asize - psize);
3727 			}
3728 
3729 			if (adata != NULL)
3730 				abuf = abd_borrow_buf_copy(adata, asize);
3731 
3732 			zio->io_cksum_report = zcr->zcr_next;
3733 			zcr->zcr_next = NULL;
3734 			zcr->zcr_finish(zcr, abuf);
3735 			zfs_ereport_free_checksum(zcr);
3736 
3737 			if (adata != NULL)
3738 				abd_return_buf(adata, abuf, asize);
3739 
3740 			if (asize != psize)
3741 				abd_free(adata);
3742 		}
3743 	}
3744 
3745 	zio_pop_transforms(zio);	/* note: may set zio->io_error */
3746 
3747 	vdev_stat_update(zio, psize);
3748 
3749 	if (zio->io_error) {
3750 		/*
3751 		 * If this I/O is attached to a particular vdev,
3752 		 * generate an error message describing the I/O failure
3753 		 * at the block level.  We ignore these errors if the
3754 		 * device is currently unavailable.
3755 		 */
3756 		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
3757 			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);
3758 
3759 		if ((zio->io_error == EIO || !(zio->io_flags &
3760 		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
3761 		    zio == lio) {
3762 			/*
3763 			 * For logical I/O requests, tell the SPA to log the
3764 			 * error and generate a logical data ereport.
3765 			 */
3766 			spa_log_error(spa, zio);
3767 			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
3768 			    0, 0);
3769 		}
3770 	}
3771 
3772 	if (zio->io_error && zio == lio) {
3773 		/*
3774 		 * Determine whether zio should be reexecuted.  This will
3775 		 * propagate all the way to the root via zio_notify_parent().
3776 		 */
3777 		ASSERT(vd == NULL && bp != NULL);
3778 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3779 
3780 		if (IO_IS_ALLOCATING(zio) &&
3781 		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
3782 			if (zio->io_error != ENOSPC)
3783 				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
3784 			else
3785 				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3786 		}
3787 
3788 		if ((zio->io_type == ZIO_TYPE_READ ||
3789 		    zio->io_type == ZIO_TYPE_FREE) &&
3790 		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
3791 		    zio->io_error == ENXIO &&
3792 		    spa_load_state(spa) == SPA_LOAD_NONE &&
3793 		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
3794 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3795 
3796 		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
3797 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3798 
3799 		/*
3800 		 * Here is a possibly good place to attempt to do
3801 		 * either combinatorial reconstruction or error correction
3802 		 * based on checksums.  It also might be a good place
3803 		 * to send out preliminary ereports before we suspend
3804 		 * processing.
3805 		 */
3806 	}
3807 
3808 	/*
3809 	 * If there were logical child errors, they apply to us now.
3810 	 * We defer this until now to avoid conflating logical child
3811 	 * errors with errors that happened to the zio itself when
3812 	 * updating vdev stats and reporting FMA events above.
3813 	 */
3814 	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
3815 
3816 	if ((zio->io_error || zio->io_reexecute) &&
3817 	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
3818 	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
3819 		zio_dva_unallocate(zio, zio->io_gang_tree, bp);
3820 
3821 	zio_gang_tree_free(&zio->io_gang_tree);
3822 
3823 	/*
3824 	 * Godfather I/Os should never suspend.
3825 	 */
3826 	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
3827 	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
3828 		zio->io_reexecute = 0;
3829 
3830 	if (zio->io_reexecute) {
3831 		/*
3832 		 * This is a logical I/O that wants to reexecute.
3833 		 *
3834 		 * Reexecute is top-down.  When an i/o fails, if it's not
3835 		 * the root, it simply notifies its parent and sticks around.
3836 		 * The parent, seeing that it still has children in zio_done(),
3837 		 * does the same.  This percolates all the way up to the root.
3838 		 * The root i/o will reexecute or suspend the entire tree.
3839 		 *
3840 		 * This approach ensures that zio_reexecute() honors
3841 		 * all the original i/o dependency relationships, e.g.
3842 		 * parents not executing until children are ready.
3843 		 */
3844 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3845 
3846 		zio->io_gang_leader = NULL;
3847 
3848 		mutex_enter(&zio->io_lock);
3849 		zio->io_state[ZIO_WAIT_DONE] = 1;
3850 		mutex_exit(&zio->io_lock);
3851 
3852 		/*
3853 		 * "The Godfather" I/O monitors its children but is
3854 		 * not a true parent to them. It will track them through
3855 		 * the pipeline but severs its ties whenever they get into
3856 		 * trouble (e.g. suspended). This allows "The Godfather"
3857 		 * I/O to return status without blocking.
3858 		 */
3859 		zl = NULL;
3860 		for (pio = zio_walk_parents(zio, &zl); pio != NULL;
3861 		    pio = pio_next) {
3862 			zio_link_t *remove_zl = zl;
3863 			pio_next = zio_walk_parents(zio, &zl);
3864 
3865 			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
3866 			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
3867 				zio_remove_child(pio, zio, remove_zl);
3868 				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3869 			}
3870 		}
3871 
3872 		if ((pio = zio_unique_parent(zio)) != NULL) {
3873 			/*
3874 			 * We're not a root i/o, so there's nothing to do
3875 			 * but notify our parent.  Don't propagate errors
3876 			 * upward since we haven't permanently failed yet.
3877 			 */
3878 			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
3879 			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
3880 			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3881 		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
3882 			/*
3883 			 * We'd fail again if we reexecuted now, so suspend
3884 			 * until conditions improve (e.g. device comes online).
3885 			 */
3886 			zio_suspend(spa, zio);
3887 		} else {
3888 			/*
3889 			 * Reexecution is potentially a huge amount of work.
3890 			 * Hand it off to the otherwise-unused claim taskq.
3891 			 */
3892 			ASSERT(zio->io_tqent.tqent_next == NULL);
3893 			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
3894 			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
3895 			    0, &zio->io_tqent);
3896 		}
3897 		return (ZIO_PIPELINE_STOP);
3898 	}
3899 
3900 	ASSERT(zio->io_child_count == 0);
3901 	ASSERT(zio->io_reexecute == 0);
3902 	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
3903 
3904 	/*
3905 	 * Report any checksum errors, since the I/O is complete.
3906 	 */
3907 	while (zio->io_cksum_report != NULL) {
3908 		zio_cksum_report_t *zcr = zio->io_cksum_report;
3909 		zio->io_cksum_report = zcr->zcr_next;
3910 		zcr->zcr_next = NULL;
3911 		zcr->zcr_finish(zcr, NULL);
3912 		zfs_ereport_free_checksum(zcr);
3913 	}
3914 
3915 	/*
3916 	 * It is the responsibility of the done callback to ensure that this
3917 	 * particular zio is no longer discoverable for adoption, and as
3918 	 * such, cannot acquire any new parents.
3919 	 */
3920 	if (zio->io_done)
3921 		zio->io_done(zio);
3922 
3923 	mutex_enter(&zio->io_lock);
3924 	zio->io_state[ZIO_WAIT_DONE] = 1;
3925 	mutex_exit(&zio->io_lock);
3926 
3927 	zl = NULL;
3928 	for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
3929 		zio_link_t *remove_zl = zl;
3930 		pio_next = zio_walk_parents(zio, &zl);
3931 		zio_remove_child(pio, zio, remove_zl);
3932 		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3933 	}
3934 
3935 	if (zio->io_waiter != NULL) {
3936 		mutex_enter(&zio->io_lock);
3937 		zio->io_executor = NULL;
3938 		cv_broadcast(&zio->io_cv);
3939 		mutex_exit(&zio->io_lock);
3940 	} else {
3941 		zio_destroy(zio);
3942 	}
3943 
3944 	return (ZIO_PIPELINE_STOP);
3945 }
3946 
3947 /*
3948  * ==========================================================================
3949  * I/O pipeline definition
3950  * ==========================================================================
3951  */
3952 static zio_pipe_stage_t *zio_pipeline[] = {
3953 	NULL,
3954 	zio_read_bp_init,
3955 	zio_write_bp_init,
3956 	zio_free_bp_init,
3957 	zio_issue_async,
3958 	zio_write_compress,
3959 	zio_checksum_generate,
3960 	zio_nop_write,
3961 	zio_ddt_read_start,
3962 	zio_ddt_read_done,
3963 	zio_ddt_write,
3964 	zio_ddt_free,
3965 	zio_gang_assemble,
3966 	zio_gang_issue,
3967 	zio_dva_throttle,
3968 	zio_dva_allocate,
3969 	zio_dva_free,
3970 	zio_dva_claim,
3971 	zio_ready,
3972 	zio_vdev_io_start,
3973 	zio_vdev_io_done,
3974 	zio_vdev_io_assess,
3975 	zio_checksum_verify,
3976 	zio_done
3977 };
3978 
3979 
3980 
3981 
3982 /*
3983  * Compare two zbookmark_phys_t's to see which we would reach first in a
3984  * pre-order traversal of the object tree.
3985  *
3986  * This is simple in every case aside from the meta-dnode object. For all other
3987  * objects, we traverse them in order (object 1 before object 2, and so on).
3988  * However, all of these objects are traversed while traversing object 0, since
3989  * the data it points to is the list of objects.  Thus, we need to convert to a
3990  * canonical representation so we can compare meta-dnode bookmarks to
3991  * non-meta-dnode bookmarks.
3992  *
3993  * We do this by calculating "equivalents" for each field of the zbookmark.
3994  * zbookmarks outside of the meta-dnode use their own object and level, and
3995  * calculate the level 0 equivalent (the first L0 blkid that is contained in the
3996  * blocks this bookmark refers to) by multiplying their blkid by their span
3997  * (the number of L0 blocks contained within one block at their level).
3998  * zbookmarks inside the meta-dnode calculate their object equivalent
3999  * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
4000  * level + 1<<31 (any value larger than a level could ever be) for their level.
4001  * This causes them to always compare before a bookmark in their object
4002  * equivalent, compare appropriately to bookmarks in other objects, and to
4003  * compare appropriately to other bookmarks in the meta-dnode.
4004  */
4005 int
4006 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
4007     const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
4008 {
4009 	/*
4010 	 * These variables represent the "equivalent" values for the zbookmark,
4011 	 * after converting zbookmarks inside the meta dnode to their
4012 	 * normal-object equivalents.
4013 	 */
4014 	uint64_t zb1obj, zb2obj;
4015 	uint64_t zb1L0, zb2L0;
4016 	uint64_t zb1level, zb2level;
4017 
4018 	if (zb1->zb_object == zb2->zb_object &&
4019 	    zb1->zb_level == zb2->zb_level &&
4020 	    zb1->zb_blkid == zb2->zb_blkid)
4021 		return (0);
4022 
4023 	/*
4024 	 * BP_SPANB calculates the span in blocks.
4025 	 */
4026 	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
4027 	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
4028 
4029 	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
4030 		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
4031 		zb1L0 = 0;
4032 		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
4033 	} else {
4034 		zb1obj = zb1->zb_object;
4035 		zb1level = zb1->zb_level;
4036 	}
4037 
4038 	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
4039 		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
4040 		zb2L0 = 0;
4041 		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
4042 	} else {
4043 		zb2obj = zb2->zb_object;
4044 		zb2level = zb2->zb_level;
4045 	}
4046 
4047 	/* Now that we have a canonical representation, do the comparison. */
4048 	if (zb1obj != zb2obj)
4049 		return (zb1obj < zb2obj ? -1 : 1);
4050 	else if (zb1L0 != zb2L0)
4051 		return (zb1L0 < zb2L0 ? -1 : 1);
4052 	else if (zb1level != zb2level)
4053 		return (zb1level > zb2level ? -1 : 1);
4054 	/*
4055 	 * This can (theoretically) happen if the bookmarks have the same object
4056 	 * and level, but different blkids, if the block sizes are not the same.
4057 	 * There is presently no way to change the indirect block sizes
4058 	 */
4059 	return (0);
4060 }
4061 
4062 /*
4063  *  This function checks the following: given that last_block is the place that
4064  *  our traversal stopped last time, does that guarantee that we've visited
4065  *  every node under subtree_root?  Therefore, we can't just use the raw output
4066  *  of zbookmark_compare.  We have to pass in a modified version of
4067  *  subtree_root; by incrementing the block id, and then checking whether
4068  *  last_block is before or equal to that, we can tell whether or not having
4069  *  visited last_block implies that all of subtree_root's children have been
4070  *  visited.
4071  */
4072 boolean_t
4073 zbookmark_subtree_completed(const dnode_phys_t *dnp,
4074     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
4075 {
4076 	zbookmark_phys_t mod_zb = *subtree_root;
4077 	mod_zb.zb_blkid++;
4078 	ASSERT(last_block->zb_level == 0);
4079 
4080 	/* The objset_phys_t isn't before anything. */
4081 	if (dnp == NULL)
4082 		return (B_FALSE);
4083 
4084 	/*
4085 	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
4086 	 * data block size in sectors, because that variable is only used if
4087 	 * the bookmark refers to a block in the meta-dnode.  Since we don't
4088 	 * know without examining it what object it refers to, and there's no
4089 	 * harm in passing in this value in other cases, we always pass it in.
4090 	 *
4091 	 * We pass in 0 for the indirect block size shift because zb2 must be
4092 	 * level 0.  The indirect block size is only used to calculate the span
4093 	 * of the bookmark, but since the bookmark must be level 0, the span is
4094 	 * always 1, so the math works out.
4095 	 *
4096 	 * If you make changes to how the zbookmark_compare code works, be sure
4097 	 * to make sure that this code still works afterwards.
4098 	 */
4099 	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
4100 	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
4101 	    last_block) <= 0);
4102 }
4103