xref: /illumos-gate/usr/src/uts/common/fs/zfs/zio.c (revision dcbf3bd6a1f1360fc1afcee9e22c6dcff7844bf2)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

#define	ZIO_PIPELINE_CONTINUE		0x100
#define	ZIO_PIPELINE_STOP		0x101

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
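/*
 * An illustrative worked case (not original commentary): with a 16K
 * indirect block size (indblkshift 14) and SPA_BLKPTRSHIFT == 7,
 * BP_SPANB(14, 1) == 1 << 7 == 128, i.e. each level-1 block pointer
 * spans 128 level-0 blocks.
 */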
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
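/*
 * With the defaults above: sync pass 1 frees blocks immediately and
 * compresses; from pass 2 onward frees are deferred and same-size
 * overwrites are rewritten in place; from pass 5 onward compression is
 * disabled, so block sizes stop changing and spa_sync() can converge.
 */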

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t	zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    cflags | KMC_NOTOUCH);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}

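/*
 * A worked example of the index math above (illustrative aside): a
 * 3584-byte request gives c = (3584 - 1) >> SPA_MINBLOCKSHIFT = 6,
 * selecting the zio_buf_3584 cache; any size whose exact cache was not
 * created is served by the next larger cache, thanks to the fill loop
 * at the end of zio_init().
 */
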
/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we limit the amount
 * of ZFS data that shows up in a kernel crashdump (thus reducing the amount
 * of kernel heap dumped to disk when the kernel panics).
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

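	/*
	 * Transforms pop in LIFO order; each callback consumes the
	 * current io_data/io_size and fills the zt_orig buffer that is
	 * restored below.
	 */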
	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, data, zio->io_size, size) != 0)
		zio->io_error = SET_ERROR(EIO);
}

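/*
 * Minimal sketch of how a transform is staged (hypothetical helper;
 * the read path does the equivalent in zio_read_bp_init() below):
 * redirect the I/O into a scratch buffer of the physical size, and let
 * zio_pop_transforms() decompress into the original buffer.
 */
static void
zio_example_stage_decompress(zio_t *zio, uint64_t psize)
{
	void *cbuf = zio_buf_alloc(psize);

	/* nonzero bufsize means the pop path frees cbuf for us */
	zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
}
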
/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children() must
 *        continue calling these functions until they return NULL.
 *        Otherwise, the next caller will pick up the list walk in
 *        some indeterminate state.  (Otherwise every caller would
 *        have to pass in a cookie to keep the state represented by
 *        io_walk_link, which gets annoying.)
 */
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

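/*
 * Minimal sketch of a complete walk (hypothetical caller, not part of
 * the pipeline): the loop runs until NULL so that io_walk_link is
 * reset for whoever walks this zio's parents next.
 */
static int
zio_example_count_parents(zio_t *cio)
{
	int count = 0;

	for (zio_t *pio = zio_walk_parents(cio); pio != NULL;
	    pio = zio_walk_parents(cio))
		count++;

	return (count);
}
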
void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
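		/*
		 * Back io_stage up one bit: when the last outstanding
		 * child calls zio_notify_parent(), zio_execute() will
		 * shift forward again and re-run this same stage.
		 */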
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_phys_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, that are not going to
	 * be deferred, and that will not need to do a read (i.e. not GANG or
	 * DEDUP) can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);

	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	dprintf_bp(bp, "claiming in txg %llu", txg);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		decode_embedded_bp_compressed(bp, zio->io_data);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_write_bp_init(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_size;
	uint64_t psize = lsize;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	if (zio->io_children_ready != NULL) {
		/*
		 * Now that all our children are ready, run the callback
		 * associated with this zio in case it wants to modify the
		 * data to be written.
		 */
		ASSERT3U(zp->zp_level, >, 0);
		zio->io_children_ready(zio);
	}

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (ZIO_PIPELINE_CONTINUE);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}
		zio->io_bp_override = NULL;
		BP_ZERO(bp);
	}

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	if (compress != ZIO_COMPRESS_OFF) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (ZIO_PIPELINE_CONTINUE);
		} else {
			/*
			 * Round the compressed size up to the ashift
			 * of the smallest-ashift device, and zero the tail.
			 * This ensures that the compressed size of the BP
			 * (and thus the compressratio property) is correct,
			 * in that we charge for the padding used to fill out
			 * the last sector.
			 */
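			/*
			 * For example (assuming ashift 12, i.e. 4K
			 * sectors): a 5000-byte compression result is
			 * charged as 8192 bytes, and if that rounded
			 * size reached lsize we would store the block
			 * uncompressed instead.
			 */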
			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
			size_t rounded = (size_t)P2ROUNDUP(psize,
			    1ULL << spa->spa_min_ashift);
			if (rounded >= lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
				psize = lsize;
			} else {
				bzero((char *)cbuf + psize, rounded - psize);
				psize = rounded;
				zio_push_transform(zio, cbuf,
				    psize, lsize, NULL);
			}
		}
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		ASSERT(psize != 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
	ASSERT(zio->io_tqent.tqent_next == NULL);
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (taskq_member(tqs->stqs_taskq[i], executor))
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static int
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (ZIO_PIPELINE_STOP);
}

void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

void
zio_delay_interrupt(zio_t *zio)
{
	/*
	 * The timeout_generic() function isn't defined in userspace, so
	 * rather than trying to implement the function, the zio delay
	 * functionality has been disabled for userspace builds.
	 */

#ifdef _KERNEL
	/*
	 * If io_target_timestamp is zero, then no delay has been registered
	 * for this IO; skip the delay and issue the IO directly to the zio
	 * layer.
	 */
	if (zio->io_target_timestamp != 0) {
		hrtime_t now = gethrtime();

		if (now >= zio->io_target_timestamp) {
			/*
			 * This IO has already taken longer than the target
			 * delay to complete, so we don't want to delay it
			 * any longer; we "miss" the delay and issue it
			 * directly to the zio layer. This is likely due to
			 * the target latency being set to a value less than
			 * the underlying hardware can satisfy (e.g. delay
			 * set to 1ms, but the disks take 10ms to complete an
			 * IO request).
			 */

			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
			    hrtime_t, now);

			zio_interrupt(zio);
		} else {
			hrtime_t diff = zio->io_target_timestamp - now;

			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
			    hrtime_t, now, hrtime_t, diff);

			(void) timeout_generic(CALLOUT_NORMAL,
			    (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
		}

		return;
	}
#endif

	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
	zio_interrupt(zio);
}

/*
 * Execute the I/O pipeline until one of the following occurs:
 *
 *	(1) the I/O completes
 *	(2) the pipeline stalls waiting for dependent child I/Os
 *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
 *	(4) the I/O is delegated by vdev-level caching or aggregation
 *	(5) the I/O is deferred due to vdev-level queueing
 *	(6) the I/O is handed off to another thread.
 *
 * In all cases, the pipeline stops whenever there's no CPU work; it never
 * burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

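		/*
		 * Advance to the next stage bit that is actually set
		 * in this zio's pipeline mask.
		 */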
		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[highbit64(stage) - 1](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O which
		 * will ensure they complete prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio);
	}

	zio_execute(zio);
}

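/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * a root zio lets a group of async reads be issued with zio_nowait()
 * and then waited on collectively; the bookmark and buffers are
 * assumed to be set up by the caller.
 */
static int
zio_example_read_group(spa_t *spa, const blkptr_t *bps[], void *bufs[],
    int nbps, const zbookmark_phys_t *zb)
{
	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	for (int i = 0; i < nbps; i++)
		zio_nowait(zio_read(rio, spa, bps[i], bufs[i],
		    BP_GET_PSIZE(bps[i]), NULL, NULL,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));

	/* the children attached to rio above all complete before this */
	return (zio_wait(rio));
}
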
1533 /*
1534  * ==========================================================================
1535  * Reexecute or suspend/resume failed I/O
1536  * ==========================================================================
1537  */
1538 
1539 static void
1540 zio_reexecute(zio_t *pio)
1541 {
1542 	zio_t *cio, *cio_next;
1543 
1544 	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
1545 	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
1546 	ASSERT(pio->io_gang_leader == NULL);
1547 	ASSERT(pio->io_gang_tree == NULL);
1548 
1549 	pio->io_flags = pio->io_orig_flags;
1550 	pio->io_stage = pio->io_orig_stage;
1551 	pio->io_pipeline = pio->io_orig_pipeline;
1552 	pio->io_reexecute = 0;
1553 	pio->io_flags |= ZIO_FLAG_REEXECUTED;
1554 	pio->io_error = 0;
1555 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1556 		pio->io_state[w] = 0;
1557 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
1558 		pio->io_child_error[c] = 0;
1559 
1560 	if (IO_IS_ALLOCATING(pio))
1561 		BP_ZERO(pio->io_bp);
1562 
1563 	/*
1564 	 * As we reexecute pio's children, new children could be created.
1565 	 * New children go to the head of pio's io_child_list, however,
1566 	 * so we will (correctly) not reexecute them.  The key is that
1567 	 * the remainder of pio's io_child_list, from 'cio_next' onward,
1568 	 * cannot be affected by any side effects of reexecuting 'cio'.
1569 	 */
1570 	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
1571 		cio_next = zio_walk_children(pio);
1572 		mutex_enter(&pio->io_lock);
1573 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1574 			pio->io_children[cio->io_child_type][w]++;
1575 		mutex_exit(&pio->io_lock);
1576 		zio_reexecute(cio);
1577 	}
1578 
1579 	/*
1580 	 * Now that all children have been reexecuted, execute the parent.
1581 	 * We don't reexecute "The Godfather" I/O here as it's the
1582 	 * responsibility of the caller to wait on him.
1583 	 */
1584 	if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
1585 		zio_execute(pio);
1586 }
1587 
1588 void
1589 zio_suspend(spa_t *spa, zio_t *zio)
1590 {
1591 	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
1592 		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
1593 		    "failure and the failure mode property for this pool "
1594 		    "is set to panic.", spa_name(spa));
1595 
1596 	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);
1597 
1598 	mutex_enter(&spa->spa_suspend_lock);
1599 
1600 	if (spa->spa_suspend_zio_root == NULL)
1601 		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
1602 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
1603 		    ZIO_FLAG_GODFATHER);
1604 
1605 	spa->spa_suspended = B_TRUE;
1606 
1607 	if (zio != NULL) {
1608 		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
1609 		ASSERT(zio != spa->spa_suspend_zio_root);
1610 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1611 		ASSERT(zio_unique_parent(zio) == NULL);
1612 		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
1613 		zio_add_child(spa->spa_suspend_zio_root, zio);
1614 	}
1615 
1616 	mutex_exit(&spa->spa_suspend_lock);
1617 }
1618 
1619 int
1620 zio_resume(spa_t *spa)
1621 {
1622 	zio_t *pio;
1623 
1624 	/*
1625 	 * Reexecute all previously suspended i/o.
1626 	 */
1627 	mutex_enter(&spa->spa_suspend_lock);
1628 	spa->spa_suspended = B_FALSE;
1629 	cv_broadcast(&spa->spa_suspend_cv);
1630 	pio = spa->spa_suspend_zio_root;
1631 	spa->spa_suspend_zio_root = NULL;
1632 	mutex_exit(&spa->spa_suspend_lock);
1633 
1634 	if (pio == NULL)
1635 		return (0);
1636 
1637 	zio_reexecute(pio);
1638 	return (zio_wait(pio));
1639 }
1640 
1641 void
1642 zio_resume_wait(spa_t *spa)
1643 {
1644 	mutex_enter(&spa->spa_suspend_lock);
1645 	while (spa_suspended(spa))
1646 		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
1647 	mutex_exit(&spa->spa_suspend_lock);
1648 }
1649 
1650 /*
1651  * ==========================================================================
1652  * Gang blocks.
1653  *
1654  * A gang block is a collection of small blocks that looks to the DMU
1655  * like one large block.  When zio_dva_allocate() cannot find a block
1656  * of the requested size, due to either severe fragmentation or the pool
1657  * being nearly full, it calls zio_write_gang_block() to construct the
1658  * block from smaller fragments.
1659  *
1660  * A gang block consists of a gang header (zio_gbh_phys_t) and up to
1661  * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
1662  * an indirect block: it's an array of block pointers.  It consumes
1663  * only one sector and hence is allocatable regardless of fragmentation.
1664  * The gang header's bps point to its gang members, which hold the data.
1665  *
1666  * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
1667  * as the verifier to ensure uniqueness of the SHA256 checksum.
1668  * Critically, the gang block bp's blk_cksum is the checksum of the data,
1669  * not the gang header.  This ensures that data block signatures (needed for
1670  * deduplication) are independent of how the block is physically stored.
1671  *
1672  * Gang blocks can be nested: a gang member may itself be a gang block.
1673  * Thus every gang block is a tree in which root and all interior nodes are
1674  * gang headers, and the leaves are normal blocks that contain user data.
1675  * The root of the gang tree is called the gang leader.
1676  *
1677  * To perform any operation (read, rewrite, free, claim) on a gang block,
1678  * zio_gang_assemble() first assembles the gang tree (minus data leaves)
1679  * in the io_gang_tree field of the original logical i/o by recursively
1680  * reading the gang leader and all gang headers below it.  This yields
1681  * an in-core tree containing the contents of every gang header and the
1682  * bps for every constituent of the gang block.
1683  *
1684  * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
1685  * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
1686  * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
1687  * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
1688  * zio_read_gang() is a wrapper around zio_read() that omits reading gang
1689  * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
1690  * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
1691  * of the gang header plus zio_checksum_compute() of the data to update the
1692  * gang header's blk_cksum as described above.
1693  *
1694  * The two-phase assemble/issue model solves the problem of partial failure --
1695  * what if you'd freed part of a gang block but then couldn't read the
1696  * gang header for another part?  Assembling the entire gang tree first
1697  * ensures that all the necessary gang header I/O has succeeded before
1698  * starting the actual work of free, claim, or write.  Once the gang tree
1699  * is assembled, free and claim are in-memory operations that cannot fail.
1700  *
1701  * In the event that a gang write fails, zio_dva_unallocate() walks the
1702  * gang tree to immediately free (i.e. insert back into the space map)
1703  * everything we've allocated.  This ensures that we don't get ENOSPC
1704  * errors during repeated suspend/resume cycles due to a flaky device.
1705  *
1706  * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
1707  * the gang tree, we won't modify the block, so we can safely defer the free
1708  * (knowing that the block is still intact).  If we *can* assemble the gang
1709  * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
1710  * each constituent bp and we can allocate a new block on the next sync pass.
1711  *
1712  * In all cases, the gang tree allows complete recovery from partial failure.
1713  * ==========================================================================
1714  */
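
/*
 * Example (one possible layout; the actual shape depends on what the
 * allocator can find): a 96K write that could not be allocated contiguously
 * might gang as
 *
 *			GBH (gang leader)
 *		       /      |      \
 *		     32K     32K     GBH	<- a member that ganged again
 *				    /  |  \
 *				 16K  8K  8K
 *
 * zio_gang_assemble() reads both gang headers into io_gang_tree;
 * zio_gang_issue() then walks the tree, applying the per-type callback
 * below to every bp.
 */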
1715 
1716 static zio_t *
1717 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
1718 {
1719 	if (gn != NULL)
1720 		return (pio);
1721 
1722 	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
1723 	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
1724 	    &pio->io_bookmark));
1725 }
1726 
1727 zio_t *
1728 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
1729 {
1730 	zio_t *zio;
1731 
1732 	if (gn != NULL) {
1733 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
1734 		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority,
1735 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
1736 		/*
1737 		 * As we rewrite each gang header, the pipeline will compute
1738 		 * a new gang block header checksum for it; but no one will
1739 		 * compute a new data checksum, so we do that here.  The one
1740 		 * exception is the gang leader: the pipeline already computed
1741 		 * its data checksum because that stage precedes gang assembly.
1742 		 * (Presently, nothing actually uses interior data checksums;
1743 		 * this is just good hygiene.)
1744 		 */
1745 		if (gn != pio->io_gang_leader->io_gang_tree) {
1746 			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
1747 			    data, BP_GET_PSIZE(bp));
1748 		}
1749 		/*
1750 		 * If we are here to damage data for testing purposes,
1751 		 * leave the GBH alone so that we can detect the damage.
1752 		 */
1753 		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
1754 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
1755 	} else {
1756 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
1757 		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
1758 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
1759 	}
1760 
1761 	return (zio);
1762 }
1763 
1764 /* ARGSUSED */
1765 zio_t *
1766 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
1767 {
1768 	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
1769 	    ZIO_GANG_CHILD_FLAGS(pio)));
1770 }
1771 
1772 /* ARGSUSED */
1773 zio_t *
1774 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
1775 {
1776 	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
1777 	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
1778 }
1779 
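/*
 * One entry per zio_type_t, indexed by the gang leader's io_type in
 * zio_gang_tree_issue().  Null and ioctl zios have no gang semantics,
 * hence the NULL entries.
 */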
1780 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
1781 	NULL,
1782 	zio_read_gang,
1783 	zio_rewrite_gang,
1784 	zio_free_gang,
1785 	zio_claim_gang,
1786 	NULL
1787 };
1788 
1789 static void zio_gang_tree_assemble_done(zio_t *zio);
1790 
1791 static zio_gang_node_t *
1792 zio_gang_node_alloc(zio_gang_node_t **gnpp)
1793 {
1794 	zio_gang_node_t *gn;
1795 
1796 	ASSERT(*gnpp == NULL);
1797 
1798 	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
1799 	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
1800 	*gnpp = gn;
1801 
1802 	return (gn);
1803 }
1804 
1805 static void
1806 zio_gang_node_free(zio_gang_node_t **gnpp)
1807 {
1808 	zio_gang_node_t *gn = *gnpp;
1809 
1810 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
1811 		ASSERT(gn->gn_child[g] == NULL);
1812 
1813 	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
1814 	kmem_free(gn, sizeof (*gn));
1815 	*gnpp = NULL;
1816 }
1817 
1818 static void
1819 zio_gang_tree_free(zio_gang_node_t **gnpp)
1820 {
1821 	zio_gang_node_t *gn = *gnpp;
1822 
1823 	if (gn == NULL)
1824 		return;
1825 
1826 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
1827 		zio_gang_tree_free(&gn->gn_child[g]);
1828 
1829 	zio_gang_node_free(gnpp);
1830 }
1831 
1832 static void
1833 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
1834 {
1835 	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
1836 
1837 	ASSERT(gio->io_gang_leader == gio);
1838 	ASSERT(BP_IS_GANG(bp));
1839 
1840 	zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh,
1841 	    SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn,
1842 	    gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
1843 }
1844 
1845 static void
1846 zio_gang_tree_assemble_done(zio_t *zio)
1847 {
1848 	zio_t *gio = zio->io_gang_leader;
1849 	zio_gang_node_t *gn = zio->io_private;
1850 	blkptr_t *bp = zio->io_bp;
1851 
1852 	ASSERT(gio == zio_unique_parent(zio));
1853 	ASSERT(zio->io_child_count == 0);
1854 
1855 	if (zio->io_error)
1856 		return;
1857 
1858 	if (BP_SHOULD_BYTESWAP(bp))
1859 		byteswap_uint64_array(zio->io_data, zio->io_size);
1860 
1861 	ASSERT(zio->io_data == gn->gn_gbh);
1862 	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
1863 	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
1864 
1865 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
1866 		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
1867 		if (!BP_IS_GANG(gbp))
1868 			continue;
1869 		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
1870 	}
1871 }
1872 
1873 static void
1874 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
1875 {
1876 	zio_t *gio = pio->io_gang_leader;
1877 	zio_t *zio;
1878 
1879 	ASSERT(BP_IS_GANG(bp) == !!gn);
1880 	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
1881 	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
1882 
1883 	/*
1884 	 * If you're a gang header, your data is in gn->gn_gbh.
1885 	 * If you're a gang member, your data is in 'data' and gn == NULL.
1886 	 */
1887 	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);
1888 
1889 	if (gn != NULL) {
1890 		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
1891 
1892 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
1893 			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
1894 			if (BP_IS_HOLE(gbp))
1895 				continue;
1896 			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
1897 			data = (char *)data + BP_GET_PSIZE(gbp);
1898 		}
1899 	}
1900 
1901 	if (gn == gio->io_gang_tree)
1902 		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);
1903 
1904 	if (zio != pio)
1905 		zio_nowait(zio);
1906 }
1907 
1908 static int
1909 zio_gang_assemble(zio_t *zio)
1910 {
1911 	blkptr_t *bp = zio->io_bp;
1912 
1913 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
1914 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
1915 
1916 	zio->io_gang_leader = zio;
1917 
1918 	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
1919 
1920 	return (ZIO_PIPELINE_CONTINUE);
1921 }
1922 
1923 static int
1924 zio_gang_issue(zio_t *zio)
1925 {
1926 	blkptr_t *bp = zio->io_bp;
1927 
1928 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
1929 		return (ZIO_PIPELINE_STOP);
1930 
1931 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
1932 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
1933 
1934 	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
1935 		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
1936 	else
1937 		zio_gang_tree_free(&zio->io_gang_tree);
1938 
1939 	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1940 
1941 	return (ZIO_PIPELINE_CONTINUE);
1942 }
1943 
1944 static void
1945 zio_write_gang_member_ready(zio_t *zio)
1946 {
1947 	zio_t *pio = zio_unique_parent(zio);
1948 	zio_t *gio = zio->io_gang_leader;
1949 	dva_t *cdva = zio->io_bp->blk_dva;
1950 	dva_t *pdva = pio->io_bp->blk_dva;
1951 	uint64_t asize;
1952 
1953 	if (BP_IS_HOLE(zio->io_bp))
1954 		return;
1955 
1956 	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
1957 
1958 	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
1959 	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
1960 	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
1961 	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
1962 	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
1963 
1964 	mutex_enter(&pio->io_lock);
1965 	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
1966 		ASSERT(DVA_GET_GANG(&pdva[d]));
1967 		asize = DVA_GET_ASIZE(&pdva[d]);
1968 		asize += DVA_GET_ASIZE(&cdva[d]);
1969 		DVA_SET_ASIZE(&pdva[d], asize);
1970 	}
1971 	mutex_exit(&pio->io_lock);
1972 }
1973 
1974 static int
1975 zio_write_gang_block(zio_t *pio)
1976 {
1977 	spa_t *spa = pio->io_spa;
1978 	blkptr_t *bp = pio->io_bp;
1979 	zio_t *gio = pio->io_gang_leader;
1980 	zio_t *zio;
1981 	zio_gang_node_t *gn, **gnpp;
1982 	zio_gbh_phys_t *gbh;
1983 	uint64_t txg = pio->io_txg;
1984 	uint64_t resid = pio->io_size;
1985 	uint64_t lsize;
1986 	int copies = gio->io_prop.zp_copies;
1987 	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
1988 	zio_prop_t zp;
1989 	int error;
1990 
1991 	error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE,
1992 	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp,
1993 	    METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER);
1994 	if (error) {
1995 		pio->io_error = error;
1996 		return (ZIO_PIPELINE_CONTINUE);
1997 	}
1998 
1999 	if (pio == gio) {
2000 		gnpp = &gio->io_gang_tree;
2001 	} else {
2002 		gnpp = pio->io_private;
2003 		ASSERT(pio->io_ready == zio_write_gang_member_ready);
2004 	}
2005 
2006 	gn = zio_gang_node_alloc(gnpp);
2007 	gbh = gn->gn_gbh;
2008 	bzero(gbh, SPA_GANGBLOCKSIZE);
2009 
2010 	/*
2011 	 * Create the gang header.
2012 	 */
2013 	zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
2014 	    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2015 
2016 	/*
2017 	 * Create and nowait the gang children.
2018 	 */
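	/*
	 * Each iteration splits the remaining data evenly among the
	 * remaining bps, rounded up to SPA_MINBLOCKSIZE; e.g. with 96K
	 * left and three bps remaining, each gang member gets 32K.
	 */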
2019 	for (int g = 0; resid != 0; resid -= lsize, g++) {
2020 		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
2021 		    SPA_MINBLOCKSIZE);
2022 		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
2023 
2024 		zp.zp_checksum = gio->io_prop.zp_checksum;
2025 		zp.zp_compress = ZIO_COMPRESS_OFF;
2026 		zp.zp_type = DMU_OT_NONE;
2027 		zp.zp_level = 0;
2028 		zp.zp_copies = gio->io_prop.zp_copies;
2029 		zp.zp_dedup = B_FALSE;
2030 		zp.zp_dedup_verify = B_FALSE;
2031 		zp.zp_nopwrite = B_FALSE;
2032 
2033 		zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
2034 		    (char *)pio->io_data + (pio->io_size - resid), lsize, &zp,
2035 		    zio_write_gang_member_ready, NULL, NULL, NULL,
2036 		    &gn->gn_child[g], pio->io_priority,
2037 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark));
2038 	}
2039 
2040 	/*
2041 	 * Set pio's pipeline to just wait for zio to finish.
2042 	 */
2043 	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2044 
2045 	zio_nowait(zio);
2046 
2047 	return (ZIO_PIPELINE_CONTINUE);
2048 }
2049 
2050 /*
2051  * The zio_nop_write stage in the pipeline determines if allocating a
2052  * new bp is necessary.  The nopwrite feature can handle writes in
2053  * either syncing or open context (i.e. zil writes) and as a result is
2054  * mutually exclusive with dedup.
2055  *
2056  * By leveraging a cryptographically secure checksum, such as SHA256, we
2057  * can compare the checksums of the new data and the old to determine if
2058  * allocating a new block is required.  Note that our requirements for
2059  * cryptographic strength are fairly weak: there can't be any accidental
2060  * hash collisions, but we don't need to be secure against intentional
2061  * (malicious) collisions.  To trigger a nopwrite, you have to be able
2062  * to write the file to begin with, and triggering an incorrect (hash
2063  * collision) nopwrite is no worse than simply writing to the file.
2064  * That said, there are no known attacks against the checksum algorithms
2065  * used for nopwrite, assuming that the salt and the checksums
2066  * themselves remain secret.
2067  */
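/*
 * A sketch of the effect (hypothetical values): rewriting an unchanged
 * record with checksum=sha256 and compression enabled yields a bp whose
 * blk_cksum matches bp_orig's, so the stage below copies bp_orig into bp,
 * sets ZIO_FLAG_NOPWRITE, and trims the pipeline to ZIO_INTERLOCK_PIPELINE:
 * no DVA allocation, no vdev I/O.
 */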
2068 static int
2069 zio_nop_write(zio_t *zio)
2070 {
2071 	blkptr_t *bp = zio->io_bp;
2072 	blkptr_t *bp_orig = &zio->io_bp_orig;
2073 	zio_prop_t *zp = &zio->io_prop;
2074 
2075 	ASSERT(BP_GET_LEVEL(bp) == 0);
2076 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2077 	ASSERT(zp->zp_nopwrite);
2078 	ASSERT(!zp->zp_dedup);
2079 	ASSERT(zio->io_bp_override == NULL);
2080 	ASSERT(IO_IS_ALLOCATING(zio));
2081 
2082 	/*
2083 	 * Check to see if the original bp and the new bp have matching
2084 	 * characteristics (i.e. same checksum, compression algorithms, etc.).
2085 	 * If they don't, just continue with the pipeline, which will
2086 	 * allocate a new bp.
2087 	 */
2088 	if (BP_IS_HOLE(bp_orig) ||
2089 	    !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
2090 	    ZCHECKSUM_FLAG_NOPWRITE) ||
2091 	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
2092 	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
2093 	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
2094 	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
2095 		return (ZIO_PIPELINE_CONTINUE);
2096 
2097 	/*
2098 	 * If the checksums match then reset the pipeline so that we
2099 	 * avoid allocating a new bp and issuing any I/O.
2100 	 */
2101 	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
2102 		ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
2103 		    ZCHECKSUM_FLAG_NOPWRITE);
2104 		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
2105 		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
2106 		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
2107 		ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
2108 		    sizeof (uint64_t)) == 0);
2109 
2110 		*bp = *bp_orig;
2111 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2112 		zio->io_flags |= ZIO_FLAG_NOPWRITE;
2113 	}
2114 
2115 	return (ZIO_PIPELINE_CONTINUE);
2116 }
2117 
2118 /*
2119  * ==========================================================================
2120  * Dedup
2121  * ==========================================================================
2122  */
2123 static void
2124 zio_ddt_child_read_done(zio_t *zio)
2125 {
2126 	blkptr_t *bp = zio->io_bp;
2127 	ddt_entry_t *dde = zio->io_private;
2128 	ddt_phys_t *ddp;
2129 	zio_t *pio = zio_unique_parent(zio);
2130 
2131 	mutex_enter(&pio->io_lock);
2132 	ddp = ddt_phys_select(dde, bp);
2133 	if (zio->io_error == 0)
2134 		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
2135 	if (zio->io_error == 0 && dde->dde_repair_data == NULL)
2136 		dde->dde_repair_data = zio->io_data;
2137 	else
2138 		zio_buf_free(zio->io_data, zio->io_size);
2139 	mutex_exit(&pio->io_lock);
2140 }
2141 
2142 static int
2143 zio_ddt_read_start(zio_t *zio)
2144 {
2145 	blkptr_t *bp = zio->io_bp;
2146 
2147 	ASSERT(BP_GET_DEDUP(bp));
2148 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
2149 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2150 
2151 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
2152 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
2153 		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
2154 		ddt_phys_t *ddp = dde->dde_phys;
2155 		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
2156 		blkptr_t blk;
2157 
2158 		ASSERT(zio->io_vsd == NULL);
2159 		zio->io_vsd = dde;
2160 
2161 		if (ddp_self == NULL)
2162 			return (ZIO_PIPELINE_CONTINUE);
2163 
2164 		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
2165 			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
2166 				continue;
2167 			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
2168 			    &blk);
2169 			zio_nowait(zio_read(zio, zio->io_spa, &blk,
2170 			    zio_buf_alloc(zio->io_size), zio->io_size,
2171 			    zio_ddt_child_read_done, dde, zio->io_priority,
2172 			    ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE,
2173 			    &zio->io_bookmark));
2174 		}
2175 		return (ZIO_PIPELINE_CONTINUE);
2176 	}
2177 
2178 	zio_nowait(zio_read(zio, zio->io_spa, bp,
2179 	    zio->io_data, zio->io_size, NULL, NULL, zio->io_priority,
2180 	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
2181 
2182 	return (ZIO_PIPELINE_CONTINUE);
2183 }
2184 
2185 static int
2186 zio_ddt_read_done(zio_t *zio)
2187 {
2188 	blkptr_t *bp = zio->io_bp;
2189 
2190 	if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE))
2191 		return (ZIO_PIPELINE_STOP);
2192 
2193 	ASSERT(BP_GET_DEDUP(bp));
2194 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
2195 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2196 
2197 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
2198 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
2199 		ddt_entry_t *dde = zio->io_vsd;
2200 		if (ddt == NULL) {
2201 			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
2202 			return (ZIO_PIPELINE_CONTINUE);
2203 		}
2204 		if (dde == NULL) {
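			/*
			 * The failed read never reached the repair logic in
			 * zio_ddt_read_start(), so back up one stage and
			 * rerun it; it will take the repair path this time.
			 */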
2205 			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
2206 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2207 			return (ZIO_PIPELINE_STOP);
2208 		}
2209 		if (dde->dde_repair_data != NULL) {
2210 			bcopy(dde->dde_repair_data, zio->io_data, zio->io_size);
2211 			zio->io_child_error[ZIO_CHILD_DDT] = 0;
2212 		}
2213 		ddt_repair_done(ddt, dde);
2214 		zio->io_vsd = NULL;
2215 	}
2216 
2217 	ASSERT(zio->io_vsd == NULL);
2218 
2219 	return (ZIO_PIPELINE_CONTINUE);
2220 }
2221 
2222 static boolean_t
2223 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
2224 {
2225 	spa_t *spa = zio->io_spa;
2226 
2227 	/*
2228 	 * Note: we compare the original data, not the transformed data,
2229 	 * because when zio->io_bp is an override bp, we will not have
2230 	 * pushed the I/O transforms.  That's an important optimization
2231 	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
2232 	 */
2233 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
2234 		zio_t *lio = dde->dde_lead_zio[p];
2235 
2236 		if (lio != NULL) {
2237 			return (lio->io_orig_size != zio->io_orig_size ||
2238 			    bcmp(zio->io_orig_data, lio->io_orig_data,
2239 			    zio->io_orig_size) != 0);
2240 		}
2241 	}
2242 
2243 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
2244 		ddt_phys_t *ddp = &dde->dde_phys[p];
2245 
2246 		if (ddp->ddp_phys_birth != 0) {
2247 			arc_buf_t *abuf = NULL;
2248 			arc_flags_t aflags = ARC_FLAG_WAIT;
2249 			blkptr_t blk = *zio->io_bp;
2250 			int error;
2251 
2252 			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
2253 
2254 			ddt_exit(ddt);
2255 
2256 			error = arc_read(NULL, spa, &blk,
2257 			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
2258 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2259 			    &aflags, &zio->io_bookmark);
2260 
2261 			if (error == 0) {
2262 				if (arc_buf_size(abuf) != zio->io_orig_size ||
2263 				    bcmp(abuf->b_data, zio->io_orig_data,
2264 				    zio->io_orig_size) != 0)
2265 					error = SET_ERROR(EEXIST);
2266 				arc_buf_destroy(abuf, &abuf);
2267 			}
2268 
2269 			ddt_enter(ddt);
2270 			return (error != 0);
2271 		}
2272 	}
2273 
2274 	return (B_FALSE);
2275 }
2276 
2277 static void
2278 zio_ddt_child_write_ready(zio_t *zio)
2279 {
2280 	int p = zio->io_prop.zp_copies;
2281 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
2282 	ddt_entry_t *dde = zio->io_private;
2283 	ddt_phys_t *ddp = &dde->dde_phys[p];
2284 	zio_t *pio;
2285 
2286 	if (zio->io_error)
2287 		return;
2288 
2289 	ddt_enter(ddt);
2290 
2291 	ASSERT(dde->dde_lead_zio[p] == zio);
2292 
2293 	ddt_phys_fill(ddp, zio->io_bp);
2294 
2295 	while ((pio = zio_walk_parents(zio)) != NULL)
2296 		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
2297 
2298 	ddt_exit(ddt);
2299 }
2300 
2301 static void
2302 zio_ddt_child_write_done(zio_t *zio)
2303 {
2304 	int p = zio->io_prop.zp_copies;
2305 	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
2306 	ddt_entry_t *dde = zio->io_private;
2307 	ddt_phys_t *ddp = &dde->dde_phys[p];
2308 
2309 	ddt_enter(ddt);
2310 
2311 	ASSERT(ddp->ddp_refcnt == 0);
2312 	ASSERT(dde->dde_lead_zio[p] == zio);
2313 	dde->dde_lead_zio[p] = NULL;
2314 
2315 	if (zio->io_error == 0) {
2316 		while (zio_walk_parents(zio) != NULL)
2317 			ddt_phys_addref(ddp);
2318 	} else {
2319 		ddt_phys_clear(ddp);
2320 	}
2321 
2322 	ddt_exit(ddt);
2323 }
2324 
2325 static void
2326 zio_ddt_ditto_write_done(zio_t *zio)
2327 {
2328 	int p = DDT_PHYS_DITTO;
2329 	zio_prop_t *zp = &zio->io_prop;
2330 	blkptr_t *bp = zio->io_bp;
2331 	ddt_t *ddt = ddt_select(zio->io_spa, bp);
2332 	ddt_entry_t *dde = zio->io_private;
2333 	ddt_phys_t *ddp = &dde->dde_phys[p];
2334 	ddt_key_t *ddk = &dde->dde_key;
2335 
2336 	ddt_enter(ddt);
2337 
2338 	ASSERT(ddp->ddp_refcnt == 0);
2339 	ASSERT(dde->dde_lead_zio[p] == zio);
2340 	dde->dde_lead_zio[p] = NULL;
2341 
2342 	if (zio->io_error == 0) {
2343 		ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum));
2344 		ASSERT(zp->zp_copies < SPA_DVAS_PER_BP);
2345 		ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp));
2346 		if (ddp->ddp_phys_birth != 0)
2347 			ddt_phys_free(ddt, ddk, ddp, zio->io_txg);
2348 		ddt_phys_fill(ddp, bp);
2349 	}
2350 
2351 	ddt_exit(ddt);
2352 }
2353 
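/*
 * Dedup write.  In outline (a loose paraphrase of the code below):
 *
 *	ddt_enter(ddt);
 *	dde = ddt_lookup(ddt, bp, B_TRUE);
 *	if (dedup_verify is set and the data actually differs)
 *		fall back to an ordinary write;
 *	else if (a matching block already exists or is being written)
 *		take a reference (or hang off the in-flight leader);
 *	else
 *		issue a leading child write that fills in the DDT entry;
 *	ddt_exit(ddt);
 *
 * The ditto path additionally writes extra copies of heavily referenced
 * entries when ddt_ditto_copies_needed() exceeds what is already present.
 */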
2354 static int
2355 zio_ddt_write(zio_t *zio)
2356 {
2357 	spa_t *spa = zio->io_spa;
2358 	blkptr_t *bp = zio->io_bp;
2359 	uint64_t txg = zio->io_txg;
2360 	zio_prop_t *zp = &zio->io_prop;
2361 	int p = zp->zp_copies;
2362 	int ditto_copies;
2363 	zio_t *cio = NULL;
2364 	zio_t *dio = NULL;
2365 	ddt_t *ddt = ddt_select(spa, bp);
2366 	ddt_entry_t *dde;
2367 	ddt_phys_t *ddp;
2368 
2369 	ASSERT(BP_GET_DEDUP(bp));
2370 	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
2371 	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
2372 
2373 	ddt_enter(ddt);
2374 	dde = ddt_lookup(ddt, bp, B_TRUE);
2375 	ddp = &dde->dde_phys[p];
2376 
2377 	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
2378 		/*
2379 		 * If we're using a weak checksum, upgrade to a strong checksum
2380 		 * and try again.  If we're already using a strong checksum,
2381 		 * we can't resolve it, so just convert to an ordinary write.
2382 		 * (And automatically e-mail a paper to Nature?)
2383 		 */
2384 		if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
2385 		    ZCHECKSUM_FLAG_DEDUP)) {
2386 			zp->zp_checksum = spa_dedup_checksum(spa);
2387 			zio_pop_transforms(zio);
2388 			zio->io_stage = ZIO_STAGE_OPEN;
2389 			BP_ZERO(bp);
2390 		} else {
2391 			zp->zp_dedup = B_FALSE;
2392 		}
2393 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
2394 		ddt_exit(ddt);
2395 		return (ZIO_PIPELINE_CONTINUE);
2396 	}
2397 
2398 	ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
2399 	ASSERT(ditto_copies < SPA_DVAS_PER_BP);
2400 
2401 	if (ditto_copies > ddt_ditto_copies_present(dde) &&
2402 	    dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) {
2403 		zio_prop_t czp = *zp;
2404 
2405 		czp.zp_copies = ditto_copies;
2406 
2407 		/*
2408 		 * If we arrived here with an override bp, we won't have run
2409 		 * the transform stack, so we won't have the data we need to
2410 		 * generate a child i/o.  So, toss the override bp and restart.
2411 		 * This is safe, because using the override bp is just an
2412 		 * optimization; and it's rare, so the cost doesn't matter.
2413 		 */
2414 		if (zio->io_bp_override) {
2415 			zio_pop_transforms(zio);
2416 			zio->io_stage = ZIO_STAGE_OPEN;
2417 			zio->io_pipeline = ZIO_WRITE_PIPELINE;
2418 			zio->io_bp_override = NULL;
2419 			BP_ZERO(bp);
2420 			ddt_exit(ddt);
2421 			return (ZIO_PIPELINE_CONTINUE);
2422 		}
2423 
2424 		dio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
2425 		    zio->io_orig_size, &czp, NULL, NULL,
2426 		    NULL, zio_ddt_ditto_write_done, dde, zio->io_priority,
2427 		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
2428 
2429 		zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL);
2430 		dde->dde_lead_zio[DDT_PHYS_DITTO] = dio;
2431 	}
2432 
2433 	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
2434 		if (ddp->ddp_phys_birth != 0)
2435 			ddt_bp_fill(ddp, bp, txg);
2436 		if (dde->dde_lead_zio[p] != NULL)
2437 			zio_add_child(zio, dde->dde_lead_zio[p]);
2438 		else
2439 			ddt_phys_addref(ddp);
2440 	} else if (zio->io_bp_override) {
2441 		ASSERT(bp->blk_birth == txg);
2442 		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
2443 		ddt_phys_fill(ddp, bp);
2444 		ddt_phys_addref(ddp);
2445 	} else {
2446 		cio = zio_write(zio, spa, txg, bp, zio->io_orig_data,
2447 		    zio->io_orig_size, zp,
2448 		    zio_ddt_child_write_ready, NULL, NULL,
2449 		    zio_ddt_child_write_done, dde, zio->io_priority,
2450 		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
2451 
2452 		zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL);
2453 		dde->dde_lead_zio[p] = cio;
2454 	}
2455 
2456 	ddt_exit(ddt);
2457 
2458 	if (cio)
2459 		zio_nowait(cio);
2460 	if (dio)
2461 		zio_nowait(dio);
2462 
2463 	return (ZIO_PIPELINE_CONTINUE);
2464 }
2465 
2466 ddt_entry_t *freedde; /* for debugging */
2467 
2468 static int
2469 zio_ddt_free(zio_t *zio)
2470 {
2471 	spa_t *spa = zio->io_spa;
2472 	blkptr_t *bp = zio->io_bp;
2473 	ddt_t *ddt = ddt_select(spa, bp);
2474 	ddt_entry_t *dde;
2475 	ddt_phys_t *ddp;
2476 
2477 	ASSERT(BP_GET_DEDUP(bp));
2478 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2479 
2480 	ddt_enter(ddt);
2481 	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
2482 	ddp = ddt_phys_select(dde, bp);
2483 	ddt_phys_decref(ddp);
2484 	ddt_exit(ddt);
2485 
2486 	return (ZIO_PIPELINE_CONTINUE);
2487 }
2488 
2489 /*
2490  * ==========================================================================
2491  * Allocate and free blocks
2492  * ==========================================================================
2493  */
2494 static int
2495 zio_dva_allocate(zio_t *zio)
2496 {
2497 	spa_t *spa = zio->io_spa;
2498 	metaslab_class_t *mc = spa_normal_class(spa);
2499 	blkptr_t *bp = zio->io_bp;
2500 	int error;
2501 	int flags = 0;
2502 
2503 	if (zio->io_gang_leader == NULL) {
2504 		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2505 		zio->io_gang_leader = zio;
2506 	}
2507 
2508 	ASSERT(BP_IS_HOLE(bp));
2509 	ASSERT0(BP_GET_NDVAS(bp));
2510 	ASSERT3U(zio->io_prop.zp_copies, >, 0);
2511 	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
2512 	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
2513 
2514 	/*
2515 	 * The dump device does not support gang blocks so allocation on
2516 	 * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid
2517 	 * the "fast" gang feature.
2518 	 */
2519 	flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0;
2520 	flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ?
2521 	    METASLAB_GANG_CHILD : 0;
2522 	error = metaslab_alloc(spa, mc, zio->io_size, bp,
2523 	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags);
2524 
2525 	if (error) {
2526 		spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, "
2527 		    "size %llu, error %d", spa_name(spa), zio, zio->io_size,
2528 		    error);
2529 		if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
2530 			return (zio_write_gang_block(zio));
2531 		zio->io_error = error;
2532 	}
2533 
2534 	return (ZIO_PIPELINE_CONTINUE);
2535 }
2536 
2537 static int
2538 zio_dva_free(zio_t *zio)
2539 {
2540 	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
2541 
2542 	return (ZIO_PIPELINE_CONTINUE);
2543 }
2544 
2545 static int
2546 zio_dva_claim(zio_t *zio)
2547 {
2548 	int error;
2549 
2550 	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
2551 	if (error)
2552 		zio->io_error = error;
2553 
2554 	return (ZIO_PIPELINE_CONTINUE);
2555 }
2556 
2557 /*
2558  * Undo an allocation.  This is used by zio_done() when an I/O fails
2559  * and we want to give back the block we just allocated.
2560  * This handles both normal blocks and gang blocks.
2561  */
2562 static void
2563 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
2564 {
2565 	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
2566 	ASSERT(zio->io_bp_override == NULL);
2567 
2568 	if (!BP_IS_HOLE(bp))
2569 		metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
2570 
2571 	if (gn != NULL) {
2572 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2573 			zio_dva_unallocate(zio, gn->gn_child[g],
2574 			    &gn->gn_gbh->zg_blkptr[g]);
2575 		}
2576 	}
2577 }
2578 
2579 /*
2580  * Try to allocate an intent log block.  Return 0 on success, errno on failure.
2581  */
2582 int
2583 zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
2584     uint64_t size, boolean_t use_slog)
2585 {
2586 	int error = 1;
2587 
2588 	ASSERT(txg > spa_syncing_txg(spa));
2589 
2590 	/*
2591 	 * ZIL blocks are always contiguous (i.e. not gang blocks), so we
2592 	 * set the METASLAB_GANG_AVOID flag to keep the allocator from
2593 	 * "fast ganging" them.
2594 	 */
2595 	if (use_slog) {
2596 		error = metaslab_alloc(spa, spa_log_class(spa), size,
2597 		    new_bp, 1, txg, old_bp,
2598 		    METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
2599 	}
2600 
2601 	if (error) {
2602 		error = metaslab_alloc(spa, spa_normal_class(spa), size,
2603 		    new_bp, 1, txg, old_bp,
2604 		    METASLAB_HINTBP_AVOID);
2605 	}
2606 
2607 	if (error == 0) {
2608 		BP_SET_LSIZE(new_bp, size);
2609 		BP_SET_PSIZE(new_bp, size);
2610 		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
2611 		BP_SET_CHECKSUM(new_bp,
2612 		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
2613 		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
2614 		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
2615 		BP_SET_LEVEL(new_bp, 0);
2616 		BP_SET_DEDUP(new_bp, 0);
2617 		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
2618 	}
2619 
2620 	return (error);
2621 }
2622 
2623 /*
2624  * Free an intent log block.
2625  */
2626 void
2627 zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
2628 {
2629 	ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG);
2630 	ASSERT(!BP_IS_GANG(bp));
2631 
2632 	zio_free(spa, txg, bp);
2633 }
2634 
2635 /*
2636  * ==========================================================================
2637  * Read and write to physical devices
2638  * ==========================================================================
2639  */
2640 
2641 
2642 /*
2643  * Issue an I/O to the underlying vdev. Typically the issue pipeline
2644  * stops after this stage and will resume upon I/O completion.
2645  * However, there are instances where the vdev layer may need to
2646  * continue the pipeline when an I/O was not issued. Since the I/O
2647  * that was sent to the vdev layer might be different from the one
2648  * currently active in the pipeline (see vdev_queue_io()), we explicitly
2649  * force the underlying vdev layers to call either zio_execute() or
2650  * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
2651  */
2652 static int
2653 zio_vdev_io_start(zio_t *zio)
2654 {
2655 	vdev_t *vd = zio->io_vd;
2656 	uint64_t align;
2657 	spa_t *spa = zio->io_spa;
2658 
2659 	ASSERT(zio->io_error == 0);
2660 	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
2661 
2662 	if (vd == NULL) {
2663 		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
2664 			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
2665 
2666 		/*
2667 		 * The mirror_ops handle multiple DVAs in a single BP.
2668 		 */
2669 		vdev_mirror_ops.vdev_op_io_start(zio);
2670 		return (ZIO_PIPELINE_STOP);
2671 	}
2672 
2673 	/*
2674 	 * We keep track of time-sensitive I/Os so that the scan thread
2675 	 * can quickly react to certain workloads.  In particular, we care
2676 	 * about non-scrubbing, top-level reads and writes with the following
2677 	 * characteristics:
2678 	 *	- synchronous writes of user data to non-slog devices
2679 	 *	- any reads of user data
2680 	 * When these conditions are met, adjust the timestamp of spa_last_io,
2681 	 * which allows the scan thread to adjust its workload accordingly.
2682 	 */
2683 	if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL &&
2684 	    vd == vd->vdev_top && !vd->vdev_islog &&
2685 	    zio->io_bookmark.zb_objset != DMU_META_OBJSET &&
2686 	    zio->io_txg != spa_syncing_txg(spa)) {
2687 		uint64_t old = spa->spa_last_io;
2688 		uint64_t new = ddi_get_lbolt64();
2689 		if (old != new)
2690 			(void) atomic_cas_64(&spa->spa_last_io, old, new);
2691 	}
2692 
2693 	align = 1ULL << vd->vdev_top->vdev_ashift;
2694 
2695 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
2696 	    P2PHASE(zio->io_size, align) != 0) {
2697 		/* Transform logical writes to be a full physical block size. */
2698 		uint64_t asize = P2ROUNDUP(zio->io_size, align);
2699 		char *abuf = zio_buf_alloc(asize);
2700 		ASSERT(vd == vd->vdev_top);
2701 		if (zio->io_type == ZIO_TYPE_WRITE) {
2702 			bcopy(zio->io_data, abuf, zio->io_size);
2703 			bzero(abuf + zio->io_size, asize - zio->io_size);
2704 		}
2705 		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
2706 	}
2707 
2708 	/*
2709 	 * If this is not a physical io, make sure that it is properly aligned
2710 	 * before proceeding.
2711 	 */
2712 	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
2713 		ASSERT0(P2PHASE(zio->io_offset, align));
2714 		ASSERT0(P2PHASE(zio->io_size, align));
2715 	} else {
2716 		/*
2717 		 * For physical writes, we allow 512b aligned writes and assume
2718 		 * the device will perform a read-modify-write as necessary.
2719 		 */
2720 		ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
2721 		ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
2722 	}
2723 
2724 	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
2725 
2726 	/*
2727 	 * If this is a repair I/O, and there's no self-healing involved --
2728 	 * that is, we're just resilvering what we expect to resilver --
2729 	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
2730 	 * This prevents spurious resilvering with nested replication.
2731 	 * For example, given a mirror of mirrors, (A+B)+(C+D), if only
2732 	 * A is out of date, we'll read from C+D, then use the data to
2733 	 * resilver A+B -- but we don't actually want to resilver B, just A.
2734 	 * The top-level mirror has no way to know this, so instead we just
2735 	 * discard unnecessary repairs as we work our way down the vdev tree.
2736 	 * The same logic applies to any form of nested replication:
2737 	 * ditto + mirror, RAID-Z + replacing, etc.  This covers them all.
2738 	 */
2739 	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
2740 	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
2741 	    zio->io_txg != 0 &&	/* not a delegated i/o */
2742 	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
2743 		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
2744 		zio_vdev_io_bypass(zio);
2745 		return (ZIO_PIPELINE_CONTINUE);
2746 	}
2747 
2748 	if (vd->vdev_ops->vdev_op_leaf &&
2749 	    (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {
2750 
2751 		if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
2752 			return (ZIO_PIPELINE_CONTINUE);
2753 
2754 		if ((zio = vdev_queue_io(zio)) == NULL)
2755 			return (ZIO_PIPELINE_STOP);
2756 
2757 		if (!vdev_accessible(vd, zio)) {
2758 			zio->io_error = SET_ERROR(ENXIO);
2759 			zio_interrupt(zio);
2760 			return (ZIO_PIPELINE_STOP);
2761 		}
2762 	}
2763 
2764 	vd->vdev_ops->vdev_op_io_start(zio);
2765 	return (ZIO_PIPELINE_STOP);
2766 }
2767 
2768 static int
2769 zio_vdev_io_done(zio_t *zio)
2770 {
2771 	vdev_t *vd = zio->io_vd;
2772 	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
2773 	boolean_t unexpected_error = B_FALSE;
2774 
2775 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
2776 		return (ZIO_PIPELINE_STOP);
2777 
2778 	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
2779 
2780 	if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {
2781 
2782 		vdev_queue_io_done(zio);
2783 
2784 		if (zio->io_type == ZIO_TYPE_WRITE)
2785 			vdev_cache_write(zio);
2786 
2787 		if (zio_injection_enabled && zio->io_error == 0)
2788 			zio->io_error = zio_handle_device_injection(vd,
2789 			    zio, EIO);
2790 
2791 		if (zio_injection_enabled && zio->io_error == 0)
2792 			zio->io_error = zio_handle_label_injection(zio, EIO);
2793 
2794 		if (zio->io_error) {
2795 			if (!vdev_accessible(vd, zio)) {
2796 				zio->io_error = SET_ERROR(ENXIO);
2797 			} else {
2798 				unexpected_error = B_TRUE;
2799 			}
2800 		}
2801 	}
2802 
2803 	ops->vdev_op_io_done(zio);
2804 
2805 	if (unexpected_error)
2806 		VERIFY(vdev_probe(vd, zio) == NULL);
2807 
2808 	return (ZIO_PIPELINE_CONTINUE);
2809 }
2810 
2811 /*
2812  * For non-raidz ZIOs, we can just copy aside the bad data read from the
2813  * disk, and use that to finish the checksum ereport later.
2814  */
2815 static void
2816 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
2817     const void *good_buf)
2818 {
2819 	/* no processing needed */
2820 	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
2821 }
2822 
2823 /*ARGSUSED*/
2824 void
2825 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
2826 {
2827 	void *buf = zio_buf_alloc(zio->io_size);
2828 
2829 	bcopy(zio->io_data, buf, zio->io_size);
2830 
2831 	zcr->zcr_cbinfo = zio->io_size;
2832 	zcr->zcr_cbdata = buf;
2833 	zcr->zcr_finish = zio_vsd_default_cksum_finish;
2834 	zcr->zcr_free = zio_buf_free;
2835 }
2836 
2837 static int
2838 zio_vdev_io_assess(zio_t *zio)
2839 {
2840 	vdev_t *vd = zio->io_vd;
2841 
2842 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
2843 		return (ZIO_PIPELINE_STOP);
2844 
2845 	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
2846 		spa_config_exit(zio->io_spa, SCL_ZIO, zio);
2847 
2848 	if (zio->io_vsd != NULL) {
2849 		zio->io_vsd_ops->vsd_free(zio);
2850 		zio->io_vsd = NULL;
2851 	}
2852 
2853 	if (zio_injection_enabled && zio->io_error == 0)
2854 		zio->io_error = zio_handle_fault_injection(zio, EIO);
2855 
2856 	/*
2857 	 * If the I/O failed, determine whether we should attempt to retry it.
2858 	 *
2859 	 * On retry, we cut in line in the issue queue, since we don't want
2860 	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
2861 	 */
2862 	if (zio->io_error && vd == NULL &&
2863 	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
2864 		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
2865 		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
2866 		zio->io_error = 0;
2867 		zio->io_flags |= ZIO_FLAG_IO_RETRY |
2868 		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
2869 		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
2870 		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
2871 		    zio_requeue_io_start_cut_in_line);
2872 		return (ZIO_PIPELINE_STOP);
2873 	}
2874 
2875 	/*
2876 	 * If we got an error on a leaf device, convert it to ENXIO
2877 	 * if the device is not accessible at all.
2878 	 */
2879 	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
2880 	    !vdev_accessible(vd, zio))
2881 		zio->io_error = SET_ERROR(ENXIO);
2882 
2883 	/*
2884 	 * If we can't write to an interior vdev (mirror or RAID-Z),
2885 	 * set vdev_cant_write so that we stop trying to allocate from it.
2886 	 */
2887 	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
2888 	    vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
2889 		vd->vdev_cant_write = B_TRUE;
2890 	}
2891 
2892 	if (zio->io_error)
2893 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2894 
2895 	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
2896 	    zio->io_physdone != NULL) {
2897 		ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
2898 		ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
2899 		zio->io_physdone(zio->io_logical);
2900 	}
2901 
2902 	return (ZIO_PIPELINE_CONTINUE);
2903 }
2904 
2905 void
2906 zio_vdev_io_reissue(zio_t *zio)
2907 {
2908 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
2909 	ASSERT(zio->io_error == 0);
2910 
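	/* Back up one stage so zio_execute() will rerun VDEV_IO_START. */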
2911 	zio->io_stage >>= 1;
2912 }
2913 
2914 void
2915 zio_vdev_io_redone(zio_t *zio)
2916 {
2917 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
2918 
2919 	zio->io_stage >>= 1;
2920 }
2921 
2922 void
2923 zio_vdev_io_bypass(zio_t *zio)
2924 {
2925 	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
2926 	ASSERT(zio->io_error == 0);
2927 
2928 	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
2929 	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
2930 }
2931 
2932 /*
2933  * ==========================================================================
2934  * Generate and verify checksums
2935  * ==========================================================================
2936  */
2937 static int
2938 zio_checksum_generate(zio_t *zio)
2939 {
2940 	blkptr_t *bp = zio->io_bp;
2941 	enum zio_checksum checksum;
2942 
2943 	if (bp == NULL) {
2944 		/*
2945 		 * This is zio_write_phys().
2946 		 * We're either generating a label checksum, or none at all.
2947 		 */
2948 		checksum = zio->io_prop.zp_checksum;
2949 
2950 		if (checksum == ZIO_CHECKSUM_OFF)
2951 			return (ZIO_PIPELINE_CONTINUE);
2952 
2953 		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
2954 	} else {
2955 		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
2956 			ASSERT(!IO_IS_ALLOCATING(zio));
2957 			checksum = ZIO_CHECKSUM_GANG_HEADER;
2958 		} else {
2959 			checksum = BP_GET_CHECKSUM(bp);
2960 		}
2961 	}
2962 
2963 	zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size);
2964 
2965 	return (ZIO_PIPELINE_CONTINUE);
2966 }
2967 
2968 static int
2969 zio_checksum_verify(zio_t *zio)
2970 {
2971 	zio_bad_cksum_t info;
2972 	blkptr_t *bp = zio->io_bp;
2973 	int error;
2974 
2975 	ASSERT(zio->io_vd != NULL);
2976 
2977 	if (bp == NULL) {
2978 		/*
2979 		 * This is zio_read_phys().
2980 		 * We're either verifying a label checksum, or nothing at all.
2981 		 */
2982 		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
2983 			return (ZIO_PIPELINE_CONTINUE);
2984 
2985 		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
2986 	}
2987 
2988 	if ((error = zio_checksum_error(zio, &info)) != 0) {
2989 		zio->io_error = error;
2990 		if (error == ECKSUM &&
2991 		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
2992 			zfs_ereport_start_checksum(zio->io_spa,
2993 			    zio->io_vd, zio, zio->io_offset,
2994 			    zio->io_size, NULL, &info);
2995 		}
2996 	}
2997 
2998 	return (ZIO_PIPELINE_CONTINUE);
2999 }
3000 
3001 /*
3002  * Called by RAID-Z to ensure we don't compute the checksum twice.
3003  */
3004 void
3005 zio_checksum_verified(zio_t *zio)
3006 {
3007 	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
3008 }
3009 
3010 /*
3011  * ==========================================================================
3012  * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
3013  * An error of 0 indicates success.  ENXIO indicates whole-device failure,
3014  * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
3015  * indicate errors that are specific to one I/O, and most likely permanent.
3016  * Any other error is presumed to be worse because we weren't expecting it.
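 *
 * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and
 * zio_worst_error(EIO, ENOTSUP) returns ENOTSUP, since an unranked
 * error is presumed worst of all.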
3017  * ==========================================================================
3018  */
3019 int
3020 zio_worst_error(int e1, int e2)
3021 {
3022 	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
3023 	int r1, r2;
3024 
3025 	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
3026 		if (e1 == zio_error_rank[r1])
3027 			break;
3028 
3029 	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
3030 		if (e2 == zio_error_rank[r2])
3031 			break;
3032 
3033 	return (r1 > r2 ? e1 : e2);
3034 }
3035 
3036 /*
3037  * ==========================================================================
3038  * I/O completion
3039  * ==========================================================================
3040  */
3041 static int
3042 zio_ready(zio_t *zio)
3043 {
3044 	blkptr_t *bp = zio->io_bp;
3045 	zio_t *pio, *pio_next;
3046 
3047 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
3048 	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY))
3049 		return (ZIO_PIPELINE_STOP);
3050 
3051 	if (zio->io_ready) {
3052 		ASSERT(IO_IS_ALLOCATING(zio));
3053 		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
3054 		    (zio->io_flags & ZIO_FLAG_NOPWRITE));
3055 		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
3056 
3057 		zio->io_ready(zio);
3058 	}
3059 
3060 	if (bp != NULL && bp != &zio->io_bp_copy)
3061 		zio->io_bp_copy = *bp;
3062 
3063 	if (zio->io_error)
3064 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3065 
3066 	mutex_enter(&zio->io_lock);
3067 	zio->io_state[ZIO_WAIT_READY] = 1;
3068 	pio = zio_walk_parents(zio);
3069 	mutex_exit(&zio->io_lock);
3070 
3071 	/*
3072 	 * As we notify zio's parents, new parents could be added.
3073 	 * New parents go to the head of zio's io_parent_list, however,
3074 	 * so we will (correctly) not notify them.  The remainder of zio's
3075 	 * io_parent_list, from 'pio_next' onward, cannot change because
3076 	 * all parents must wait for us to be done before they can be done.
3077 	 */
3078 	for (; pio != NULL; pio = pio_next) {
3079 		pio_next = zio_walk_parents(zio);
3080 		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
3081 	}
3082 
3083 	if (zio->io_flags & ZIO_FLAG_NODATA) {
3084 		if (BP_IS_GANG(bp)) {
3085 			zio->io_flags &= ~ZIO_FLAG_NODATA;
3086 		} else {
3087 			ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE);
3088 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
3089 		}
3090 	}
3091 
3092 	if (zio_injection_enabled &&
3093 	    zio->io_spa->spa_syncing_txg == zio->io_txg)
3094 		zio_handle_ignored_writes(zio);
3095 
3096 	return (ZIO_PIPELINE_CONTINUE);
3097 }
3098 
3099 static int
3100 zio_done(zio_t *zio)
3101 {
3102 	spa_t *spa = zio->io_spa;
3103 	zio_t *lio = zio->io_logical;
3104 	blkptr_t *bp = zio->io_bp;
3105 	vdev_t *vd = zio->io_vd;
3106 	uint64_t psize = zio->io_size;
3107 	zio_t *pio, *pio_next;
3108 
3109 	/*
3110 	 * If our children haven't all completed,
3111 	 * wait for them and then repeat this pipeline stage.
3112 	 */
3113 	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
3114 	    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
3115 	    zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) ||
3116 	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
3117 		return (ZIO_PIPELINE_STOP);
3118 
3119 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
3120 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
3121 			ASSERT(zio->io_children[c][w] == 0);
3122 
3123 	if (bp != NULL && !BP_IS_EMBEDDED(bp)) {
3124 		ASSERT(bp->blk_pad[0] == 0);
3125 		ASSERT(bp->blk_pad[1] == 0);
3126 		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
3127 		    (bp == zio_unique_parent(zio)->io_bp));
3128 		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
3129 		    zio->io_bp_override == NULL &&
3130 		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
3131 			ASSERT(!BP_SHOULD_BYTESWAP(bp));
3132 			ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp));
3133 			ASSERT(BP_COUNT_GANG(bp) == 0 ||
3134 			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
3135 		}
3136 		if (zio->io_flags & ZIO_FLAG_NOPWRITE)
3137 			VERIFY(BP_EQUAL(bp, &zio->io_bp_orig));
3138 	}
3139 
3140 	/*
3141 	 * If there were child vdev/gang/ddt errors, they apply to us now.
3142 	 */
3143 	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
3144 	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
3145 	zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
3146 
3147 	/*
3148 	 * If the I/O on the transformed data was successful, generate any
3149 	 * checksum reports now while we still have the transformed data.
3150 	 */
3151 	if (zio->io_error == 0) {
3152 		while (zio->io_cksum_report != NULL) {
3153 			zio_cksum_report_t *zcr = zio->io_cksum_report;
3154 			uint64_t align = zcr->zcr_align;
3155 			uint64_t asize = P2ROUNDUP(psize, align);
3156 			char *abuf = zio->io_data;
3157 
3158 			if (asize != psize) {
3159 				abuf = zio_buf_alloc(asize);
3160 				bcopy(zio->io_data, abuf, psize);
3161 				bzero(abuf + psize, asize - psize);
3162 			}
3163 
3164 			zio->io_cksum_report = zcr->zcr_next;
3165 			zcr->zcr_next = NULL;
3166 			zcr->zcr_finish(zcr, abuf);
3167 			zfs_ereport_free_checksum(zcr);
3168 
3169 			if (asize != psize)
3170 				zio_buf_free(abuf, asize);
3171 		}
3172 	}
3173 
3174 	zio_pop_transforms(zio);	/* note: may set zio->io_error */
3175 
3176 	vdev_stat_update(zio, psize);
3177 
3178 	if (zio->io_error) {
3179 		/*
3180 		 * If this I/O is attached to a particular vdev,
3181 		 * generate an error message describing the I/O failure
3182 		 * at the block level.  We ignore these errors if the
3183 		 * device is currently unavailable.
3184 		 */
3185 		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
3186 			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);
3187 
3188 		if ((zio->io_error == EIO || !(zio->io_flags &
3189 		    (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
3190 		    zio == lio) {
3191 			/*
3192 			 * For logical I/O requests, tell the SPA to log the
3193 			 * error and generate a logical data ereport.
3194 			 */
3195 			spa_log_error(spa, zio);
3196 			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
3197 			    0, 0);
3198 		}
3199 	}
3200 
3201 	if (zio->io_error && zio == lio) {
3202 		/*
3203 		 * Determine whether zio should be reexecuted.  This will
3204 		 * propagate all the way to the root via zio_notify_parent().
3205 		 */
3206 		ASSERT(vd == NULL && bp != NULL);
3207 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3208 
3209 		if (IO_IS_ALLOCATING(zio) &&
3210 		    !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
3211 			if (zio->io_error != ENOSPC)
3212 				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
3213 			else
3214 				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3215 		}
3216 
3217 		if ((zio->io_type == ZIO_TYPE_READ ||
3218 		    zio->io_type == ZIO_TYPE_FREE) &&
3219 		    !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
3220 		    zio->io_error == ENXIO &&
3221 		    spa_load_state(spa) == SPA_LOAD_NONE &&
3222 		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
3223 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3224 
3225 		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
3226 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
3227 
3228 		/*
3229 		 * This might be a good place to attempt combinatorial
3230 		 * reconstruction or error correction based on
3231 		 * checksums.  It might also be a good place to send
3232 		 * out preliminary ereports before we suspend
3233 		 * processing.
3234 		 */
3235 	}
3236 
3237 	/*
3238 	 * If there were logical child errors, they apply to us now.
3239 	 * We defer this until now to avoid conflating logical child
3240 	 * errors with errors that happened to the zio itself when
3241 	 * updating vdev stats and reporting FMA events above.
3242 	 */
3243 	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
3244 
3245 	if ((zio->io_error || zio->io_reexecute) &&
3246 	    IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
3247 	    !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
3248 		zio_dva_unallocate(zio, zio->io_gang_tree, bp);
3249 
3250 	zio_gang_tree_free(&zio->io_gang_tree);
3251 
3252 	/*
3253 	 * Godfather I/Os should never suspend.
3254 	 */
3255 	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
3256 	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
3257 		zio->io_reexecute = 0;
3258 
3259 	if (zio->io_reexecute) {
3260 		/*
3261 		 * This is a logical I/O that wants to reexecute.
3262 		 *
3263 		 * Reexecute is top-down.  When an i/o fails, if it's not
3264 		 * the root, it simply notifies its parent and sticks around.
3265 		 * The parent, seeing that it still has children in zio_done(),
3266 		 * does the same.  This percolates all the way up to the root.
3267 		 * The root i/o will reexecute or suspend the entire tree.
3268 		 *
3269 		 * This approach ensures that zio_reexecute() honors
3270 		 * all the original i/o dependency relationships, e.g.
3271 		 * parents not executing until children are ready.
3272 		 */
3273 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3274 
3275 		zio->io_gang_leader = NULL;
3276 
3277 		mutex_enter(&zio->io_lock);
3278 		zio->io_state[ZIO_WAIT_DONE] = 1;
3279 		mutex_exit(&zio->io_lock);
3280 
3281 		/*
3282 		 * "The Godfather" I/O monitors its children but is
3283 		 * not a true parent to them. It will track them through
3284 		 * the pipeline but severs its ties whenever they get into
3285 		 * trouble (e.g. suspended). This allows "The Godfather"
3286 		 * I/O to return status without blocking.
3287 		 */
3288 		for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
3289 			zio_link_t *zl = zio->io_walk_link;
3290 			pio_next = zio_walk_parents(zio);
3291 
3292 			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
3293 			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
3294 				zio_remove_child(pio, zio, zl);
3295 				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3296 			}
3297 		}
3298 
3299 		if ((pio = zio_unique_parent(zio)) != NULL) {
3300 			/*
3301 			 * We're not a root i/o, so there's nothing to do
3302 			 * but notify our parent.  Don't propagate errors
3303 			 * upward since we haven't permanently failed yet.
3304 			 */
3305 			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
3306 			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
3307 			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3308 		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
3309 			/*
3310 			 * We'd fail again if we reexecuted now, so suspend
3311 			 * until conditions improve (e.g. device comes online).
3312 			 */
3313 			zio_suspend(spa, zio);
3314 		} else {
3315 			/*
3316 			 * Reexecution is potentially a huge amount of work.
3317 			 * Hand it off to the otherwise-unused claim taskq.
3318 			 */
3319 			ASSERT(zio->io_tqent.tqent_next == NULL);
3320 			spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM,
3321 			    ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio,
3322 			    0, &zio->io_tqent);
3323 		}
3324 		return (ZIO_PIPELINE_STOP);
3325 	}
3326 
3327 	ASSERT(zio->io_child_count == 0);
3328 	ASSERT(zio->io_reexecute == 0);
3329 	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
3330 
3331 	/*
3332 	 * Report any checksum errors, since the I/O is complete.
3333 	 */
3334 	while (zio->io_cksum_report != NULL) {
3335 		zio_cksum_report_t *zcr = zio->io_cksum_report;
3336 		zio->io_cksum_report = zcr->zcr_next;
3337 		zcr->zcr_next = NULL;
3338 		zcr->zcr_finish(zcr, NULL);
3339 		zfs_ereport_free_checksum(zcr);
3340 	}
3341 
3342 	/*
3343 	 * It is the responsibility of the done callback to ensure that this
3344 	 * particular zio is no longer discoverable for adoption, and as
3345 	 * such, cannot acquire any new parents.
3346 	 */
3347 	if (zio->io_done)
3348 		zio->io_done(zio);
3349 
3350 	mutex_enter(&zio->io_lock);
3351 	zio->io_state[ZIO_WAIT_DONE] = 1;
3352 	mutex_exit(&zio->io_lock);
3353 
3354 	for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
3355 		zio_link_t *zl = zio->io_walk_link;
3356 		pio_next = zio_walk_parents(zio);
3357 		zio_remove_child(pio, zio, zl);
3358 		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
3359 	}
3360 
3361 	if (zio->io_waiter != NULL) {
3362 		mutex_enter(&zio->io_lock);
3363 		zio->io_executor = NULL;
3364 		cv_broadcast(&zio->io_cv);
3365 		mutex_exit(&zio->io_lock);
3366 	} else {
3367 		zio_destroy(zio);
3368 	}
3369 
3370 	return (ZIO_PIPELINE_STOP);
3371 }
3372 
3373 /*
3374  * ==========================================================================
3375  * I/O pipeline definition
3376  * ==========================================================================
3377  */
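/*
 * One entry per ZIO_STAGE_* bit, in bit order; zio_execute() indexes this
 * table by the position of the stage bit it is about to run.  The order
 * here must match the zio_stage definitions in zio_impl.h.
 */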
3378 static zio_pipe_stage_t *zio_pipeline[] = {
3379 	NULL,
3380 	zio_read_bp_init,
3381 	zio_free_bp_init,
3382 	zio_issue_async,
3383 	zio_write_bp_init,
3384 	zio_checksum_generate,
3385 	zio_nop_write,
3386 	zio_ddt_read_start,
3387 	zio_ddt_read_done,
3388 	zio_ddt_write,
3389 	zio_ddt_free,
3390 	zio_gang_assemble,
3391 	zio_gang_issue,
3392 	zio_dva_allocate,
3393 	zio_dva_free,
3394 	zio_dva_claim,
3395 	zio_ready,
3396 	zio_vdev_io_start,
3397 	zio_vdev_io_done,
3398 	zio_vdev_io_assess,
3399 	zio_checksum_verify,
3400 	zio_done
3401 };
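
/*
 * The order of the entries above must match the stage bits of enum
 * zio_stage in zio_impl.h: zio_execute() indexes this table by the
 * position of the next stage bit set in io_pipeline.  The leading NULL
 * slot corresponds to ZIO_STAGE_OPEN, which requires no processing.
 */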
3402 
3403 
3404 
3405 
3406 /*
3407  * Compare two zbookmark_phys_t's to see which we would reach first in a
3408  * pre-order traversal of the object tree.
3409  *
3410  * This is simple in every case aside from the meta-dnode object. For all other
3411  * objects, we traverse them in order (object 1 before object 2, and so on).
3412  * However, all of these objects are traversed while traversing object 0, since
3413  * the data it points to is the list of objects.  Thus, we need to convert to a
3414  * canonical representation so we can compare meta-dnode bookmarks to
3415  * non-meta-dnode bookmarks.
3416  *
3417  * We do this by calculating "equivalents" for each field of the zbookmark.
3418  * zbookmarks outside of the meta-dnode use their own object and level, and
3419  * calculate the level 0 equivalent (the first L0 blkid that is contained in the
3420  * blocks this bookmark refers to) by multiplying their blkid by their span
3421  * (the number of L0 blocks contained within one block at their level).
3422  * zbookmarks inside the meta-dnode calculate their object equivalent
3423  * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
3424  * level + 1<<31 (a value larger than any level could ever be) for their level.
3425  * This causes them to always compare before any bookmark in the objects
3426  * they cover, to compare appropriately to bookmarks in other objects, and
3427  * to compare appropriately to other bookmarks in the meta-dnode.
3428  */
3429 int
3430 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
3431     const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
3432 {
3433 	/*
3434 	 * These variables represent the "equivalent" values for the zbookmark,
3435 	 * after converting zbookmarks inside the meta dnode to their
3436 	 * normal-object equivalents.
3437 	 */
3438 	uint64_t zb1obj, zb2obj;
3439 	uint64_t zb1L0, zb2L0;
3440 	uint64_t zb1level, zb2level;
3441 
3442 	if (zb1->zb_object == zb2->zb_object &&
3443 	    zb1->zb_level == zb2->zb_level &&
3444 	    zb1->zb_blkid == zb2->zb_blkid)
3445 		return (0);
3446 
3447 	/*
3448 	 * BP_SPANB calculates the span, in L0 blocks, of one block at each level.
3449 	 */
3450 	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
3451 	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
3452 
3453 	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
3454 		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
3455 		zb1L0 = 0;
3456 		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
3457 	} else {
3458 		zb1obj = zb1->zb_object;
3459 		zb1level = zb1->zb_level;
3460 	}
3461 
3462 	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
3463 		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
3464 		zb2L0 = 0;
3465 		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
3466 	} else {
3467 		zb2obj = zb2->zb_object;
3468 		zb2level = zb2->zb_level;
3469 	}
3470 
3471 	/* Now that we have a canonical representation, do the comparison. */
3472 	if (zb1obj != zb2obj)
3473 		return (zb1obj < zb2obj ? -1 : 1);
3474 	else if (zb1L0 != zb2L0)
3475 		return (zb1L0 < zb2L0 ? -1 : 1);
3476 	else if (zb1level != zb2level)
3477 		return (zb1level > zb2level ? -1 : 1);
3478 	/*
3479 	 * This can (theoretically) happen if the bookmarks have the same object
3480 	 * and level, but different blkids, if the block sizes are not the same.
3481 	 * There is presently no way to change the indirect block sizes.
3482 	 */
3483 	return (0);
3484 }
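
/*
 * A worked example of the canonicalization above, assuming 16K indirect
 * blocks (ibs = 14, so BP_SPANB(14, level) = 128^level) and 16K meta-dnode
 * data blocks (dbss = 32 sectors, i.e. 32 dnodes per block):
 *
 *	zb1 = { object 5, level 1, blkid 2 }	(a regular object)
 *	    zb1L0 = 2 * 128 = 256, zb1obj = 5, zb1level = 1
 *	zb2 = { object 0, level 0, blkid 3 }	(in the meta-dnode)
 *	    zb2L0 = 3 * 1 = 3, then zb2obj = 3 * 32 = 96, zb2L0 = 0,
 *	    zb2level = 0 + COMPARE_META_LEVEL
 *
 * Since zb1obj (5) < zb2obj (96), zb1 compares first: object 5's dnode
 * lives in meta-dnode block 0, which a pre-order traversal reaches before
 * meta-dnode block 3 (objects 96-127).
 */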
3485 
3486 /*
3487  *  This function answers the following question: given that last_block is
3488  *  the place where our traversal stopped last time, does that guarantee that
3489  *  we've visited every node under subtree_root?  Since zbookmark_compare()
3490  *  only orders two individual bookmarks, its raw output can't answer this
3491  *  directly.  Instead, we pass in a modified version of subtree_root with
3492  *  its block id incremented, which represents the first bookmark past the
3493  *  subtree; if that falls at or before last_block, then having visited
3494  *  last_block implies that all of subtree_root's children have been visited.
3495  */
3496 boolean_t
3497 zbookmark_subtree_completed(const dnode_phys_t *dnp,
3498     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
3499 {
3500 	zbookmark_phys_t mod_zb = *subtree_root;
3501 	mod_zb.zb_blkid++;
3502 	ASSERT(last_block->zb_level == 0);
3503 
3504 	/* The objset_phys_t isn't before anything. */
3505 	if (dnp == NULL)
3506 		return (B_FALSE);
3507 
3508 	/*
3509 	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
3510 	 * data block size in sectors, because that variable is only used if
3511 	 * the bookmark refers to a block in the meta-dnode.  Since we can't tell
3512 	 * which object a bookmark refers to without examining it, and there's no
3513 	 * harm in passing this value in the other cases, we always pass it in.
3514 	 *
3515 	 * We pass in 0 for the indirect block size shift because zb2 must be
3516 	 * level 0.  The indirect block size is only used to calculate the span
3517 	 * of the bookmark, but since the bookmark must be level 0, the span is
3518 	 * always 1, so the math works out.
3519 	 *
3520 	 * If you make changes to how the zbookmark_compare code works, be sure
3521 	 * that this code still works afterwards.
3522 	 */
3523 	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
3524 	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
3525 	    last_block) <= 0);
3526 }
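
/*
 * Note that the constant passed above is the meta-dnode's data block size
 * in sectors: 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) is
 * 1 << (14 - 9) = 32 sectors, i.e. 16K.  A hypothetical resuming caller
 * (names illustrative) might look like:
 *
 *	if (zbookmark_subtree_completed(dnp, zb, &scn->scn_phys.scn_bookmark))
 *		return (B_TRUE);	(everything below zb already visited)
 */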
3527