xref: /illumos-gate/usr/src/uts/common/fs/zfs/zio.c (revision bbf21555)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
24  * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  * Copyright (c) 2017, Intel Corporation.
27  * Copyright 2020 Joyent, Inc.
28  */
29 
30 #include <sys/sysmacros.h>
31 #include <sys/zfs_context.h>
32 #include <sys/fm/fs/zfs.h>
33 #include <sys/spa.h>
34 #include <sys/txg.h>
35 #include <sys/spa_impl.h>
36 #include <sys/vdev_impl.h>
37 #include <sys/vdev_trim.h>
38 #include <sys/zio_impl.h>
39 #include <sys/zio_compress.h>
40 #include <sys/zio_checksum.h>
41 #include <sys/dmu_objset.h>
42 #include <sys/arc.h>
43 #include <sys/ddt.h>
44 #include <sys/blkptr.h>
45 #include <sys/zfeature.h>
46 #include <sys/time.h>
47 #include <sys/dsl_scan.h>
48 #include <sys/metaslab_impl.h>
49 #include <sys/abd.h>
50 #include <sys/cityhash.h>
51 #include <sys/dsl_crypt.h>
52 #include <sys/stdbool.h>
53 
54 /*
55  * ==========================================================================
56  * I/O type descriptions
57  * ==========================================================================
58  */
59 const char *zio_type_name[ZIO_TYPES] = {
60 	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
61 	"zio_ioctl", "z_trim"
62 };
63 
64 boolean_t zio_dva_throttle_enabled = B_TRUE;
65 
66 /*
67  * ==========================================================================
68  * I/O kmem caches
69  * ==========================================================================
70  */
71 kmem_cache_t *zio_cache;
72 kmem_cache_t *zio_link_cache;
73 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
74 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
75 
76 #ifdef _KERNEL
77 extern vmem_t *zio_alloc_arena;
78 #endif
79 
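/*
 * Sentinel values returned by pipeline stage functions to tell
 * zio_execute() whether to advance to the next pipeline stage or to
 * stop and wait for the zio to be re-dispatched.
 */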
80 #define	ZIO_PIPELINE_CONTINUE		0x100
81 #define	ZIO_PIPELINE_STOP		0x101
82 
83 /* Mark IOs as "slow" if they take longer than 30 seconds */
84 int zio_slow_io_ms = (30 * MILLISEC);
85 
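/*
 * BP_SPANB() gives the number of level-0 block IDs spanned by a single
 * block pointer at the given indirection level: each indirect block
 * holds 2^(indblkshift - SPA_BLKPTRSHIFT) block pointers, so a pointer
 * at level n spans that count raised to the nth power.
 * COMPARE_META_LEVEL is a sentinel level used when ordering bookmarks
 * that refer to the meta-dnode.
 */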
86 #define	BP_SPANB(indblkshift, level) \
87 	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
88 #define	COMPARE_META_LEVEL	0x80000000ul
89 /*
90  * The following actions directly affect the spa's sync-to-convergence logic.
91  * The values below define the sync pass when we start performing the action.
92  * Care should be taken when changing these values as they directly impact
93  * spa_sync() performance. Tuning these values may introduce subtle performance
94  * pathologies and should only be done in the context of performance analysis.
95  * These tunables will eventually be removed and replaced with #defines once
96  * enough analysis has been done to determine optimal values.
97  *
98  * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
99  * regular blocks are not deferred.
100  */
101 int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
102 int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
103 int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
104 
105 /*
106  * An allocating zio is one that either currently has the DVA allocate
107  * stage set or will have it later in its lifetime.
108  */
109 #define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
110 
111 boolean_t	zio_requeue_io_start_cut_in_line = B_TRUE;
112 
113 #ifdef ZFS_DEBUG
114 int zio_buf_debug_limit = 16384;
115 #else
116 int zio_buf_debug_limit = 0;
117 #endif
118 
119 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
120 
121 void
122 zio_init(void)
123 {
124 	size_t c;
125 	vmem_t *data_alloc_arena = NULL;
126 
127 #ifdef _KERNEL
128 	data_alloc_arena = zio_alloc_arena;
129 #endif
130 	zio_cache = kmem_cache_create("zio_cache",
131 	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
132 	zio_link_cache = kmem_cache_create("zio_link_cache",
133 	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
134 
135 	/*
136 	 * For small buffers, we want a cache for each multiple of
137 	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
138 	 * for each quarter-power of 2.
139 	 */
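	/*
	 * For example, with SPA_MINBLOCKSIZE of 512, index c = 23 is the
	 * 12K size class; 12K is a quarter-power of 2 (between 8K and
	 * 16K), so it gets its own cache.  Size classes with no dedicated
	 * cache are pointed at the next larger cache by the fill-down
	 * loop after this one.
	 */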
140 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
141 		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
142 		size_t p2 = size;
143 		size_t align = 0;
144 		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;
145 
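		/*
		 * Clear the lowest set bit until p2 is the largest power
		 * of two less than or equal to size.
		 */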
146 		while (!ISP2(p2))
147 			p2 &= p2 - 1;
148 
149 #ifndef _KERNEL
150 		/*
151 		 * If we are using watchpoints, put each buffer on its own page,
152 		 * to eliminate the performance overhead of trapping to the
153 		 * kernel when modifying a non-watched buffer that shares the
154 		 * page with a watched buffer.
155 		 */
156 		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
157 			continue;
158 #endif
159 		if (size <= 4 * SPA_MINBLOCKSIZE) {
160 			align = SPA_MINBLOCKSIZE;
161 		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
162 			align = MIN(p2 >> 2, PAGESIZE);
163 		}
164 
165 		if (align != 0) {
166 			char name[36];
167 			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
168 			zio_buf_cache[c] = kmem_cache_create(name, size,
169 			    align, NULL, NULL, NULL, NULL, NULL, cflags);
170 
171 			/*
172 			 * Since zio_data bufs do not appear in crash dumps, we
173 			 * pass KMC_NOTOUCH so that no allocator metadata is
174 			 * stored with the buffers.
175 			 */
176 			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
177 			zio_data_buf_cache[c] = kmem_cache_create(name, size,
178 			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
179 			    cflags | KMC_NOTOUCH);
180 		}
181 	}
182 
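	/*
	 * Not every size class got its own cache above; walk back down
	 * and point each empty slot at the next larger cache so every
	 * size maps to a usable cache.
	 */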
183 	while (--c != 0) {
184 		ASSERT(zio_buf_cache[c] != NULL);
185 		if (zio_buf_cache[c - 1] == NULL)
186 			zio_buf_cache[c - 1] = zio_buf_cache[c];
187 
188 		ASSERT(zio_data_buf_cache[c] != NULL);
189 		if (zio_data_buf_cache[c - 1] == NULL)
190 			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
191 	}
192 
193 	zio_inject_init();
194 }
195 
196 void
197 zio_fini(void)
198 {
199 	size_t c;
200 	kmem_cache_t *last_cache = NULL;
201 	kmem_cache_t *last_data_cache = NULL;
202 
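	/*
	 * Adjacent slots may share a cache (see the fill-down loop in
	 * zio_init()), so remember the last cache seen and destroy each
	 * distinct cache only once.
	 */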
203 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
204 		if (zio_buf_cache[c] != last_cache) {
205 			last_cache = zio_buf_cache[c];
206 			kmem_cache_destroy(zio_buf_cache[c]);
207 		}
208 		zio_buf_cache[c] = NULL;
209 
210 		if (zio_data_buf_cache[c] != last_data_cache) {
211 			last_data_cache = zio_data_buf_cache[c];
212 			kmem_cache_destroy(zio_data_buf_cache[c]);
213 		}
214 		zio_data_buf_cache[c] = NULL;
215 	}
216 
217 	kmem_cache_destroy(zio_link_cache);
218 	kmem_cache_destroy(zio_cache);
219 
220 	zio_inject_fini();
221 }
222 
223 /*
224  * ==========================================================================
225  * Allocate and free I/O buffers
226  * ==========================================================================
227  */
228 
229 /*
230  * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
231  * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
232  * useful to inspect ZFS metadata, but if possible, we should avoid keeping
233  * excess / transient data in-core during a crashdump.
234  */
235 void *
236 zio_buf_alloc(size_t size)
237 {
238 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
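	/* e.g. a 4K request maps to c = 4095 >> SPA_MINBLOCKSHIFT = 7 */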
239 
240 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
241 
242 	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
243 }
244 
245 /*
246  * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
247  * crashdump if the kernel panics.  This exists so that we can limit the
248  * amount of ZFS data that shows up in a kernel crashdump, thus reducing
249  * the amount of kernel heap dumped to disk when the kernel panics.
250  */
251 void *
252 zio_data_buf_alloc(size_t size)
253 {
254 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
255 
256 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
257 
258 	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
259 }
260 
261 void
262 zio_buf_free(void *buf, size_t size)
263 {
264 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
265 
266 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
267 
268 	kmem_cache_free(zio_buf_cache[c], buf);
269 }
270 
271 void
272 zio_data_buf_free(void *buf, size_t size)
273 {
274 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
275 
276 	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
277 
278 	kmem_cache_free(zio_data_buf_cache[c], buf);
279 }
280 
281 /* ARGSUSED */
282 static void
283 zio_abd_free(void *abd, size_t size)
284 {
285 	abd_free((abd_t *)abd);
286 }
287 
288 /*
289  * ==========================================================================
290  * Push and pop I/O transform buffers
291  * ==========================================================================
292  */
293 void
294 zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
295     zio_transform_func_t *transform)
296 {
297 	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
298 
299 	/*
300 	 * Ensure that anyone expecting this zio to contain a linear ABD isn't
301 	 * going to get a nasty surprise when they try to access the data.
302 	 */
303 	IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));
304 
305 	zt->zt_orig_abd = zio->io_abd;
306 	zt->zt_orig_size = zio->io_size;
307 	zt->zt_bufsize = bufsize;
308 	zt->zt_transform = transform;
309 
310 	zt->zt_next = zio->io_transform_stack;
311 	zio->io_transform_stack = zt;
312 
313 	zio->io_abd = data;
314 	zio->io_size = size;
315 }
316 
317 void
318 zio_pop_transforms(zio_t *zio)
319 {
320 	zio_transform_t *zt;
321 
322 	while ((zt = zio->io_transform_stack) != NULL) {
323 		if (zt->zt_transform != NULL)
324 			zt->zt_transform(zio,
325 			    zt->zt_orig_abd, zt->zt_orig_size);
326 
327 		if (zt->zt_bufsize != 0)
328 			abd_free(zio->io_abd);
329 
330 		zio->io_abd = zt->zt_orig_abd;
331 		zio->io_size = zt->zt_orig_size;
332 		zio->io_transform_stack = zt->zt_next;
333 
334 		kmem_free(zt, sizeof (zio_transform_t));
335 	}
336 }
337 
338 /*
339  * ==========================================================================
340  * I/O transform callbacks for subblocks, decompression, and decryption
341  * ==========================================================================
342  */
343 static void
344 zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
345 {
346 	ASSERT(zio->io_size > size);
347 
348 	if (zio->io_type == ZIO_TYPE_READ)
349 		abd_copy(data, zio->io_abd, size);
350 }
351 
352 static void
353 zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
354 {
355 	if (zio->io_error == 0) {
356 		void *tmp = abd_borrow_buf(data, size);
357 		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
358 		    zio->io_abd, tmp, zio->io_size, size);
359 		abd_return_buf_copy(data, tmp, size);
360 
361 		if (ret != 0)
362 			zio->io_error = SET_ERROR(EIO);
363 	}
364 }
365 
366 static void
367 zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
368 {
369 	int ret;
370 	void *tmp;
371 	blkptr_t *bp = zio->io_bp;
372 	spa_t *spa = zio->io_spa;
373 	uint64_t dsobj = zio->io_bookmark.zb_objset;
374 	uint64_t lsize = BP_GET_LSIZE(bp);
375 	dmu_object_type_t ot = BP_GET_TYPE(bp);
376 	uint8_t salt[ZIO_DATA_SALT_LEN];
377 	uint8_t iv[ZIO_DATA_IV_LEN];
378 	uint8_t mac[ZIO_DATA_MAC_LEN];
379 	boolean_t no_crypt = B_FALSE;
380 
381 	ASSERT(BP_USES_CRYPT(bp));
382 	ASSERT3U(size, !=, 0);
383 
384 	if (zio->io_error != 0)
385 		return;
386 
387 	/*
388 	 * Verify the cksum of MACs stored in an indirect bp. It will always
389 	 * be possible to verify this since it does not require an encryption
390 	 * key.
391 	 */
392 	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
393 		zio_crypt_decode_mac_bp(bp, mac);
394 
395 		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
396 			/*
397 			 * We haven't decompressed the data yet, but
398 			 * zio_crypt_do_indirect_mac_checksum() requires
399 			 * decompressed data to be able to parse out the MACs
400 			 * from the indirect block. We decompress it now and
401 			 * throw away the result after we are finished.
402 			 */
403 			tmp = zio_buf_alloc(lsize);
404 			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
405 			    zio->io_abd, tmp, zio->io_size, lsize);
406 			if (ret != 0) {
407 				ret = SET_ERROR(EIO);
408 				goto error;
409 			}
410 			ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
411 			    tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
412 			zio_buf_free(tmp, lsize);
413 		} else {
414 			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
415 			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
416 		}
417 		abd_copy(data, zio->io_abd, size);
418 
419 		if (ret != 0)
420 			goto error;
421 
422 		return;
423 	}
424 
425 	/*
426 	 * If this is an authenticated block, just check the MAC. It would be
427 	 * nice to separate this out into its own flag, but for the moment
428 	 * enum zio_flag is out of bits.
429 	 */
430 	if (BP_IS_AUTHENTICATED(bp)) {
431 		if (ot == DMU_OT_OBJSET) {
432 			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
433 			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
434 		} else {
435 			zio_crypt_decode_mac_bp(bp, mac);
436 			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
437 			    zio->io_abd, size, mac);
438 		}
439 		abd_copy(data, zio->io_abd, size);
440 
441 		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
442 			ret = zio_handle_decrypt_injection(spa,
443 			    &zio->io_bookmark, ot, ECKSUM);
444 		}
445 		if (ret != 0)
446 			goto error;
447 
448 		return;
449 	}
450 
451 	zio_crypt_decode_params_bp(bp, salt, iv);
452 
453 	if (ot == DMU_OT_INTENT_LOG) {
454 		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
455 		zio_crypt_decode_mac_zil(tmp, mac);
456 		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
457 	} else {
458 		zio_crypt_decode_mac_bp(bp, mac);
459 	}
460 
461 	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
462 	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
463 	    zio->io_abd, &no_crypt);
464 	if (no_crypt)
465 		abd_copy(data, zio->io_abd, size);
466 
467 	if (ret != 0)
468 		goto error;
469 
470 	return;
471 
472 error:
473 	/* assert that the key was found unless this was speculative */
474 	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
475 
476 	/*
477 	 * If there was a decryption / authentication error return EIO as
478 	 * the io_error. If this was not a speculative zio, create an ereport.
479 	 */
480 	if (ret == ECKSUM) {
481 		zio->io_error = SET_ERROR(EIO);
482 		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
483 			spa_log_error(spa, &zio->io_bookmark);
484 			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
485 			    spa, NULL, &zio->io_bookmark, zio, 0, 0);
486 		}
487 	} else {
488 		zio->io_error = ret;
489 	}
490 }
491 
492 /*
493  * ==========================================================================
494  * I/O parent/child relationships and pipeline interlocks
495  * ==========================================================================
496  */
497 zio_t *
498 zio_walk_parents(zio_t *cio, zio_link_t **zl)
499 {
500 	list_t *pl = &cio->io_parent_list;
501 
502 	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
503 	if (*zl == NULL)
504 		return (NULL);
505 
506 	ASSERT((*zl)->zl_child == cio);
507 	return ((*zl)->zl_parent);
508 }
509 
510 zio_t *
511 zio_walk_children(zio_t *pio, zio_link_t **zl)
512 {
513 	list_t *cl = &pio->io_child_list;
514 
515 	ASSERT(MUTEX_HELD(&pio->io_lock));
516 
517 	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
518 	if (*zl == NULL)
519 		return (NULL);
520 
521 	ASSERT((*zl)->zl_parent == pio);
522 	return ((*zl)->zl_child);
523 }
524 
525 zio_t *
526 zio_unique_parent(zio_t *cio)
527 {
528 	zio_link_t *zl = NULL;
529 	zio_t *pio = zio_walk_parents(cio, &zl);
530 
531 	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
532 	return (pio);
533 }
534 
535 void
536 zio_add_child(zio_t *pio, zio_t *cio)
537 {
538 	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
539 
540 	/*
541 	 * Logical I/Os can have logical, gang, or vdev children.
542 	 * Gang I/Os can have gang or vdev children.
543 	 * Vdev I/Os can only have vdev children.
544 	 * The following ASSERT captures all of these constraints.
545 	 */
546 	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
547 
548 	zl->zl_parent = pio;
549 	zl->zl_child = cio;
550 
551 	mutex_enter(&pio->io_lock);
552 	mutex_enter(&cio->io_lock);
553 
554 	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
555 
556 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
557 		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
558 
559 	list_insert_head(&pio->io_child_list, zl);
560 	list_insert_head(&cio->io_parent_list, zl);
561 
562 	pio->io_child_count++;
563 	cio->io_parent_count++;
564 
565 	mutex_exit(&cio->io_lock);
566 	mutex_exit(&pio->io_lock);
567 }
568 
569 static void
570 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
571 {
572 	ASSERT(zl->zl_parent == pio);
573 	ASSERT(zl->zl_child == cio);
574 
575 	mutex_enter(&pio->io_lock);
576 	mutex_enter(&cio->io_lock);
577 
578 	list_remove(&pio->io_child_list, zl);
579 	list_remove(&cio->io_parent_list, zl);
580 
581 	pio->io_child_count--;
582 	cio->io_parent_count--;
583 
584 	mutex_exit(&cio->io_lock);
585 	mutex_exit(&pio->io_lock);
586 
587 	kmem_cache_free(zio_link_cache, zl);
588 }
589 
590 static boolean_t
591 zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
592 {
593 	boolean_t waiting = B_FALSE;
594 
595 	mutex_enter(&zio->io_lock);
596 	ASSERT(zio->io_stall == NULL);
597 	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
598 		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
599 			continue;
600 
601 		uint64_t *countp = &zio->io_children[c][wait];
602 		if (*countp != 0) {
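			/*
			 * Back the pipeline up one stage so this stage
			 * re-executes when the zio is re-dispatched after
			 * the outstanding children complete.
			 */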
603 			zio->io_stage >>= 1;
604 			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
605 			zio->io_stall = countp;
606 			waiting = B_TRUE;
607 			break;
608 		}
609 	}
610 	mutex_exit(&zio->io_lock);
611 	return (waiting);
612 }
613 
614 static void
615 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
616 {
617 	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
618 	int *errorp = &pio->io_child_error[zio->io_child_type];
619 
620 	mutex_enter(&pio->io_lock);
621 	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
622 		*errorp = zio_worst_error(*errorp, zio->io_error);
623 	pio->io_reexecute |= zio->io_reexecute;
624 	ASSERT3U(*countp, >, 0);
625 
626 	(*countp)--;
627 
628 	if (*countp == 0 && pio->io_stall == countp) {
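		/*
		 * Stages before vdev I/O are issue-side work; later
		 * stages are completion-side work that belongs on the
		 * interrupt taskq.
		 */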
629 		zio_taskq_type_t type =
630 		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
631 		    ZIO_TASKQ_INTERRUPT;
632 		pio->io_stall = NULL;
633 		mutex_exit(&pio->io_lock);
634 		/*
635 		 * Dispatch the parent zio in its own taskq so that
636 		 * the child can continue to make progress. This also
637 		 * prevents overflowing the stack when we have deeply nested
638 		 * parent-child relationships.
639 		 */
640 		zio_taskq_dispatch(pio, type, B_FALSE);
641 	} else {
642 		mutex_exit(&pio->io_lock);
643 	}
644 }
645 
646 static void
647 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
648 {
649 	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
650 		zio->io_error = zio->io_child_error[c];
651 }
652 
653 int
654 zio_bookmark_compare(const void *x1, const void *x2)
655 {
656 	const zio_t *z1 = x1;
657 	const zio_t *z2 = x2;
658 
659 	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
660 		return (-1);
661 	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
662 		return (1);
663 
664 	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
665 		return (-1);
666 	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
667 		return (1);
668 
669 	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
670 		return (-1);
671 	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
672 		return (1);
673 
674 	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
675 		return (-1);
676 	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
677 		return (1);
678 
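	/*
	 * The bookmarks are equal; fall back to comparing the zio
	 * addresses so the comparator yields a total order (as required
	 * by the AVL trees that use it).
	 */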
679 	if (z1 < z2)
680 		return (-1);
681 	if (z1 > z2)
682 		return (1);
683 
684 	return (0);
685 }
686 
687 /*
688  * ==========================================================================
689  * Create the various types of I/O (read, write, free, etc)
690  * ==========================================================================
691  */
692 static zio_t *
693 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
694     abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
695     void *private, zio_type_t type, zio_priority_t priority,
696     enum zio_flag flags, vdev_t *vd, uint64_t offset,
697     const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
698 {
699 	zio_t *zio;
700 
701 	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
702 	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
703 	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
704 
705 	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
706 	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
707 	ASSERT(vd || stage == ZIO_STAGE_OPEN);
708 
709 	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
710 
711 	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
712 	bzero(zio, sizeof (zio_t));
713 
714 	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
715 	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
716 
717 	list_create(&zio->io_parent_list, sizeof (zio_link_t),
718 	    offsetof(zio_link_t, zl_parent_node));
719 	list_create(&zio->io_child_list, sizeof (zio_link_t),
720 	    offsetof(zio_link_t, zl_child_node));
721 	metaslab_trace_init(&zio->io_alloc_list);
722 
723 	if (vd != NULL)
724 		zio->io_child_type = ZIO_CHILD_VDEV;
725 	else if (flags & ZIO_FLAG_GANG_CHILD)
726 		zio->io_child_type = ZIO_CHILD_GANG;
727 	else if (flags & ZIO_FLAG_DDT_CHILD)
728 		zio->io_child_type = ZIO_CHILD_DDT;
729 	else
730 		zio->io_child_type = ZIO_CHILD_LOGICAL;
731 
732 	if (bp != NULL) {
733 		zio->io_bp = (blkptr_t *)bp;
734 		zio->io_bp_copy = *bp;
735 		zio->io_bp_orig = *bp;
736 		if (type != ZIO_TYPE_WRITE ||
737 		    zio->io_child_type == ZIO_CHILD_DDT)
738 			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
739 		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
740 			zio->io_logical = zio;
741 		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
742 			pipeline |= ZIO_GANG_STAGES;
743 	}
744 
745 	zio->io_spa = spa;
746 	zio->io_txg = txg;
747 	zio->io_done = done;
748 	zio->io_private = private;
749 	zio->io_type = type;
750 	zio->io_priority = priority;
751 	zio->io_vd = vd;
752 	zio->io_offset = offset;
753 	zio->io_orig_abd = zio->io_abd = data;
754 	zio->io_orig_size = zio->io_size = psize;
755 	zio->io_lsize = lsize;
756 	zio->io_orig_flags = zio->io_flags = flags;
757 	zio->io_orig_stage = zio->io_stage = stage;
758 	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
759 	zio->io_pipeline_trace = ZIO_STAGE_OPEN;
760 
761 	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
762 	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
763 
764 	if (zb != NULL)
765 		zio->io_bookmark = *zb;
766 
767 	if (pio != NULL) {
768 		if (zio->io_metaslab_class == NULL)
769 			zio->io_metaslab_class = pio->io_metaslab_class;
770 		if (zio->io_logical == NULL)
771 			zio->io_logical = pio->io_logical;
772 		if (zio->io_child_type == ZIO_CHILD_GANG)
773 			zio->io_gang_leader = pio->io_gang_leader;
774 		zio_add_child(pio, zio);
775 	}
776 
777 	return (zio);
778 }
779 
780 static void
781 zio_destroy(zio_t *zio)
782 {
783 	metaslab_trace_fini(&zio->io_alloc_list);
784 	list_destroy(&zio->io_parent_list);
785 	list_destroy(&zio->io_child_list);
786 	mutex_destroy(&zio->io_lock);
787 	cv_destroy(&zio->io_cv);
788 	kmem_cache_free(zio_cache, zio);
789 }
790 
791 zio_t *
792 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
793     void *private, enum zio_flag flags)
794 {
795 	zio_t *zio;
796 
797 	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
798 	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
799 	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
800 
801 	return (zio);
802 }
803 
804 zio_t *
805 zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
806 {
807 	return (zio_null(NULL, spa, NULL, done, private, flags));
808 }
809 
810 void
811 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
812 {
813 	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
814 		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
815 		    bp, (longlong_t)BP_GET_TYPE(bp));
816 	}
817 	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
818 	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
819 		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
820 		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
821 	}
822 	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
823 	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
824 		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
825 		    bp, (longlong_t)BP_GET_COMPRESS(bp));
826 	}
827 	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
828 		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
829 		    bp, (longlong_t)BP_GET_LSIZE(bp));
830 	}
831 	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
832 		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
833 		    bp, (longlong_t)BP_GET_PSIZE(bp));
834 	}
835 
836 	if (BP_IS_EMBEDDED(bp)) {
837 		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
838 			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
839 			    bp, (longlong_t)BPE_GET_ETYPE(bp));
840 		}
841 	}
842 
843 	/*
844 	 * Do not verify individual DVAs if the config is not trusted. This
845 	 * will be done once the zio is executed in vdev_mirror_map_alloc.
846 	 */
847 	if (!spa->spa_trust_config)
848 		return;
849 
850 	/*
851 	 * Pool-specific checks.
852 	 *
853 	 * Note: it would be nice to verify that the blk_birth and
854 	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
855 	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
856 	 * that are in the log) to be arbitrarily large.
857 	 */
858 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
859 		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
860 		if (vdevid >= spa->spa_root_vdev->vdev_children) {
861 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
862 			    "VDEV %llu",
863 			    bp, i, (longlong_t)vdevid);
864 			continue;
865 		}
866 		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
867 		if (vd == NULL) {
868 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
869 			    "VDEV %llu",
870 			    bp, i, (longlong_t)vdevid);
871 			continue;
872 		}
873 		if (vd->vdev_ops == &vdev_hole_ops) {
874 			zfs_panic_recover("blkptr at %p DVA %u has hole "
875 			    "VDEV %llu",
876 			    bp, i, (longlong_t)vdevid);
877 			continue;
878 		}
879 		if (vd->vdev_ops == &vdev_missing_ops) {
880 			/*
881 			 * "missing" vdevs are valid during import, but we
882 			 * don't have their detailed info (e.g. asize), so
883 			 * we can't perform any more checks on them.
884 			 */
885 			continue;
886 		}
887 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
888 		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
889 		if (BP_IS_GANG(bp))
890 			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
891 		if (offset + asize > vd->vdev_asize) {
892 			zfs_panic_recover("blkptr at %p DVA %u has invalid "
893 			    "OFFSET %llu",
894 			    bp, i, (longlong_t)offset);
895 		}
896 	}
897 }
898 
899 boolean_t
900 zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
901 {
902 	uint64_t vdevid = DVA_GET_VDEV(dva);
903 
904 	if (vdevid >= spa->spa_root_vdev->vdev_children)
905 		return (B_FALSE);
906 
907 	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
908 	if (vd == NULL)
909 		return (B_FALSE);
910 
911 	if (vd->vdev_ops == &vdev_hole_ops)
912 		return (B_FALSE);
913 
914 	if (vd->vdev_ops == &vdev_missing_ops) {
915 		return (B_FALSE);
916 	}
917 
918 	uint64_t offset = DVA_GET_OFFSET(dva);
919 	uint64_t asize = DVA_GET_ASIZE(dva);
920 
921 	if (BP_IS_GANG(bp))
922 		asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
923 	if (offset + asize > vd->vdev_asize)
924 		return (B_FALSE);
925 
926 	return (B_TRUE);
927 }
928 
929 zio_t *
930 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
931     abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
932     zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
933 {
934 	zio_t *zio;
935 
936 	zfs_blkptr_verify(spa, bp);
937 
938 	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
939 	    data, size, size, done, private,
940 	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
941 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
942 	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
943 
944 	return (zio);
945 }
946 
947 zio_t *
948 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
949     abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
950     zio_done_func_t *ready, zio_done_func_t *children_ready,
951     zio_done_func_t *physdone, zio_done_func_t *done,
952     void *private, zio_priority_t priority, enum zio_flag flags,
953     const zbookmark_phys_t *zb)
954 {
955 	zio_t *zio;
956 
957 	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
958 	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
959 	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
960 	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
961 	    DMU_OT_IS_VALID(zp->zp_type) &&
962 	    zp->zp_level < 32 &&
963 	    zp->zp_copies > 0 &&
964 	    zp->zp_copies <= spa_max_replication(spa));
965 
966 	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
967 	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
968 	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
969 	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
970 
971 	zio->io_ready = ready;
972 	zio->io_children_ready = children_ready;
973 	zio->io_physdone = physdone;
974 	zio->io_prop = *zp;
975 
976 	/*
977 	 * Data can be NULL if we are going to call zio_write_override() to
978 	 * provide the already-allocated BP.  But we may need the data to
979 	 * verify a dedup hit (if requested).  In this case, don't try to
980 	 * dedup (just take the already-allocated BP verbatim). Encrypted
981 	 * dedup blocks need data as well so we also disable dedup in this
982 	 * case.
983 	 */
984 	if (data == NULL &&
985 	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
986 		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
987 	}
988 
989 	return (zio);
990 }
991 
992 zio_t *
993 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
994     uint64_t size, zio_done_func_t *done, void *private,
995     zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
996 {
997 	zio_t *zio;
998 
999 	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
1000 	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
1001 	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
1002 
1003 	return (zio);
1004 }
1005 
1006 void
1007 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
1008 {
1009 	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
1010 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1011 	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
1012 	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
1013 
1014 	/*
1015 	 * We must reset the io_prop to match the values that existed
1016 	 * when the bp was first written by dmu_sync() keeping in mind
1017 	 * that nopwrite and dedup are mutually exclusive.
1018 	 */
1019 	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
1020 	zio->io_prop.zp_nopwrite = nopwrite;
1021 	zio->io_prop.zp_copies = copies;
1022 	zio->io_bp_override = bp;
1023 }
1024 
1025 void
1026 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
1027 {
1028 
1029 	zfs_blkptr_verify(spa, bp);
1030 
1031 	/*
1032 	 * The check for EMBEDDED is a performance optimization.  We
1033 	 * process the free here (by ignoring it) rather than
1034 	 * putting it on the list and then processing it in zio_free_sync().
1035 	 */
1036 	if (BP_IS_EMBEDDED(bp))
1037 		return;
1038 	metaslab_check_free(spa, bp);
1039 
1040 	/*
1041 	 * Frees that are for the currently-syncing txg, that are not going
1042 	 * to be deferred, and that will not need to do a read (i.e. not GANG
1043 	 * or DEDUP) can be processed immediately.  Otherwise, put them on the
1044 	 * in-memory list for later processing.
1045 	 *
1046 	 * Note that we only defer frees after zfs_sync_pass_deferred_free
1047 	 * when the log space map feature is disabled. [see relevant comment
1048 	 * in spa_sync_iterate_to_convergence()]
1049 	 */
1050 	if (BP_IS_GANG(bp) ||
1051 	    BP_GET_DEDUP(bp) ||
1052 	    txg != spa->spa_syncing_txg ||
1053 	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
1054 	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))) {
1055 		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
1056 	} else {
1057 		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
1058 	}
1059 }
1060 
1061 zio_t *
1062 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1063     enum zio_flag flags)
1064 {
1065 	zio_t *zio;
1066 	enum zio_stage stage = ZIO_FREE_PIPELINE;
1067 
1068 	ASSERT(!BP_IS_HOLE(bp));
1069 	ASSERT(spa_syncing_txg(spa) == txg);
1070 
1071 	if (BP_IS_EMBEDDED(bp))
1072 		return (zio_null(pio, spa, NULL, NULL, NULL, 0));
1073 
1074 	metaslab_check_free(spa, bp);
1075 	arc_freed(spa, bp);
1076 	dsl_scan_freed(spa, bp);
1077 
1078 	/*
1079 	 * GANG and DEDUP blocks can induce a read (for the gang block header,
1080 	 * or the DDT), so issue them asynchronously so that this thread is
1081 	 * not tied up.
1082 	 */
1083 	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
1084 		stage |= ZIO_STAGE_ISSUE_ASYNC;
1085 
1086 	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1087 	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
1088 	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);
1089 
1090 	return (zio);
1091 }
1092 
1093 zio_t *
1094 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
1095     zio_done_func_t *done, void *private, enum zio_flag flags)
1096 {
1097 	zio_t *zio;
1098 
1099 	zfs_blkptr_verify(spa, bp);
1100 
1101 	if (BP_IS_EMBEDDED(bp))
1102 		return (zio_null(pio, spa, NULL, NULL, NULL, 0));
1103 
1104 	/*
1105 	 * A claim is an allocation of a specific block.  Claims are needed
1106 	 * to support immediate writes in the intent log.  The issue is that
1107 	 * immediate writes contain committed data, but in a txg that was
1108 	 * *not* committed.  Upon opening the pool after an unclean shutdown,
1109 	 * the intent log claims all blocks that contain immediate write data
1110 	 * so that the SPA knows they're in use.
1111 	 *
1112 	 * All claims *must* be resolved in the first txg -- before the SPA
1113 	 * starts allocating blocks -- so that nothing is allocated twice.
1114 	 * If txg == 0 we just verify that the block is claimable.
1115 	 */
1116 	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
1117 	    spa_min_claim_txg(spa));
1118 	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
1119 	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */
1120 
1121 	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1122 	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
1123 	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
1124 	ASSERT0(zio->io_queued_timestamp);
1125 
1126 	return (zio);
1127 }
1128 
1129 zio_t *
1130 zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
1131     zio_done_func_t *done, void *private, enum zio_flag flags)
1132 {
1133 	zio_t *zio;
1134 	int c;
1135 
1136 	if (vd->vdev_children == 0) {
1137 		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
1138 		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
1139 		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
1140 
1141 		zio->io_cmd = cmd;
1142 	} else {
1143 		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
1144 
1145 		for (c = 0; c < vd->vdev_children; c++)
1146 			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
1147 			    done, private, flags));
1148 	}
1149 
1150 	return (zio);
1151 }
1152 
1153 zio_t *
1154 zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1155     zio_done_func_t *done, void *private, zio_priority_t priority,
1156     enum zio_flag flags, enum trim_flag trim_flags)
1157 {
1158 	zio_t *zio;
1159 
1160 	ASSERT0(vd->vdev_children);
1161 	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
1162 	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1163 	ASSERT3U(size, !=, 0);
1164 
1165 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
1166 	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
1167 	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
1168 	zio->io_trim_flags = trim_flags;
1169 
1170 	return (zio);
1171 }
1172 
1173 zio_t *
1174 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1175     abd_t *data, int checksum, zio_done_func_t *done, void *private,
1176     zio_priority_t priority, enum zio_flag flags, boolean_t labels)
1177 {
1178 	zio_t *zio;
1179 
1180 	ASSERT(vd->vdev_children == 0);
1181 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1182 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1183 	ASSERT3U(offset + size, <=, vd->vdev_psize);
1184 
1185 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1186 	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1187 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
1188 
1189 	zio->io_prop.zp_checksum = checksum;
1190 
1191 	return (zio);
1192 }
1193 
1194 zio_t *
1195 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1196     abd_t *data, int checksum, zio_done_func_t *done, void *private,
1197     zio_priority_t priority, enum zio_flag flags, boolean_t labels)
1198 {
1199 	zio_t *zio;
1200 
1201 	ASSERT(vd->vdev_children == 0);
1202 	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1203 	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1204 	ASSERT3U(offset + size, <=, vd->vdev_psize);
1205 
1206 	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1207 	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1208 	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
1209 
1210 	zio->io_prop.zp_checksum = checksum;
1211 
1212 	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
1213 		/*
1214 		 * zec checksums are necessarily destructive -- they modify
1215 		 * the end of the write buffer to hold the verifier/checksum.
1216 		 * Therefore, we must make a local copy in case the data is
1217 		 * being written to multiple places in parallel.
1218 		 */
1219 		abd_t *wbuf = abd_alloc_sametype(data, size);
1220 		abd_copy(wbuf, data, size);
1221 
1222 		zio_push_transform(zio, wbuf, size, size, NULL);
1223 	}
1224 
1225 	return (zio);
1226 }
1227 
1228 /*
1229  * Create a child I/O to do some work for us.
1230  */
1231 zio_t *
1232 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
1233     abd_t *data, uint64_t size, int type, zio_priority_t priority,
1234     enum zio_flag flags, zio_done_func_t *done, void *private)
1235 {
1236 	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1237 	zio_t *zio;
1238 
1239 	/*
1240 	 * vdev child I/Os do not propagate their error to the parent.
1241 	 * Therefore, for correct operation the caller *must* check for
1242 	 * and handle the error in the child i/o's done callback.
1243 	 * The only exceptions are i/os that we don't care about
1244 	 * (OPTIONAL or REPAIR).
1245 	 */
1246 	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
1247 	    done != NULL);
1248 
1249 	if (type == ZIO_TYPE_READ && bp != NULL) {
1250 		/*
1251 		 * If we have the bp, then the child should perform the
1252 		 * checksum and the parent need not.  This pushes error
1253 		 * detection as close to the leaves as possible and
1254 		 * eliminates redundant checksums in the interior nodes.
1255 		 */
1256 		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1257 		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
1258 	}
1259 
1260 	if (vd->vdev_ops->vdev_op_leaf) {
1261 		ASSERT0(vd->vdev_children);
1262 		offset += VDEV_LABEL_START_SIZE;
1263 	}
1264 
1265 	flags |= ZIO_VDEV_CHILD_FLAGS(pio);
1266 
1267 	/*
1268 	 * If we've decided to do a repair, the write is not speculative --
1269 	 * even if the original read was.
1270 	 */
1271 	if (flags & ZIO_FLAG_IO_REPAIR)
1272 		flags &= ~ZIO_FLAG_SPECULATIVE;
1273 
1274 	/*
1275 	 * If we're creating a child I/O that is not associated with a
1276 	 * top-level vdev, then the child zio is not an allocating I/O.
1277 	 * If this is a retried I/O then we ignore it since we will
1278 	 * have already processed the original allocating I/O.
1279 	 */
1280 	if (flags & ZIO_FLAG_IO_ALLOCATING &&
1281 	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
1282 		ASSERT(pio->io_metaslab_class != NULL);
1283 		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
1284 		ASSERT(type == ZIO_TYPE_WRITE);
1285 		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1286 		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1287 		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1288 		    pio->io_child_type == ZIO_CHILD_GANG);
1289 
1290 		flags &= ~ZIO_FLAG_IO_ALLOCATING;
1291 	}
1292 
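	/*
	 * Start the child at the stage just before ZIO_STAGE_VDEV_IO_START
	 * (stages are one-hot bits in pipeline order), so zio_execute()
	 * advances it directly into the vdev I/O stages.
	 */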
1293 	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
1294 	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1295 	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
1296 	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
1297 
1298 	zio->io_physdone = pio->io_physdone;
1299 	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
1300 		zio->io_logical->io_phys_children++;
1301 
1302 	return (zio);
1303 }
1304 
1305 zio_t *
1306 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
1307     zio_type_t type, zio_priority_t priority, enum zio_flag flags,
1308     zio_done_func_t *done, void *private)
1309 {
1310 	zio_t *zio;
1311 
1312 	ASSERT(vd->vdev_ops->vdev_op_leaf);
1313 
1314 	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
1315 	    data, size, size, done, private, type, priority,
1316 	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
1317 	    vd, offset, NULL,
1318 	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
1319 
1320 	return (zio);
1321 }
1322 
1323 void
1324 zio_flush(zio_t *zio, vdev_t *vd)
1325 {
1326 	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
1327 	    NULL, NULL,
1328 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
1329 }
1330 
1331 void
1332 zio_shrink(zio_t *zio, uint64_t size)
1333 {
1334 	ASSERT3P(zio->io_executor, ==, NULL);
1335 	ASSERT3P(zio->io_orig_size, ==, zio->io_size);
1336 	ASSERT3U(size, <=, zio->io_size);
1337 
1338 	/*
1339 	 * We don't shrink for raidz because of problems with the
1340 	 * reconstruction when reading back less than the block size.
1341 	 * Note, BP_IS_RAIDZ() assumes no compression.
1342 	 */
1343 	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1344 	if (!BP_IS_RAIDZ(zio->io_bp)) {
1345 		/* we are not doing a raw write */
1346 		ASSERT3U(zio->io_size, ==, zio->io_lsize);
1347 		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1348 	}
1349 }
1350 
1351 /*
1352  * ==========================================================================
1353  * Prepare to read and write logical blocks
1354  * ==========================================================================
1355  */
1356 
1357 static int
1358 zio_read_bp_init(zio_t *zio)
1359 {
1360 	blkptr_t *bp = zio->io_bp;
1361 	uint64_t psize =
1362 	    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
1363 
1364 	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1365 
1366 	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
1367 	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
1368 	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1369 		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1370 		    psize, psize, zio_decompress);
1371 	}
1372 
1373 	if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
1374 	    BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
1375 	    zio->io_child_type == ZIO_CHILD_LOGICAL) {
1376 		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1377 		    psize, psize, zio_decrypt);
1378 	}
1379 
1380 	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
1381 		int psize = BPE_GET_PSIZE(bp);
1382 		void *data = abd_borrow_buf(zio->io_abd, psize);
1383 
1384 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1385 		decode_embedded_bp_compressed(bp, data);
1386 		abd_return_buf_copy(zio->io_abd, data, psize);
1387 	} else {
1388 		ASSERT(!BP_IS_EMBEDDED(bp));
1389 		ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1390 	}
1391 
1392 	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
1393 		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
1394 
1395 	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
1396 		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
1397 
1398 	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1399 		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1400 
1401 	return (ZIO_PIPELINE_CONTINUE);
1402 }
1403 
1404 static int
1405 zio_write_bp_init(zio_t *zio)
1406 {
1407 	if (!IO_IS_ALLOCATING(zio))
1408 		return (ZIO_PIPELINE_CONTINUE);
1409 
1410 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1411 
1412 	if (zio->io_bp_override) {
1413 		blkptr_t *bp = zio->io_bp;
1414 		zio_prop_t *zp = &zio->io_prop;
1415 
1416 		ASSERT(bp->blk_birth != zio->io_txg);
1417 		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);
1418 
1419 		*bp = *zio->io_bp_override;
1420 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1421 
1422 		if (BP_IS_EMBEDDED(bp))
1423 			return (ZIO_PIPELINE_CONTINUE);
1424 
1425 		/*
1426 		 * If we've been overridden and nopwrite is set then
1427 		 * set the flag accordingly to indicate that a nopwrite
1428 		 * has already occurred.
1429 		 */
1430 		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1431 			ASSERT(!zp->zp_dedup);
1432 			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
1433 			zio->io_flags |= ZIO_FLAG_NOPWRITE;
1434 			return (ZIO_PIPELINE_CONTINUE);
1435 		}
1436 
1437 		ASSERT(!zp->zp_nopwrite);
1438 
1439 		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
1440 			return (ZIO_PIPELINE_CONTINUE);
1441 
1442 		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1443 		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
1444 
1445 		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
1446 		    !zp->zp_encrypt) {
1447 			BP_SET_DEDUP(bp, 1);
1448 			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
1449 			return (ZIO_PIPELINE_CONTINUE);
1450 		}
1451 
1452 		/*
1453 		 * We were unable to handle this as an override bp, treat
1454 		 * it as a regular write I/O.
1455 		 */
1456 		zio->io_bp_override = NULL;
1457 		*bp = zio->io_bp_orig;
1458 		zio->io_pipeline = zio->io_orig_pipeline;
1459 	}
1460 
1461 	return (ZIO_PIPELINE_CONTINUE);
1462 }
1463 
1464 static int
1465 zio_write_compress(zio_t *zio)
1466 {
1467 	spa_t *spa = zio->io_spa;
1468 	zio_prop_t *zp = &zio->io_prop;
1469 	enum zio_compress compress = zp->zp_compress;
1470 	blkptr_t *bp = zio->io_bp;
1471 	uint64_t lsize = zio->io_lsize;
1472 	uint64_t psize = zio->io_size;
1473 	int pass = 1;
1474 
1475 	/*
1476 	 * If our children haven't all reached the ready stage,
1477 	 * wait for them and then repeat this pipeline stage.
1478 	 */
1479 	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1480 	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
1481 		return (ZIO_PIPELINE_STOP);
1482 	}
1483 
1484 	if (!IO_IS_ALLOCATING(zio))
1485 		return (ZIO_PIPELINE_CONTINUE);
1486 
1487 	if (zio->io_children_ready != NULL) {
1488 		/*
1489 		 * Now that all our children are ready, run the callback
1490 		 * associated with this zio in case it wants to modify the
1491 		 * data to be written.
1492 		 */
1493 		ASSERT3U(zp->zp_level, >, 0);
1494 		zio->io_children_ready(zio);
1495 	}
1496 
1497 	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1498 	ASSERT(zio->io_bp_override == NULL);
1499 
1500 	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
1501 		/*
1502 		 * We're rewriting an existing block, which means we're
1503 		 * working on behalf of spa_sync().  For spa_sync() to
1504 		 * converge, it must eventually be the case that we don't
1505 		 * have to allocate new blocks.  But compression changes
1506 		 * the blocksize, which forces a reallocate, and makes
1507 		 * convergence take longer.  Therefore, after the first
1508 		 * few passes, stop compressing to ensure convergence.
1509 		 */
1510 		pass = spa_sync_pass(spa);
1511 
1512 		ASSERT(zio->io_txg == spa_syncing_txg(spa));
1513 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1514 		ASSERT(!BP_GET_DEDUP(bp));
1515 
1516 		if (pass >= zfs_sync_pass_dont_compress)
1517 			compress = ZIO_COMPRESS_OFF;
1518 
1519 		/* Make sure someone doesn't change their mind on overwrites */
1520 		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
1521 		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
1522 	}
1523 
1524 	/* If it's a compressed write that is not raw, compress the buffer. */
1525 	if (compress != ZIO_COMPRESS_OFF &&
1526 	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1527 		void *cbuf = zio_buf_alloc(lsize);
1528 		psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
1529 		if (psize == 0 || psize == lsize) {
1530 			compress = ZIO_COMPRESS_OFF;
1531 			zio_buf_free(cbuf, lsize);
1532 		} else if (!zp->zp_dedup && !zp->zp_encrypt &&
1533 		    psize <= BPE_PAYLOAD_SIZE &&
1534 		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1535 		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1536 			encode_embedded_bp_compressed(bp,
1537 			    cbuf, compress, lsize, psize);
1538 			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1539 			BP_SET_TYPE(bp, zio->io_prop.zp_type);
1540 			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1541 			zio_buf_free(cbuf, lsize);
1542 			bp->blk_birth = zio->io_txg;
1543 			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1544 			ASSERT(spa_feature_is_active(spa,
1545 			    SPA_FEATURE_EMBEDDED_DATA));
1546 			return (ZIO_PIPELINE_CONTINUE);
1547 		} else {
1548 			/*
1549 			 * Round the compressed size up to the ashift
1550 			 * of the smallest-ashift device, and zero the tail.
1551 			 * This ensures that the compressed size of the BP
1552 			 * (and thus the compressratio property) is correct,
1553 			 * in that we charge for the padding used to fill out
1554 			 * the last sector.
1555 			 */
1556 			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
1557 			size_t rounded = (size_t)P2ROUNDUP(psize,
1558 			    1ULL << spa->spa_min_ashift);
1559 			if (rounded >= lsize) {
1560 				compress = ZIO_COMPRESS_OFF;
1561 				zio_buf_free(cbuf, lsize);
1562 				psize = lsize;
1563 			} else {
1564 				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
1565 				abd_take_ownership_of_buf(cdata, B_TRUE);
1566 				abd_zero_off(cdata, psize, rounded - psize);
1567 				psize = rounded;
1568 				zio_push_transform(zio, cdata,
1569 				    psize, lsize, NULL);
1570 			}
1571 		}
1572 
1573 		/*
1574 		 * We were unable to handle this as an override bp, treat
1575 		 * it as a regular write I/O.
1576 		 */
1577 		zio->io_bp_override = NULL;
1578 		*bp = zio->io_bp_orig;
1579 		zio->io_pipeline = zio->io_orig_pipeline;
1580 
1581 	} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
1582 	    zp->zp_type == DMU_OT_DNODE) {
1583 		/*
1584 		 * The DMU actually relies on the zio layer's compression
1585 		 * to free metadnode blocks that have had all contained
1586 		 * dnodes freed. As a result, even when doing a raw
1587 		 * receive, we must check whether the block can be compressed
1588 		 * to a hole.
1589 		 */
1590 		psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
1591 		    zio->io_abd, NULL, lsize);
1592 		if (psize == 0)
1593 			compress = ZIO_COMPRESS_OFF;
1594 	} else {
1595 		ASSERT3U(psize, !=, 0);
1596 	}
1597 
1598 	/*
1599 	 * The final pass of spa_sync() must be all rewrites, but the first
1600 	 * few passes offer a trade-off: allocating blocks defers convergence,
1601 	 * but newly allocated blocks are sequential, so they can be written
1602 	 * to disk faster.  Therefore, we allow the first few passes of
1603 	 * spa_sync() to allocate new blocks, but force rewrites after that.
1604 	 * There should only be a handful of blocks after pass 1 in any case.
1605 	 */
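	/*
	 * For example, if zfs_sync_pass_rewrite is 2, then from pass 2
	 * onward a block whose physical size is unchanged is rewritten
	 * in place rather than reallocated.
	 */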
1606 	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
1607 	    BP_GET_PSIZE(bp) == psize &&
1608 	    pass >= zfs_sync_pass_rewrite) {
1609 		VERIFY3U(psize, !=, 0);
1610 		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
1611 		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
1612 		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
1613 	} else {
1614 		BP_ZERO(bp);
1615 		zio->io_pipeline = ZIO_WRITE_PIPELINE;
1616 	}
1617 
1618 	if (psize == 0) {
1619 		if (zio->io_bp_orig.blk_birth != 0 &&
1620 		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
1621 			BP_SET_LSIZE(bp, lsize);
1622 			BP_SET_TYPE(bp, zp->zp_type);
1623 			BP_SET_LEVEL(bp, zp->zp_level);
1624 			BP_SET_BIRTH(bp, zio->io_txg, 0);
1625 		}
1626 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1627 	} else {
1628 		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
1629 		BP_SET_LSIZE(bp, lsize);
1630 		BP_SET_TYPE(bp, zp->zp_type);
1631 		BP_SET_LEVEL(bp, zp->zp_level);
1632 		BP_SET_PSIZE(bp, psize);
1633 		BP_SET_COMPRESS(bp, compress);
1634 		BP_SET_CHECKSUM(bp, zp->zp_checksum);
1635 		BP_SET_DEDUP(bp, zp->zp_dedup);
1636 		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
1637 		if (zp->zp_dedup) {
1638 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1639 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1640 			ASSERT(!zp->zp_encrypt ||
1641 			    DMU_OT_IS_ENCRYPTED(zp->zp_type));
1642 			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
1643 		}
1644 		if (zp->zp_nopwrite) {
1645 			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1646 			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1647 			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
1648 		}
1649 	}
1650 	return (ZIO_PIPELINE_CONTINUE);
1651 }
1652 
1653 static int
1654 zio_free_bp_init(zio_t *zio)
1655 {
1656 	blkptr_t *bp = zio->io_bp;
1657 
1658 	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
1659 		if (BP_GET_DEDUP(bp))
1660 			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
1661 	}
1662 
1663 	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1664 
1665 	return (ZIO_PIPELINE_CONTINUE);
1666 }
1667 
1668 /*
1669  * ==========================================================================
1670  * Execute the I/O pipeline
1671  * ==========================================================================
1672  */
1673 
1674 static void
1675 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
1676 {
1677 	spa_t *spa = zio->io_spa;
1678 	zio_type_t t = zio->io_type;
1679 	int flags = (cutinline ? TQ_FRONT : 0);
1680 
1681 	/*
1682 	 * If we're a config writer or a probe, the normal issue and
1683 	 * interrupt threads may all be blocked waiting for the config lock.
1684 	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
1685 	 */
1686 	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
1687 		t = ZIO_TYPE_NULL;
1688 
1689 	/*
1690 	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
1691 	 */
1692 	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
1693 		t = ZIO_TYPE_NULL;
1694 
1695 	/*
1696 	 * If this is a high priority I/O, then use the high priority taskq if
1697 	 * available.
1698 	 */
1699 	if ((zio->io_priority == ZIO_PRIORITY_NOW ||
1700 	    zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
1701 	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
1702 		q++;
1703 
1704 	ASSERT3U(q, <, ZIO_TASKQ_TYPES);
1705 
1706 	/*
1707 	 * NB: We are assuming that the zio can only be dispatched
1708 	 * to a single taskq at a time.  It would be a grievous error
1709 	 * to dispatch the zio to another taskq at the same time.
1710 	 */
1711 	ASSERT(zio->io_tqent.tqent_next == NULL);
1712 	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
1713 	    flags, &zio->io_tqent);
1714 }
1715 
1716 static boolean_t
1717 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
1718 {
1719 	kthread_t *executor = zio->io_executor;
1720 	spa_t *spa = zio->io_spa;
1721 
1722 	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
1723 		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1724 		uint_t i;
1725 		for (i = 0; i < tqs->stqs_count; i++) {
1726 			if (taskq_member(tqs->stqs_taskq[i], executor))
1727 				return (B_TRUE);
1728 		}
1729 	}
1730 
1731 	return (B_FALSE);
1732 }
1733 
1734 static int
1735 zio_issue_async(zio_t *zio)
1736 {
1737 	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
1738 
1739 	return (ZIO_PIPELINE_STOP);
1740 }
1741 
1742 void
1743 zio_interrupt(zio_t *zio)
1744 {
1745 	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
1746 }
1747 
1748 void
1749 zio_delay_interrupt(zio_t *zio)
1750 {
1751 	/*
1752 	 * The timeout_generic() function isn't defined in userspace, so
1753 	 * rather than trying to implement the function, the zio delay
1754 	 * functionality has been disabled for userspace builds.
1755 	 */
1756 
1757 #ifdef _KERNEL
1758 	/*
1759 	 * If io_target_timestamp is zero, then no delay has been registered
1760 	 * for this IO, thus jump to the end of this function and "skip" the
1761 	 * delay, issuing it directly to the zio layer.
1762 	 */
1763 	if (zio->io_target_timestamp != 0) {
1764 		hrtime_t now = gethrtime();
1765 
1766 		if (now >= zio->io_target_timestamp) {
1767 			/*
1768 			 * This IO has already taken longer than the target
1769 			 * delay to complete, so we don't want to delay it
1770 			 * any longer; we "miss" the delay and issue it
1771 			 * directly to the zio layer. This is likely due to
1772 			 * the target latency being set to a value less than
1773 			 * the underlying hardware can satisfy (e.g. delay
1774 			 * set to 1ms, but the disks take 10ms to complete an
1775 			 * IO request).
1776 			 */
1777 
1778 			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
1779 			    hrtime_t, now);
1780 
1781 			zio_interrupt(zio);
1782 		} else {
1783 			hrtime_t diff = zio->io_target_timestamp - now;
1784 
1785 			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
1786 			    hrtime_t, now, hrtime_t, diff);
1787 
1788 			(void) timeout_generic(CALLOUT_NORMAL,
1789 			    (void (*)(void *))zio_interrupt, zio, diff, 1, 0);
1790 		}
1791 
1792 		return;
1793 	}
1794 #endif
1795 
1796 	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
1797 	zio_interrupt(zio);
1798 }
1799 
1800 /*
1801  * Execute the I/O pipeline until one of the following occurs:
1802  *
1803  *	(1) the I/O completes
1804  *	(2) the pipeline stalls waiting for dependent child I/Os
1805  *	(3) the I/O issues, so we're waiting for an I/O completion interrupt
1806  *	(4) the I/O is delegated by vdev-level caching or aggregation
1807  *	(5) the I/O is deferred due to vdev-level queueing
1808  *	(6) the I/O is handed off to another thread.
1809  *
1810  * In all cases, the pipeline stops whenever there's no CPU work; it never
1811  * burns a thread in cv_wait().
1812  *
1813  * There's no locking on io_stage because there's no legitimate way
1814  * for multiple threads to be attempting to process the same I/O.
1815  */
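/*
 * A minimal userland sketch of how zio_execute() below advances io_stage
 * (hypothetical stage masks, not the real ZIO_STAGE_* values): each stage
 * is one bit, io_pipeline is the mask of enabled stages, and the next
 * stage is found by shifting left until the bit lands in the mask.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t pipeline = 0x15;	// stages 0x1, 0x4, 0x10
 *		uint32_t stage = 0x1;		// open stage, always set
 *
 *		while (stage < 0x10) {		// 0x10 plays ZIO_STAGE_DONE
 *			do {
 *				stage <<= 1;
 *			} while ((stage & pipeline) == 0);
 *			printf("next stage: 0x%x\n", stage);
 *		}
 *		return (0);	// prints 0x4, then 0x10
 *	}
 */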
1816 static zio_pipe_stage_t *zio_pipeline[];
1817 
1818 void
1819 zio_execute(zio_t *zio)
1820 {
1821 	zio->io_executor = curthread;
1822 
1823 	ASSERT3U(zio->io_queued_timestamp, >, 0);
1824 
1825 	while (zio->io_stage < ZIO_STAGE_DONE) {
1826 		enum zio_stage pipeline = zio->io_pipeline;
1827 		enum zio_stage stage = zio->io_stage;
1828 		int rv;
1829 
1830 		ASSERT(!MUTEX_HELD(&zio->io_lock));
1831 		ASSERT(ISP2(stage));
1832 		ASSERT(zio->io_stall == NULL);
1833 
1834 		do {
1835 			stage <<= 1;
1836 		} while ((stage & pipeline) == 0);
1837 
1838 		ASSERT(stage <= ZIO_STAGE_DONE);
1839 
1840 		/*
1841 		 * If we are in interrupt context and this pipeline stage
1842 		 * will grab a config lock that is held across I/O,
1843 		 * or may wait for an I/O that needs an interrupt thread
1844 		 * to complete, issue async to avoid deadlock.
1845 		 *
1846 		 * For VDEV_IO_START, we cut in line so that the io will
1847 		 * be sent to disk promptly.
1848 		 */
1849 		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
1850 		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
1851 			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
1852 			    zio_requeue_io_start_cut_in_line : B_FALSE;
1853 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
1854 			return;
1855 		}
1856 
1857 #ifdef _KERNEL
1858 		/*
1859 		 * The I/O pipeline is a part of the machinery responsible for
1860 		 * evacuation of memory pages to disk when we are under
1861 		 * sufficient memory pressure for pageout to run.  By setting
1862 		 * this flag, allocations may dip into pages in the pageout
1863 		 * reserved pool in order to try to make forward progress.
1864 		 */
1865 		bool set_pushpage = false;
1866 		if (!(curthread->t_flag & T_PUSHPAGE)) {
1867 			/*
1868 			 * We can be called recursively, so we need to remember
1869 			 * if this frame was the one that first set the flag or
1870 			 * not.
1871 			 */
1872 			set_pushpage = true;
1873 			curthread->t_flag |= T_PUSHPAGE;
1874 		}
1875 #endif
1876 
1877 		zio->io_stage = stage;
1878 		zio->io_pipeline_trace |= zio->io_stage;
1879 		rv = zio_pipeline[highbit64(stage) - 1](zio);
1880 
1881 #ifdef _KERNEL
1882 		if (set_pushpage) {
1883 			curthread->t_flag &= ~T_PUSHPAGE;
1884 		}
1885 #endif
1886 
1887 		if (rv == ZIO_PIPELINE_STOP)
1888 			return;
1889 
1890 		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
1891 	}
1892 }
1893 
1894 /*
1895  * ==========================================================================
1896  * Initiate I/O, either sync or async
1897  * ==========================================================================
1898  */
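/*
 * A hedged sketch of the typical caller pattern (the names bps, abds,
 * sizes and zb below are hypothetical): issue several children
 * asynchronously under a root zio with zio_nowait(), then block once on
 * the root with zio_wait(), which returns only after every child has
 * completed.
 *
 *	zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (int i = 0; i < n; i++)
 *		zio_nowait(zio_read(root, spa, &bps[i], abds[i], sizes[i],
 *		    NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL,
 *		    &zb));
 *	int error = zio_wait(root);
 */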
1899 int
1900 zio_wait(zio_t *zio)
1901 {
1902 	int error;
1903 
1904 	ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN);
1905 	ASSERT3P(zio->io_executor, ==, NULL);
1906 
1907 	zio->io_waiter = curthread;
1908 	ASSERT0(zio->io_queued_timestamp);
1909 	zio->io_queued_timestamp = gethrtime();
1910 
1911 	zio_execute(zio);
1912 
1913 	mutex_enter(&zio->io_lock);
1914 	while (zio->io_executor != NULL)
1915 		cv_wait(&zio->io_cv, &zio->io_lock);
1916 	mutex_exit(&zio->io_lock);
1917 
1918 	error = zio->io_error;
1919 	zio_destroy(zio);
1920 
1921 	return (error);
1922 }
1923 
1924 void
1925 zio_nowait(zio_t *zio)
1926 {
1927 	ASSERT3P(zio->io_executor, ==, NULL);
1928 
1929 	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
1930 	    zio_unique_parent(zio) == NULL) {
1931 		/*
1932 		 * This is a logical async I/O with no parent to wait for it.
1933 		 * We add it to the spa_async_root_zio "Godfather" I/O which
1934 		 * will ensure it completes prior to unloading the pool.
1935 		 */
1936 		spa_t *spa = zio->io_spa;
1937 
1938 		zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio);
1939 	}
1940 
1941 	ASSERT0(zio->io_queued_timestamp);
1942 	zio->io_queued_timestamp = gethrtime();
1943 	zio_execute(zio);
1944 }
1945 
1946 /*
1947  * ==========================================================================
1948  * Reexecute, cancel, or suspend/resume failed I/O
1949  * ==========================================================================
1950  */
1951 
1952 static void
1953 zio_reexecute(zio_t *pio)
1954 {
1955 	zio_t *cio, *cio_next;
1956 
1957 	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
1958 	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
1959 	ASSERT(pio->io_gang_leader == NULL);
1960 	ASSERT(pio->io_gang_tree == NULL);
1961 
1962 	pio->io_flags = pio->io_orig_flags;
1963 	pio->io_stage = pio->io_orig_stage;
1964 	pio->io_pipeline = pio->io_orig_pipeline;
1965 	pio->io_reexecute = 0;
1966 	pio->io_flags |= ZIO_FLAG_REEXECUTED;
1967 	pio->io_pipeline_trace = 0;
1968 	pio->io_error = 0;
1969 	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1970 		pio->io_state[w] = 0;
1971 	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
1972 		pio->io_child_error[c] = 0;
1973 
1974 	if (IO_IS_ALLOCATING(pio))
1975 		BP_ZERO(pio->io_bp);
1976 
1977 	/*
1978 	 * As we reexecute pio's children, new children could be created.
1979 	 * New children go to the head of pio's io_child_list, however,
1980 	 * so we will (correctly) not reexecute them.  The key is that
1981 	 * the remainder of pio's io_child_list, from 'cio_next' onward,
1982 	 * cannot be affected by any side effects of reexecuting 'cio'.
1983 	 */
1984 	zio_link_t *zl = NULL;
1985 	mutex_enter(&pio->io_lock);
1986 	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
1987 		cio_next = zio_walk_children(pio, &zl);
1988 		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
1989 			pio->io_children[cio->io_child_type][w]++;
1990 		mutex_exit(&pio->io_lock);
1991 		zio_reexecute(cio);
1992 		mutex_enter(&pio->io_lock);
1993 	}
1994 	mutex_exit(&pio->io_lock);
1995 
1996 	/*
1997 	 * Now that all children have been reexecuted, execute the parent.
1998 	 * We don't reexecute "The Godfather" I/O here as it's the
1999 	 * responsibility of the caller to wait on it.
2000 	 */
2001 	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
2002 		pio->io_queued_timestamp = gethrtime();
2003 		zio_execute(pio);
2004 	}
2005 }
2006 
2007 void
2008 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
2009 {
2010 	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
2011 		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
2012 		    "failure and the failure mode property for this pool "
2013 		    "is set to panic.", spa_name(spa));
2014 
2015 	cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
2016 	    "failure and has been suspended; `zpool clear` will be required "
2017 	    "before the pool can be written to.", spa_name(spa));
2018 
2019 	(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
2020 	    NULL, NULL, 0, 0);
2021 
2022 	mutex_enter(&spa->spa_suspend_lock);
2023 
2024 	if (spa->spa_suspend_zio_root == NULL)
2025 		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
2026 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2027 		    ZIO_FLAG_GODFATHER);
2028 
2029 	spa->spa_suspended = reason;
2030 
2031 	if (zio != NULL) {
2032 		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
2033 		ASSERT(zio != spa->spa_suspend_zio_root);
2034 		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2035 		ASSERT(zio_unique_parent(zio) == NULL);
2036 		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
2037 		zio_add_child(spa->spa_suspend_zio_root, zio);
2038 	}
2039 
2040 	mutex_exit(&spa->spa_suspend_lock);
2041 }
2042 
2043 int
2044 zio_resume(spa_t *spa)
2045 {
2046 	zio_t *pio;
2047 
2048 	/*
2049 	 * Reexecute all previously suspended I/O.
2050 	 */
2051 	mutex_enter(&spa->spa_suspend_lock);
2052 	spa->spa_suspended = ZIO_SUSPEND_NONE;
2053 	cv_broadcast(&spa->spa_suspend_cv);
2054 	pio = spa->spa_suspend_zio_root;
2055 	spa->spa_suspend_zio_root = NULL;
2056 	mutex_exit(&spa->spa_suspend_lock);
2057 
2058 	if (pio == NULL)
2059 		return (0);
2060 
2061 	zio_reexecute(pio);
2062 	return (zio_wait(pio));
2063 }
2064 
2065 void
2066 zio_resume_wait(spa_t *spa)
2067 {
2068 	mutex_enter(&spa->spa_suspend_lock);
2069 	while (spa_suspended(spa))
2070 		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
2071 	mutex_exit(&spa->spa_suspend_lock);
2072 }
2073 
2074 /*
2075  * ==========================================================================
2076  * Gang blocks.
2077  *
2078  * A gang block is a collection of small blocks that looks to the DMU
2079  * like one large block.  When zio_dva_allocate() cannot find a block
2080  * of the requested size, due to either severe fragmentation or the pool
2081  * being nearly full, it calls zio_write_gang_block() to construct the
2082  * block from smaller fragments.
2083  *
2084  * A gang block consists of a gang header (zio_gbh_phys_t) and up to
2085  * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
2086  * an indirect block: it's an array of block pointers.  It consumes
2087  * only one sector and hence is allocatable regardless of fragmentation.
2088  * The gang header's bps point to its gang members, which hold the data.
2089  *
2090  * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
2091  * as the verifier to ensure uniqueness of the SHA256 checksum.
2092  * Critically, the gang block bp's blk_cksum is the checksum of the data,
2093  * not the gang header.  This ensures that data block signatures (needed for
2094  * deduplication) are independent of how the block is physically stored.
2095  *
2096  * Gang blocks can be nested: a gang member may itself be a gang block.
2097  * Thus every gang block is a tree in which root and all interior nodes are
2098  * gang headers, and the leaves are normal blocks that contain user data.
2099  * The root of the gang tree is called the gang leader.
2100  *
2101  * To perform any operation (read, rewrite, free, claim) on a gang block,
2102  * zio_gang_assemble() first assembles the gang tree (minus data leaves)
2103  * in the io_gang_tree field of the original logical i/o by recursively
2104  * reading the gang leader and all gang headers below it.  This yields
2105  * an in-core tree containing the contents of every gang header and the
2106  * bps for every constituent of the gang block.
2107  *
2108  * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
2109  * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
2110  * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
2111  * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
2112  * zio_read_gang() is a wrapper around zio_read() that omits reading gang
2113  * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
2114  * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
2115  * of the gang header plus zio_checksum_compute() of the data to update the
2116  * gang header's blk_cksum as described above.
2117  *
2118  * The two-phase assemble/issue model solves the problem of partial failure --
2119  * what if you'd freed part of a gang block but then couldn't read the
2120  * gang header for another part?  Assembling the entire gang tree first
2121  * ensures that all the necessary gang header I/O has succeeded before
2122  * starting the actual work of free, claim, or write.  Once the gang tree
2123  * is assembled, free and claim are in-memory operations that cannot fail.
2124  *
2125  * In the event that a gang write fails, zio_dva_unallocate() walks the
2126  * gang tree to immediately free (i.e. insert back into the space map)
2127  * everything we've allocated.  This ensures that we don't get ENOSPC
2128  * errors during repeated suspend/resume cycles due to a flaky device.
2129  *
2130  * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
2131  * the gang tree, we won't modify the block, so we can safely defer the free
2132  * (knowing that the block is still intact).  If we *can* assemble the gang
2133  * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
2134  * each constituent bp and we can allocate a new block on the next sync pass.
2135  *
2136  * In all cases, the gang tree allows complete recovery from partial failure.
2137  * ==========================================================================
2138  */
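/*
 * A toy userland sketch of the tree shape described above (hypothetical
 * types, not the on-disk zio_gbh_phys_t layout): interior nodes stand in
 * for gang headers with up to three children, leaves hold user data, and
 * a recursive walk visits every constituent, much as
 * zio_gang_tree_issue() does below.
 *
 *	#include <stdio.h>
 *	#include <stddef.h>
 *
 *	#define	NGANG	3			// cf. SPA_GBH_NBLKPTRS
 *
 *	struct node {
 *		struct node *child[NGANG];	// all NULL for a leaf
 *		size_t size;			// leaf payload bytes
 *	};
 *
 *	static size_t
 *	total(const struct node *n)
 *	{
 *		size_t sum = 0;
 *		for (int g = 0; g < NGANG; g++)
 *			if (n->child[g] != NULL)
 *				sum += total(n->child[g]);
 *		return (sum != 0 ? sum : n->size);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct node a = { .size = 512 };
 *		struct node b = { .size = 1024 };
 *		struct node head = { .child = { &a, &b } };
 *		printf("%zu\n", total(&head));	// prints 1536
 *		return (0);
 *	}
 */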
2139 
2140 static void
2141 zio_gang_issue_func_done(zio_t *zio)
2142 {
2143 	abd_put(zio->io_abd);
2144 }
2145 
2146 static zio_t *
2147 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2148     uint64_t offset)
2149 {
2150 	if (gn != NULL)
2151 		return (pio);
2152 
2153 	return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
2154 	    BP_GET_PSIZE(bp), zio_gang_issue_func_done,
2155 	    NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2156 	    &pio->io_bookmark));
2157 }
2158 
2159 static zio_t *
2160 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2161     uint64_t offset)
2162 {
2163 	zio_t *zio;
2164 
2165 	if (gn != NULL) {
2166 		abd_t *gbh_abd =
2167 		    abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2168 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2169 		    gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
2170 		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2171 		    &pio->io_bookmark);
2172 		/*
2173 		 * As we rewrite each gang header, the pipeline will compute
2174 		 * a new gang block header checksum for it; but no one will
2175 		 * compute a new data checksum, so we do that here.  The one
2176 		 * exception is the gang leader: the pipeline already computed
2177 		 * its data checksum because that stage precedes gang assembly.
2178 		 * (Presently, nothing actually uses interior data checksums;
2179 		 * this is just good hygiene.)
2180 		 */
2181 		if (gn != pio->io_gang_leader->io_gang_tree) {
2182 			abd_t *buf = abd_get_offset(data, offset);
2183 
2184 			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
2185 			    buf, BP_GET_PSIZE(bp));
2186 
2187 			abd_put(buf);
2188 		}
2189 		/*
2190 		 * If we are here to damage data for testing purposes,
2191 		 * leave the GBH alone so that we can detect the damage.
2192 		 */
2193 		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
2194 			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
2195 	} else {
2196 		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
2197 		    abd_get_offset(data, offset), BP_GET_PSIZE(bp),
2198 		    zio_gang_issue_func_done, NULL, pio->io_priority,
2199 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2200 	}
2201 
2202 	return (zio);
2203 }
2204 
2205 /* ARGSUSED */
2206 static zio_t *
2207 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2208     uint64_t offset)
2209 {
2210 	return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
2211 	    ZIO_GANG_CHILD_FLAGS(pio)));
2212 }
2213 
2214 /* ARGSUSED */
2215 static zio_t *
2216 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2217     uint64_t offset)
2218 {
2219 	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
2220 	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
2221 }
2222 
2223 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
2224 	NULL,
2225 	zio_read_gang,
2226 	zio_rewrite_gang,
2227 	zio_free_gang,
2228 	zio_claim_gang,
2229 	NULL
2230 };
2231 
2232 static void zio_gang_tree_assemble_done(zio_t *zio);
2233 
2234 static zio_gang_node_t *
2235 zio_gang_node_alloc(zio_gang_node_t **gnpp)
2236 {
2237 	zio_gang_node_t *gn;
2238 
2239 	ASSERT(*gnpp == NULL);
2240 
2241 	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
2242 	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
2243 	*gnpp = gn;
2244 
2245 	return (gn);
2246 }
2247 
2248 static void
2249 zio_gang_node_free(zio_gang_node_t **gnpp)
2250 {
2251 	zio_gang_node_t *gn = *gnpp;
2252 
2253 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2254 		ASSERT(gn->gn_child[g] == NULL);
2255 
2256 	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2257 	kmem_free(gn, sizeof (*gn));
2258 	*gnpp = NULL;
2259 }
2260 
2261 static void
2262 zio_gang_tree_free(zio_gang_node_t **gnpp)
2263 {
2264 	zio_gang_node_t *gn = *gnpp;
2265 
2266 	if (gn == NULL)
2267 		return;
2268 
2269 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
2270 		zio_gang_tree_free(&gn->gn_child[g]);
2271 
2272 	zio_gang_node_free(gnpp);
2273 }
2274 
2275 static void
2276 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
2277 {
2278 	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
2279 	abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2280 
2281 	ASSERT(gio->io_gang_leader == gio);
2282 	ASSERT(BP_IS_GANG(bp));
2283 
2284 	zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2285 	    zio_gang_tree_assemble_done, gn, gio->io_priority,
2286 	    ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
2287 }
2288 
2289 static void
2290 zio_gang_tree_assemble_done(zio_t *zio)
2291 {
2292 	zio_t *gio = zio->io_gang_leader;
2293 	zio_gang_node_t *gn = zio->io_private;
2294 	blkptr_t *bp = zio->io_bp;
2295 
2296 	ASSERT(gio == zio_unique_parent(zio));
2297 	ASSERT(zio->io_child_count == 0);
2298 
2299 	if (zio->io_error)
2300 		return;
2301 
2302 	/* this ABD was created from a linear buf in zio_gang_tree_assemble */
2303 	if (BP_SHOULD_BYTESWAP(bp))
2304 		byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
2305 
2306 	ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
2307 	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
2308 	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2309 
2310 	abd_put(zio->io_abd);
2311 
2312 	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2313 		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2314 		if (!BP_IS_GANG(gbp))
2315 			continue;
2316 		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
2317 	}
2318 }
2319 
2320 static void
2321 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
2322     uint64_t offset)
2323 {
2324 	zio_t *gio = pio->io_gang_leader;
2325 	zio_t *zio;
2326 
2327 	ASSERT(BP_IS_GANG(bp) == !!gn);
2328 	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
2329 	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
2330 
2331 	/*
2332 	 * If you're a gang header, your data is in gn->gn_gbh.
2333 	 * If you're a gang member, your data is in 'data' and gn == NULL.
2334 	 */
2335 	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
2336 
2337 	if (gn != NULL) {
2338 		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
2339 
2340 		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
2341 			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2342 			if (BP_IS_HOLE(gbp))
2343 				continue;
2344 			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
2345 			    offset);
2346 			offset += BP_GET_PSIZE(gbp);
2347 		}
2348 	}
2349 
2350 	if (gn == gio->io_gang_tree)
2351 		ASSERT3U(gio->io_size, ==, offset);
2352 
2353 	if (zio != pio)
2354 		zio_nowait(zio);
2355 }
2356 
2357 static int
2358 zio_gang_assemble(zio_t *zio)
2359 {
2360 	blkptr_t *bp = zio->io_bp;
2361 
2362 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
2363 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2364 
2365 	zio->io_gang_leader = zio;
2366 
2367 	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
2368 
2369 	return (ZIO_PIPELINE_CONTINUE);
2370 }
2371 
2372 static int
2373 zio_gang_issue(zio_t *zio)
2374 {
2375 	blkptr_t *bp = zio->io_bp;
2376 
2377 	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
2378 		return (ZIO_PIPELINE_STOP);
2379 	}
2380 
2381 	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
2382 	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2383 
2384 	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
2385 		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
2386 		    0);
2387 	else
2388 		zio_gang_tree_free(&zio->io_gang_tree);
2389 
2390 	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2391 
2392 	return (ZIO_PIPELINE_CONTINUE);
2393 }
2394 
2395 static void
2396 zio_write_gang_member_ready(zio_t *zio)
2397 {
2398 	zio_t *pio = zio_unique_parent(zio);
2399 	zio_t *gio = zio->io_gang_leader;
2400 	dva_t *cdva = zio->io_bp->blk_dva;
2401 	dva_t *pdva = pio->io_bp->blk_dva;
2402 	uint64_t asize;
2403 
2404 	if (BP_IS_HOLE(zio->io_bp))
2405 		return;
2406 
2407 	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
2408 
2409 	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
2410 	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
2411 	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
2412 	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
2413 	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
2414 
2415 	mutex_enter(&pio->io_lock);
2416 	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
2417 		ASSERT(DVA_GET_GANG(&pdva[d]));
2418 		asize = DVA_GET_ASIZE(&pdva[d]);
2419 		asize += DVA_GET_ASIZE(&cdva[d]);
2420 		DVA_SET_ASIZE(&pdva[d], asize);
2421 	}
2422 	mutex_exit(&pio->io_lock);
2423 }
2424 
2425 static void
2426 zio_write_gang_done(zio_t *zio)
2427 {
2428 	/*
2429 	 * The io_abd field will be NULL for a zio with no data.  The io_flags
2430 	 * will initially have the ZIO_FLAG_NODATA flag set, but we can't
2431 	 * check for it here as it is cleared in zio_ready.
2432 	 */
2433 	if (zio->io_abd != NULL)
2434 		abd_put(zio->io_abd);
2435 }
2436 
2437 static int
2438 zio_write_gang_block(zio_t *pio)
2439 {
2440 	spa_t *spa = pio->io_spa;
2441 	metaslab_class_t *mc = spa_normal_class(spa);
2442 	blkptr_t *bp = pio->io_bp;
2443 	zio_t *gio = pio->io_gang_leader;
2444 	zio_t *zio;
2445 	zio_gang_node_t *gn, **gnpp;
2446 	zio_gbh_phys_t *gbh;
2447 	abd_t *gbh_abd;
2448 	uint64_t txg = pio->io_txg;
2449 	uint64_t resid = pio->io_size;
2450 	uint64_t lsize;
2451 	int copies = gio->io_prop.zp_copies;
2452 	int gbh_copies = MIN(copies + 1, spa_max_replication(spa));
2453 	zio_prop_t zp;
2454 	int error;
2455 	boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
2456 
2457 	/*
2458 	 * Encrypted blocks need DVA[2] free, so encrypted gang headers can't
2459 	 * have a third copy.
2460 	 */
2461 	if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP)
2462 		gbh_copies = SPA_DVAS_PER_BP - 1;
2463 
2464 	int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
2465 	if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2466 		ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2467 		ASSERT(has_data);
2468 
2469 		flags |= METASLAB_ASYNC_ALLOC;
2470 		VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator],
2471 		    pio));
2472 
2473 		/*
2474 		 * The logical zio has already placed a reservation for
2475 		 * 'copies' allocation slots but gang blocks may require
2476 		 * additional copies. These additional copies
2477 		 * (i.e. gbh_copies - copies) are guaranteed to succeed
2478 		 * since metaslab_class_throttle_reserve() always allows
2479 		 * additional reservations for gang blocks.
2480 		 */
2481 		VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
2482 		    pio->io_allocator, pio, flags));
2483 	}
2484 
2485 	error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
2486 	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
2487 	    &pio->io_alloc_list, pio, pio->io_allocator);
2488 	if (error) {
2489 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2490 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2491 			ASSERT(has_data);
2492 
2493 			/*
2494 			 * If we failed to allocate the gang block header then
2495 			 * we remove any additional allocation reservations that
2496 			 * we placed here. The original reservation will
2497 			 * be removed when the logical I/O goes to the ready
2498 			 * stage.
2499 			 */
2500 			metaslab_class_throttle_unreserve(mc,
2501 			    gbh_copies - copies, pio->io_allocator, pio);
2502 		}
2503 		pio->io_error = error;
2504 		return (ZIO_PIPELINE_CONTINUE);
2505 	}
2506 
2507 	if (pio == gio) {
2508 		gnpp = &gio->io_gang_tree;
2509 	} else {
2510 		gnpp = pio->io_private;
2511 		ASSERT(pio->io_ready == zio_write_gang_member_ready);
2512 	}
2513 
2514 	gn = zio_gang_node_alloc(gnpp);
2515 	gbh = gn->gn_gbh;
2516 	bzero(gbh, SPA_GANGBLOCKSIZE);
2517 	gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
2518 
2519 	/*
2520 	 * Create the gang header.
2521 	 */
2522 	zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2523 	    zio_write_gang_done, NULL, pio->io_priority,
2524 	    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2525 
2526 	/*
2527 	 * Create and nowait the gang children.
2528 	 */
2529 	for (int g = 0; resid != 0; resid -= lsize, g++) {
2530 		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
2531 		    SPA_MINBLOCKSIZE);
2532 		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
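		/*
		 * Illustrative numbers, assuming a 512-byte
		 * SPA_MINBLOCKSIZE: a 131072-byte (128K) gang write yields
		 * member sizes of 44032, 43520 and 43520 bytes -- on each
		 * pass, the remaining resid divided by the remaining bp
		 * slots, rounded up.
		 */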
2533 
2534 		zp.zp_checksum = gio->io_prop.zp_checksum;
2535 		zp.zp_compress = ZIO_COMPRESS_OFF;
2536 		zp.zp_type = DMU_OT_NONE;
2537 		zp.zp_level = 0;
2538 		zp.zp_copies = gio->io_prop.zp_copies;
2539 		zp.zp_dedup = B_FALSE;
2540 		zp.zp_dedup_verify = B_FALSE;
2541 		zp.zp_nopwrite = B_FALSE;
2542 		zp.zp_encrypt = gio->io_prop.zp_encrypt;
2543 		zp.zp_byteorder = gio->io_prop.zp_byteorder;
2544 		bzero(zp.zp_salt, ZIO_DATA_SALT_LEN);
2545 		bzero(zp.zp_iv, ZIO_DATA_IV_LEN);
2546 		bzero(zp.zp_mac, ZIO_DATA_MAC_LEN);
2547 
2548 		zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
2549 		    has_data ? abd_get_offset(pio->io_abd, pio->io_size -
2550 		    resid) : NULL, lsize, lsize, &zp,
2551 		    zio_write_gang_member_ready, NULL, NULL,
2552 		    zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
2553 		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
2554 
2555 		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
2556 			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
2557 			ASSERT(has_data);
2558 
2559 			/*
2560 			 * Gang children won't throttle but we should
2561 			 * account for their work, so reserve an allocation
2562 			 * slot for them here.
2563 			 */
2564 			VERIFY(metaslab_class_throttle_reserve(mc,
2565 			    zp.zp_copies, cio->io_allocator, cio, flags));
2566 		}
2567 		zio_nowait(cio);
2568 	}
2569 
2570 	/*
2571 	 * Set pio's pipeline to just wait for zio to finish.
2572 	 */
2573 	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2574 
2575 	zio_nowait(zio);
2576 
2577 	return (ZIO_PIPELINE_CONTINUE);
2578 }
2579 
2580 /*
2581  * The zio_nop_write stage in the pipeline determines if allocating a
2582  * new bp is necessary.  The nopwrite feature can handle writes in
2583  * either syncing or open context (i.e. zil writes) and as a result is
2584  * mutually exclusive with dedup.
2585  *
2586  * By leveraging a cryptographically secure checksum, such as SHA256, we
2587  * can compare the checksums of the new data and the old to determine if
2588  * allocating a new block is required.  Note that our requirements for
2589  * cryptographic strength are fairly weak: there can't be any accidental
2590  * hash collisions, but we don't need to be secure against intentional
2591  * (malicious) collisions.  To trigger a nopwrite, you have to be able
2592  * to write the file to begin with, and triggering an incorrect (hash
2593  * collision) nopwrite is no worse than simply writing to the file.
2594  * That said, there are no known attacks against the checksum algorithms
2595  * used for nopwrite, assuming that the salt and the checksums
2596  * themselves remain secret.
2597  */
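/*
 * A minimal sketch of the decision this stage makes, using a
 * hypothetical 256-bit checksum type rather than the real zio_cksum_t
 * and ZIO_CHECKSUM_EQUAL() machinery: the write may be elided only when
 * the strong checksums of the old and new data match.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	typedef struct { uint64_t word[4]; } cksum256_t;
 *
 *	static bool
 *	can_nop_write(const cksum256_t *old_ck, const cksum256_t *new_ck)
 *	{
 *		// Identical strong checksums imply identical data, so the
 *		// existing block pointer can be reused unchanged.
 *		return (memcmp(old_ck, new_ck, sizeof (cksum256_t)) == 0);
 *	}
 */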
2598 static int
2599 zio_nop_write(zio_t *zio)
2600 {
2601 	blkptr_t *bp = zio->io_bp;
2602 	blkptr_t *bp_orig = &zio->io_bp_orig;
2603 	zio_prop_t *zp = &zio->io_prop;
2604 
2605 	ASSERT(BP_GET_LEVEL(bp) == 0);
2606 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
2607 	ASSERT(zp->zp_nopwrite);
2608 	ASSERT(!zp->zp_dedup);
2609 	ASSERT(zio->io_bp_override == NULL);
2610 	ASSERT(IO_IS_ALLOCATING(zio));
2611 
2612 	/*
2613 	 * Check to see if the original bp and the new bp have matching
2614 	 * characteristics (i.e. same checksum, compression algorithms, etc).
2615 	 * If they don't then just continue with the pipeline which will
2616 	 * allocate a new bp.
2617 	 */
2618 	if (BP_IS_HOLE(bp_orig) ||
2619 	    !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
2620 	    ZCHECKSUM_FLAG_NOPWRITE) ||
2621 	    BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
2622 	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
2623 	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
2624 	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
2625 	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
2626 		return (ZIO_PIPELINE_CONTINUE);
2627 
2628 	/*
2629 	 * If the checksums match then reset the pipeline so that we
2630 	 * avoid allocating a new bp and issuing any I/O.
2631 	 */
2632 	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
2633 		ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
2634 		    ZCHECKSUM_FLAG_NOPWRITE);
2635 		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
2636 		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
2637 		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
2638 		ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
2639 		    sizeof (uint64_t)) == 0);
2640 
2641 		*bp = *bp_orig;
2642 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
2643 		zio->io_flags |= ZIO_FLAG_NOPWRITE;
2644 	}
2645 
2646 	return (ZIO_PIPELINE_CONTINUE);
2647 }
2648 
2649 /*
2650  * ==========================================================================
2651  * Dedup
2652  * ==========================================================================
2653  */
2654 static void
2655 zio_ddt_child_read_done(zio_t *zio)
2656 {
2657 	blkptr_t *bp = zio->io_bp;
2658 	ddt_entry_t *dde = zio->io_private;
2659 	ddt_phys_t *ddp;
2660 	zio_t *pio = zio_unique_parent(zio);
2661 
2662 	mutex_enter(&pio->io_lock);
2663 	ddp = ddt_phys_select(dde, bp);
2664 	if (zio->io_error == 0)
2665 		ddt_phys_clear(ddp);	/* this ddp doesn't need repair */
2666 
2667 	if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
2668 		dde->dde_repair_abd = zio->io_abd;
2669 	else
2670 		abd_free(zio->io_abd);
2671 	mutex_exit(&pio->io_lock);
2672 }
2673 
2674 static int
2675 zio_ddt_read_start(zio_t *zio)
2676 {
2677 	blkptr_t *bp = zio->io_bp;
2678 
2679 	ASSERT(BP_GET_DEDUP(bp));
2680 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
2681 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2682 
2683 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
2684 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
2685 		ddt_entry_t *dde = ddt_repair_start(ddt, bp);
2686 		ddt_phys_t *ddp = dde->dde_phys;
2687 		ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
2688 		blkptr_t blk;
2689 
2690 		ASSERT(zio->io_vsd == NULL);
2691 		zio->io_vsd = dde;
2692 
2693 		if (ddp_self == NULL)
2694 			return (ZIO_PIPELINE_CONTINUE);
2695 
2696 		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
2697 			if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
2698 				continue;
2699 			ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
2700 			    &blk);
2701 			zio_nowait(zio_read(zio, zio->io_spa, &blk,
2702 			    abd_alloc_for_io(zio->io_size, B_TRUE),
2703 			    zio->io_size, zio_ddt_child_read_done, dde,
2704 			    zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
2705 			    ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
2706 		}
2707 		return (ZIO_PIPELINE_CONTINUE);
2708 	}
2709 
2710 	zio_nowait(zio_read(zio, zio->io_spa, bp,
2711 	    zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
2712 	    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
2713 
2714 	return (ZIO_PIPELINE_CONTINUE);
2715 }
2716 
2717 static int
2718 zio_ddt_read_done(zio_t *zio)
2719 {
2720 	blkptr_t *bp = zio->io_bp;
2721 
2722 	if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
2723 		return (ZIO_PIPELINE_STOP);
2724 	}
2725 
2726 	ASSERT(BP_GET_DEDUP(bp));
2727 	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
2728 	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
2729 
2730 	if (zio->io_child_error[ZIO_CHILD_DDT]) {
2731 		ddt_t *ddt = ddt_select(zio->io_spa, bp);
2732 		ddt_entry_t *dde = zio->io_vsd;
2733 		if (ddt == NULL) {
2734 			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
2735 			return (ZIO_PIPELINE_CONTINUE);
2736 		}
2737 		if (dde == NULL) {
2738 			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
2739 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
2740 			return (ZIO_PIPELINE_STOP);
2741 		}
2742 		if (dde->dde_repair_abd != NULL) {
2743 			abd_copy(zio->io_abd, dde->dde_repair_abd,
2744 			    zio->io_size);
2745 			zio->io_child_error[ZIO_CHILD_DDT] = 0;
2746 		}
2747 		ddt_repair_done(ddt, dde);
2748 		zio->io_vsd = NULL;
2749 	}
2750 
2751 	ASSERT(zio->io_vsd == NULL);
2752 
2753 	return (ZIO_PIPELINE_CONTINUE);
2754 }
2755 
2756 static boolean_t
2757 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
2758 {
2759 	spa_t *spa = zio->io_spa;
2760 	boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
2761 
2762 	/* We should never get a raw, override zio */
2763 	ASSERT(!(zio->io_bp_override && do_raw));
2764 
2765 	/*
2766 	 * Note: we compare the original data, not the transformed data,
2767 	 * because when zio->io_bp is an override bp, we will not have
2768 	 * pushed the I/O transforms.  That's an important optimization
2769 	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
2770 	 * However, we should never get a raw, override zio so in these
2771 	 * cases we can compare the io_data directly. This is useful because
2772 	 * it allows us to do dedup verification even if we don't have access
2773 	 * to the original data (for instance, if the encryption keys aren't
2774 	 * loaded).
2775 	 */
2776 
2777 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
2778 		zio_t *lio = dde->dde_lead_zio[p];
2779 
2780 		if (lio != NULL && do_raw) {
2781 			return (lio->io_size != zio->io_size ||
2782 			    abd_cmp(zio->io_abd, lio->io_abd,
2783 			    zio->io_size) != 0);
2784 		} else if (lio != NULL) {
2785 			return (lio->io_orig_size != zio->io_orig_size ||
2786 			    abd_cmp(zio->io_orig_abd, lio->io_orig_abd,
2787 			    zio->io_orig_size) != 0);
2788 		}
2789 	}
2790 
2791 	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
2792 		ddt_phys_t *ddp = &dde->dde_phys[p];
2793 
2794 		if (ddp->ddp_phys_birth != 0 && do_raw) {
2795 			blkptr_t blk = *zio->io_bp;
2796 			uint64_t psize;
2797 			abd_t *tmpabd;
2798 			int error;
2799 
2800 			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
2801 			psize = BP_GET_PSIZE(&blk);
2802 
2803 			if (psize != zio->io_size)
2804 				return (B_TRUE);
2805 
2806 			ddt_exit(ddt);
2807 
2808 			tmpabd = abd_alloc_for_io(psize, B_TRUE);
2809 
2810 			error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
2811 			    psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
2812 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2813 			    ZIO_FLAG_RAW, &zio->io_bookmark));
2814 
2815 			if (error == 0) {
2816 				if (abd_cmp(tmpabd, zio->io_abd, psize) != 0)
2817 					error = SET_ERROR(ENOENT);
2818 			}
2819 
2820 			abd_free(tmpabd);
2821 			ddt_enter(ddt);
2822 			return (error != 0);
2823 		} else if (ddp->ddp_phys_birth != 0) {
2824 			arc_buf_t *abuf = NULL;
2825 			arc_flags_t aflags = ARC_FLAG_WAIT;
2826 			int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
2827 			blkptr_t blk = *zio->io_bp;
2828 			int error;
2829 
2830 			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
2831 
2832 			if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
2833 				return (B_TRUE);
2834 
2835 			ddt_exit(ddt);
2836 
2837 			/*
2838 			 * Intuitively, it would make more sense to compare
2839 			 * io_abd than io_orig_abd in the raw case since you
2840 			 * don't want to look at any transformations that have
2841 			 * happened to the data. However, for raw I/Os the
2842 			 * data will actually be the same in io_abd and
2843 			 * io_orig_abd, so all we have to do is issue this as
2844 			 * a raw ARC read.
2845 			 */
2846 			if (do_raw) {
2847 				zio_flags |= ZIO_FLAG_RAW;
2848 				ASSERT3U(zio->io_size, ==, zio->io_orig_size);
2849 				ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd,
2850 				    zio->io_size));
2851 				ASSERT3P(zio->io_transform_stack, ==, NULL);
2852 			}
2853 
2854 			error = arc_read(NULL, spa, &blk,
2855 			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
2856 			    zio_flags, &aflags, &zio->io_bookmark);
2857 
2858 			if (error == 0) {
2859 				if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
2860 				    zio->io_orig_size) != 0)
2861 					error = SET_ERROR(ENOENT);
2862 				arc_buf_destroy(abuf, &abuf);
2863 			}
2864 
2865 			ddt_enter(ddt);
2866 			return (error != 0);
2867 		}
2868 	}
2869 
2870 	return (B_FALSE);
2871 }
2872 
2873 static void
2874 zio_ddt_child_write_ready(zio_t *zio)
2875 {
2876 	int