/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017, Intel Corporation.
 * Copyright 2020 Joyent, Inc.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/time.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>
#include <sys/cityhash.h>
#include <sys/dsl_crypt.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl", "z_trim"
};

boolean_t zio_dva_throttle_enabled = B_TRUE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

#define	ZIO_PIPELINE_CONTINUE		0x100
#define	ZIO_PIPELINE_STOP		0x101

/* Mark IOs as "slow" if they take longer than 30 seconds */
int zio_slow_io_ms = (30 * MILLISEC);

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
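
/*
 * For example, with the defaults above: frees issued in sync pass 1 are
 * processed immediately, frees issued in pass 2 or later are pushed out
 * to the next txg (unless the log spacemap feature changes this; see
 * zio_free() below), and writes issued in pass 5 or later skip
 * compression so that the txg can converge.
 */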

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
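
	/*
	 * With SPA_MINBLOCKSIZE of 512, for example, this creates a cache
	 * for every 512-byte multiple up to 4K, then one per 1K step up
	 * to 8K, one per 2K step up to 16K (8K, 10K, 12K, 14K, 16K), and
	 * so on; sizes in between are served by the next larger cache via
	 * the fix-up loop at the end of this function.
	 */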

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own
		 * page, to eliminate the performance overhead of trapping
		 * to the kernel when modifying a non-watched buffer that
		 * shares the page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    cflags | KMC_NOTOUCH);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump, thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}
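
/*
 * For example, zio_buf_alloc(3000) is satisfied from the zio_buf_3072
 * cache (sizes are rounded up to the next SPA_MINBLOCKSIZE multiple),
 * and the matching zio_buf_free(buf, 3000) must pass the same size so
 * that it resolves to the same cache.
 */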

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

/* ARGSUSED */
static void
zio_abd_free(void *abd, size_t size)
{
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	/*
	 * Ensure that anyone expecting this zio to contain a linear ABD isn't
	 * going to get a nasty surprise when they try to access the data.
	 */
	IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size);
		abd_return_buf_copy(data, tmp, size);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}
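
/*
 * For instance, a read of a compressed block pushes a transform with
 * zio_push_transform(zio, cdata, psize, psize, zio_decompress): the
 * device reads into the temporary 'cdata' ABD, and when the transform
 * stack is popped on completion, zio_decompress() above inflates the
 * result back into the caller's original buffer.
 */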

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp.  It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block. We decompress it now and
			 * throw away the result after we are finished.
			 */
			tmp = zio_buf_alloc(lsize);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, tmp, zio->io_size, lsize);
			if (ret != 0) {
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
			    tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			zio_buf_free(tmp, lsize);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC. It would be
	 * nice to separate this out into its own flag, but for the moment
	 * enum zio_flag is out of bits.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error return EIO as
	 * the io_error.  If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark);
			zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}
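
/*
 * Callers iterate these lists with a cursor that starts out NULL:
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *cio;
 *	while ((cio = zio_walk_children(pio, &zl)) != NULL)
 *		...;
 *
 * zio_unique_parent() above is this pattern applied to the parent list,
 * verifying along the way that at most one parent exists.
 */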

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}
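
/*
 * Note that zio_add_child() and zio_remove_child() both acquire the
 * parent's io_lock before the child's, an ordering that must be
 * preserved wherever both locks are held.  A parent stalled by
 * zio_wait_for_children() is restarted in zio_notify_parent() below,
 * once the child count it is waiting on drains to zero.
 */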

static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		/*
		 * Dispatch the parent zio in its own taskq so that
		 * the child can continue to make progress. This also
		 * prevents overflowing the stack when we have deeply nested
		 * parent-child relationships.
		 */
		zio_taskq_dispatch(pio, type, B_FALSE);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
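
/*
 * zio_bookmark_compare() orders zios by (objset, object, level, blkid)
 * and breaks ties by memory address, so that distinct zios with equal
 * bookmarks still compare unequal -- a requirement for AVL trees, which
 * need a total order over their elements.
 */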

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    enum zio_flag flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_metaslab_class == NULL)
			zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}
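
/*
 * A root zio is the usual way to aggregate a batch of I/Os; a typical
 * caller looks roughly like this (names are illustrative):
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for each bp:
 *		zio_nowait(zio_read(rio, spa, bp, abd, size, done, arg,
 *		    ZIO_PRIORITY_SYNC_READ, flags, &zb));
 *	error = zio_wait(rio);
 *
 * zio_wait() returns the worst error propagated up from the children
 * (see zio_notify_parent() above).
 */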

void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return;

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}
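
/*
 * Note that zfs_panic_recover() panics by default and merely warns when
 * the zfs_recover tunable is set, which is why each DVA check above
 * 'continue's rather than dereferencing a vdev it just found invalid.
 */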

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (BP_IS_GANG(bp))
		asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).  Encrypted
	 * dedup blocks need data as well so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync(), keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}
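
/*
 * zio_write_override() is how a caller (dmu_sync() being the canonical
 * example, per the comment above) hands an already-written bp to a
 * pending zio_write(), so that the write pipeline adopts it via
 * io_bp_override instead of allocating a new block.
 */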

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	zfs_blkptr_verify(spa, bp);

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 *
	 * Note that we only defer frees after zfs_sync_pass_deferred_free
	 * when the log space map feature is disabled. [see relevant comment
	 * in spa_sync_iterate_to_convergence()]
	 */
	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}
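
/*
 * To summarize the logic above: a free is executed inline only when the
 * bp is neither gang nor dedup (no read required), it belongs to the
 * currently-syncing txg, and either we are still in an early sync pass
 * (< zfs_sync_pass_deferred_free) or the log spacemap feature is
 * active; everything else is appended to spa_free_bplist[] for later
 * processing.
 */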

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    enum zio_flag flags, enum trim_flag trim_flags)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	ASSERT3U(size, !=, 0);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
	zio->io_trim_flags = trim_flags;

	return (zio);
}
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 1174770499e1SDan Kimmel abd_t *data, int checksum, zio_done_func_t *done, void *private, 117569962b56SMatthew Ahrens zio_priority_t priority, enum zio_flag flags, boolean_t labels) 1176fa9e4066Sahrens { 1177fa9e4066Sahrens zio_t *zio; 11780a4e9518Sgw 1179e14bb325SJeff Bonwick ASSERT(vd->vdev_children == 0); 1180e14bb325SJeff Bonwick ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 1181e14bb325SJeff Bonwick offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 1182e14bb325SJeff Bonwick ASSERT3U(offset + size, <=, vd->vdev_psize); 1183fa9e4066Sahrens 11845602294fSDan Kimmel zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done, 11855602294fSDan Kimmel private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, 11865602294fSDan Kimmel offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE); 1187fa9e4066Sahrens 1188e14bb325SJeff Bonwick zio->io_prop.zp_checksum = checksum; 1189fa9e4066Sahrens 1190fa9e4066Sahrens return (zio); 1191fa9e4066Sahrens } 1192fa9e4066Sahrens 1193fa9e4066Sahrens zio_t * 1194fa9e4066Sahrens zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 1195770499e1SDan Kimmel abd_t *data, int checksum, zio_done_func_t *done, void *private, 119669962b56SMatthew Ahrens zio_priority_t priority, enum zio_flag flags, boolean_t labels) 1197fa9e4066Sahrens { 1198fa9e4066Sahrens zio_t *zio; 11990a4e9518Sgw 1200e14bb325SJeff Bonwick ASSERT(vd->vdev_children == 0); 1201e14bb325SJeff Bonwick ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 1202e14bb325SJeff Bonwick offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 1203e14bb325SJeff Bonwick ASSERT3U(offset + size, <=, vd->vdev_psize); 1204fa9e4066Sahrens 12055602294fSDan Kimmel zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done, 12065602294fSDan Kimmel private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, 12075602294fSDan Kimmel offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE); 1208fa9e4066Sahrens 1209e14bb325SJeff Bonwick zio->io_prop.zp_checksum = checksum; 1210fa9e4066Sahrens 121145818ee1SMatthew Ahrens if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) { 1212fa9e4066Sahrens /* 12136e1f5caaSNeil Perrin * zec checksums are necessarily destructive -- they modify 1214e14bb325SJeff Bonwick * the end of the write buffer to hold the verifier/checksum. 1215fa9e4066Sahrens * Therefore, we must make a local copy in case the data is 1216e14bb325SJeff Bonwick * being written to multiple places in parallel. 1217fa9e4066Sahrens */ 1218770499e1SDan Kimmel abd_t *wbuf = abd_alloc_sametype(data, size); 1219770499e1SDan Kimmel abd_copy(wbuf, data, size); 1220770499e1SDan Kimmel 1221e14bb325SJeff Bonwick zio_push_transform(zio, wbuf, size, size, NULL); 1222fa9e4066Sahrens } 1223fa9e4066Sahrens 1224fa9e4066Sahrens return (zio); 1225fa9e4066Sahrens } 1226fa9e4066Sahrens 1227fa9e4066Sahrens /* 1228e14bb325SJeff Bonwick * Create a child I/O to do some work for us. 
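 *
 * Note that leaf offsets are label-relative: zio_vdev_child_io() adds
 * VDEV_LABEL_START_SIZE for leaf vdevs below, so callers address the
 * allocatable region of the device rather than raw device offsets.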
1229fa9e4066Sahrens */ 1230fa9e4066Sahrens zio_t * 1231e14bb325SJeff Bonwick zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset, 1232770499e1SDan Kimmel abd_t *data, uint64_t size, int type, zio_priority_t priority, 1233dcbf3bd6SGeorge Wilson enum zio_flag flags, zio_done_func_t *done, void *private) 1234fa9e4066Sahrens { 1235b24ab676SJeff Bonwick enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE; 1236e14bb325SJeff Bonwick zio_t *zio; 1237e14bb325SJeff Bonwick 12385cabbc6bSPrashanth Sreenivasa /* 12395cabbc6bSPrashanth Sreenivasa * vdev child I/Os do not propagate their error to the parent. 12405cabbc6bSPrashanth Sreenivasa * Therefore, for correct operation the caller *must* check for 12415cabbc6bSPrashanth Sreenivasa * and handle the error in the child i/o's done callback. 12425cabbc6bSPrashanth Sreenivasa * The only exceptions are i/os that we don't care about 12435cabbc6bSPrashanth Sreenivasa * (OPTIONAL or REPAIR). 12445cabbc6bSPrashanth Sreenivasa */ 12455cabbc6bSPrashanth Sreenivasa ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) || 12465cabbc6bSPrashanth Sreenivasa done != NULL); 12475cabbc6bSPrashanth Sreenivasa 1248fa9e4066Sahrens if (type == ZIO_TYPE_READ && bp != NULL) { 1249fa9e4066Sahrens /* 1250fa9e4066Sahrens * If we have the bp, then the child should perform the 1251fa9e4066Sahrens * checksum and the parent need not. This pushes error 1252fa9e4066Sahrens * detection as close to the leaves as possible and 1253fa9e4066Sahrens * eliminates redundant checksums in the interior nodes. 1254fa9e4066Sahrens */ 1255b24ab676SJeff Bonwick pipeline |= ZIO_STAGE_CHECKSUM_VERIFY; 1256b24ab676SJeff Bonwick pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 1257fa9e4066Sahrens } 1258fa9e4066Sahrens 12595cabbc6bSPrashanth Sreenivasa if (vd->vdev_ops->vdev_op_leaf) { 12605cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_children); 1261e14bb325SJeff Bonwick offset += VDEV_LABEL_START_SIZE; 12625cabbc6bSPrashanth Sreenivasa } 1263e14bb325SJeff Bonwick 12645cabbc6bSPrashanth Sreenivasa flags |= ZIO_VDEV_CHILD_FLAGS(pio); 1265b24ab676SJeff Bonwick 1266b24ab676SJeff Bonwick /* 1267b24ab676SJeff Bonwick * If we've decided to do a repair, the write is not speculative -- 1268b24ab676SJeff Bonwick * even if the original read was. 1269b24ab676SJeff Bonwick */ 1270b24ab676SJeff Bonwick if (flags & ZIO_FLAG_IO_REPAIR) 1271b24ab676SJeff Bonwick flags &= ~ZIO_FLAG_SPECULATIVE; 1272b24ab676SJeff Bonwick 12730f7643c7SGeorge Wilson /* 12740f7643c7SGeorge Wilson * If we're creating a child I/O that is not associated with a 12750f7643c7SGeorge Wilson * top-level vdev, then the child zio is not an allocating I/O. 12760f7643c7SGeorge Wilson * If this is a retried I/O then we ignore it since we will 12770f7643c7SGeorge Wilson * have already processed the original allocating I/O. 
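 *
 * Clearing ZIO_FLAG_IO_ALLOCATING below also keeps the child from
 * being charged against the metaslab allocation throttle a second
 * time; the throttle reservation is accounted (and later released)
 * against the allocating parent only.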
12780f7643c7SGeorge Wilson */ 12790f7643c7SGeorge Wilson if (flags & ZIO_FLAG_IO_ALLOCATING && 12800f7643c7SGeorge Wilson (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) { 1281663207adSDon Brady ASSERT(pio->io_metaslab_class != NULL); 1282663207adSDon Brady ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled); 12830f7643c7SGeorge Wilson ASSERT(type == ZIO_TYPE_WRITE); 12840f7643c7SGeorge Wilson ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE); 12850f7643c7SGeorge Wilson ASSERT(!(flags & ZIO_FLAG_IO_REPAIR)); 12860f7643c7SGeorge Wilson ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) || 12870f7643c7SGeorge Wilson pio->io_child_type == ZIO_CHILD_GANG); 12880f7643c7SGeorge Wilson 12890f7643c7SGeorge Wilson flags &= ~ZIO_FLAG_IO_ALLOCATING; 12900f7643c7SGeorge Wilson } 12910f7643c7SGeorge Wilson 12925602294fSDan Kimmel zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size, 1293b24ab676SJeff Bonwick done, private, type, priority, flags, vd, offset, &pio->io_bookmark, 1294b24ab676SJeff Bonwick ZIO_STAGE_VDEV_IO_START >> 1, pipeline); 12950f7643c7SGeorge Wilson ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 1296fa9e4066Sahrens 129769962b56SMatthew Ahrens zio->io_physdone = pio->io_physdone; 129869962b56SMatthew Ahrens if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL) 129969962b56SMatthew Ahrens zio->io_logical->io_phys_children++; 130069962b56SMatthew Ahrens 1301e14bb325SJeff Bonwick return (zio); 130232b87932Sek } 130332b87932Sek 1304e14bb325SJeff Bonwick zio_t * 1305770499e1SDan Kimmel zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size, 13063a4b1be9SMatthew Ahrens zio_type_t type, zio_priority_t priority, enum zio_flag flags, 13079a686fbcSPaul Dagnelie zio_done_func_t *done, void *private) 1308fa9e4066Sahrens { 1309e14bb325SJeff Bonwick zio_t *zio; 1310fa9e4066Sahrens 1311e14bb325SJeff Bonwick ASSERT(vd->vdev_ops->vdev_op_leaf); 1312fa9e4066Sahrens 1313e14bb325SJeff Bonwick zio = zio_create(NULL, vd->vdev_spa, 0, NULL, 13145602294fSDan Kimmel data, size, size, done, private, type, priority, 131569962b56SMatthew Ahrens flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED, 1316e14bb325SJeff Bonwick vd, offset, NULL, 1317b24ab676SJeff Bonwick ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE); 1318fa9e4066Sahrens 1319e14bb325SJeff Bonwick return (zio); 1320e05725b1Sbonwick } 1321e05725b1Sbonwick 1322e05725b1Sbonwick void 1323e14bb325SJeff Bonwick zio_flush(zio_t *zio, vdev_t *vd) 1324e05725b1Sbonwick { 1325e14bb325SJeff Bonwick zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 132669962b56SMatthew Ahrens NULL, NULL, 1327e14bb325SJeff Bonwick ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY)); 1328fa9e4066Sahrens } 1329fa9e4066Sahrens 13306e1f5caaSNeil Perrin void 13316e1f5caaSNeil Perrin zio_shrink(zio_t *zio, uint64_t size) 13326e1f5caaSNeil Perrin { 13331271e4b1SPrakash Surya ASSERT3P(zio->io_executor, ==, NULL); 13341271e4b1SPrakash Surya ASSERT3P(zio->io_orig_size, ==, zio->io_size); 13351271e4b1SPrakash Surya ASSERT3U(size, <=, zio->io_size); 13366e1f5caaSNeil Perrin 13376e1f5caaSNeil Perrin /* 13386e1f5caaSNeil Perrin * We don't shrink for raidz because of problems with the 13396e1f5caaSNeil Perrin * reconstruction when reading back less than the block size. 13406e1f5caaSNeil Perrin * Note, BP_IS_RAIDZ() assumes no compression. 
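 *
 * (Illustrative numbers only: a 128K log block whose record needed
 * just 4K can be shrunk and checksummed as a 4K write before issue,
 * whereas raidz would later have to read back the full original
 * width to reconstruct, hence the BP_IS_RAIDZ() check below.)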
13416e1f5caaSNeil Perrin */ 13426e1f5caaSNeil Perrin ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 13435602294fSDan Kimmel if (!BP_IS_RAIDZ(zio->io_bp)) { 13445602294fSDan Kimmel /* we are not doing a raw write */ 13455602294fSDan Kimmel ASSERT3U(zio->io_size, ==, zio->io_lsize); 13465602294fSDan Kimmel zio->io_orig_size = zio->io_size = zio->io_lsize = size; 13475602294fSDan Kimmel } 13486e1f5caaSNeil Perrin } 13496e1f5caaSNeil Perrin 1350fa9e4066Sahrens /* 1351fa9e4066Sahrens * ========================================================================== 1352e14bb325SJeff Bonwick * Prepare to read and write logical blocks 1353fa9e4066Sahrens * ========================================================================== 1354fa9e4066Sahrens */ 1355e14bb325SJeff Bonwick 1356e05725b1Sbonwick static int 1357e14bb325SJeff Bonwick zio_read_bp_init(zio_t *zio) 1358fa9e4066Sahrens { 1359e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 1360eb633035STom Caputi uint64_t psize = 1361eb633035STom Caputi BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp); 1362e05725b1Sbonwick 13635cabbc6bSPrashanth Sreenivasa ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 13645cabbc6bSPrashanth Sreenivasa 136503361682SJeff Bonwick if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && 1366f5383399SBill Moore zio->io_child_type == ZIO_CHILD_LOGICAL && 1367eb633035STom Caputi !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { 1368770499e1SDan Kimmel zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1369770499e1SDan Kimmel psize, psize, zio_decompress); 1370e14bb325SJeff Bonwick } 1371fa9e4066Sahrens 1372eb633035STom Caputi if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) || 1373eb633035STom Caputi BP_HAS_INDIRECT_MAC_CKSUM(bp)) && 1374eb633035STom Caputi zio->io_child_type == ZIO_CHILD_LOGICAL) { 1375eb633035STom Caputi zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1376eb633035STom Caputi psize, psize, zio_decrypt); 1377eb633035STom Caputi } 1378770499e1SDan Kimmel 1379eb633035STom Caputi if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) { 1380770499e1SDan Kimmel int psize = BPE_GET_PSIZE(bp); 1381770499e1SDan Kimmel void *data = abd_borrow_buf(zio->io_abd, psize); 1382eb633035STom Caputi 1383eb633035STom Caputi zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1384770499e1SDan Kimmel decode_embedded_bp_compressed(bp, data); 1385770499e1SDan Kimmel abd_return_buf_copy(zio->io_abd, data, psize); 13865d7b4d43SMatthew Ahrens } else { 13875d7b4d43SMatthew Ahrens ASSERT(!BP_IS_EMBEDDED(bp)); 13885cabbc6bSPrashanth Sreenivasa ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 13895d7b4d43SMatthew Ahrens } 13905d7b4d43SMatthew Ahrens 1391ad135b5dSChristopher Siden if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0) 1392e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_CACHE; 1393fa9e4066Sahrens 1394bbfd46c4SJeff Bonwick if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP) 1395bbfd46c4SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_CACHE; 1396bbfd46c4SJeff Bonwick 1397b24ab676SJeff Bonwick if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) 1398b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_READ_PIPELINE; 1399b24ab676SJeff Bonwick 1400e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1401fa9e4066Sahrens } 1402fa9e4066Sahrens 1403e05725b1Sbonwick static int 1404e14bb325SJeff Bonwick zio_write_bp_init(zio_t *zio) 14050a4e9518Sgw { 1406e14bb325SJeff Bonwick if (!IO_IS_ALLOCATING(zio)) 1407e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 14080a4e9518Sgw 
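/*
 * For context on the io_bp_override test below: writers such as the
 * dmu_sync()/dbuf override path can hand us a block pointer that
 * already has an identity by calling (caller-side sketch, arguments
 * assumed):
 *
 *	zio_write_override(zio, bp, copies, nopwrite);
 *
 * in which case we adopt that bp here instead of allocating a new one.
 */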
1409b24ab676SJeff Bonwick ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1410b24ab676SJeff Bonwick 1411b24ab676SJeff Bonwick if (zio->io_bp_override) { 14120f7643c7SGeorge Wilson blkptr_t *bp = zio->io_bp; 14130f7643c7SGeorge Wilson zio_prop_t *zp = &zio->io_prop; 14140f7643c7SGeorge Wilson 1415b24ab676SJeff Bonwick ASSERT(bp->blk_birth != zio->io_txg); 1416b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0); 1417b24ab676SJeff Bonwick 1418b24ab676SJeff Bonwick *bp = *zio->io_bp_override; 1419b24ab676SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1420b24ab676SJeff Bonwick 14215d7b4d43SMatthew Ahrens if (BP_IS_EMBEDDED(bp)) 14225d7b4d43SMatthew Ahrens return (ZIO_PIPELINE_CONTINUE); 14235d7b4d43SMatthew Ahrens 142480901aeaSGeorge Wilson /* 142580901aeaSGeorge Wilson * If we've been overridden and nopwrite is set then 142680901aeaSGeorge Wilson * set the flag accordingly to indicate that a nopwrite 142780901aeaSGeorge Wilson * has already occurred. 142880901aeaSGeorge Wilson */ 142980901aeaSGeorge Wilson if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) { 143080901aeaSGeorge Wilson ASSERT(!zp->zp_dedup); 14310f7643c7SGeorge Wilson ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum); 143280901aeaSGeorge Wilson zio->io_flags |= ZIO_FLAG_NOPWRITE; 143380901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 143480901aeaSGeorge Wilson } 143580901aeaSGeorge Wilson 143680901aeaSGeorge Wilson ASSERT(!zp->zp_nopwrite); 143780901aeaSGeorge Wilson 1438b24ab676SJeff Bonwick if (BP_IS_HOLE(bp) || !zp->zp_dedup) 1439b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1440b24ab676SJeff Bonwick 144145818ee1SMatthew Ahrens ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags & 144245818ee1SMatthew Ahrens ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify); 1443b24ab676SJeff Bonwick 1444eb633035STom Caputi if (BP_GET_CHECKSUM(bp) == zp->zp_checksum && 1445eb633035STom Caputi !zp->zp_encrypt) { 1446b24ab676SJeff Bonwick BP_SET_DEDUP(bp, 1); 1447b24ab676SJeff Bonwick zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; 1448b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1449b24ab676SJeff Bonwick } 14500f7643c7SGeorge Wilson 14510f7643c7SGeorge Wilson /* 14520f7643c7SGeorge Wilson * We were unable to handle this as an override bp, treat 14530f7643c7SGeorge Wilson * it as a regular write I/O. 14540f7643c7SGeorge Wilson */ 1455b39b744bSMatthew Ahrens zio->io_bp_override = NULL; 14560f7643c7SGeorge Wilson *bp = zio->io_bp_orig; 14570f7643c7SGeorge Wilson zio->io_pipeline = zio->io_orig_pipeline; 1458b24ab676SJeff Bonwick } 14590a4e9518Sgw 14600f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 14610f7643c7SGeorge Wilson } 14620f7643c7SGeorge Wilson 14630f7643c7SGeorge Wilson static int 14640f7643c7SGeorge Wilson zio_write_compress(zio_t *zio) 14650f7643c7SGeorge Wilson { 14660f7643c7SGeorge Wilson spa_t *spa = zio->io_spa; 14670f7643c7SGeorge Wilson zio_prop_t *zp = &zio->io_prop; 14680f7643c7SGeorge Wilson enum zio_compress compress = zp->zp_compress; 14690f7643c7SGeorge Wilson blkptr_t *bp = zio->io_bp; 14705602294fSDan Kimmel uint64_t lsize = zio->io_lsize; 14715602294fSDan Kimmel uint64_t psize = zio->io_size; 14720f7643c7SGeorge Wilson int pass = 1; 14730f7643c7SGeorge Wilson 14740f7643c7SGeorge Wilson /* 14750f7643c7SGeorge Wilson * If our children haven't all reached the ready stage, 14760f7643c7SGeorge Wilson * wait for them and then repeat this pipeline stage. 
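 *
 * zio_wait_for_children() returns B_TRUE when children of the given
 * classes are still outstanding; in that case it backs io_stage up by
 * one and parks this zio in io_stall, so the last completing child
 * re-dispatches us at this same stage and ZIO_PIPELINE_STOP is safe.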
14770f7643c7SGeorge Wilson */ 1478d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT | 1479d6e1c446SGeorge Wilson ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) { 14800f7643c7SGeorge Wilson return (ZIO_PIPELINE_STOP); 1481d6e1c446SGeorge Wilson } 14820f7643c7SGeorge Wilson 14830f7643c7SGeorge Wilson if (!IO_IS_ALLOCATING(zio)) 14840f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 14850f7643c7SGeorge Wilson 14860f7643c7SGeorge Wilson if (zio->io_children_ready != NULL) { 14870f7643c7SGeorge Wilson /* 14880f7643c7SGeorge Wilson * Now that all our children are ready, run the callback 14890f7643c7SGeorge Wilson * associated with this zio in case it wants to modify the 14900f7643c7SGeorge Wilson * data to be written. 14910f7643c7SGeorge Wilson */ 14920f7643c7SGeorge Wilson ASSERT3U(zp->zp_level, >, 0); 14930f7643c7SGeorge Wilson zio->io_children_ready(zio); 14940f7643c7SGeorge Wilson } 14950f7643c7SGeorge Wilson 14960f7643c7SGeorge Wilson ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 14970f7643c7SGeorge Wilson ASSERT(zio->io_bp_override == NULL); 14980f7643c7SGeorge Wilson 149943466aaeSMax Grossman if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) { 1500e14bb325SJeff Bonwick /* 1501e14bb325SJeff Bonwick * We're rewriting an existing block, which means we're 1502e14bb325SJeff Bonwick * working on behalf of spa_sync(). For spa_sync() to 1503e14bb325SJeff Bonwick * converge, it must eventually be the case that we don't 1504e14bb325SJeff Bonwick * have to allocate new blocks. But compression changes 1505e14bb325SJeff Bonwick * the blocksize, which forces a reallocate, and makes 1506e14bb325SJeff Bonwick * convergence take longer. Therefore, after the first 1507e14bb325SJeff Bonwick * few passes, stop compressing to ensure convergence. 1508e14bb325SJeff Bonwick */ 1509b24ab676SJeff Bonwick pass = spa_sync_pass(spa); 1510b24ab676SJeff Bonwick 1511b24ab676SJeff Bonwick ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1512b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1513b24ab676SJeff Bonwick ASSERT(!BP_GET_DEDUP(bp)); 1514e05725b1Sbonwick 151501f55e48SGeorge Wilson if (pass >= zfs_sync_pass_dont_compress) 1516e14bb325SJeff Bonwick compress = ZIO_COMPRESS_OFF; 1517e05725b1Sbonwick 1518e14bb325SJeff Bonwick /* Make sure someone doesn't change their mind on overwrites */ 15195d7b4d43SMatthew Ahrens ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp), 1520b24ab676SJeff Bonwick spa_max_replication(spa)) == BP_GET_NDVAS(bp)); 1521e14bb325SJeff Bonwick } 1522fa9e4066Sahrens 15235602294fSDan Kimmel /* If it's a compressed write that is not raw, compress the buffer. 
*/
1524eb633035STom Caputi if (compress != ZIO_COMPRESS_OFF &&
1525eb633035STom Caputi !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1526b24ab676SJeff Bonwick void *cbuf = zio_buf_alloc(lsize);
1527770499e1SDan Kimmel psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
1528b24ab676SJeff Bonwick if (psize == 0 || psize == lsize) {
1529e14bb325SJeff Bonwick compress = ZIO_COMPRESS_OFF;
1530b24ab676SJeff Bonwick zio_buf_free(cbuf, lsize);
1531eb633035STom Caputi } else if (!zp->zp_dedup && !zp->zp_encrypt &&
1532eb633035STom Caputi psize <= BPE_PAYLOAD_SIZE &&
15335d7b4d43SMatthew Ahrens zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
15345d7b4d43SMatthew Ahrens spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
15355d7b4d43SMatthew Ahrens encode_embedded_bp_compressed(bp,
15365d7b4d43SMatthew Ahrens cbuf, compress, lsize, psize);
15375d7b4d43SMatthew Ahrens BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
15385d7b4d43SMatthew Ahrens BP_SET_TYPE(bp, zio->io_prop.zp_type);
15395d7b4d43SMatthew Ahrens BP_SET_LEVEL(bp, zio->io_prop.zp_level);
15405d7b4d43SMatthew Ahrens zio_buf_free(cbuf, lsize);
15415d7b4d43SMatthew Ahrens bp->blk_birth = zio->io_txg;
15425d7b4d43SMatthew Ahrens zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
15435d7b4d43SMatthew Ahrens ASSERT(spa_feature_is_active(spa,
15445d7b4d43SMatthew Ahrens SPA_FEATURE_EMBEDDED_DATA));
15455d7b4d43SMatthew Ahrens return (ZIO_PIPELINE_CONTINUE);
1546b24ab676SJeff Bonwick } else {
15475d7b4d43SMatthew Ahrens /*
154881cd5c55SMatthew Ahrens * Round the compressed size up to the ashift
154981cd5c55SMatthew Ahrens * of the smallest-ashift device, and zero the tail.
155081cd5c55SMatthew Ahrens * This ensures that the compressed size of the BP
155181cd5c55SMatthew Ahrens * (and thus the compressratio property) is correct,
155281cd5c55SMatthew Ahrens * in that we charge for the padding used to fill out
155381cd5c55SMatthew Ahrens * the last sector.
15545d7b4d43SMatthew Ahrens */
155581cd5c55SMatthew Ahrens ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
155681cd5c55SMatthew Ahrens size_t rounded = (size_t)P2ROUNDUP(psize,
155781cd5c55SMatthew Ahrens 1ULL << spa->spa_min_ashift);
155881cd5c55SMatthew Ahrens if (rounded >= lsize) {
15595d7b4d43SMatthew Ahrens compress = ZIO_COMPRESS_OFF;
15605d7b4d43SMatthew Ahrens zio_buf_free(cbuf, lsize);
156181cd5c55SMatthew Ahrens psize = lsize;
15625d7b4d43SMatthew Ahrens } else {
1563770499e1SDan Kimmel abd_t *cdata = abd_get_from_buf(cbuf, lsize);
1564770499e1SDan Kimmel abd_take_ownership_of_buf(cdata, B_TRUE);
1565770499e1SDan Kimmel abd_zero_off(cdata, psize, rounded - psize);
156681cd5c55SMatthew Ahrens psize = rounded;
1567770499e1SDan Kimmel zio_push_transform(zio, cdata,
15685d7b4d43SMatthew Ahrens psize, lsize, NULL);
15695d7b4d43SMatthew Ahrens }
1570e14bb325SJeff Bonwick }
15710f7643c7SGeorge Wilson
15720f7643c7SGeorge Wilson /*
15730f7643c7SGeorge Wilson * We were unable to handle this as an override bp, treat
15740f7643c7SGeorge Wilson * it as a regular write I/O.
15750f7643c7SGeorge Wilson */ 15760f7643c7SGeorge Wilson zio->io_bp_override = NULL; 15770f7643c7SGeorge Wilson *bp = zio->io_bp_orig; 15780f7643c7SGeorge Wilson zio->io_pipeline = zio->io_orig_pipeline; 1579eb633035STom Caputi 1580eb633035STom Caputi } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 && 1581eb633035STom Caputi zp->zp_type == DMU_OT_DNODE) { 1582eb633035STom Caputi /* 1583eb633035STom Caputi * The DMU actually relies on the zio layer's compression 1584eb633035STom Caputi * to free metadnode blocks that have had all contained 1585eb633035STom Caputi * dnodes freed. As a result, even when doing a raw 1586eb633035STom Caputi * receive, we must check whether the block can be compressed 1587eb633035STom Caputi * to a hole. 1588eb633035STom Caputi */ 1589eb633035STom Caputi psize = zio_compress_data(ZIO_COMPRESS_EMPTY, 1590eb633035STom Caputi zio->io_abd, NULL, lsize); 1591eb633035STom Caputi if (psize == 0) 1592eb633035STom Caputi compress = ZIO_COMPRESS_OFF; 15935602294fSDan Kimmel } else { 15945602294fSDan Kimmel ASSERT3U(psize, !=, 0); 1595e14bb325SJeff Bonwick } 1596c717a561Smaybee 1597e14bb325SJeff Bonwick /* 1598e14bb325SJeff Bonwick * The final pass of spa_sync() must be all rewrites, but the first 1599e14bb325SJeff Bonwick * few passes offer a trade-off: allocating blocks defers convergence, 1600e14bb325SJeff Bonwick * but newly allocated blocks are sequential, so they can be written 1601e14bb325SJeff Bonwick * to disk faster. Therefore, we allow the first few passes of 1602e14bb325SJeff Bonwick * spa_sync() to allocate new blocks, but force rewrites after that. 1603e14bb325SJeff Bonwick * There should only be a handful of blocks after pass 1 in any case. 1604e14bb325SJeff Bonwick */ 160543466aaeSMax Grossman if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg && 160643466aaeSMax Grossman BP_GET_PSIZE(bp) == psize && 160701f55e48SGeorge Wilson pass >= zfs_sync_pass_rewrite) { 1608663207adSDon Brady VERIFY3U(psize, !=, 0); 1609b24ab676SJeff Bonwick enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 1610e14bb325SJeff Bonwick zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 1611e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_IO_REWRITE; 1612e14bb325SJeff Bonwick } else { 1613e14bb325SJeff Bonwick BP_ZERO(bp); 1614e14bb325SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 1615e14bb325SJeff Bonwick } 1616fa9e4066Sahrens 1617b24ab676SJeff Bonwick if (psize == 0) { 161843466aaeSMax Grossman if (zio->io_bp_orig.blk_birth != 0 && 161943466aaeSMax Grossman spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { 162043466aaeSMax Grossman BP_SET_LSIZE(bp, lsize); 162143466aaeSMax Grossman BP_SET_TYPE(bp, zp->zp_type); 162243466aaeSMax Grossman BP_SET_LEVEL(bp, zp->zp_level); 162343466aaeSMax Grossman BP_SET_BIRTH(bp, zio->io_txg, 0); 162443466aaeSMax Grossman } 1625e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1626e14bb325SJeff Bonwick } else { 1627e14bb325SJeff Bonwick ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 1628e14bb325SJeff Bonwick BP_SET_LSIZE(bp, lsize); 162943466aaeSMax Grossman BP_SET_TYPE(bp, zp->zp_type); 163043466aaeSMax Grossman BP_SET_LEVEL(bp, zp->zp_level); 1631b24ab676SJeff Bonwick BP_SET_PSIZE(bp, psize); 1632e14bb325SJeff Bonwick BP_SET_COMPRESS(bp, compress); 1633e14bb325SJeff Bonwick BP_SET_CHECKSUM(bp, zp->zp_checksum); 1634b24ab676SJeff Bonwick BP_SET_DEDUP(bp, zp->zp_dedup); 1635e14bb325SJeff Bonwick BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 1636b24ab676SJeff Bonwick if (zp->zp_dedup) { 1637b24ab676SJeff 
Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1638b24ab676SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1639eb633035STom Caputi ASSERT(!zp->zp_encrypt || 1640eb633035STom Caputi DMU_OT_IS_ENCRYPTED(zp->zp_type)); 1641b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 1642b24ab676SJeff Bonwick } 164380901aeaSGeorge Wilson if (zp->zp_nopwrite) { 164480901aeaSGeorge Wilson ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 164580901aeaSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 164680901aeaSGeorge Wilson zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; 164780901aeaSGeorge Wilson } 1648b24ab676SJeff Bonwick } 1649b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1650b24ab676SJeff Bonwick } 1651b24ab676SJeff Bonwick 1652b24ab676SJeff Bonwick static int 1653b24ab676SJeff Bonwick zio_free_bp_init(zio_t *zio) 1654b24ab676SJeff Bonwick { 1655b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 1656b24ab676SJeff Bonwick 1657b24ab676SJeff Bonwick if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 1658b24ab676SJeff Bonwick if (BP_GET_DEDUP(bp)) 1659b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 1660e14bb325SJeff Bonwick } 1661fa9e4066Sahrens 16625cabbc6bSPrashanth Sreenivasa ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 16635cabbc6bSPrashanth Sreenivasa 1664e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 1665fa9e4066Sahrens } 1666fa9e4066Sahrens 1667e14bb325SJeff Bonwick /* 1668e14bb325SJeff Bonwick * ========================================================================== 1669e14bb325SJeff Bonwick * Execute the I/O pipeline 1670e14bb325SJeff Bonwick * ========================================================================== 1671e14bb325SJeff Bonwick */ 1672e14bb325SJeff Bonwick 1673e14bb325SJeff Bonwick static void 1674ec94d322SAdam Leventhal zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) 1675fa9e4066Sahrens { 167680eb36f2SGeorge Wilson spa_t *spa = zio->io_spa; 1677e14bb325SJeff Bonwick zio_type_t t = zio->io_type; 16785aeb9474SGarrett D'Amore int flags = (cutinline ? TQ_FRONT : 0); 16790a4e9518Sgw 16800a4e9518Sgw /* 1681bbe36defSGeorge Wilson * If we're a config writer or a probe, the normal issue and 1682bbe36defSGeorge Wilson * interrupt threads may all be blocked waiting for the config lock. 1683bbe36defSGeorge Wilson * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 16840a4e9518Sgw */ 1685bbe36defSGeorge Wilson if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 1686e14bb325SJeff Bonwick t = ZIO_TYPE_NULL; 16870a4e9518Sgw 16880a4e9518Sgw /* 1689e14bb325SJeff Bonwick * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 16900a4e9518Sgw */ 1691e14bb325SJeff Bonwick if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 1692e14bb325SJeff Bonwick t = ZIO_TYPE_NULL; 16930a4e9518Sgw 169480eb36f2SGeorge Wilson /* 1695ec94d322SAdam Leventhal * If this is a high priority I/O, then use the high priority taskq if 1696ec94d322SAdam Leventhal * available. 
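 *
 * This relies on the convention that each _HIGH taskq variant sits in
 * the next array slot (e.g. ZIO_TASKQ_ISSUE_HIGH directly follows
 * ZIO_TASKQ_ISSUE), which is why the bare q++ below suffices.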
169780eb36f2SGeorge Wilson */
16982258ad0bSGeorge Wilson if ((zio->io_priority == ZIO_PRIORITY_NOW ||
16992258ad0bSGeorge Wilson zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
1700ec94d322SAdam Leventhal spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
170180eb36f2SGeorge Wilson q++;
170280eb36f2SGeorge Wilson
170380eb36f2SGeorge Wilson ASSERT3U(q, <, ZIO_TASKQ_TYPES);
17045aeb9474SGarrett D'Amore
17055aeb9474SGarrett D'Amore /*
17065aeb9474SGarrett D'Amore * NB: We are assuming that the zio can only be dispatched
17075aeb9474SGarrett D'Amore * to a single taskq at a time. It would be a grievous error
17085aeb9474SGarrett D'Amore * to dispatch the zio to another taskq at the same time.
17095aeb9474SGarrett D'Amore */
17105aeb9474SGarrett D'Amore ASSERT(zio->io_tqent.tqent_next == NULL);
1711ec94d322SAdam Leventhal spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
1712ec94d322SAdam Leventhal flags, &zio->io_tqent);
1713e14bb325SJeff Bonwick }
17140a4e9518Sgw
1715e14bb325SJeff Bonwick static boolean_t
1716ec94d322SAdam Leventhal zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
1717e14bb325SJeff Bonwick {
1718e14bb325SJeff Bonwick kthread_t *executor = zio->io_executor;
1719e14bb325SJeff Bonwick spa_t *spa = zio->io_spa;
17200a4e9518Sgw
1721ec94d322SAdam Leventhal for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
1722ec94d322SAdam Leventhal spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
1723ec94d322SAdam Leventhal uint_t i;
1724ec94d322SAdam Leventhal for (i = 0; i < tqs->stqs_count; i++) {
1725ec94d322SAdam Leventhal if (taskq_member(tqs->stqs_taskq[i], executor))
1726ec94d322SAdam Leventhal return (B_TRUE);
1727ec94d322SAdam Leventhal }
1728ec94d322SAdam Leventhal }
17290a4e9518Sgw
1730e14bb325SJeff Bonwick return (B_FALSE);
1731e14bb325SJeff Bonwick }
1732e05725b1Sbonwick
1733e14bb325SJeff Bonwick static int
1734e14bb325SJeff Bonwick zio_issue_async(zio_t *zio)
1735e14bb325SJeff Bonwick {
173635a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
1737e14bb325SJeff Bonwick
1738e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP);
17390a4e9518Sgw }
17400a4e9518Sgw
1741e14bb325SJeff Bonwick void
1742e14bb325SJeff Bonwick zio_interrupt(zio_t *zio)
17430a4e9518Sgw {
174435a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
1745e14bb325SJeff Bonwick }
17460a4e9518Sgw
174797e81309SPrakash Surya void
174897e81309SPrakash Surya zio_delay_interrupt(zio_t *zio)
174997e81309SPrakash Surya {
175097e81309SPrakash Surya /*
175197e81309SPrakash Surya * The timeout_generic() function isn't defined in userspace, so
175297e81309SPrakash Surya * rather than trying to implement the function, the zio delay
175397e81309SPrakash Surya * functionality has been disabled for userspace builds.
175497e81309SPrakash Surya */
175597e81309SPrakash Surya
175697e81309SPrakash Surya #ifdef _KERNEL
175797e81309SPrakash Surya /*
175897e81309SPrakash Surya * If io_target_timestamp is zero, then no delay has been registered
175997e81309SPrakash Surya * for this IO, so we jump to the end of this function and "skip" the
176097e81309SPrakash Surya * delay, issuing it directly to the zio layer.
176197e81309SPrakash Surya */ 176297e81309SPrakash Surya if (zio->io_target_timestamp != 0) { 176397e81309SPrakash Surya hrtime_t now = gethrtime(); 176497e81309SPrakash Surya 176597e81309SPrakash Surya if (now >= zio->io_target_timestamp) { 176697e81309SPrakash Surya /* 176797e81309SPrakash Surya * This IO has already taken longer than the target 176897e81309SPrakash Surya * delay to complete, so we don't want to delay it 176997e81309SPrakash Surya * any longer; we "miss" the delay and issue it 177097e81309SPrakash Surya * directly to the zio layer. This is likely due to 177197e81309SPrakash Surya * the target latency being set to a value less than 177297e81309SPrakash Surya * the underlying hardware can satisfy (e.g. delay 177397e81309SPrakash Surya * set to 1ms, but the disks take 10ms to complete an 177497e81309SPrakash Surya * IO request). 177597e81309SPrakash Surya */ 177697e81309SPrakash Surya 177797e81309SPrakash Surya DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, 177897e81309SPrakash Surya hrtime_t, now); 177997e81309SPrakash Surya 178097e81309SPrakash Surya zio_interrupt(zio); 178197e81309SPrakash Surya } else { 178297e81309SPrakash Surya hrtime_t diff = zio->io_target_timestamp - now; 178397e81309SPrakash Surya 178497e81309SPrakash Surya DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, 178597e81309SPrakash Surya hrtime_t, now, hrtime_t, diff); 178697e81309SPrakash Surya 178797e81309SPrakash Surya (void) timeout_generic(CALLOUT_NORMAL, 178897e81309SPrakash Surya (void (*)(void *))zio_interrupt, zio, diff, 1, 0); 178997e81309SPrakash Surya } 179097e81309SPrakash Surya 179197e81309SPrakash Surya return; 179297e81309SPrakash Surya } 179397e81309SPrakash Surya #endif 179497e81309SPrakash Surya 179597e81309SPrakash Surya DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); 179697e81309SPrakash Surya zio_interrupt(zio); 179797e81309SPrakash Surya } 179897e81309SPrakash Surya 1799e14bb325SJeff Bonwick /* 1800e14bb325SJeff Bonwick * Execute the I/O pipeline until one of the following occurs: 1801f7170741SWill Andrews * 1802f7170741SWill Andrews * (1) the I/O completes 1803f7170741SWill Andrews * (2) the pipeline stalls waiting for dependent child I/Os 1804f7170741SWill Andrews * (3) the I/O issues, so we're waiting for an I/O completion interrupt 1805f7170741SWill Andrews * (4) the I/O is delegated by vdev-level caching or aggregation 1806f7170741SWill Andrews * (5) the I/O is deferred due to vdev-level queueing 1807f7170741SWill Andrews * (6) the I/O is handed off to another thread. 1808f7170741SWill Andrews * 1809f7170741SWill Andrews * In all cases, the pipeline stops whenever there's no CPU work; it never 1810f7170741SWill Andrews * burns a thread in cv_wait(). 1811e14bb325SJeff Bonwick * 1812e14bb325SJeff Bonwick * There's no locking on io_stage because there's no legitimate way 1813e14bb325SJeff Bonwick * for multiple threads to be attempting to process the same I/O. 
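 *
 * Each stage is a distinct bit in io_pipeline, so advancing to the
 * next enabled stage is just the shift-and-test loop in zio_execute()
 * below:
 *
 *	do {
 *		stage <<= 1;
 *	} while ((stage & pipeline) == 0);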
1814e14bb325SJeff Bonwick */ 1815b24ab676SJeff Bonwick static zio_pipe_stage_t *zio_pipeline[]; 18160a4e9518Sgw 1817e14bb325SJeff Bonwick void 1818e14bb325SJeff Bonwick zio_execute(zio_t *zio) 1819e14bb325SJeff Bonwick { 1820e14bb325SJeff Bonwick zio->io_executor = curthread; 18210a4e9518Sgw 18220f7643c7SGeorge Wilson ASSERT3U(zio->io_queued_timestamp, >, 0); 18230f7643c7SGeorge Wilson 1824e14bb325SJeff Bonwick while (zio->io_stage < ZIO_STAGE_DONE) { 1825b24ab676SJeff Bonwick enum zio_stage pipeline = zio->io_pipeline; 1826b24ab676SJeff Bonwick enum zio_stage stage = zio->io_stage; 1827e14bb325SJeff Bonwick int rv; 18280a4e9518Sgw 1829e14bb325SJeff Bonwick ASSERT(!MUTEX_HELD(&zio->io_lock)); 1830b24ab676SJeff Bonwick ASSERT(ISP2(stage)); 1831b24ab676SJeff Bonwick ASSERT(zio->io_stall == NULL); 18320a4e9518Sgw 1833b24ab676SJeff Bonwick do { 1834b24ab676SJeff Bonwick stage <<= 1; 1835b24ab676SJeff Bonwick } while ((stage & pipeline) == 0); 1836e14bb325SJeff Bonwick 1837e14bb325SJeff Bonwick ASSERT(stage <= ZIO_STAGE_DONE); 18380a4e9518Sgw 18390a4e9518Sgw /* 1840e14bb325SJeff Bonwick * If we are in interrupt context and this pipeline stage 1841e14bb325SJeff Bonwick * will grab a config lock that is held across I/O, 1842b24ab676SJeff Bonwick * or may wait for an I/O that needs an interrupt thread 1843b24ab676SJeff Bonwick * to complete, issue async to avoid deadlock. 184435a5a358SJonathan Adams * 184535a5a358SJonathan Adams * For VDEV_IO_START, we cut in line so that the io will 184635a5a358SJonathan Adams * be sent to disk promptly. 18470a4e9518Sgw */ 1848b24ab676SJeff Bonwick if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 1849e14bb325SJeff Bonwick zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 185035a5a358SJonathan Adams boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 
185135a5a358SJonathan Adams zio_requeue_io_start_cut_in_line : B_FALSE; 185235a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 1853e14bb325SJeff Bonwick return; 18540a4e9518Sgw } 18550a4e9518Sgw 1856e14bb325SJeff Bonwick zio->io_stage = stage; 18570f7643c7SGeorge Wilson zio->io_pipeline_trace |= zio->io_stage; 1858bf16b11eSMatthew Ahrens rv = zio_pipeline[highbit64(stage) - 1](zio); 18590a4e9518Sgw 1860e14bb325SJeff Bonwick if (rv == ZIO_PIPELINE_STOP) 1861e14bb325SJeff Bonwick return; 18620a4e9518Sgw 1863e14bb325SJeff Bonwick ASSERT(rv == ZIO_PIPELINE_CONTINUE); 1864e14bb325SJeff Bonwick } 18650a4e9518Sgw } 18660a4e9518Sgw 1867e14bb325SJeff Bonwick /* 1868e14bb325SJeff Bonwick * ========================================================================== 1869e14bb325SJeff Bonwick * Initiate I/O, either sync or async 1870e14bb325SJeff Bonwick * ========================================================================== 1871e14bb325SJeff Bonwick */ 1872e14bb325SJeff Bonwick int 1873e14bb325SJeff Bonwick zio_wait(zio_t *zio) 18740a4e9518Sgw { 1875e14bb325SJeff Bonwick int error; 18760a4e9518Sgw 18771271e4b1SPrakash Surya ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN); 18781271e4b1SPrakash Surya ASSERT3P(zio->io_executor, ==, NULL); 18790a4e9518Sgw 1880e14bb325SJeff Bonwick zio->io_waiter = curthread; 18810f7643c7SGeorge Wilson ASSERT0(zio->io_queued_timestamp); 18820f7643c7SGeorge Wilson zio->io_queued_timestamp = gethrtime(); 1883e05725b1Sbonwick 1884e14bb325SJeff Bonwick zio_execute(zio); 18850a4e9518Sgw 1886e14bb325SJeff Bonwick mutex_enter(&zio->io_lock); 1887e14bb325SJeff Bonwick while (zio->io_executor != NULL) 1888e14bb325SJeff Bonwick cv_wait(&zio->io_cv, &zio->io_lock); 1889e14bb325SJeff Bonwick mutex_exit(&zio->io_lock); 189032b87932Sek 1891e14bb325SJeff Bonwick error = zio->io_error; 1892e14bb325SJeff Bonwick zio_destroy(zio); 189332b87932Sek 1894e14bb325SJeff Bonwick return (error); 189532b87932Sek } 189632b87932Sek 1897e14bb325SJeff Bonwick void 1898e14bb325SJeff Bonwick zio_nowait(zio_t *zio) 18990a4e9518Sgw { 19001271e4b1SPrakash Surya ASSERT3P(zio->io_executor, ==, NULL); 1901fa9e4066Sahrens 1902a3f829aeSBill Moore if (zio->io_child_type == ZIO_CHILD_LOGICAL && 1903a3f829aeSBill Moore zio_unique_parent(zio) == NULL) { 1904ea8dc4b6Seschrock /* 1905e14bb325SJeff Bonwick * This is a logical async I/O with no parent to wait for it. 190654d692b7SGeorge Wilson * We add it to the spa_async_root_zio "Godfather" I/O which 190754d692b7SGeorge Wilson * will ensure they complete prior to unloading the pool. 
1908ea8dc4b6Seschrock */ 1909e14bb325SJeff Bonwick spa_t *spa = zio->io_spa; 191054d692b7SGeorge Wilson 19116f834bc1SMatthew Ahrens zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio); 1912e14bb325SJeff Bonwick } 1913ea8dc4b6Seschrock 19140f7643c7SGeorge Wilson ASSERT0(zio->io_queued_timestamp); 19150f7643c7SGeorge Wilson zio->io_queued_timestamp = gethrtime(); 1916e14bb325SJeff Bonwick zio_execute(zio); 1917e14bb325SJeff Bonwick } 1918ea8dc4b6Seschrock 1919e14bb325SJeff Bonwick /* 1920e14bb325SJeff Bonwick * ========================================================================== 19211271e4b1SPrakash Surya * Reexecute, cancel, or suspend/resume failed I/O 1922e14bb325SJeff Bonwick * ========================================================================== 1923e14bb325SJeff Bonwick */ 1924fa9e4066Sahrens 1925e14bb325SJeff Bonwick static void 1926e14bb325SJeff Bonwick zio_reexecute(zio_t *pio) 1927e14bb325SJeff Bonwick { 1928a3f829aeSBill Moore zio_t *cio, *cio_next; 1929a3f829aeSBill Moore 1930a3f829aeSBill Moore ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 1931a3f829aeSBill Moore ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 1932f5383399SBill Moore ASSERT(pio->io_gang_leader == NULL); 1933f5383399SBill Moore ASSERT(pio->io_gang_tree == NULL); 1934e05725b1Sbonwick 1935e14bb325SJeff Bonwick pio->io_flags = pio->io_orig_flags; 1936e14bb325SJeff Bonwick pio->io_stage = pio->io_orig_stage; 1937e14bb325SJeff Bonwick pio->io_pipeline = pio->io_orig_pipeline; 1938e14bb325SJeff Bonwick pio->io_reexecute = 0; 193980901aeaSGeorge Wilson pio->io_flags |= ZIO_FLAG_REEXECUTED; 19400f7643c7SGeorge Wilson pio->io_pipeline_trace = 0; 1941e14bb325SJeff Bonwick pio->io_error = 0; 1942a3f829aeSBill Moore for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1943a3f829aeSBill Moore pio->io_state[w] = 0; 1944e14bb325SJeff Bonwick for (int c = 0; c < ZIO_CHILD_TYPES; c++) 1945e14bb325SJeff Bonwick pio->io_child_error[c] = 0; 19460a4e9518Sgw 1947b24ab676SJeff Bonwick if (IO_IS_ALLOCATING(pio)) 1948b24ab676SJeff Bonwick BP_ZERO(pio->io_bp); 1949d58459f4Sek 1950e14bb325SJeff Bonwick /* 1951e14bb325SJeff Bonwick * As we reexecute pio's children, new children could be created. 1952a3f829aeSBill Moore * New children go to the head of pio's io_child_list, however, 1953e14bb325SJeff Bonwick * so we will (correctly) not reexecute them. The key is that 1954a3f829aeSBill Moore * the remainder of pio's io_child_list, from 'cio_next' onward, 1955a3f829aeSBill Moore * cannot be affected by any side effects of reexecuting 'cio'. 1956e14bb325SJeff Bonwick */ 19570f7643c7SGeorge Wilson zio_link_t *zl = NULL; 1958a3874b8bSToomas Soome mutex_enter(&pio->io_lock); 19590f7643c7SGeorge Wilson for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 19600f7643c7SGeorge Wilson cio_next = zio_walk_children(pio, &zl); 1961a3f829aeSBill Moore for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1962a3f829aeSBill Moore pio->io_children[cio->io_child_type][w]++; 1963e14bb325SJeff Bonwick mutex_exit(&pio->io_lock); 1964a3f829aeSBill Moore zio_reexecute(cio); 1965a3874b8bSToomas Soome mutex_enter(&pio->io_lock); 1966fa9e4066Sahrens } 1967a3874b8bSToomas Soome mutex_exit(&pio->io_lock); 1968e05725b1Sbonwick 1969e14bb325SJeff Bonwick /* 1970e14bb325SJeff Bonwick * Now that all children have been reexecuted, execute the parent. 197154d692b7SGeorge Wilson * We don't reexecute "The Godfather" I/O here as it's the 197248bbca81SDaniel Hoffman * responsibility of the caller to wait on it. 
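 * (zio_resume() below is one such caller: it reexecutes the suspended
 * root zio and then zio_wait()s on it.)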
1973e14bb325SJeff Bonwick */ 19740f7643c7SGeorge Wilson if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 19750f7643c7SGeorge Wilson pio->io_queued_timestamp = gethrtime(); 197654d692b7SGeorge Wilson zio_execute(pio); 19770f7643c7SGeorge Wilson } 19780a4e9518Sgw } 19790a4e9518Sgw 1980e14bb325SJeff Bonwick void 1981e0f1c0afSOlaf Faaland zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason) 19820a4e9518Sgw { 1983e14bb325SJeff Bonwick if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 1984e14bb325SJeff Bonwick fm_panic("Pool '%s' has encountered an uncorrectable I/O " 1985e14bb325SJeff Bonwick "failure and the failure mode property for this pool " 1986e14bb325SJeff Bonwick "is set to panic.", spa_name(spa)); 19870a4e9518Sgw 1988*8548ec78SJohn Levon cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O " 1989*8548ec78SJohn Levon "failure and has been suspended; `zpool clear` will be required " 1990*8548ec78SJohn Levon "before the pool can be written to.", spa_name(spa)); 1991*8548ec78SJohn Levon 1992eb633035STom Caputi zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, 1993eb633035STom Caputi NULL, NULL, 0, 0); 19940a4e9518Sgw 1995e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 1996fa9e4066Sahrens 1997e14bb325SJeff Bonwick if (spa->spa_suspend_zio_root == NULL) 199854d692b7SGeorge Wilson spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 199954d692b7SGeorge Wilson ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 200054d692b7SGeorge Wilson ZIO_FLAG_GODFATHER); 2001fa9e4066Sahrens 2002e0f1c0afSOlaf Faaland spa->spa_suspended = reason; 2003fa9e4066Sahrens 2004e14bb325SJeff Bonwick if (zio != NULL) { 200554d692b7SGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 2006e14bb325SJeff Bonwick ASSERT(zio != spa->spa_suspend_zio_root); 2007e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2008a3f829aeSBill Moore ASSERT(zio_unique_parent(zio) == NULL); 2009e14bb325SJeff Bonwick ASSERT(zio->io_stage == ZIO_STAGE_DONE); 2010e14bb325SJeff Bonwick zio_add_child(spa->spa_suspend_zio_root, zio); 2011e14bb325SJeff Bonwick } 2012fa9e4066Sahrens 2013e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 2014e14bb325SJeff Bonwick } 2015fa9e4066Sahrens 201654d692b7SGeorge Wilson int 2017e14bb325SJeff Bonwick zio_resume(spa_t *spa) 2018e14bb325SJeff Bonwick { 201954d692b7SGeorge Wilson zio_t *pio; 2020fa9e4066Sahrens 2021b3995adbSahrens /* 2022e14bb325SJeff Bonwick * Reexecute all previously suspended i/o. 
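 * This is the administrative recovery path, typically reached via
 * `zpool clear`, as the suspension message above indicates.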
2023b3995adbSahrens */ 2024e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 2025e0f1c0afSOlaf Faaland spa->spa_suspended = ZIO_SUSPEND_NONE; 2026e14bb325SJeff Bonwick cv_broadcast(&spa->spa_suspend_cv); 2027e14bb325SJeff Bonwick pio = spa->spa_suspend_zio_root; 2028e14bb325SJeff Bonwick spa->spa_suspend_zio_root = NULL; 2029e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 2030e14bb325SJeff Bonwick 2031e14bb325SJeff Bonwick if (pio == NULL) 203254d692b7SGeorge Wilson return (0); 2033e14bb325SJeff Bonwick 203454d692b7SGeorge Wilson zio_reexecute(pio); 203554d692b7SGeorge Wilson return (zio_wait(pio)); 2036e14bb325SJeff Bonwick } 2037e14bb325SJeff Bonwick 2038e14bb325SJeff Bonwick void 2039e14bb325SJeff Bonwick zio_resume_wait(spa_t *spa) 2040e14bb325SJeff Bonwick { 2041e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 2042e14bb325SJeff Bonwick while (spa_suspended(spa)) 2043e14bb325SJeff Bonwick cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 2044e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 2045fa9e4066Sahrens } 2046fa9e4066Sahrens 2047fa9e4066Sahrens /* 2048fa9e4066Sahrens * ========================================================================== 2049e14bb325SJeff Bonwick * Gang blocks. 2050e14bb325SJeff Bonwick * 2051e14bb325SJeff Bonwick * A gang block is a collection of small blocks that looks to the DMU 2052e14bb325SJeff Bonwick * like one large block. When zio_dva_allocate() cannot find a block 2053e14bb325SJeff Bonwick * of the requested size, due to either severe fragmentation or the pool 2054e14bb325SJeff Bonwick * being nearly full, it calls zio_write_gang_block() to construct the 2055e14bb325SJeff Bonwick * block from smaller fragments. 2056e14bb325SJeff Bonwick * 2057e14bb325SJeff Bonwick * A gang block consists of a gang header (zio_gbh_phys_t) and up to 2058e14bb325SJeff Bonwick * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 2059e14bb325SJeff Bonwick * an indirect block: it's an array of block pointers. It consumes 2060e14bb325SJeff Bonwick * only one sector and hence is allocatable regardless of fragmentation. 2061e14bb325SJeff Bonwick * The gang header's bps point to its gang members, which hold the data. 2062e14bb325SJeff Bonwick * 2063e14bb325SJeff Bonwick * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 2064e14bb325SJeff Bonwick * as the verifier to ensure uniqueness of the SHA256 checksum. 2065e14bb325SJeff Bonwick * Critically, the gang block bp's blk_cksum is the checksum of the data, 2066e14bb325SJeff Bonwick * not the gang header. This ensures that data block signatures (needed for 2067e14bb325SJeff Bonwick * deduplication) are independent of how the block is physically stored. 2068e14bb325SJeff Bonwick * 2069e14bb325SJeff Bonwick * Gang blocks can be nested: a gang member may itself be a gang block. 2070e14bb325SJeff Bonwick * Thus every gang block is a tree in which root and all interior nodes are 2071e14bb325SJeff Bonwick * gang headers, and the leaves are normal blocks that contain user data. 2072e14bb325SJeff Bonwick * The root of the gang tree is called the gang leader. 2073e14bb325SJeff Bonwick * 2074e14bb325SJeff Bonwick * To perform any operation (read, rewrite, free, claim) on a gang block, 2075e14bb325SJeff Bonwick * zio_gang_assemble() first assembles the gang tree (minus data leaves) 2076e14bb325SJeff Bonwick * in the io_gang_tree field of the original logical i/o by recursively 2077e14bb325SJeff Bonwick * reading the gang leader and all gang headers below it. 
This yields 2078e14bb325SJeff Bonwick * an in-core tree containing the contents of every gang header and the 2079e14bb325SJeff Bonwick * bps for every constituent of the gang block. 2080e14bb325SJeff Bonwick * 2081e14bb325SJeff Bonwick * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 2082e14bb325SJeff Bonwick * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 2083e14bb325SJeff Bonwick * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 2084e14bb325SJeff Bonwick * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 2085e14bb325SJeff Bonwick * zio_read_gang() is a wrapper around zio_read() that omits reading gang 2086e14bb325SJeff Bonwick * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 2087e14bb325SJeff Bonwick * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 2088e14bb325SJeff Bonwick * of the gang header plus zio_checksum_compute() of the data to update the 2089e14bb325SJeff Bonwick * gang header's blk_cksum as described above. 2090e14bb325SJeff Bonwick * 2091e14bb325SJeff Bonwick * The two-phase assemble/issue model solves the problem of partial failure -- 2092e14bb325SJeff Bonwick * what if you'd freed part of a gang block but then couldn't read the 2093e14bb325SJeff Bonwick * gang header for another part? Assembling the entire gang tree first 2094e14bb325SJeff Bonwick * ensures that all the necessary gang header I/O has succeeded before 2095e14bb325SJeff Bonwick * starting the actual work of free, claim, or write. Once the gang tree 2096e14bb325SJeff Bonwick * is assembled, free and claim are in-memory operations that cannot fail. 2097e14bb325SJeff Bonwick * 2098e14bb325SJeff Bonwick * In the event that a gang write fails, zio_dva_unallocate() walks the 2099e14bb325SJeff Bonwick * gang tree to immediately free (i.e. insert back into the space map) 2100e14bb325SJeff Bonwick * everything we've allocated. This ensures that we don't get ENOSPC 2101e14bb325SJeff Bonwick * errors during repeated suspend/resume cycles due to a flaky device. 2102e14bb325SJeff Bonwick * 2103e14bb325SJeff Bonwick * Gang rewrites only happen during sync-to-convergence. If we can't assemble 2104e14bb325SJeff Bonwick * the gang tree, we won't modify the block, so we can safely defer the free 2105e14bb325SJeff Bonwick * (knowing that the block is still intact). If we *can* assemble the gang 2106e14bb325SJeff Bonwick * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 2107e14bb325SJeff Bonwick * each constituent bp and we can allocate a new block on the next sync pass. 2108e14bb325SJeff Bonwick * 2109e14bb325SJeff Bonwick * In all cases, the gang tree allows complete recovery from partial failure. 
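 *
 * As a back-of-the-envelope illustration: with up to SPA_GBH_NBLKPTRS
 * (three) members per header, a gang tree of depth d can map as many
 * as 3^d data fragments behind a single leader bp, so even shallow
 * nesting accommodates badly fragmented allocations.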
2110fa9e4066Sahrens * ========================================================================== 2111fa9e4066Sahrens */ 2112e14bb325SJeff Bonwick 2113770499e1SDan Kimmel static void 2114770499e1SDan Kimmel zio_gang_issue_func_done(zio_t *zio) 2115770499e1SDan Kimmel { 2116770499e1SDan Kimmel abd_put(zio->io_abd); 2117770499e1SDan Kimmel } 2118770499e1SDan Kimmel 2119e14bb325SJeff Bonwick static zio_t * 2120770499e1SDan Kimmel zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2121770499e1SDan Kimmel uint64_t offset) 2122fa9e4066Sahrens { 2123e14bb325SJeff Bonwick if (gn != NULL) 2124e14bb325SJeff Bonwick return (pio); 2125fa9e4066Sahrens 2126770499e1SDan Kimmel return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 2127770499e1SDan Kimmel BP_GET_PSIZE(bp), zio_gang_issue_func_done, 2128770499e1SDan Kimmel NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2129e14bb325SJeff Bonwick &pio->io_bookmark)); 2130e14bb325SJeff Bonwick } 2131e14bb325SJeff Bonwick 2132770499e1SDan Kimmel static zio_t * 2133770499e1SDan Kimmel zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2134770499e1SDan Kimmel uint64_t offset) 2135e14bb325SJeff Bonwick { 2136e14bb325SJeff Bonwick zio_t *zio; 2137e14bb325SJeff Bonwick 2138e14bb325SJeff Bonwick if (gn != NULL) { 2139770499e1SDan Kimmel abd_t *gbh_abd = 2140770499e1SDan Kimmel abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2141e14bb325SJeff Bonwick zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2142770499e1SDan Kimmel gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 2143770499e1SDan Kimmel pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2144770499e1SDan Kimmel &pio->io_bookmark); 2145fa9e4066Sahrens /* 2146e14bb325SJeff Bonwick * As we rewrite each gang header, the pipeline will compute 2147e14bb325SJeff Bonwick * a new gang block header checksum for it; but no one will 2148e14bb325SJeff Bonwick * compute a new data checksum, so we do that here. The one 2149e14bb325SJeff Bonwick * exception is the gang leader: the pipeline already computed 2150e14bb325SJeff Bonwick * its data checksum because that stage precedes gang assembly. 2151e14bb325SJeff Bonwick * (Presently, nothing actually uses interior data checksums; 2152e14bb325SJeff Bonwick * this is just good hygiene.) 2153fa9e4066Sahrens */ 2154f5383399SBill Moore if (gn != pio->io_gang_leader->io_gang_tree) { 2155770499e1SDan Kimmel abd_t *buf = abd_get_offset(data, offset); 2156770499e1SDan Kimmel 2157e14bb325SJeff Bonwick zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 2158770499e1SDan Kimmel buf, BP_GET_PSIZE(bp)); 2159770499e1SDan Kimmel 2160770499e1SDan Kimmel abd_put(buf); 2161e14bb325SJeff Bonwick } 2162b24ab676SJeff Bonwick /* 2163b24ab676SJeff Bonwick * If we are here to damage data for testing purposes, 2164b24ab676SJeff Bonwick * leave the GBH alone so that we can detect the damage. 
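 * (Clearing the vdev I/O stages just below means the header rewrite
 * never reaches disk, so the injected damage survives.)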
2165b24ab676SJeff Bonwick */ 2166b24ab676SJeff Bonwick if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 2167b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2168fa9e4066Sahrens } else { 2169e14bb325SJeff Bonwick zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2170770499e1SDan Kimmel abd_get_offset(data, offset), BP_GET_PSIZE(bp), 2171770499e1SDan Kimmel zio_gang_issue_func_done, NULL, pio->io_priority, 2172e14bb325SJeff Bonwick ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2173fa9e4066Sahrens } 2174fa9e4066Sahrens 2175e14bb325SJeff Bonwick return (zio); 2176e14bb325SJeff Bonwick } 2177fa9e4066Sahrens 2178e14bb325SJeff Bonwick /* ARGSUSED */ 2179770499e1SDan Kimmel static zio_t * 2180770499e1SDan Kimmel zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2181770499e1SDan Kimmel uint64_t offset) 2182e14bb325SJeff Bonwick { 2183b24ab676SJeff Bonwick return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 2184b24ab676SJeff Bonwick ZIO_GANG_CHILD_FLAGS(pio))); 2185fa9e4066Sahrens } 2186fa9e4066Sahrens 2187e14bb325SJeff Bonwick /* ARGSUSED */ 2188770499e1SDan Kimmel static zio_t * 2189770499e1SDan Kimmel zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2190770499e1SDan Kimmel uint64_t offset) 2191fa9e4066Sahrens { 2192e14bb325SJeff Bonwick return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 2193e14bb325SJeff Bonwick NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 2194e14bb325SJeff Bonwick } 2195fa9e4066Sahrens 2196e14bb325SJeff Bonwick static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2197e14bb325SJeff Bonwick NULL, 2198e14bb325SJeff Bonwick zio_read_gang, 2199e14bb325SJeff Bonwick zio_rewrite_gang, 2200e14bb325SJeff Bonwick zio_free_gang, 2201e14bb325SJeff Bonwick zio_claim_gang, 2202e14bb325SJeff Bonwick NULL 2203e14bb325SJeff Bonwick }; 2204fa9e4066Sahrens 2205e14bb325SJeff Bonwick static void zio_gang_tree_assemble_done(zio_t *zio); 2206fa9e4066Sahrens 2207e14bb325SJeff Bonwick static zio_gang_node_t * 2208e14bb325SJeff Bonwick zio_gang_node_alloc(zio_gang_node_t **gnpp) 2209e14bb325SJeff Bonwick { 2210e14bb325SJeff Bonwick zio_gang_node_t *gn; 2211fa9e4066Sahrens 2212e14bb325SJeff Bonwick ASSERT(*gnpp == NULL); 2213fa9e4066Sahrens 2214e14bb325SJeff Bonwick gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2215e14bb325SJeff Bonwick gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2216e14bb325SJeff Bonwick *gnpp = gn; 2217e14bb325SJeff Bonwick 2218e14bb325SJeff Bonwick return (gn); 2219fa9e4066Sahrens } 2220fa9e4066Sahrens 2221fa9e4066Sahrens static void 2222e14bb325SJeff Bonwick zio_gang_node_free(zio_gang_node_t **gnpp) 2223fa9e4066Sahrens { 2224e14bb325SJeff Bonwick zio_gang_node_t *gn = *gnpp; 2225fa9e4066Sahrens 2226e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2227e14bb325SJeff Bonwick ASSERT(gn->gn_child[g] == NULL); 2228e14bb325SJeff Bonwick 2229e14bb325SJeff Bonwick zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2230e14bb325SJeff Bonwick kmem_free(gn, sizeof (*gn)); 2231e14bb325SJeff Bonwick *gnpp = NULL; 2232fa9e4066Sahrens } 2233fa9e4066Sahrens 2234e14bb325SJeff Bonwick static void 2235e14bb325SJeff Bonwick zio_gang_tree_free(zio_gang_node_t **gnpp) 2236fa9e4066Sahrens { 2237e14bb325SJeff Bonwick zio_gang_node_t *gn = *gnpp; 2238fa9e4066Sahrens 2239e14bb325SJeff Bonwick if (gn == NULL) 2240e14bb325SJeff Bonwick return; 2241fa9e4066Sahrens 2242e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2243e14bb325SJeff Bonwick zio_gang_tree_free(&gn->gn_child[g]); 
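/*
 * The recursion above has NULLed out every child pointer, satisfying
 * the ASSERTs in zio_gang_node_free() below.
 */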
2244fa9e4066Sahrens 2245e14bb325SJeff Bonwick zio_gang_node_free(gnpp); 2246fa9e4066Sahrens } 2247fa9e4066Sahrens 2248e14bb325SJeff Bonwick static void 2249f5383399SBill Moore zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2250fa9e4066Sahrens { 2251e14bb325SJeff Bonwick zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2252770499e1SDan Kimmel abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2253e14bb325SJeff Bonwick 2254f5383399SBill Moore ASSERT(gio->io_gang_leader == gio); 2255e14bb325SJeff Bonwick ASSERT(BP_IS_GANG(bp)); 2256fa9e4066Sahrens 2257770499e1SDan Kimmel zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2258770499e1SDan Kimmel zio_gang_tree_assemble_done, gn, gio->io_priority, 2259770499e1SDan Kimmel ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2260e14bb325SJeff Bonwick } 2261fa9e4066Sahrens 2262e14bb325SJeff Bonwick static void 2263e14bb325SJeff Bonwick zio_gang_tree_assemble_done(zio_t *zio) 2264e14bb325SJeff Bonwick { 2265f5383399SBill Moore zio_t *gio = zio->io_gang_leader; 2266e14bb325SJeff Bonwick zio_gang_node_t *gn = zio->io_private; 2267e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2268fa9e4066Sahrens 2269f5383399SBill Moore ASSERT(gio == zio_unique_parent(zio)); 2270b24ab676SJeff Bonwick ASSERT(zio->io_child_count == 0); 2271fa9e4066Sahrens 2272e14bb325SJeff Bonwick if (zio->io_error) 2273e14bb325SJeff Bonwick return; 2274fa9e4066Sahrens 2275770499e1SDan Kimmel /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2276e14bb325SJeff Bonwick if (BP_SHOULD_BYTESWAP(bp)) 2277770499e1SDan Kimmel byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2278fa9e4066Sahrens 2279770499e1SDan Kimmel ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2280e14bb325SJeff Bonwick ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 22816e1f5caaSNeil Perrin ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2282e05725b1Sbonwick 2283770499e1SDan Kimmel abd_put(zio->io_abd); 2284770499e1SDan Kimmel 2285e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2286e14bb325SJeff Bonwick blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2287e14bb325SJeff Bonwick if (!BP_IS_GANG(gbp)) 2288e14bb325SJeff Bonwick continue; 2289f5383399SBill Moore zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2290e14bb325SJeff Bonwick } 2291fa9e4066Sahrens } 2292fa9e4066Sahrens 2293e14bb325SJeff Bonwick static void 2294770499e1SDan Kimmel zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2295770499e1SDan Kimmel uint64_t offset) 2296fa9e4066Sahrens { 2297f5383399SBill Moore zio_t *gio = pio->io_gang_leader; 2298e14bb325SJeff Bonwick zio_t *zio; 2299fa9e4066Sahrens 2300e14bb325SJeff Bonwick ASSERT(BP_IS_GANG(bp) == !!gn); 2301f5383399SBill Moore ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2302f5383399SBill Moore ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2303fa9e4066Sahrens 2304e14bb325SJeff Bonwick /* 2305e14bb325SJeff Bonwick * If you're a gang header, your data is in gn->gn_gbh. 2306e14bb325SJeff Bonwick * If you're a gang member, your data is in 'data' and gn == NULL. 
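 * The zio_gang_issue_func[] table above dispatches on the gang leader's
 * I/O type: reads pull each member back into its slice of 'data',
 * rewrites recompute member data checksums, and free/claim simply walk
 * the block pointers.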
2307e14bb325SJeff Bonwick */ 2308770499e1SDan Kimmel zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2309fa9e4066Sahrens 2310e14bb325SJeff Bonwick if (gn != NULL) { 23116e1f5caaSNeil Perrin ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2312fa9e4066Sahrens 2313e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2314e14bb325SJeff Bonwick blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2315e14bb325SJeff Bonwick if (BP_IS_HOLE(gbp)) 2316e14bb325SJeff Bonwick continue; 2317770499e1SDan Kimmel zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2318770499e1SDan Kimmel offset); 2319770499e1SDan Kimmel offset += BP_GET_PSIZE(gbp); 2320e14bb325SJeff Bonwick } 2321fa9e4066Sahrens } 2322fa9e4066Sahrens 2323f5383399SBill Moore if (gn == gio->io_gang_tree) 2324770499e1SDan Kimmel ASSERT3U(gio->io_size, ==, offset); 2325e05725b1Sbonwick 2326e14bb325SJeff Bonwick if (zio != pio) 2327e14bb325SJeff Bonwick zio_nowait(zio); 2328fa9e4066Sahrens } 2329fa9e4066Sahrens 2330e05725b1Sbonwick static int 2331e14bb325SJeff Bonwick zio_gang_assemble(zio_t *zio) 2332fa9e4066Sahrens { 2333e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2334fa9e4066Sahrens 2335f5383399SBill Moore ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2336f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2337f5383399SBill Moore 2338f5383399SBill Moore zio->io_gang_leader = zio; 2339fa9e4066Sahrens 2340e14bb325SJeff Bonwick zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2341e05725b1Sbonwick 2342e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2343fa9e4066Sahrens } 2344fa9e4066Sahrens 2345e05725b1Sbonwick static int 2346e14bb325SJeff Bonwick zio_gang_issue(zio_t *zio) 2347fa9e4066Sahrens { 2348e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2349fa9e4066Sahrens 2350d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2351e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 2352d6e1c446SGeorge Wilson } 2353fa9e4066Sahrens 2354f5383399SBill Moore ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2355f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2356fa9e4066Sahrens 2357e14bb325SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2358770499e1SDan Kimmel zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2359770499e1SDan Kimmel 0); 2360e14bb325SJeff Bonwick else 2361f5383399SBill Moore zio_gang_tree_free(&zio->io_gang_tree); 2362fa9e4066Sahrens 2363e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2364e05725b1Sbonwick 2365e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2366fa9e4066Sahrens } 2367fa9e4066Sahrens 2368fa9e4066Sahrens static void 2369e14bb325SJeff Bonwick zio_write_gang_member_ready(zio_t *zio) 2370fa9e4066Sahrens { 2371a3f829aeSBill Moore zio_t *pio = zio_unique_parent(zio); 2372f5383399SBill Moore zio_t *gio = zio->io_gang_leader; 237344cd46caSbillm dva_t *cdva = zio->io_bp->blk_dva; 237444cd46caSbillm dva_t *pdva = pio->io_bp->blk_dva; 2375fa9e4066Sahrens uint64_t asize; 2376fa9e4066Sahrens 2377e14bb325SJeff Bonwick if (BP_IS_HOLE(zio->io_bp)) 2378e14bb325SJeff Bonwick return; 2379e14bb325SJeff Bonwick 2380e14bb325SJeff Bonwick ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2381e14bb325SJeff Bonwick 2382e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2383b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2384b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2385b24ab676SJeff Bonwick 
ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 238644cd46caSbillm ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2387fa9e4066Sahrens 2388fa9e4066Sahrens mutex_enter(&pio->io_lock); 2389e14bb325SJeff Bonwick for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 239044cd46caSbillm ASSERT(DVA_GET_GANG(&pdva[d])); 239144cd46caSbillm asize = DVA_GET_ASIZE(&pdva[d]); 239244cd46caSbillm asize += DVA_GET_ASIZE(&cdva[d]); 239344cd46caSbillm DVA_SET_ASIZE(&pdva[d], asize); 239444cd46caSbillm } 2395fa9e4066Sahrens mutex_exit(&pio->io_lock); 2396fa9e4066Sahrens } 2397fa9e4066Sahrens 2398770499e1SDan Kimmel static void 2399770499e1SDan Kimmel zio_write_gang_done(zio_t *zio) 2400770499e1SDan Kimmel { 24017341a7deSBrad Lewis /* 24027341a7deSBrad Lewis * The io_abd field will be NULL for a zio with no data. The io_flags 24037341a7deSBrad Lewis * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 24047341a7deSBrad Lewis * check for it here as it is cleared in zio_ready. 24057341a7deSBrad Lewis */ 24067341a7deSBrad Lewis if (zio->io_abd != NULL) 24077341a7deSBrad Lewis abd_put(zio->io_abd); 2408770499e1SDan Kimmel } 2409770499e1SDan Kimmel 24100a4e9518Sgw static int 2411e14bb325SJeff Bonwick zio_write_gang_block(zio_t *pio) 2412fa9e4066Sahrens { 2413e14bb325SJeff Bonwick spa_t *spa = pio->io_spa; 24140f7643c7SGeorge Wilson metaslab_class_t *mc = spa_normal_class(spa); 2415e14bb325SJeff Bonwick blkptr_t *bp = pio->io_bp; 2416f5383399SBill Moore zio_t *gio = pio->io_gang_leader; 2417e14bb325SJeff Bonwick zio_t *zio; 2418e14bb325SJeff Bonwick zio_gang_node_t *gn, **gnpp; 2419fa9e4066Sahrens zio_gbh_phys_t *gbh; 2420770499e1SDan Kimmel abd_t *gbh_abd; 2421e14bb325SJeff Bonwick uint64_t txg = pio->io_txg; 2422e14bb325SJeff Bonwick uint64_t resid = pio->io_size; 2423e14bb325SJeff Bonwick uint64_t lsize; 2424b24ab676SJeff Bonwick int copies = gio->io_prop.zp_copies; 2425b24ab676SJeff Bonwick int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 2426e14bb325SJeff Bonwick zio_prop_t zp; 2427fa9e4066Sahrens int error; 24287341a7deSBrad Lewis boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 2429fa9e4066Sahrens 2430eb633035STom Caputi /* 2431eb633035STom Caputi * encrypted blocks need DVA[2] free so encrypted gang headers can't 2432eb633035STom Caputi * have a third copy. 2433eb633035STom Caputi */ 2434eb633035STom Caputi if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP) 2435eb633035STom Caputi gbh_copies = SPA_DVAS_PER_BP - 1; 2436eb633035STom Caputi 24370f7643c7SGeorge Wilson int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 24380f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 24390f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 24407341a7deSBrad Lewis ASSERT(has_data); 24410f7643c7SGeorge Wilson 24420f7643c7SGeorge Wilson flags |= METASLAB_ASYNC_ALLOC; 2443e914ace2STim Schumacher VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator], 2444f78cdc34SPaul Dagnelie pio)); 24450f7643c7SGeorge Wilson 24460f7643c7SGeorge Wilson /* 24470f7643c7SGeorge Wilson * The logical zio has already placed a reservation for 24480f7643c7SGeorge Wilson * 'copies' allocation slots but gang blocks may require 24490f7643c7SGeorge Wilson * additional copies. These additional copies 24500f7643c7SGeorge Wilson * (i.e. 
gbh_copies - copies) are guaranteed to succeed 24510f7643c7SGeorge Wilson * since metaslab_class_throttle_reserve() always allows 24520f7643c7SGeorge Wilson * additional reservations for gang blocks. 24530f7643c7SGeorge Wilson */ 24540f7643c7SGeorge Wilson VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2455f78cdc34SPaul Dagnelie pio->io_allocator, pio, flags)); 24560f7643c7SGeorge Wilson } 24570f7643c7SGeorge Wilson 24580f7643c7SGeorge Wilson error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 24598363e80aSGeorge Wilson bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 2460f78cdc34SPaul Dagnelie &pio->io_alloc_list, pio, pio->io_allocator); 2461e05725b1Sbonwick if (error) { 24620f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 24630f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 24647341a7deSBrad Lewis ASSERT(has_data); 24650f7643c7SGeorge Wilson 24660f7643c7SGeorge Wilson /* 24670f7643c7SGeorge Wilson * If we failed to allocate the gang block header then 24680f7643c7SGeorge Wilson * we remove any additional allocation reservations that 24690f7643c7SGeorge Wilson * we placed here. The original reservation will 24700f7643c7SGeorge Wilson * be removed when the logical I/O goes to the ready 24710f7643c7SGeorge Wilson * stage. 24720f7643c7SGeorge Wilson */ 24730f7643c7SGeorge Wilson metaslab_class_throttle_unreserve(mc, 2474f78cdc34SPaul Dagnelie gbh_copies - copies, pio->io_allocator, pio); 24750f7643c7SGeorge Wilson } 2476e14bb325SJeff Bonwick pio->io_error = error; 2477e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2478e05725b1Sbonwick } 2479fa9e4066Sahrens 2480f5383399SBill Moore if (pio == gio) { 2481f5383399SBill Moore gnpp = &gio->io_gang_tree; 2482e14bb325SJeff Bonwick } else { 2483e14bb325SJeff Bonwick gnpp = pio->io_private; 2484e14bb325SJeff Bonwick ASSERT(pio->io_ready == zio_write_gang_member_ready); 2485fa9e4066Sahrens } 2486fa9e4066Sahrens 2487e14bb325SJeff Bonwick gn = zio_gang_node_alloc(gnpp); 2488e14bb325SJeff Bonwick gbh = gn->gn_gbh; 2489e14bb325SJeff Bonwick bzero(gbh, SPA_GANGBLOCKSIZE); 2490770499e1SDan Kimmel gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2491fa9e4066Sahrens 2492e14bb325SJeff Bonwick /* 2493e14bb325SJeff Bonwick * Create the gang header. 2494e14bb325SJeff Bonwick */ 2495770499e1SDan Kimmel zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2496770499e1SDan Kimmel zio_write_gang_done, NULL, pio->io_priority, 2497770499e1SDan Kimmel ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2498fa9e4066Sahrens 2499e14bb325SJeff Bonwick /* 2500e14bb325SJeff Bonwick * Create and nowait the gang children. 
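 * Each pass carves off roughly resid / (SPA_GBH_NBLKPTRS - g) bytes,
 * rounded up to a SPA_MINBLOCKSIZE multiple, so the remainder is spread
 * over the slots that are left. For example, five 512-byte units across
 * three slots yield children of 1024, 1024 and 512 bytes.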
2501e14bb325SJeff Bonwick */ 2502e14bb325SJeff Bonwick for (int g = 0; resid != 0; resid -= lsize, g++) { 2503e14bb325SJeff Bonwick lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2504e14bb325SJeff Bonwick SPA_MINBLOCKSIZE); 2505e14bb325SJeff Bonwick ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2506e14bb325SJeff Bonwick 2507f5383399SBill Moore zp.zp_checksum = gio->io_prop.zp_checksum; 2508e14bb325SJeff Bonwick zp.zp_compress = ZIO_COMPRESS_OFF; 2509e14bb325SJeff Bonwick zp.zp_type = DMU_OT_NONE; 2510e14bb325SJeff Bonwick zp.zp_level = 0; 2511b24ab676SJeff Bonwick zp.zp_copies = gio->io_prop.zp_copies; 251280901aeaSGeorge Wilson zp.zp_dedup = B_FALSE; 251380901aeaSGeorge Wilson zp.zp_dedup_verify = B_FALSE; 251480901aeaSGeorge Wilson zp.zp_nopwrite = B_FALSE; 2515eb633035STom Caputi zp.zp_encrypt = gio->io_prop.zp_encrypt; 2516eb633035STom Caputi zp.zp_byteorder = gio->io_prop.zp_byteorder; 2517eb633035STom Caputi bzero(zp.zp_salt, ZIO_DATA_SALT_LEN); 2518eb633035STom Caputi bzero(zp.zp_iv, ZIO_DATA_IV_LEN); 2519eb633035STom Caputi bzero(zp.zp_mac, ZIO_DATA_MAC_LEN); 2520e14bb325SJeff Bonwick 25210f7643c7SGeorge Wilson zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 25227341a7deSBrad Lewis has_data ? abd_get_offset(pio->io_abd, pio->io_size - 25237341a7deSBrad Lewis resid) : NULL, lsize, lsize, &zp, 25247341a7deSBrad Lewis zio_write_gang_member_ready, NULL, NULL, 2525770499e1SDan Kimmel zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 25260f7643c7SGeorge Wilson ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 25270f7643c7SGeorge Wilson 25280f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 25290f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 25307341a7deSBrad Lewis ASSERT(has_data); 25310f7643c7SGeorge Wilson 25320f7643c7SGeorge Wilson /* 25330f7643c7SGeorge Wilson * Gang children won't throttle but we should 25340f7643c7SGeorge Wilson * account for their work, so reserve an allocation 25350f7643c7SGeorge Wilson * slot for them here. 25360f7643c7SGeorge Wilson */ 25370f7643c7SGeorge Wilson VERIFY(metaslab_class_throttle_reserve(mc, 2538f78cdc34SPaul Dagnelie zp.zp_copies, cio->io_allocator, cio, flags)); 25390f7643c7SGeorge Wilson } 25400f7643c7SGeorge Wilson zio_nowait(cio); 2541e14bb325SJeff Bonwick } 2542e05725b1Sbonwick 254344cd46caSbillm /* 2544e14bb325SJeff Bonwick * Set pio's pipeline to just wait for zio to finish. 254544cd46caSbillm */ 2546e14bb325SJeff Bonwick pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2547e14bb325SJeff Bonwick 2548e14bb325SJeff Bonwick zio_nowait(zio); 2549e14bb325SJeff Bonwick 2550e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2551fa9e4066Sahrens } 2552fa9e4066Sahrens 255380901aeaSGeorge Wilson /* 255445818ee1SMatthew Ahrens * The zio_nop_write stage in the pipeline determines if allocating a 255545818ee1SMatthew Ahrens * new bp is necessary. The nopwrite feature can handle writes in 255645818ee1SMatthew Ahrens * either syncing or open context (i.e. zil writes) and as a result is 255745818ee1SMatthew Ahrens * mutually exclusive with dedup. 255845818ee1SMatthew Ahrens * 255945818ee1SMatthew Ahrens * By leveraging a cryptographically secure checksum, such as SHA256, we 256045818ee1SMatthew Ahrens * can compare the checksums of the new data and the old to determine if 256145818ee1SMatthew Ahrens * allocating a new block is required. 
Note that our requirements for 256245818ee1SMatthew Ahrens * cryptographic strength are fairly weak: there can't be any accidental 256345818ee1SMatthew Ahrens * hash collisions, but we don't need to be secure against intentional 256445818ee1SMatthew Ahrens * (malicious) collisions. To trigger a nopwrite, you have to be able 256545818ee1SMatthew Ahrens * to write the file to begin with, and triggering an incorrect (hash 256645818ee1SMatthew Ahrens * collision) nopwrite is no worse than simply writing to the file. 256745818ee1SMatthew Ahrens * That said, there are no known attacks against the checksum algorithms 256845818ee1SMatthew Ahrens * used for nopwrite, assuming that the salt and the checksums 256945818ee1SMatthew Ahrens * themselves remain secret. 257080901aeaSGeorge Wilson */ 257180901aeaSGeorge Wilson static int 257280901aeaSGeorge Wilson zio_nop_write(zio_t *zio) 257380901aeaSGeorge Wilson { 257480901aeaSGeorge Wilson blkptr_t *bp = zio->io_bp; 257580901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 257680901aeaSGeorge Wilson zio_prop_t *zp = &zio->io_prop; 257780901aeaSGeorge Wilson 257880901aeaSGeorge Wilson ASSERT(BP_GET_LEVEL(bp) == 0); 257980901aeaSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 258080901aeaSGeorge Wilson ASSERT(zp->zp_nopwrite); 258180901aeaSGeorge Wilson ASSERT(!zp->zp_dedup); 258280901aeaSGeorge Wilson ASSERT(zio->io_bp_override == NULL); 258380901aeaSGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 258480901aeaSGeorge Wilson 258580901aeaSGeorge Wilson /* 258680901aeaSGeorge Wilson * Check to see if the original bp and the new bp have matching 258780901aeaSGeorge Wilson * characteristics (i.e. same checksum, compression algorithms, etc). 258880901aeaSGeorge Wilson * If they don't then just continue with the pipeline which will 258980901aeaSGeorge Wilson * allocate a new bp. 259080901aeaSGeorge Wilson */ 259180901aeaSGeorge Wilson if (BP_IS_HOLE(bp_orig) || 259245818ee1SMatthew Ahrens !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 259345818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE) || 2594eb633035STom Caputi BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) || 259580901aeaSGeorge Wilson BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 259680901aeaSGeorge Wilson BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 259780901aeaSGeorge Wilson BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 259880901aeaSGeorge Wilson zp->zp_copies != BP_GET_NDVAS(bp_orig)) 259980901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 260080901aeaSGeorge Wilson 260180901aeaSGeorge Wilson /* 260280901aeaSGeorge Wilson * If the checksums match then reset the pipeline so that we 260380901aeaSGeorge Wilson * avoid allocating a new bp and issuing any I/O. 
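 * On a match we adopt bp_orig wholesale below, so the write consumes no
 * new space and touches no vdev; ZIO_FLAG_NOPWRITE records that the
 * block was not reallocated, and the pipeline collapses to just the
 * interlock stages.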
260480901aeaSGeorge Wilson */ 260580901aeaSGeorge Wilson if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 260645818ee1SMatthew Ahrens ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 260745818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE); 260880901aeaSGeorge Wilson ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 260980901aeaSGeorge Wilson ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 261080901aeaSGeorge Wilson ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 261180901aeaSGeorge Wilson ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 261280901aeaSGeorge Wilson sizeof (uint64_t)) == 0); 261380901aeaSGeorge Wilson 261480901aeaSGeorge Wilson *bp = *bp_orig; 261580901aeaSGeorge Wilson zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 261680901aeaSGeorge Wilson zio->io_flags |= ZIO_FLAG_NOPWRITE; 261780901aeaSGeorge Wilson } 261880901aeaSGeorge Wilson 261980901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 262080901aeaSGeorge Wilson } 262180901aeaSGeorge Wilson 2622fa9e4066Sahrens /* 2623fa9e4066Sahrens * ========================================================================== 2624b24ab676SJeff Bonwick * Dedup 2625fa9e4066Sahrens * ========================================================================== 2626fa9e4066Sahrens */ 2627b24ab676SJeff Bonwick static void 2628b24ab676SJeff Bonwick zio_ddt_child_read_done(zio_t *zio) 2629b24ab676SJeff Bonwick { 2630b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2631b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2632b24ab676SJeff Bonwick ddt_phys_t *ddp; 2633b24ab676SJeff Bonwick zio_t *pio = zio_unique_parent(zio); 2634b24ab676SJeff Bonwick 2635b24ab676SJeff Bonwick mutex_enter(&pio->io_lock); 2636b24ab676SJeff Bonwick ddp = ddt_phys_select(dde, bp); 2637b24ab676SJeff Bonwick if (zio->io_error == 0) 2638b24ab676SJeff Bonwick ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2639770499e1SDan Kimmel 2640770499e1SDan Kimmel if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 2641770499e1SDan Kimmel dde->dde_repair_abd = zio->io_abd; 2642b24ab676SJeff Bonwick else 2643770499e1SDan Kimmel abd_free(zio->io_abd); 2644b24ab676SJeff Bonwick mutex_exit(&pio->io_lock); 2645b24ab676SJeff Bonwick } 2646b24ab676SJeff Bonwick 2647b24ab676SJeff Bonwick static int 2648b24ab676SJeff Bonwick zio_ddt_read_start(zio_t *zio) 2649b24ab676SJeff Bonwick { 2650b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2651b24ab676SJeff Bonwick 2652b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2653b24ab676SJeff Bonwick ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2654b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2655b24ab676SJeff Bonwick 2656b24ab676SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_DDT]) { 2657b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2658b24ab676SJeff Bonwick ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2659b24ab676SJeff Bonwick ddt_phys_t *ddp = dde->dde_phys; 2660b24ab676SJeff Bonwick ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2661b24ab676SJeff Bonwick blkptr_t blk; 2662b24ab676SJeff Bonwick 2663b24ab676SJeff Bonwick ASSERT(zio->io_vsd == NULL); 2664b24ab676SJeff Bonwick zio->io_vsd = dde; 2665b24ab676SJeff Bonwick 2666b24ab676SJeff Bonwick if (ddp_self == NULL) 2667b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2668b24ab676SJeff Bonwick 2669b24ab676SJeff Bonwick for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2670b24ab676SJeff Bonwick if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2671b24ab676SJeff Bonwick continue; 2672bbfd46c4SJeff Bonwick 
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2673bbfd46c4SJeff Bonwick &blk); 2674b24ab676SJeff Bonwick zio_nowait(zio_read(zio, zio->io_spa, &blk, 2675770499e1SDan Kimmel abd_alloc_for_io(zio->io_size, B_TRUE), 2676770499e1SDan Kimmel zio->io_size, zio_ddt_child_read_done, dde, 2677770499e1SDan Kimmel zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 2678770499e1SDan Kimmel ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 2679b24ab676SJeff Bonwick } 2680b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2681b24ab676SJeff Bonwick } 2682b24ab676SJeff Bonwick 2683b24ab676SJeff Bonwick zio_nowait(zio_read(zio, zio->io_spa, bp, 2684770499e1SDan Kimmel zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 2685b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2686b24ab676SJeff Bonwick 2687b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2688b24ab676SJeff Bonwick } 2689e14bb325SJeff Bonwick 2690b24ab676SJeff Bonwick static int 2691b24ab676SJeff Bonwick zio_ddt_read_done(zio_t *zio) 2692b24ab676SJeff Bonwick { 2693b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2694b24ab676SJeff Bonwick 2695d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 2696b24ab676SJeff Bonwick return (ZIO_PIPELINE_STOP); 2697d6e1c446SGeorge Wilson } 2698b24ab676SJeff Bonwick 2699b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2700b24ab676SJeff Bonwick ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2701b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2702b24ab676SJeff Bonwick 2703b24ab676SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_DDT]) { 2704b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2705b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_vsd; 2706b24ab676SJeff Bonwick if (ddt == NULL) { 2707b16da2e2SGeorge Wilson ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2708b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2709b24ab676SJeff Bonwick } 2710b24ab676SJeff Bonwick if (dde == NULL) { 2711b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 271235a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2713b24ab676SJeff Bonwick return (ZIO_PIPELINE_STOP); 2714b24ab676SJeff Bonwick } 2715770499e1SDan Kimmel if (dde->dde_repair_abd != NULL) { 2716770499e1SDan Kimmel abd_copy(zio->io_abd, dde->dde_repair_abd, 2717770499e1SDan Kimmel zio->io_size); 2718b24ab676SJeff Bonwick zio->io_child_error[ZIO_CHILD_DDT] = 0; 2719b24ab676SJeff Bonwick } 2720b24ab676SJeff Bonwick ddt_repair_done(ddt, dde); 2721b24ab676SJeff Bonwick zio->io_vsd = NULL; 2722b24ab676SJeff Bonwick } 2723b24ab676SJeff Bonwick 2724b24ab676SJeff Bonwick ASSERT(zio->io_vsd == NULL); 2725b24ab676SJeff Bonwick 2726b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2727b24ab676SJeff Bonwick } 2728b24ab676SJeff Bonwick 2729b24ab676SJeff Bonwick static boolean_t 2730b24ab676SJeff Bonwick zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2731b24ab676SJeff Bonwick { 2732b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 2733eb633035STom Caputi boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); 27345602294fSDan Kimmel 27355602294fSDan Kimmel /* We should never get a raw, override zio */ 27365602294fSDan Kimmel ASSERT(!(zio->io_bp_override && do_raw)); 2737b24ab676SJeff Bonwick 2738b24ab676SJeff Bonwick /* 2739b24ab676SJeff Bonwick * Note: we compare the original data, not the transformed data, 2740b24ab676SJeff Bonwick * because when zio->io_bp is an override bp, we will not have 2741b24ab676SJeff Bonwick * 
pushed the I/O transforms. That's an important optimization 2742b24ab676SJeff Bonwick * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2743eb633035STom Caputi * However, we should never get a raw, override zio so in these 2744eb633035STom Caputi * cases we can compare the io_data directly. This is useful because 2745eb633035STom Caputi * it allows us to do dedup verification even if we don't have access 2746eb633035STom Caputi * to the original data (for instance, if the encryption keys aren't 2747eb633035STom Caputi * loaded). 2748b24ab676SJeff Bonwick */ 2749eb633035STom Caputi 2750b24ab676SJeff Bonwick for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2751b24ab676SJeff Bonwick zio_t *lio = dde->dde_lead_zio[p]; 2752b24ab676SJeff Bonwick 2753eb633035STom Caputi if (lio != NULL && do_raw) { 2754eb633035STom Caputi return (lio->io_size != zio->io_size || 2755eb633035STom Caputi abd_cmp(zio->io_abd, lio->io_abd, 2756eb633035STom Caputi zio->io_size) != 0); 2757eb633035STom Caputi } else if (lio != NULL) { 2758b24ab676SJeff Bonwick return (lio->io_orig_size != zio->io_orig_size || 2759770499e1SDan Kimmel abd_cmp(zio->io_orig_abd, lio->io_orig_abd, 2760b24ab676SJeff Bonwick zio->io_orig_size) != 0); 2761b24ab676SJeff Bonwick } 2762b24ab676SJeff Bonwick } 2763b24ab676SJeff Bonwick 2764b24ab676SJeff Bonwick for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2765b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2766b24ab676SJeff Bonwick 2767eb633035STom Caputi if (ddp->ddp_phys_birth != 0 && do_raw) { 2768eb633035STom Caputi blkptr_t blk = *zio->io_bp; 2769eb633035STom Caputi uint64_t psize; 2770eb633035STom Caputi abd_t *tmpabd; 2771eb633035STom Caputi int error; 2772eb633035STom Caputi 2773eb633035STom Caputi ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2774eb633035STom Caputi psize = BP_GET_PSIZE(&blk); 2775eb633035STom Caputi 2776eb633035STom Caputi if (psize != zio->io_size) 2777eb633035STom Caputi return (B_TRUE); 2778eb633035STom Caputi 2779eb633035STom Caputi ddt_exit(ddt); 2780eb633035STom Caputi 2781eb633035STom Caputi tmpabd = abd_alloc_for_io(psize, B_TRUE); 2782eb633035STom Caputi 2783eb633035STom Caputi error = zio_wait(zio_read(NULL, spa, &blk, tmpabd, 2784eb633035STom Caputi psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, 2785eb633035STom Caputi ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2786eb633035STom Caputi ZIO_FLAG_RAW, &zio->io_bookmark)); 2787eb633035STom Caputi 2788eb633035STom Caputi if (error == 0) { 2789eb633035STom Caputi if (abd_cmp(tmpabd, zio->io_abd, psize) != 0) 2790eb633035STom Caputi error = SET_ERROR(ENOENT); 2791eb633035STom Caputi } 2792eb633035STom Caputi 2793eb633035STom Caputi abd_free(tmpabd); 2794eb633035STom Caputi ddt_enter(ddt); 2795eb633035STom Caputi return (error != 0); 2796eb633035STom Caputi } else if (ddp->ddp_phys_birth != 0) { 2797b24ab676SJeff Bonwick arc_buf_t *abuf = NULL; 27987adb730bSGeorge Wilson arc_flags_t aflags = ARC_FLAG_WAIT; 27995602294fSDan Kimmel int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2800b24ab676SJeff Bonwick blkptr_t blk = *zio->io_bp; 2801b24ab676SJeff Bonwick int error; 2802b24ab676SJeff Bonwick 2803b24ab676SJeff Bonwick ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2804b24ab676SJeff Bonwick 2805eb633035STom Caputi if (BP_GET_LSIZE(&blk) != zio->io_orig_size) 2806eb633035STom Caputi return (B_TRUE); 2807eb633035STom Caputi 2808b24ab676SJeff Bonwick ddt_exit(ddt); 2809b24ab676SJeff Bonwick 28105602294fSDan Kimmel /* 28115602294fSDan Kimmel * Intuitively, it would make 
more sense to compare 2812770499e1SDan Kimmel * io_abd than io_orig_abd in the raw case since you 28135602294fSDan Kimmel * don't want to look at any transformations that have 28145602294fSDan Kimmel * happened to the data. However, for raw I/Os the 2815770499e1SDan Kimmel * data will actually be the same in io_abd and 2816770499e1SDan Kimmel * io_orig_abd, so all we have to do is issue this as 28175602294fSDan Kimmel * a raw ARC read. 28185602294fSDan Kimmel */ 28195602294fSDan Kimmel if (do_raw) { 28205602294fSDan Kimmel zio_flags |= ZIO_FLAG_RAW; 28215602294fSDan Kimmel ASSERT3U(zio->io_size, ==, zio->io_orig_size); 2822770499e1SDan Kimmel ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd, 28235602294fSDan Kimmel zio->io_size)); 28245602294fSDan Kimmel ASSERT3P(zio->io_transform_stack, ==, NULL); 28255602294fSDan Kimmel } 28265602294fSDan Kimmel 28271b912ec7SGeorge Wilson error = arc_read(NULL, spa, &blk, 2828b24ab676SJeff Bonwick arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 28295602294fSDan Kimmel zio_flags, &aflags, &zio->io_bookmark); 2830b24ab676SJeff Bonwick 2831b24ab676SJeff Bonwick if (error == 0) { 2832eb633035STom Caputi if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 2833b24ab676SJeff Bonwick zio->io_orig_size) != 0) 2834eb633035STom Caputi error = SET_ERROR(ENOENT); 2835dcbf3bd6SGeorge Wilson arc_buf_destroy(abuf, &abuf); 2836b24ab676SJeff Bonwick } 2837b24ab676SJeff Bonwick 2838b24ab676SJeff Bonwick ddt_enter(ddt); 2839b24ab676SJeff Bonwick return (error != 0); 2840b24ab676SJeff Bonwick } 2841b24ab676SJeff Bonwick } 2842b24ab676SJeff Bonwick 2843b24ab676SJeff Bonwick return (B_FALSE); 2844b24ab676SJeff Bonwick } 2845b24ab676SJeff Bonwick 2846b24ab676SJeff Bonwick static void 2847b24ab676SJeff Bonwick zio_ddt_child_write_ready(zio_t *zio) 2848b24ab676SJeff Bonwick { 2849b24ab676SJeff Bonwick int p = zio->io_prop.zp_copies; 2850b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2851b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2852b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2853b24ab676SJeff Bonwick zio_t *pio; 2854b24ab676SJeff Bonwick 2855b24ab676SJeff Bonwick if (zio->io_error) 2856b24ab676SJeff Bonwick return; 2857b24ab676SJeff Bonwick 2858b24ab676SJeff Bonwick ddt_enter(ddt); 2859b24ab676SJeff Bonwick 2860b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2861b24ab676SJeff Bonwick 2862b24ab676SJeff Bonwick ddt_phys_fill(ddp, zio->io_bp); 2863b24ab676SJeff Bonwick 28640f7643c7SGeorge Wilson zio_link_t *zl = NULL; 28650f7643c7SGeorge Wilson while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2866b24ab676SJeff Bonwick ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2867b24ab676SJeff Bonwick 2868b24ab676SJeff Bonwick ddt_exit(ddt); 2869b24ab676SJeff Bonwick } 2870b24ab676SJeff Bonwick 2871b24ab676SJeff Bonwick static void 2872b24ab676SJeff Bonwick zio_ddt_child_write_done(zio_t *zio) 2873b24ab676SJeff Bonwick { 2874b24ab676SJeff Bonwick int p = zio->io_prop.zp_copies; 2875b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2876b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2877b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2878b24ab676SJeff Bonwick 2879b24ab676SJeff Bonwick ddt_enter(ddt); 2880b24ab676SJeff Bonwick 2881b24ab676SJeff Bonwick ASSERT(ddp->ddp_refcnt == 0); 2882b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2883b24ab676SJeff Bonwick dde->dde_lead_zio[p] = NULL; 2884b24ab676SJeff Bonwick 2885b24ab676SJeff Bonwick if (zio->io_error == 0) { 28860f7643c7SGeorge 
Wilson zio_link_t *zl = NULL; 28870f7643c7SGeorge Wilson while (zio_walk_parents(zio, &zl) != NULL) 2888b24ab676SJeff Bonwick ddt_phys_addref(ddp); 2889b24ab676SJeff Bonwick } else { 2890b24ab676SJeff Bonwick ddt_phys_clear(ddp); 2891b24ab676SJeff Bonwick } 2892b24ab676SJeff Bonwick 2893b24ab676SJeff Bonwick ddt_exit(ddt); 2894b24ab676SJeff Bonwick } 2895b24ab676SJeff Bonwick 2896b24ab676SJeff Bonwick static void 2897b24ab676SJeff Bonwick zio_ddt_ditto_write_done(zio_t *zio) 2898b24ab676SJeff Bonwick { 2899b24ab676SJeff Bonwick int p = DDT_PHYS_DITTO; 2900b24ab676SJeff Bonwick zio_prop_t *zp = &zio->io_prop; 2901b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2902b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2903b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2904b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2905b24ab676SJeff Bonwick ddt_key_t *ddk = &dde->dde_key; 2906b24ab676SJeff Bonwick 2907b24ab676SJeff Bonwick ddt_enter(ddt); 2908b24ab676SJeff Bonwick 2909b24ab676SJeff Bonwick ASSERT(ddp->ddp_refcnt == 0); 2910b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2911b24ab676SJeff Bonwick dde->dde_lead_zio[p] = NULL; 2912b24ab676SJeff Bonwick 2913b24ab676SJeff Bonwick if (zio->io_error == 0) { 2914b24ab676SJeff Bonwick ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2915b24ab676SJeff Bonwick ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2916b24ab676SJeff Bonwick ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2917b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0) 2918b24ab676SJeff Bonwick ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2919b24ab676SJeff Bonwick ddt_phys_fill(ddp, bp); 2920b24ab676SJeff Bonwick } 2921b24ab676SJeff Bonwick 2922b24ab676SJeff Bonwick ddt_exit(ddt); 2923b24ab676SJeff Bonwick } 2924b24ab676SJeff Bonwick 2925b24ab676SJeff Bonwick static int 2926b24ab676SJeff Bonwick zio_ddt_write(zio_t *zio) 2927b24ab676SJeff Bonwick { 2928b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 2929b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2930b24ab676SJeff Bonwick uint64_t txg = zio->io_txg; 2931b24ab676SJeff Bonwick zio_prop_t *zp = &zio->io_prop; 2932b24ab676SJeff Bonwick int p = zp->zp_copies; 2933b24ab676SJeff Bonwick int ditto_copies; 2934b24ab676SJeff Bonwick zio_t *cio = NULL; 2935b24ab676SJeff Bonwick zio_t *dio = NULL; 2936b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(spa, bp); 2937b24ab676SJeff Bonwick ddt_entry_t *dde; 2938b24ab676SJeff Bonwick ddt_phys_t *ddp; 2939b24ab676SJeff Bonwick 2940b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2941b24ab676SJeff Bonwick ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2942b24ab676SJeff Bonwick ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 29435602294fSDan Kimmel ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 2944b24ab676SJeff Bonwick 2945b24ab676SJeff Bonwick ddt_enter(ddt); 2946b24ab676SJeff Bonwick dde = ddt_lookup(ddt, bp, B_TRUE); 2947b24ab676SJeff Bonwick ddp = &dde->dde_phys[p]; 2948b24ab676SJeff Bonwick 2949b24ab676SJeff Bonwick if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2950b24ab676SJeff Bonwick /* 2951b24ab676SJeff Bonwick * If we're using a weak checksum, upgrade to a strong checksum 2952b24ab676SJeff Bonwick * and try again. If we're already using a strong checksum, 2953b24ab676SJeff Bonwick * we can't resolve it, so just convert to an ordinary write. 2954b24ab676SJeff Bonwick * (And automatically e-mail a paper to Nature?) 
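 * In the weak-checksum case we zero the bp and restart from
 * ZIO_STAGE_OPEN so the data is re-checksummed with the dedup checksum;
 * in the strong case we simply clear the dedup bit. Either way the zio
 * falls through to the ordinary write pipeline.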
2955b24ab676SJeff Bonwick */ 295645818ee1SMatthew Ahrens if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 295745818ee1SMatthew Ahrens ZCHECKSUM_FLAG_DEDUP)) { 2958b24ab676SJeff Bonwick zp->zp_checksum = spa_dedup_checksum(spa); 2959b24ab676SJeff Bonwick zio_pop_transforms(zio); 2960b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_OPEN; 2961b24ab676SJeff Bonwick BP_ZERO(bp); 2962b24ab676SJeff Bonwick } else { 296380901aeaSGeorge Wilson zp->zp_dedup = B_FALSE; 29645602294fSDan Kimmel BP_SET_DEDUP(bp, B_FALSE); 2965b24ab676SJeff Bonwick } 29665602294fSDan Kimmel ASSERT(!BP_GET_DEDUP(bp)); 2967b24ab676SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 2968b24ab676SJeff Bonwick ddt_exit(ddt); 2969b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2970b24ab676SJeff Bonwick } 2971b24ab676SJeff Bonwick 2972b24ab676SJeff Bonwick ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2973b24ab676SJeff Bonwick ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2974b24ab676SJeff Bonwick 2975b24ab676SJeff Bonwick if (ditto_copies > ddt_ditto_copies_present(dde) && 2976b24ab676SJeff Bonwick dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2977b24ab676SJeff Bonwick zio_prop_t czp = *zp; 2978b24ab676SJeff Bonwick 2979b24ab676SJeff Bonwick czp.zp_copies = ditto_copies; 2980b24ab676SJeff Bonwick 2981b24ab676SJeff Bonwick /* 2982b24ab676SJeff Bonwick * If we arrived here with an override bp, we won't have run 2983b24ab676SJeff Bonwick * the transform stack, so we won't have the data we need to 2984b24ab676SJeff Bonwick * generate a child i/o. So, toss the override bp and restart. 2985b24ab676SJeff Bonwick * This is safe, because using the override bp is just an 2986b24ab676SJeff Bonwick * optimization; and it's rare, so the cost doesn't matter. 2987b24ab676SJeff Bonwick */ 2988b24ab676SJeff Bonwick if (zio->io_bp_override) { 2989b24ab676SJeff Bonwick zio_pop_transforms(zio); 2990b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_OPEN; 2991b24ab676SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 2992b24ab676SJeff Bonwick zio->io_bp_override = NULL; 2993b24ab676SJeff Bonwick BP_ZERO(bp); 2994b24ab676SJeff Bonwick ddt_exit(ddt); 2995b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2996b24ab676SJeff Bonwick } 2997b24ab676SJeff Bonwick 2998770499e1SDan Kimmel dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 29995602294fSDan Kimmel zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL, 30008df0bcf0SPaul Dagnelie NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 3001b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 3002b24ab676SJeff Bonwick 3003770499e1SDan Kimmel zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL); 3004b24ab676SJeff Bonwick dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 3005b24ab676SJeff Bonwick } 3006b24ab676SJeff Bonwick 3007b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 3008b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0) 3009b24ab676SJeff Bonwick ddt_bp_fill(ddp, bp, txg); 3010b24ab676SJeff Bonwick if (dde->dde_lead_zio[p] != NULL) 3011b24ab676SJeff Bonwick zio_add_child(zio, dde->dde_lead_zio[p]); 3012b24ab676SJeff Bonwick else 3013b24ab676SJeff Bonwick ddt_phys_addref(ddp); 3014b24ab676SJeff Bonwick } else if (zio->io_bp_override) { 3015b24ab676SJeff Bonwick ASSERT(bp->blk_birth == txg); 3016b24ab676SJeff Bonwick ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 3017b24ab676SJeff Bonwick ddt_phys_fill(ddp, bp); 3018b24ab676SJeff Bonwick ddt_phys_addref(ddp); 3019b24ab676SJeff Bonwick } else { 3020770499e1SDan Kimmel cio = 
zio_write(zio, spa, txg, bp, zio->io_orig_abd, 30215602294fSDan Kimmel zio->io_orig_size, zio->io_orig_size, zp, 30228df0bcf0SPaul Dagnelie zio_ddt_child_write_ready, NULL, NULL, 3023b24ab676SJeff Bonwick zio_ddt_child_write_done, dde, zio->io_priority, 3024b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 3025b24ab676SJeff Bonwick 3026770499e1SDan Kimmel zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 3027b24ab676SJeff Bonwick dde->dde_lead_zio[p] = cio; 3028b24ab676SJeff Bonwick } 3029b24ab676SJeff Bonwick 3030b24ab676SJeff Bonwick ddt_exit(ddt); 3031b24ab676SJeff Bonwick 3032b24ab676SJeff Bonwick if (cio) 3033b24ab676SJeff Bonwick zio_nowait(cio); 3034b24ab676SJeff Bonwick if (dio) 3035b24ab676SJeff Bonwick zio_nowait(dio); 3036b24ab676SJeff Bonwick 3037b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3038b24ab676SJeff Bonwick } 3039b24ab676SJeff Bonwick 30403f9d6ad7SLin Ling ddt_entry_t *freedde; /* for debugging */ 30413f9d6ad7SLin Ling 3042b24ab676SJeff Bonwick static int 3043b24ab676SJeff Bonwick zio_ddt_free(zio_t *zio) 3044b24ab676SJeff Bonwick { 3045b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 3046b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 3047b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(spa, bp); 3048b24ab676SJeff Bonwick ddt_entry_t *dde; 3049b24ab676SJeff Bonwick ddt_phys_t *ddp; 3050b24ab676SJeff Bonwick 3051b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 3052b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3053b24ab676SJeff Bonwick 3054b24ab676SJeff Bonwick ddt_enter(ddt); 30553f9d6ad7SLin Ling freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 3056b24ab676SJeff Bonwick ddp = ddt_phys_select(dde, bp); 3057b24ab676SJeff Bonwick ddt_phys_decref(ddp); 3058b24ab676SJeff Bonwick ddt_exit(ddt); 3059b24ab676SJeff Bonwick 3060b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3061b24ab676SJeff Bonwick } 3062b24ab676SJeff Bonwick 3063b24ab676SJeff Bonwick /* 3064b24ab676SJeff Bonwick * ========================================================================== 3065b24ab676SJeff Bonwick * Allocate and free blocks 3066b24ab676SJeff Bonwick * ========================================================================== 3067b24ab676SJeff Bonwick */ 30680f7643c7SGeorge Wilson 30690f7643c7SGeorge Wilson static zio_t * 3070f78cdc34SPaul Dagnelie zio_io_to_allocate(spa_t *spa, int allocator) 30710f7643c7SGeorge Wilson { 30720f7643c7SGeorge Wilson zio_t *zio; 30730f7643c7SGeorge Wilson 3074f78cdc34SPaul Dagnelie ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator])); 30750f7643c7SGeorge Wilson 3076f78cdc34SPaul Dagnelie zio = avl_first(&spa->spa_alloc_trees[allocator]); 30770f7643c7SGeorge Wilson if (zio == NULL) 30780f7643c7SGeorge Wilson return (NULL); 30790f7643c7SGeorge Wilson 30800f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 30810f7643c7SGeorge Wilson 30820f7643c7SGeorge Wilson /* 30830f7643c7SGeorge Wilson * Try to place a reservation for this zio. If we're unable to 30840f7643c7SGeorge Wilson * reserve then we throttle. 
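 * On failure we return NULL and leave the zio at the head of the
 * allocation tree; zio_allocate_dispatch() retries it as other I/Os
 * release their reservations.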
30850f7643c7SGeorge Wilson */ 3086f78cdc34SPaul Dagnelie ASSERT3U(zio->io_allocator, ==, allocator); 3087663207adSDon Brady if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, 3088f78cdc34SPaul Dagnelie zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) { 30890f7643c7SGeorge Wilson return (NULL); 30900f7643c7SGeorge Wilson } 30910f7643c7SGeorge Wilson 3092f78cdc34SPaul Dagnelie avl_remove(&spa->spa_alloc_trees[allocator], zio); 30930f7643c7SGeorge Wilson ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 30940f7643c7SGeorge Wilson 30950f7643c7SGeorge Wilson return (zio); 30960f7643c7SGeorge Wilson } 30970f7643c7SGeorge Wilson 30980f7643c7SGeorge Wilson static int 30990f7643c7SGeorge Wilson zio_dva_throttle(zio_t *zio) 31000f7643c7SGeorge Wilson { 31010f7643c7SGeorge Wilson spa_t *spa = zio->io_spa; 31020f7643c7SGeorge Wilson zio_t *nio; 3103663207adSDon Brady metaslab_class_t *mc; 3104663207adSDon Brady 3105663207adSDon Brady /* locate an appropriate allocation class */ 3106663207adSDon Brady mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type, 3107663207adSDon Brady zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk); 31080f7643c7SGeorge Wilson 31090f7643c7SGeorge Wilson if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 3110663207adSDon Brady !mc->mc_alloc_throttle_enabled || 31110f7643c7SGeorge Wilson zio->io_child_type == ZIO_CHILD_GANG || 31120f7643c7SGeorge Wilson zio->io_flags & ZIO_FLAG_NODATA) { 31130f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 31140f7643c7SGeorge Wilson } 31150f7643c7SGeorge Wilson 31160f7643c7SGeorge Wilson ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 31170f7643c7SGeorge Wilson 31180f7643c7SGeorge Wilson ASSERT3U(zio->io_queued_timestamp, >, 0); 31190f7643c7SGeorge Wilson ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 31200f7643c7SGeorge Wilson 3121f78cdc34SPaul Dagnelie zbookmark_phys_t *bm = &zio->io_bookmark; 3122f78cdc34SPaul Dagnelie /* 3123f78cdc34SPaul Dagnelie * We want to try to use as many allocators as possible to help improve 3124f78cdc34SPaul Dagnelie * performance, but we also want logically adjacent IOs to be physically 3125f78cdc34SPaul Dagnelie * adjacent to improve sequential read performance. We chunk each object 3126f78cdc34SPaul Dagnelie * into 2^20 block regions, and then hash based on the objset, object, 3127f78cdc34SPaul Dagnelie * level, and region to accomplish both of these goals. 3128f78cdc34SPaul Dagnelie */ 3129f78cdc34SPaul Dagnelie zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object, 3130f78cdc34SPaul Dagnelie bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; 3131f78cdc34SPaul Dagnelie mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]); 31320f7643c7SGeorge Wilson ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3133663207adSDon Brady zio->io_metaslab_class = mc; 3134f78cdc34SPaul Dagnelie avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio); 3135663207adSDon Brady nio = zio_io_to_allocate(spa, zio->io_allocator); 3136f78cdc34SPaul Dagnelie mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]); 31370f7643c7SGeorge Wilson 31380f7643c7SGeorge Wilson if (nio == zio) 31390f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 31400f7643c7SGeorge Wilson 31410f7643c7SGeorge Wilson if (nio != NULL) { 31420f7643c7SGeorge Wilson ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 31430f7643c7SGeorge Wilson /* 31440f7643c7SGeorge Wilson * We are passing control to a new zio so make sure that 31450f7643c7SGeorge Wilson * it is processed by a different thread. 
We do this to 31460f7643c7SGeorge Wilson * avoid stack overflows that can occur when parents are 31470f7643c7SGeorge Wilson * throttled and children are making progress. We allow 31480f7643c7SGeorge Wilson * it to go to the head of the taskq since it's already 31490f7643c7SGeorge Wilson * been waiting. 31500f7643c7SGeorge Wilson */ 31510f7643c7SGeorge Wilson zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 31520f7643c7SGeorge Wilson } 31530f7643c7SGeorge Wilson return (ZIO_PIPELINE_STOP); 31540f7643c7SGeorge Wilson } 31550f7643c7SGeorge Wilson 3156663207adSDon Brady static void 3157f78cdc34SPaul Dagnelie zio_allocate_dispatch(spa_t *spa, int allocator) 31580f7643c7SGeorge Wilson { 31590f7643c7SGeorge Wilson zio_t *zio; 31600f7643c7SGeorge Wilson 3161f78cdc34SPaul Dagnelie mutex_enter(&spa->spa_alloc_locks[allocator]); 3162f78cdc34SPaul Dagnelie zio = zio_io_to_allocate(spa, allocator); 3163f78cdc34SPaul Dagnelie mutex_exit(&spa->spa_alloc_locks[allocator]); 31640f7643c7SGeorge Wilson if (zio == NULL) 31650f7643c7SGeorge Wilson return; 31660f7643c7SGeorge Wilson 31670f7643c7SGeorge Wilson ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 31680f7643c7SGeorge Wilson ASSERT0(zio->io_error); 31690f7643c7SGeorge Wilson zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 31700f7643c7SGeorge Wilson } 31710f7643c7SGeorge Wilson 3172e05725b1Sbonwick static int 3173fa9e4066Sahrens zio_dva_allocate(zio_t *zio) 3174fa9e4066Sahrens { 31758654d025Sperrin spa_t *spa = zio->io_spa; 3176663207adSDon Brady metaslab_class_t *mc; 3177fa9e4066Sahrens blkptr_t *bp = zio->io_bp; 3178fa9e4066Sahrens int error; 317909c9d376SGeorge Wilson int flags = 0; 3180fa9e4066Sahrens 3181f5383399SBill Moore if (zio->io_gang_leader == NULL) { 3182f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3183f5383399SBill Moore zio->io_gang_leader = zio; 3184f5383399SBill Moore } 3185f5383399SBill Moore 3186fa9e4066Sahrens ASSERT(BP_IS_HOLE(bp)); 3187fb09f5aaSMadhav Suresh ASSERT0(BP_GET_NDVAS(bp)); 3188b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, >, 0); 3189b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 3190fa9e4066Sahrens ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 3191fa9e4066Sahrens 3192663207adSDon Brady if (zio->io_flags & ZIO_FLAG_NODATA) 31930f7643c7SGeorge Wilson flags |= METASLAB_DONT_THROTTLE; 3194663207adSDon Brady if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 31950f7643c7SGeorge Wilson flags |= METASLAB_GANG_CHILD; 3196663207adSDon Brady if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 31970f7643c7SGeorge Wilson flags |= METASLAB_ASYNC_ALLOC; 3198663207adSDon Brady 3199663207adSDon Brady /* 3200663207adSDon Brady * if not already chosen, locate an appropriate allocation class 3201663207adSDon Brady */ 3202663207adSDon Brady mc = zio->io_metaslab_class; 3203663207adSDon Brady if (mc == NULL) { 3204663207adSDon Brady mc = spa_preferred_class(spa, zio->io_size, 3205663207adSDon Brady zio->io_prop.zp_type, zio->io_prop.zp_level, 3206663207adSDon Brady zio->io_prop.zp_zpl_smallblk); 3207663207adSDon Brady zio->io_metaslab_class = mc; 32080f7643c7SGeorge Wilson } 32090f7643c7SGeorge Wilson 3210e14bb325SJeff Bonwick error = metaslab_alloc(spa, mc, zio->io_size, bp, 32118363e80aSGeorge Wilson zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3212f78cdc34SPaul Dagnelie &zio->io_alloc_list, zio, zio->io_allocator); 3213fa9e4066Sahrens 3214663207adSDon Brady /* 3215663207adSDon Brady * Fallback to normal class when an alloc class is full 3216663207adSDon Brady */ 
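 /* For example, a write that was directed to the special or dedup allocation class and found it out of space. */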
3217663207adSDon Brady if (error == ENOSPC && mc != spa_normal_class(spa)) { 3218663207adSDon Brady /* 3219663207adSDon Brady * If throttling, transfer reservation over to normal class. 3220663207adSDon Brady * The io_allocator slot can remain the same even though we 3221663207adSDon Brady * are switching classes. 3222663207adSDon Brady */ 3223663207adSDon Brady if (mc->mc_alloc_throttle_enabled && 3224663207adSDon Brady (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { 3225663207adSDon Brady metaslab_class_throttle_unreserve(mc, 3226663207adSDon Brady zio->io_prop.zp_copies, zio->io_allocator, zio); 3227663207adSDon Brady zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; 3228663207adSDon Brady 3229663207adSDon Brady mc = spa_normal_class(spa); 3230663207adSDon Brady VERIFY(metaslab_class_throttle_reserve(mc, 3231663207adSDon Brady zio->io_prop.zp_copies, zio->io_allocator, zio, 3232663207adSDon Brady flags | METASLAB_MUST_RESERVE)); 3233663207adSDon Brady } else { 3234663207adSDon Brady mc = spa_normal_class(spa); 3235663207adSDon Brady } 3236663207adSDon Brady zio->io_metaslab_class = mc; 3237663207adSDon Brady 3238663207adSDon Brady error = metaslab_alloc(spa, mc, zio->io_size, bp, 3239663207adSDon Brady zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3240663207adSDon Brady &zio->io_alloc_list, zio, zio->io_allocator); 3241663207adSDon Brady } 3242663207adSDon Brady 32430f7643c7SGeorge Wilson if (error != 0) { 324421f7c81cSMatthew Ahrens zfs_dbgmsg("%s: metaslab allocation failure: zio %p, " 324509c9d376SGeorge Wilson "size %llu, error %d", spa_name(spa), zio, zio->io_size, 324609c9d376SGeorge Wilson error); 3247e14bb325SJeff Bonwick if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 3248e14bb325SJeff Bonwick return (zio_write_gang_block(zio)); 3249fa9e4066Sahrens zio->io_error = error; 3250fa9e4066Sahrens } 3251e05725b1Sbonwick 3252e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3253fa9e4066Sahrens } 3254fa9e4066Sahrens 3255e05725b1Sbonwick static int 3256fa9e4066Sahrens zio_dva_free(zio_t *zio) 3257fa9e4066Sahrens { 3258e14bb325SJeff Bonwick metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3259fa9e4066Sahrens 3260e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3261fa9e4066Sahrens } 3262fa9e4066Sahrens 3263e05725b1Sbonwick static int 3264fa9e4066Sahrens zio_dva_claim(zio_t *zio) 3265fa9e4066Sahrens { 3266e14bb325SJeff Bonwick int error; 3267e14bb325SJeff Bonwick 3268e14bb325SJeff Bonwick error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3269e14bb325SJeff Bonwick if (error) 3270e14bb325SJeff Bonwick zio->io_error = error; 3271fa9e4066Sahrens 3272e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3273fa9e4066Sahrens } 3274fa9e4066Sahrens 3275e14bb325SJeff Bonwick /* 3276e14bb325SJeff Bonwick * Undo an allocation. This is used by zio_done() when an I/O fails 3277e14bb325SJeff Bonwick * and we want to give back the block we just allocated. 3278e14bb325SJeff Bonwick * This handles both normal blocks and gang blocks. 
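 * We free this bp's own DVAs first, then recurse through the in-core
 * gang tree so every child block pointer is returned as well.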
3279e14bb325SJeff Bonwick */ 3280e14bb325SJeff Bonwick static void 3281e14bb325SJeff Bonwick zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3282e14bb325SJeff Bonwick { 3283e14bb325SJeff Bonwick ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 3284b24ab676SJeff Bonwick ASSERT(zio->io_bp_override == NULL); 3285e14bb325SJeff Bonwick 3286e14bb325SJeff Bonwick if (!BP_IS_HOLE(bp)) 3287b24ab676SJeff Bonwick metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 3288e14bb325SJeff Bonwick 3289e14bb325SJeff Bonwick if (gn != NULL) { 3290e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3291e14bb325SJeff Bonwick zio_dva_unallocate(zio, gn->gn_child[g], 3292e14bb325SJeff Bonwick &gn->gn_gbh->zg_blkptr[g]); 3293e14bb325SJeff Bonwick } 3294e14bb325SJeff Bonwick } 3295e14bb325SJeff Bonwick } 3296e14bb325SJeff Bonwick 3297e14bb325SJeff Bonwick /* 3298e14bb325SJeff Bonwick * Try to allocate an intent log block. Return 0 on success, errno on failure. 3299e14bb325SJeff Bonwick */ 3300e14bb325SJeff Bonwick int 3301eb633035STom Caputi zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp, 3302f78cdc34SPaul Dagnelie blkptr_t *old_bp, uint64_t size, boolean_t *slog) 3303e14bb325SJeff Bonwick { 3304e09fa4daSNeil Perrin int error = 1; 33058363e80aSGeorge Wilson zio_alloc_list_t io_alloc_list; 3306e14bb325SJeff Bonwick 3307b24ab676SJeff Bonwick ASSERT(txg > spa_syncing_txg(spa)); 3308b24ab676SJeff Bonwick 33098363e80aSGeorge Wilson metaslab_trace_init(&io_alloc_list); 3310663207adSDon Brady 3311663207adSDon Brady /* 3312663207adSDon Brady * Block pointer fields are useful to metaslabs for stats and debugging. 3313663207adSDon Brady * Fill in the obvious ones before calling into metaslab_alloc(). 3314663207adSDon Brady */ 3315663207adSDon Brady BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3316663207adSDon Brady BP_SET_PSIZE(new_bp, size); 3317663207adSDon Brady BP_SET_LEVEL(new_bp, 0); 3318663207adSDon Brady 3319f78cdc34SPaul Dagnelie /* 3320f78cdc34SPaul Dagnelie * When allocating a zil block, we don't have information about 3321f78cdc34SPaul Dagnelie * the final destination of the block except the objset it's part 3322f78cdc34SPaul Dagnelie * of, so we just hash the objset ID to pick the allocator to get 3323f78cdc34SPaul Dagnelie * some parallelism. 
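 * This is the same cityhash-mod-allocator-count scheme that
 * zio_dva_throttle() uses, but keyed only by the dataset's object
 * number, so all log blocks for an objset map to a single allocator.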
3324f78cdc34SPaul Dagnelie */ 3325c5ee4681SAlexander Motin error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3326f78cdc34SPaul Dagnelie txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL, 3327eb633035STom Caputi cityhash4(0, 0, 0, 3328eb633035STom Caputi os->os_dsl_dataset->ds_object) % spa->spa_alloc_count); 3329c5ee4681SAlexander Motin if (error == 0) { 3330c5ee4681SAlexander Motin *slog = TRUE; 3331c5ee4681SAlexander Motin } else { 3332b24ab676SJeff Bonwick error = metaslab_alloc(spa, spa_normal_class(spa), size, 33338363e80aSGeorge Wilson new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, 3334eb633035STom Caputi &io_alloc_list, NULL, cityhash4(0, 0, 0, 3335eb633035STom Caputi os->os_dsl_dataset->ds_object) % spa->spa_alloc_count); 3336c5ee4681SAlexander Motin if (error == 0) 3337c5ee4681SAlexander Motin *slog = FALSE; 3338840345f6SGeorge Wilson } 33398363e80aSGeorge Wilson metaslab_trace_fini(&io_alloc_list); 3340e14bb325SJeff Bonwick 3341e14bb325SJeff Bonwick if (error == 0) { 3342e14bb325SJeff Bonwick BP_SET_LSIZE(new_bp, size); 3343e14bb325SJeff Bonwick BP_SET_PSIZE(new_bp, size); 3344e14bb325SJeff Bonwick BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 33456e1f5caaSNeil Perrin BP_SET_CHECKSUM(new_bp, 33466e1f5caaSNeil Perrin spa_version(spa) >= SPA_VERSION_SLIM_ZIL 33476e1f5caaSNeil Perrin ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3348e14bb325SJeff Bonwick BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3349e14bb325SJeff Bonwick BP_SET_LEVEL(new_bp, 0); 3350b24ab676SJeff Bonwick BP_SET_DEDUP(new_bp, 0); 3351e14bb325SJeff Bonwick BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3352eb633035STom Caputi 3353eb633035STom Caputi /* 3354eb633035STom Caputi * encrypted blocks will require an IV and salt. We generate 3355eb633035STom Caputi * these now since we will not be rewriting the bp at 3356eb633035STom Caputi * rewrite time. 3357eb633035STom Caputi */ 3358eb633035STom Caputi if (os->os_encrypted) { 3359eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 3360eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 3361eb633035STom Caputi 3362eb633035STom Caputi BP_SET_CRYPT(new_bp, B_TRUE); 3363eb633035STom Caputi VERIFY0(spa_crypt_get_salt(spa, 3364eb633035STom Caputi dmu_objset_id(os), salt)); 3365eb633035STom Caputi VERIFY0(zio_crypt_generate_iv(iv)); 3366eb633035STom Caputi 3367eb633035STom Caputi zio_crypt_encode_params_bp(new_bp, salt, iv); 3368eb633035STom Caputi } 33691271e4b1SPrakash Surya } else { 33701271e4b1SPrakash Surya zfs_dbgmsg("%s: zil block allocation failure: " 33711271e4b1SPrakash Surya "size %llu, error %d", spa_name(spa), size, error); 3372e14bb325SJeff Bonwick } 3373e14bb325SJeff Bonwick 3374e14bb325SJeff Bonwick return (error); 3375e14bb325SJeff Bonwick } 3376e14bb325SJeff Bonwick 3377fa9e4066Sahrens /* 3378fa9e4066Sahrens * ========================================================================== 3379fa9e4066Sahrens * Read and write to physical devices 3380fa9e4066Sahrens * ========================================================================== 3381fa9e4066Sahrens */ 3382738f37bcSGeorge Wilson 3383738f37bcSGeorge Wilson /* 3384738f37bcSGeorge Wilson * Issue an I/O to the underlying vdev. Typically the issue pipeline 3385738f37bcSGeorge Wilson * stops after this stage and will resume upon I/O completion. 3386738f37bcSGeorge Wilson * However, there are instances where the vdev layer may need to 3387738f37bcSGeorge Wilson * continue the pipeline when an I/O was not issued. 
Since the I/O 3388738f37bcSGeorge Wilson * that was sent to the vdev layer might be different than the one 3389738f37bcSGeorge Wilson * currently active in the pipeline (see vdev_queue_io()), we explicitly 3390738f37bcSGeorge Wilson * force the underlying vdev layers to call either zio_execute() or 3391738f37bcSGeorge Wilson * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3392738f37bcSGeorge Wilson */ 3393e05725b1Sbonwick static int 339444cd46caSbillm zio_vdev_io_start(zio_t *zio) 3395fa9e4066Sahrens { 3396fa9e4066Sahrens vdev_t *vd = zio->io_vd; 339744cd46caSbillm uint64_t align; 33980a4e9518Sgw spa_t *spa = zio->io_spa; 33990a4e9518Sgw 3400dd50e0ccSTony Hutter zio->io_delay = 0; 3401dd50e0ccSTony Hutter 3402e14bb325SJeff Bonwick ASSERT(zio->io_error == 0); 3403e14bb325SJeff Bonwick ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3404fa9e4066Sahrens 3405e14bb325SJeff Bonwick if (vd == NULL) { 3406e14bb325SJeff Bonwick if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3407e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3408fa9e4066Sahrens 3409e14bb325SJeff Bonwick /* 3410e14bb325SJeff Bonwick * The mirror_ops handle multiple DVAs in a single BP. 3411e14bb325SJeff Bonwick */ 3412738f37bcSGeorge Wilson vdev_mirror_ops.vdev_op_io_start(zio); 3413738f37bcSGeorge Wilson return (ZIO_PIPELINE_STOP); 3414fa9e4066Sahrens } 3415fa9e4066Sahrens 34160f7643c7SGeorge Wilson ASSERT3P(zio->io_logical, !=, zio); 34176f793812SPavel Zakharov if (zio->io_type == ZIO_TYPE_WRITE) { 34186f793812SPavel Zakharov ASSERT(spa->spa_trust_config); 34196f793812SPavel Zakharov 3420a3874b8bSToomas Soome /* 3421a3874b8bSToomas Soome * Note: the code can handle other kinds of writes, 3422a3874b8bSToomas Soome * but we don't expect them. 3423a3874b8bSToomas Soome */ 34246f793812SPavel Zakharov if (zio->io_vd->vdev_removing) { 34256f793812SPavel Zakharov ASSERT(zio->io_flags & 34266f793812SPavel Zakharov (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 34273a4b1be9SMatthew Ahrens ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 34286f793812SPavel Zakharov } 34295cabbc6bSPrashanth Sreenivasa } 34300f7643c7SGeorge Wilson 3431e14bb325SJeff Bonwick align = 1ULL << vd->vdev_top->vdev_ashift; 3432e14bb325SJeff Bonwick 34332a104a52SAlex Reece if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 34342a104a52SAlex Reece P2PHASE(zio->io_size, align) != 0) { 34352a104a52SAlex Reece /* Transform logical writes to be a full physical block size. */ 3436ecc2d604Sbonwick uint64_t asize = P2ROUNDUP(zio->io_size, align); 3437770499e1SDan Kimmel abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3438e14bb325SJeff Bonwick ASSERT(vd == vd->vdev_top); 3439ecc2d604Sbonwick if (zio->io_type == ZIO_TYPE_WRITE) { 3440770499e1SDan Kimmel abd_copy(abuf, zio->io_abd, zio->io_size); 3441770499e1SDan Kimmel abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3442ecc2d604Sbonwick } 3443e14bb325SJeff Bonwick zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3444ecc2d604Sbonwick } 3445ecc2d604Sbonwick 34462a104a52SAlex Reece /* 34472a104a52SAlex Reece * If this is not a physical io, make sure that it is properly aligned 34482a104a52SAlex Reece * before proceeding. 
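 * For example (illustrative values): on an ashift-12 top-level vdev
 * (align == 4096), a logical I/O at offset 6144 would trip the first
 * assertion below, since P2PHASE(6144, 4096) == 2048.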
34492a104a52SAlex Reece */ 34502a104a52SAlex Reece if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 34512a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_offset, align)); 34522a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_size, align)); 34532a104a52SAlex Reece } else { 34542a104a52SAlex Reece /* 34552a104a52SAlex Reece * For physical writes, we allow 512b aligned writes and assume 34562a104a52SAlex Reece * the device will perform a read-modify-write as necessary. 34572a104a52SAlex Reece */ 34582a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 34592a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 34602a104a52SAlex Reece } 34612a104a52SAlex Reece 3462f9af39baSGeorge Wilson VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 34638ad4d6ddSJeff Bonwick 34648ad4d6ddSJeff Bonwick /* 34658ad4d6ddSJeff Bonwick * If this is a repair I/O, and there's no self-healing involved -- 34668ad4d6ddSJeff Bonwick * that is, we're just resilvering what we expect to resilver -- 34678ad4d6ddSJeff Bonwick * then don't do the I/O unless zio's txg is actually in vd's DTL. 34683a4b1be9SMatthew Ahrens * This prevents spurious resilvering. 34693a4b1be9SMatthew Ahrens * 34703a4b1be9SMatthew Ahrens * There are a few ways that we can end up creating these spurious 34713a4b1be9SMatthew Ahrens * resilver i/os: 34723a4b1be9SMatthew Ahrens * 34733a4b1be9SMatthew Ahrens * 1. A resilver i/o will be issued if any DVA in the BP has a 34743a4b1be9SMatthew Ahrens * dirty DTL. The mirror code will issue resilver writes to 34753a4b1be9SMatthew Ahrens * each DVA, including the one(s) that are not on vdevs with dirty 34763a4b1be9SMatthew Ahrens * DTLs. 34773a4b1be9SMatthew Ahrens * 34783a4b1be9SMatthew Ahrens * 2. With nested replication, which happens when we have a 34793a4b1be9SMatthew Ahrens * "replacing" or "spare" vdev that's a child of a mirror or raidz. 34803a4b1be9SMatthew Ahrens * For example, given mirror(replacing(A+B), C), it's likely that 34813a4b1be9SMatthew Ahrens * only A is out of date (it's the new device). In this case, we'll 34823a4b1be9SMatthew Ahrens * read from C, then use the data to resilver A+B -- but we don't 34833a4b1be9SMatthew Ahrens * actually want to resilver B, just A. The top-level mirror has no 34843a4b1be9SMatthew Ahrens * way to know this, so instead we just discard unnecessary repairs 34853a4b1be9SMatthew Ahrens * as we work our way down the vdev tree. 34863a4b1be9SMatthew Ahrens * 34873a4b1be9SMatthew Ahrens * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 34883a4b1be9SMatthew Ahrens * The same logic applies to any form of nested replication: ditto 34893a4b1be9SMatthew Ahrens * + mirror, RAID-Z + replacing, etc. 34903a4b1be9SMatthew Ahrens * 34913a4b1be9SMatthew Ahrens * However, indirect vdevs point off to other vdevs which may have 34923a4b1be9SMatthew Ahrens * DTL's, so we never bypass them. The child i/os on concrete vdevs 34933a4b1be9SMatthew Ahrens * will be properly bypassed instead. 
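 *
 * Concretely, the check below bypasses a repair write whenever vd's
 * DTL_PARTIAL does not contain the single txg [io_txg, io_txg + 1):
 * vd never lost data born in that txg, so it has nothing to repair.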
34948ad4d6ddSJeff Bonwick */ 34958ad4d6ddSJeff Bonwick if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 34968ad4d6ddSJeff Bonwick !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 34978ad4d6ddSJeff Bonwick zio->io_txg != 0 && /* not a delegated i/o */ 34983a4b1be9SMatthew Ahrens vd->vdev_ops != &vdev_indirect_ops && 34998ad4d6ddSJeff Bonwick !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 35008ad4d6ddSJeff Bonwick ASSERT(zio->io_type == ZIO_TYPE_WRITE); 35018ad4d6ddSJeff Bonwick zio_vdev_io_bypass(zio); 35028ad4d6ddSJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 35038ad4d6ddSJeff Bonwick } 3504fa9e4066Sahrens 3505084fd14fSBrian Behlendorf if (vd->vdev_ops->vdev_op_leaf && (zio->io_type == ZIO_TYPE_READ || 3506084fd14fSBrian Behlendorf zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM)) { 3507e14bb325SJeff Bonwick 350843466aaeSMax Grossman if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) 3509a3f829aeSBill Moore return (ZIO_PIPELINE_CONTINUE); 3510e14bb325SJeff Bonwick 3511e14bb325SJeff Bonwick if ((zio = vdev_queue_io(zio)) == NULL) 3512e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3513e14bb325SJeff Bonwick 3514e14bb325SJeff Bonwick if (!vdev_accessible(vd, zio)) { 3515be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3516e14bb325SJeff Bonwick zio_interrupt(zio); 3517e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3518e14bb325SJeff Bonwick } 3519dd50e0ccSTony Hutter zio->io_delay = gethrtime(); 3520e14bb325SJeff Bonwick } 3521e14bb325SJeff Bonwick 3522738f37bcSGeorge Wilson vd->vdev_ops->vdev_op_io_start(zio); 3523738f37bcSGeorge Wilson return (ZIO_PIPELINE_STOP); 3524fa9e4066Sahrens } 3525fa9e4066Sahrens 3526e05725b1Sbonwick static int 3527fa9e4066Sahrens zio_vdev_io_done(zio_t *zio) 3528fa9e4066Sahrens { 3529e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd; 3530e14bb325SJeff Bonwick vdev_ops_t *ops = vd ? 
vd->vdev_ops : &vdev_mirror_ops; 3531e14bb325SJeff Bonwick boolean_t unexpected_error = B_FALSE; 3532e05725b1Sbonwick 3533d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3534e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3535d6e1c446SGeorge Wilson } 3536fa9e4066Sahrens 3537084fd14fSBrian Behlendorf ASSERT(zio->io_type == ZIO_TYPE_READ || 3538084fd14fSBrian Behlendorf zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM); 3539e14bb325SJeff Bonwick 3540dd50e0ccSTony Hutter if (zio->io_delay) 3541dd50e0ccSTony Hutter zio->io_delay = gethrtime() - zio->io_delay; 3542dd50e0ccSTony Hutter 3543e14bb325SJeff Bonwick if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 3544e14bb325SJeff Bonwick 3545e14bb325SJeff Bonwick vdev_queue_io_done(zio); 3546fa9e4066Sahrens 3547e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_WRITE) 3548e14bb325SJeff Bonwick vdev_cache_write(zio); 3549e14bb325SJeff Bonwick 3550e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 35518956713aSEric Schrock zio->io_error = zio_handle_device_injection(vd, 35528956713aSEric Schrock zio, EIO); 3553e14bb325SJeff Bonwick 3554e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 3555e14bb325SJeff Bonwick zio->io_error = zio_handle_label_injection(zio, EIO); 3556e14bb325SJeff Bonwick 3557084fd14fSBrian Behlendorf if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) { 3558e14bb325SJeff Bonwick if (!vdev_accessible(vd, zio)) { 3559be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3560e14bb325SJeff Bonwick } else { 3561e14bb325SJeff Bonwick unexpected_error = B_TRUE; 3562e14bb325SJeff Bonwick } 3563e14bb325SJeff Bonwick } 356451ece835Seschrock } 3565fa9e4066Sahrens 3566e14bb325SJeff Bonwick ops->vdev_op_io_done(zio); 3567e14bb325SJeff Bonwick 3568e14bb325SJeff Bonwick if (unexpected_error) 3569a3f829aeSBill Moore VERIFY(vdev_probe(vd, zio) == NULL); 3570e14bb325SJeff Bonwick 3571e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3572fa9e4066Sahrens } 3573fa9e4066Sahrens 3574a3874b8bSToomas Soome /* 3575a3874b8bSToomas Soome * This function is used to change the priority of an existing zio that is 3576a3874b8bSToomas Soome * currently in-flight. This is used by the arc to upgrade priority in the 3577a3874b8bSToomas Soome * event that a demand read is made for a block that is currently queued 3578a3874b8bSToomas Soome * as a scrub or async read IO. Otherwise, the high priority read request 3579a3874b8bSToomas Soome * would end up having to wait for the lower priority IO. 
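 *
 * For example, a block queued at ZIO_PRIORITY_SCRUB can be promoted to
 * ZIO_PRIORITY_SYNC_READ when a demand read arrives; the new priority is
 * applied to the queued leaf I/O and, below, recursively to all of its
 * in-flight children.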
3580a3874b8bSToomas Soome */ 3581a3874b8bSToomas Soome void 3582a3874b8bSToomas Soome zio_change_priority(zio_t *pio, zio_priority_t priority) 3583a3874b8bSToomas Soome { 3584a3874b8bSToomas Soome zio_t *cio, *cio_next; 3585a3874b8bSToomas Soome zio_link_t *zl = NULL; 3586a3874b8bSToomas Soome 3587a3874b8bSToomas Soome ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 3588a3874b8bSToomas Soome 3589a3874b8bSToomas Soome if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 3590a3874b8bSToomas Soome vdev_queue_change_io_priority(pio, priority); 3591a3874b8bSToomas Soome } else { 3592a3874b8bSToomas Soome pio->io_priority = priority; 3593a3874b8bSToomas Soome } 3594a3874b8bSToomas Soome 3595a3874b8bSToomas Soome mutex_enter(&pio->io_lock); 3596a3874b8bSToomas Soome for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 3597a3874b8bSToomas Soome cio_next = zio_walk_children(pio, &zl); 3598a3874b8bSToomas Soome zio_change_priority(cio, priority); 3599a3874b8bSToomas Soome } 3600a3874b8bSToomas Soome mutex_exit(&pio->io_lock); 3601a3874b8bSToomas Soome } 3602a3874b8bSToomas Soome 360322fe2c88SJonathan Adams /* 360422fe2c88SJonathan Adams * For non-raidz ZIOs, we can just copy aside the bad data read from the 360522fe2c88SJonathan Adams * disk, and use that to finish the checksum ereport later. 360622fe2c88SJonathan Adams */ 360722fe2c88SJonathan Adams static void 360822fe2c88SJonathan Adams zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 3609eb633035STom Caputi const abd_t *good_buf) 361022fe2c88SJonathan Adams { 361122fe2c88SJonathan Adams /* no processing needed */ 361222fe2c88SJonathan Adams zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 361322fe2c88SJonathan Adams } 361422fe2c88SJonathan Adams 361522fe2c88SJonathan Adams /*ARGSUSED*/ 361622fe2c88SJonathan Adams void 361722fe2c88SJonathan Adams zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 361822fe2c88SJonathan Adams { 3619eb633035STom Caputi void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size); 362022fe2c88SJonathan Adams 3621eb633035STom Caputi abd_copy(abd, zio->io_abd, zio->io_size); 362222fe2c88SJonathan Adams 362322fe2c88SJonathan Adams zcr->zcr_cbinfo = zio->io_size; 3624eb633035STom Caputi zcr->zcr_cbdata = abd; 362522fe2c88SJonathan Adams zcr->zcr_finish = zio_vsd_default_cksum_finish; 3626eb633035STom Caputi zcr->zcr_free = zio_abd_free; 362722fe2c88SJonathan Adams } 362822fe2c88SJonathan Adams 3629e05725b1Sbonwick static int 3630fa9e4066Sahrens zio_vdev_io_assess(zio_t *zio) 3631fa9e4066Sahrens { 3632fa9e4066Sahrens vdev_t *vd = zio->io_vd; 3633e14bb325SJeff Bonwick 3634d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3635e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3636d6e1c446SGeorge Wilson } 3637e14bb325SJeff Bonwick 3638e14bb325SJeff Bonwick if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3639e14bb325SJeff Bonwick spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3640e14bb325SJeff Bonwick 3641e14bb325SJeff Bonwick if (zio->io_vsd != NULL) { 364222fe2c88SJonathan Adams zio->io_vsd_ops->vsd_free(zio); 3643e14bb325SJeff Bonwick zio->io_vsd = NULL; 3644ecc2d604Sbonwick } 3645ecc2d604Sbonwick 3646e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 3647ea8dc4b6Seschrock zio->io_error = zio_handle_fault_injection(zio, EIO); 3648ea8dc4b6Seschrock 3649fa9e4066Sahrens /* 3650fa9e4066Sahrens * If the I/O failed, determine whether we should attempt to retry it. 
365135a5a358SJonathan Adams * 365235a5a358SJonathan Adams * On retry, we cut in line in the issue queue, since we don't want 365335a5a358SJonathan Adams * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 3654fa9e4066Sahrens */ 3655e14bb325SJeff Bonwick if (zio->io_error && vd == NULL && 3656e14bb325SJeff Bonwick !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3657e14bb325SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3658e14bb325SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3659fa9e4066Sahrens zio->io_error = 0; 3660e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_IO_RETRY | 3661e14bb325SJeff Bonwick ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3662b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 366335a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 366435a5a358SJonathan Adams zio_requeue_io_start_cut_in_line); 3665e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3666ea8dc4b6Seschrock } 3667fa9e4066Sahrens 3668e14bb325SJeff Bonwick /* 3669e14bb325SJeff Bonwick * If we got an error on a leaf device, convert it to ENXIO 3670e14bb325SJeff Bonwick * if the device is not accessible at all. 3671e14bb325SJeff Bonwick */ 3672e14bb325SJeff Bonwick if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3673e14bb325SJeff Bonwick !vdev_accessible(vd, zio)) 3674be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3675e14bb325SJeff Bonwick 3676e14bb325SJeff Bonwick /* 3677e14bb325SJeff Bonwick * If we can't write to an interior vdev (mirror or RAID-Z), 3678e14bb325SJeff Bonwick * set vdev_cant_write so that we stop trying to allocate from it. 3679e14bb325SJeff Bonwick */ 3680e14bb325SJeff Bonwick if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 36813b2aab18SMatthew Ahrens vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3682e14bb325SJeff Bonwick vd->vdev_cant_write = B_TRUE; 36833b2aab18SMatthew Ahrens } 3684e14bb325SJeff Bonwick 3685295438baSHans Rosenfeld /* 3686295438baSHans Rosenfeld * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3687084fd14fSBrian Behlendorf * attempts will ever succeed. In this case we set a persistent 3688084fd14fSBrian Behlendorf * boolean flag so that we don't bother with it in the future. 
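 * For example, once a DKIOCFLUSHWRITECACHE ioctl fails with ENOTSUP,
 * vdev_nowritecache stays set and later cache-flush requests for that
 * vdev can be completed immediately rather than reissued to hardware.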
3689295438baSHans Rosenfeld */ 3690295438baSHans Rosenfeld if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 3691295438baSHans Rosenfeld zio->io_type == ZIO_TYPE_IOCTL && 3692295438baSHans Rosenfeld zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 3693295438baSHans Rosenfeld vd->vdev_nowritecache = B_TRUE; 3694295438baSHans Rosenfeld 3695e14bb325SJeff Bonwick if (zio->io_error) 3696e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3697e14bb325SJeff Bonwick 369869962b56SMatthew Ahrens if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 369969962b56SMatthew Ahrens zio->io_physdone != NULL) { 370069962b56SMatthew Ahrens ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 370169962b56SMatthew Ahrens ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 370269962b56SMatthew Ahrens zio->io_physdone(zio->io_logical); 370369962b56SMatthew Ahrens } 370469962b56SMatthew Ahrens 3705e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3706fa9e4066Sahrens } 3707fa9e4066Sahrens 3708fa9e4066Sahrens void 3709fa9e4066Sahrens zio_vdev_io_reissue(zio_t *zio) 3710fa9e4066Sahrens { 3711fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3712fa9e4066Sahrens ASSERT(zio->io_error == 0); 3713fa9e4066Sahrens 3714b24ab676SJeff Bonwick zio->io_stage >>= 1; 3715fa9e4066Sahrens } 3716fa9e4066Sahrens 3717fa9e4066Sahrens void 3718fa9e4066Sahrens zio_vdev_io_redone(zio_t *zio) 3719fa9e4066Sahrens { 3720fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 3721fa9e4066Sahrens 3722b24ab676SJeff Bonwick zio->io_stage >>= 1; 3723fa9e4066Sahrens } 3724fa9e4066Sahrens 3725fa9e4066Sahrens void 3726fa9e4066Sahrens zio_vdev_io_bypass(zio_t *zio) 3727fa9e4066Sahrens { 3728fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3729fa9e4066Sahrens ASSERT(zio->io_error == 0); 3730fa9e4066Sahrens 3731fa9e4066Sahrens zio->io_flags |= ZIO_FLAG_IO_BYPASS; 3732b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 3733fa9e4066Sahrens } 3734fa9e4066Sahrens 3735eb633035STom Caputi /* 3736eb633035STom Caputi * ========================================================================== 3737eb633035STom Caputi * Encrypt and store encryption parameters 3738eb633035STom Caputi * ========================================================================== 3739eb633035STom Caputi */ 3740eb633035STom Caputi 3741eb633035STom Caputi 3742eb633035STom Caputi /* 3743eb633035STom Caputi * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for 3744eb633035STom Caputi * managing the storage of encryption parameters and passing them to the 3745eb633035STom Caputi * lower-level encryption functions. 
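 *
 * In outline, the cases handled below are:
 *
 *	gang children:		already encrypted by the root zio
 *	raw (send) writes:	caller-supplied salt/IV/MAC are encoded
 *	indirect blocks:	MAC over the lower-level MACs only
 *	objset blocks:		two embedded MACs, never encrypted
 *	unencrypted types:	authenticated with a MAC only
 *	everything else:	encrypted into a fresh abd, with the
 *				parameters encoded into the bp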
3746eb633035STom Caputi */ 3747eb633035STom Caputi static int 3748eb633035STom Caputi zio_encrypt(zio_t *zio) 3749eb633035STom Caputi { 3750eb633035STom Caputi zio_prop_t *zp = &zio->io_prop; 3751eb633035STom Caputi spa_t *spa = zio->io_spa; 3752eb633035STom Caputi blkptr_t *bp = zio->io_bp; 3753eb633035STom Caputi uint64_t psize = BP_GET_PSIZE(bp); 3754eb633035STom Caputi uint64_t dsobj = zio->io_bookmark.zb_objset; 3755eb633035STom Caputi dmu_object_type_t ot = BP_GET_TYPE(bp); 3756eb633035STom Caputi void *enc_buf = NULL; 3757eb633035STom Caputi abd_t *eabd = NULL; 3758eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 3759eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 3760eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 3761eb633035STom Caputi boolean_t no_crypt = B_FALSE; 3762eb633035STom Caputi 3763eb633035STom Caputi /* the root zio already encrypted the data */ 3764eb633035STom Caputi if (zio->io_child_type == ZIO_CHILD_GANG) 3765eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3766eb633035STom Caputi 3767eb633035STom Caputi /* only ZIL blocks are re-encrypted on rewrite */ 3768eb633035STom Caputi if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG) 3769eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3770eb633035STom Caputi 3771eb633035STom Caputi if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) { 3772eb633035STom Caputi BP_SET_CRYPT(bp, B_FALSE); 3773eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3774eb633035STom Caputi } 3775eb633035STom Caputi 3776eb633035STom Caputi /* if we are doing raw encryption set the provided encryption params */ 3777eb633035STom Caputi if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) { 3778eb633035STom Caputi ASSERT0(BP_GET_LEVEL(bp)); 3779eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3780eb633035STom Caputi BP_SET_BYTEORDER(bp, zp->zp_byteorder); 3781eb633035STom Caputi if (ot != DMU_OT_OBJSET) 3782eb633035STom Caputi zio_crypt_encode_mac_bp(bp, zp->zp_mac); 3783eb633035STom Caputi 3784eb633035STom Caputi /* dnode blocks must be written out in the provided byteorder */ 3785eb633035STom Caputi if (zp->zp_byteorder != ZFS_HOST_BYTEORDER && 3786eb633035STom Caputi ot == DMU_OT_DNODE) { 3787eb633035STom Caputi void *bswap_buf = zio_buf_alloc(psize); 3788eb633035STom Caputi abd_t *babd = abd_get_from_buf(bswap_buf, psize); 3789eb633035STom Caputi 3790eb633035STom Caputi ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 3791eb633035STom Caputi abd_copy_to_buf(bswap_buf, zio->io_abd, psize); 3792eb633035STom Caputi dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf, 3793eb633035STom Caputi psize); 3794eb633035STom Caputi 3795eb633035STom Caputi abd_take_ownership_of_buf(babd, B_TRUE); 3796eb633035STom Caputi zio_push_transform(zio, babd, psize, psize, NULL); 3797eb633035STom Caputi } 3798eb633035STom Caputi 3799eb633035STom Caputi if (DMU_OT_IS_ENCRYPTED(ot)) 3800eb633035STom Caputi zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv); 3801eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3802eb633035STom Caputi } 3803eb633035STom Caputi 3804eb633035STom Caputi /* indirect blocks only maintain a cksum of the lower level MACs */ 3805eb633035STom Caputi if (BP_GET_LEVEL(bp) > 0) { 3806eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3807eb633035STom Caputi VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE, 3808eb633035STom Caputi zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp), 3809eb633035STom Caputi mac)); 3810eb633035STom Caputi zio_crypt_encode_mac_bp(bp, mac); 3811eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 
3812eb633035STom Caputi } 3813eb633035STom Caputi 3814eb633035STom Caputi /* 3815eb633035STom Caputi * Objset blocks are a special case since they have two 256-bit MACs 3816eb633035STom Caputi * embedded within them. 3817eb633035STom Caputi */ 3818eb633035STom Caputi if (ot == DMU_OT_OBJSET) { 3819eb633035STom Caputi ASSERT0(DMU_OT_IS_ENCRYPTED(ot)); 3820eb633035STom Caputi ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 3821eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3822eb633035STom Caputi VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj, 3823eb633035STom Caputi zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp))); 3824eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3825eb633035STom Caputi } 3826eb633035STom Caputi 3827eb633035STom Caputi /* unencrypted object types are only authenticated with a MAC */ 3828eb633035STom Caputi if (!DMU_OT_IS_ENCRYPTED(ot)) { 3829eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3830eb633035STom Caputi VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj, 3831eb633035STom Caputi zio->io_abd, psize, mac)); 3832eb633035STom Caputi zio_crypt_encode_mac_bp(bp, mac); 3833eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3834eb633035STom Caputi } 3835eb633035STom Caputi 3836eb633035STom Caputi /* 3837eb633035STom Caputi * Later passes of sync-to-convergence may decide to rewrite data 3838eb633035STom Caputi * in place to avoid more disk reallocations. This presents a problem 3839eb633035STom Caputi * for encryption because this constitutes rewriting the new data with 3840eb633035STom Caputi * the same encryption key and IV. However, this only applies to blocks 3841eb633035STom Caputi * in the MOS (particularly the spacemaps) and we do not encrypt the 3842eb633035STom Caputi * MOS. We assert that the zio is allocating or an intent log write 3843eb633035STom Caputi * to enforce this. 3844eb633035STom Caputi */ 3845eb633035STom Caputi ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG); 3846eb633035STom Caputi ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG); 3847eb633035STom Caputi ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION)); 3848eb633035STom Caputi ASSERT3U(psize, !=, 0); 3849eb633035STom Caputi 3850eb633035STom Caputi enc_buf = zio_buf_alloc(psize); 3851eb633035STom Caputi eabd = abd_get_from_buf(enc_buf, psize); 3852eb633035STom Caputi abd_take_ownership_of_buf(eabd, B_TRUE); 3853eb633035STom Caputi 3854eb633035STom Caputi /* 3855eb633035STom Caputi * For an explanation of what encryption parameters are stored 3856eb633035STom Caputi * where, see the block comment in zio_crypt.c. 3857eb633035STom Caputi */ 3858eb633035STom Caputi if (ot == DMU_OT_INTENT_LOG) { 3859eb633035STom Caputi zio_crypt_decode_params_bp(bp, salt, iv); 3860eb633035STom Caputi } else { 3861eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3862eb633035STom Caputi } 3863eb633035STom Caputi 3864eb633035STom Caputi /* Perform the encryption. This should not fail. */ 3865eb633035STom Caputi VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark, 3866eb633035STom Caputi BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), 3867eb633035STom Caputi salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt)); 3868eb633035STom Caputi 3869eb633035STom Caputi /* encode encryption metadata into the bp */ 3870eb633035STom Caputi if (ot == DMU_OT_INTENT_LOG) { 3871eb633035STom Caputi /* 3872eb633035STom Caputi * ZIL blocks store the MAC in the embedded checksum, so the 3873eb633035STom Caputi * transform must always be applied.
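 * Non-ZIL blocks instead carry their salt, IV, and MAC in the
 * block pointer itself; the else branch below encodes them there.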
3874eb633035STom Caputi */ 3875eb633035STom Caputi zio_crypt_encode_mac_zil(enc_buf, mac); 3876eb633035STom Caputi zio_push_transform(zio, eabd, psize, psize, NULL); 3877eb633035STom Caputi } else { 3878eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3879eb633035STom Caputi zio_crypt_encode_params_bp(bp, salt, iv); 3880eb633035STom Caputi zio_crypt_encode_mac_bp(bp, mac); 3881eb633035STom Caputi 3882eb633035STom Caputi if (no_crypt) { 3883eb633035STom Caputi ASSERT3U(ot, ==, DMU_OT_DNODE); 3884eb633035STom Caputi abd_free(eabd); 3885eb633035STom Caputi } else { 3886eb633035STom Caputi zio_push_transform(zio, eabd, psize, psize, NULL); 3887eb633035STom Caputi } 3888eb633035STom Caputi } 3889eb633035STom Caputi 3890eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3891eb633035STom Caputi } 3892eb633035STom Caputi 3893fa9e4066Sahrens /* 3894fa9e4066Sahrens * ========================================================================== 3895fa9e4066Sahrens * Generate and verify checksums 3896fa9e4066Sahrens * ========================================================================== 3897fa9e4066Sahrens */ 3898e05725b1Sbonwick static int 3899fa9e4066Sahrens zio_checksum_generate(zio_t *zio) 3900fa9e4066Sahrens { 3901fa9e4066Sahrens blkptr_t *bp = zio->io_bp; 3902e14bb325SJeff Bonwick enum zio_checksum checksum; 3903fa9e4066Sahrens 3904e14bb325SJeff Bonwick if (bp == NULL) { 3905e14bb325SJeff Bonwick /* 3906e14bb325SJeff Bonwick * This is zio_write_phys(). 3907e14bb325SJeff Bonwick * We're either generating a label checksum, or none at all. 3908e14bb325SJeff Bonwick */ 3909e14bb325SJeff Bonwick checksum = zio->io_prop.zp_checksum; 3910e14bb325SJeff Bonwick 3911e14bb325SJeff Bonwick if (checksum == ZIO_CHECKSUM_OFF) 3912e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3913fa9e4066Sahrens 3914e14bb325SJeff Bonwick ASSERT(checksum == ZIO_CHECKSUM_LABEL); 3915e14bb325SJeff Bonwick } else { 3916e14bb325SJeff Bonwick if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 3917e14bb325SJeff Bonwick ASSERT(!IO_IS_ALLOCATING(zio)); 3918e14bb325SJeff Bonwick checksum = ZIO_CHECKSUM_GANG_HEADER; 3919e14bb325SJeff Bonwick } else { 3920e14bb325SJeff Bonwick checksum = BP_GET_CHECKSUM(bp); 3921e14bb325SJeff Bonwick } 3922e14bb325SJeff Bonwick } 3923fa9e4066Sahrens 3924770499e1SDan Kimmel zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size); 3925fa9e4066Sahrens 3926e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3927fa9e4066Sahrens } 3928fa9e4066Sahrens 3929e05725b1Sbonwick static int 3930e14bb325SJeff Bonwick zio_checksum_verify(zio_t *zio) 3931fa9e4066Sahrens { 393222fe2c88SJonathan Adams zio_bad_cksum_t info; 3933e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3934e14bb325SJeff Bonwick int error; 3935fa9e4066Sahrens 3936b24ab676SJeff Bonwick ASSERT(zio->io_vd != NULL); 3937b24ab676SJeff Bonwick 3938e14bb325SJeff Bonwick if (bp == NULL) { 3939e14bb325SJeff Bonwick /* 3940e14bb325SJeff Bonwick * This is zio_read_phys(). 3941e14bb325SJeff Bonwick * We're either verifying a label checksum, or nothing at all. 
3942e14bb325SJeff Bonwick */ 3943e14bb325SJeff Bonwick if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 3944e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3945fa9e4066Sahrens 3946e14bb325SJeff Bonwick ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 3947e14bb325SJeff Bonwick } 3948fa9e4066Sahrens 394922fe2c88SJonathan Adams if ((error = zio_checksum_error(zio, &info)) != 0) { 3950e14bb325SJeff Bonwick zio->io_error = error; 3951373dc1cfSMatthew Ahrens if (error == ECKSUM && 3952373dc1cfSMatthew Ahrens !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 395322fe2c88SJonathan Adams zfs_ereport_start_checksum(zio->io_spa, 3954eb633035STom Caputi zio->io_vd, &zio->io_bookmark, zio, 3955eb633035STom Caputi zio->io_offset, zio->io_size, NULL, &info); 3956e14bb325SJeff Bonwick } 3957fa9e4066Sahrens } 3958fa9e4066Sahrens 3959e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3960fa9e4066Sahrens } 3961fa9e4066Sahrens 3962fa9e4066Sahrens /* 3963fa9e4066Sahrens * Called by RAID-Z to ensure we don't compute the checksum twice. 3964fa9e4066Sahrens */ 3965fa9e4066Sahrens void 3966fa9e4066Sahrens zio_checksum_verified(zio_t *zio) 3967fa9e4066Sahrens { 3968b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 3969fa9e4066Sahrens } 3970fa9e4066Sahrens 3971fa9e4066Sahrens /* 3972e14bb325SJeff Bonwick * ========================================================================== 3973e14bb325SJeff Bonwick * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 39745d7b4d43SMatthew Ahrens * An error of 0 indicates success. ENXIO indicates whole-device failure, 3975e14bb325SJeff Bonwick * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 3976e14bb325SJeff Bonwick * indicate errors that are specific to one I/O, and most likely permanent. 3977e14bb325SJeff Bonwick * Any other error is presumed to be worse because we weren't expecting it. 3978e14bb325SJeff Bonwick * ========================================================================== 3979fa9e4066Sahrens */ 3980e14bb325SJeff Bonwick int 3981e14bb325SJeff Bonwick zio_worst_error(int e1, int e2) 3982fa9e4066Sahrens { 3983e14bb325SJeff Bonwick static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 3984e14bb325SJeff Bonwick int r1, r2; 3985e14bb325SJeff Bonwick 3986e14bb325SJeff Bonwick for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 3987e14bb325SJeff Bonwick if (e1 == zio_error_rank[r1]) 3988e14bb325SJeff Bonwick break; 3989e14bb325SJeff Bonwick 3990e14bb325SJeff Bonwick for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 3991e14bb325SJeff Bonwick if (e2 == zio_error_rank[r2]) 3992e14bb325SJeff Bonwick break; 399344cd46caSbillm 3994e14bb325SJeff Bonwick return (r1 > r2 ?
e1 : e2); 3995fa9e4066Sahrens } 3996fa9e4066Sahrens 3997fa9e4066Sahrens /* 3998fa9e4066Sahrens * ========================================================================== 3999e14bb325SJeff Bonwick * I/O completion 4000fa9e4066Sahrens * ========================================================================== 4001fa9e4066Sahrens */ 4002e14bb325SJeff Bonwick static int 4003e14bb325SJeff Bonwick zio_ready(zio_t *zio) 4004fa9e4066Sahrens { 4005e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 4006a3f829aeSBill Moore zio_t *pio, *pio_next; 40070f7643c7SGeorge Wilson zio_link_t *zl = NULL; 4008fa9e4066Sahrens 4009d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, 4010d6e1c446SGeorge Wilson ZIO_WAIT_READY)) { 4011f5383399SBill Moore return (ZIO_PIPELINE_STOP); 4012d6e1c446SGeorge Wilson } 4013fa9e4066Sahrens 4014f5383399SBill Moore if (zio->io_ready) { 4015e14bb325SJeff Bonwick ASSERT(IO_IS_ALLOCATING(zio)); 401680901aeaSGeorge Wilson ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 401780901aeaSGeorge Wilson (zio->io_flags & ZIO_FLAG_NOPWRITE)); 4018e14bb325SJeff Bonwick ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 4019fa9e4066Sahrens 4020e14bb325SJeff Bonwick zio->io_ready(zio); 4021e14bb325SJeff Bonwick } 4022fa9e4066Sahrens 4023e14bb325SJeff Bonwick if (bp != NULL && bp != &zio->io_bp_copy) 4024e14bb325SJeff Bonwick zio->io_bp_copy = *bp; 4025fa9e4066Sahrens 40260f7643c7SGeorge Wilson if (zio->io_error != 0) { 4027e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 4028fa9e4066Sahrens 40290f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 40300f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 40310f7643c7SGeorge Wilson ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 4032663207adSDon Brady ASSERT(zio->io_metaslab_class != NULL); 4033663207adSDon Brady 40340f7643c7SGeorge Wilson /* 40350f7643c7SGeorge Wilson * We were unable to allocate anything, unreserve and 40360f7643c7SGeorge Wilson * issue the next I/O to allocate. 40370f7643c7SGeorge Wilson */ 40380f7643c7SGeorge Wilson metaslab_class_throttle_unreserve( 4039663207adSDon Brady zio->io_metaslab_class, zio->io_prop.zp_copies, 4040663207adSDon Brady zio->io_allocator, zio); 4041f78cdc34SPaul Dagnelie zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 40420f7643c7SGeorge Wilson } 40430f7643c7SGeorge Wilson } 40440f7643c7SGeorge Wilson 4045a3f829aeSBill Moore mutex_enter(&zio->io_lock); 4046a3f829aeSBill Moore zio->io_state[ZIO_WAIT_READY] = 1; 40470f7643c7SGeorge Wilson pio = zio_walk_parents(zio, &zl); 4048a3f829aeSBill Moore mutex_exit(&zio->io_lock); 4049a3f829aeSBill Moore 4050a3f829aeSBill Moore /* 4051a3f829aeSBill Moore * As we notify zio's parents, new parents could be added. 4052a3f829aeSBill Moore * New parents go to the head of zio's io_parent_list, however, 4053a3f829aeSBill Moore * so we will (correctly) not notify them. The remainder of zio's 4054a3f829aeSBill Moore * io_parent_list, from 'pio_next' onward, cannot change because 4055a3f829aeSBill Moore * all parents must wait for us to be done before they can be done. 
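 * (A parent attached after this point sees io_state[ZIO_WAIT_READY]
 * already set, so zio_add_child() does not count us as a pending
 * ready child and skipping it here is safe.)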
4056a3f829aeSBill Moore */ 4057a3f829aeSBill Moore for (; pio != NULL; pio = pio_next) { 40580f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 4059e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_READY); 4060a3f829aeSBill Moore } 4061fa9e4066Sahrens 4062b24ab676SJeff Bonwick if (zio->io_flags & ZIO_FLAG_NODATA) { 4063b24ab676SJeff Bonwick if (BP_IS_GANG(bp)) { 4064b24ab676SJeff Bonwick zio->io_flags &= ~ZIO_FLAG_NODATA; 4065b24ab676SJeff Bonwick } else { 4066770499e1SDan Kimmel ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 4067b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 4068b24ab676SJeff Bonwick } 4069b24ab676SJeff Bonwick } 4070b24ab676SJeff Bonwick 4071a33cae98STim Haley if (zio_injection_enabled && 4072a33cae98STim Haley zio->io_spa->spa_syncing_txg == zio->io_txg) 4073a33cae98STim Haley zio_handle_ignored_writes(zio); 4074a33cae98STim Haley 4075e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 4076fa9e4066Sahrens } 4077fa9e4066Sahrens 40780f7643c7SGeorge Wilson /* 40790f7643c7SGeorge Wilson * Update the allocation throttle accounting. 40800f7643c7SGeorge Wilson */ 40810f7643c7SGeorge Wilson static void 40820f7643c7SGeorge Wilson zio_dva_throttle_done(zio_t *zio) 40830f7643c7SGeorge Wilson { 40840f7643c7SGeorge Wilson zio_t *lio = zio->io_logical; 40850f7643c7SGeorge Wilson zio_t *pio = zio_unique_parent(zio); 40860f7643c7SGeorge Wilson vdev_t *vd = zio->io_vd; 40870f7643c7SGeorge Wilson int flags = METASLAB_ASYNC_ALLOC; 40880f7643c7SGeorge Wilson 40890f7643c7SGeorge Wilson ASSERT3P(zio->io_bp, !=, NULL); 40900f7643c7SGeorge Wilson ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 40910f7643c7SGeorge Wilson ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 40920f7643c7SGeorge Wilson ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 40930f7643c7SGeorge Wilson ASSERT(vd != NULL); 40940f7643c7SGeorge Wilson ASSERT3P(vd, ==, vd->vdev_top); 40950f7643c7SGeorge Wilson ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY))); 40960f7643c7SGeorge Wilson ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 40970f7643c7SGeorge Wilson ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 40980f7643c7SGeorge Wilson ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 40990f7643c7SGeorge Wilson 41000f7643c7SGeorge Wilson /* 41010f7643c7SGeorge Wilson * Parents of gang children can have two flavors -- ones that 41020f7643c7SGeorge Wilson * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 41030f7643c7SGeorge Wilson * and ones that allocated the constituent blocks. The allocation 41040f7643c7SGeorge Wilson * throttle needs to know the allocating parent zio so we must find 41050f7643c7SGeorge Wilson * it here. 41060f7643c7SGeorge Wilson */ 41070f7643c7SGeorge Wilson if (pio->io_child_type == ZIO_CHILD_GANG) { 41080f7643c7SGeorge Wilson /* 41090f7643c7SGeorge Wilson * If our parent is a rewrite gang child then our grandparent 41100f7643c7SGeorge Wilson * would have been the one that performed the allocation. 
41110f7643c7SGeorge Wilson */ 41120f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 41130f7643c7SGeorge Wilson pio = zio_unique_parent(pio); 41140f7643c7SGeorge Wilson flags |= METASLAB_GANG_CHILD; 41150f7643c7SGeorge Wilson } 41160f7643c7SGeorge Wilson 41170f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(pio)); 41180f7643c7SGeorge Wilson ASSERT3P(zio, !=, zio->io_logical); 41190f7643c7SGeorge Wilson ASSERT(zio->io_logical != NULL); 41200f7643c7SGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 41210f7643c7SGeorge Wilson ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 4122663207adSDon Brady ASSERT(zio->io_metaslab_class != NULL); 41230f7643c7SGeorge Wilson 41240f7643c7SGeorge Wilson mutex_enter(&pio->io_lock); 4125f78cdc34SPaul Dagnelie metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags, 4126f78cdc34SPaul Dagnelie pio->io_allocator, B_TRUE); 41270f7643c7SGeorge Wilson mutex_exit(&pio->io_lock); 41280f7643c7SGeorge Wilson 4129663207adSDon Brady metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, 4130663207adSDon Brady pio->io_allocator, pio); 41310f7643c7SGeorge Wilson 41320f7643c7SGeorge Wilson /* 41330f7643c7SGeorge Wilson * Call into the pipeline to see if there is more work that 41340f7643c7SGeorge Wilson * needs to be done. If there is work to be done it will be 41350f7643c7SGeorge Wilson * dispatched to another taskq thread. 41360f7643c7SGeorge Wilson */ 4137f78cdc34SPaul Dagnelie zio_allocate_dispatch(zio->io_spa, pio->io_allocator); 41380f7643c7SGeorge Wilson } 41390f7643c7SGeorge Wilson 4140e14bb325SJeff Bonwick static int 4141e14bb325SJeff Bonwick zio_done(zio_t *zio) 4142d63d470bSgw { 4143e14bb325SJeff Bonwick spa_t *spa = zio->io_spa; 4144e14bb325SJeff Bonwick zio_t *lio = zio->io_logical; 4145e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 4146e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd; 4147e14bb325SJeff Bonwick uint64_t psize = zio->io_size; 4148a3f829aeSBill Moore zio_t *pio, *pio_next; 41490f7643c7SGeorge Wilson zio_link_t *zl = NULL; 4150d63d470bSgw 4151e14bb325SJeff Bonwick /* 4152f5383399SBill Moore * If our children haven't all completed, 4153e14bb325SJeff Bonwick * wait for them and then repeat this pipeline stage. 4154e14bb325SJeff Bonwick */ 4155d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) { 4156e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 4157d6e1c446SGeorge Wilson } 4158d63d470bSgw 41590f7643c7SGeorge Wilson /* 41600f7643c7SGeorge Wilson * If the allocation throttle is enabled, then update the accounting. 41610f7643c7SGeorge Wilson * We only track child I/Os that are part of an allocating async 41620f7643c7SGeorge Wilson * write. We must do this since the allocation is performed 41630f7643c7SGeorge Wilson * by the logical I/O but the actual write is done by child I/Os. 41640f7643c7SGeorge Wilson */ 41650f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 41660f7643c7SGeorge Wilson zio->io_child_type == ZIO_CHILD_VDEV) { 4167663207adSDon Brady ASSERT(zio->io_metaslab_class != NULL); 4168663207adSDon Brady ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled); 41690f7643c7SGeorge Wilson zio_dva_throttle_done(zio); 41700f7643c7SGeorge Wilson } 41710f7643c7SGeorge Wilson 41720f7643c7SGeorge Wilson /* 41730f7643c7SGeorge Wilson * If the allocation throttle is enabled, verify that 41740f7643c7SGeorge Wilson * we have decremented the refcounts for every I/O that was throttled. 
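 * The VERIFY below is that check: this zio must no longer hold a
 * slot in its metaslab class's mc_alloc_slots for io_allocator.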
41750f7643c7SGeorge Wilson */ 41760f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 41770f7643c7SGeorge Wilson ASSERT(zio->io_type == ZIO_TYPE_WRITE); 41780f7643c7SGeorge Wilson ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 41790f7643c7SGeorge Wilson ASSERT(bp != NULL); 4180663207adSDon Brady 4181f78cdc34SPaul Dagnelie metaslab_group_alloc_verify(spa, zio->io_bp, zio, 4182f78cdc34SPaul Dagnelie zio->io_allocator); 4183e914ace2STim Schumacher VERIFY(zfs_refcount_not_held( 4184663207adSDon Brady &zio->io_metaslab_class->mc_alloc_slots[zio->io_allocator], 4185663207adSDon Brady zio)); 41860f7643c7SGeorge Wilson } 41870f7643c7SGeorge Wilson 4188e14bb325SJeff Bonwick for (int c = 0; c < ZIO_CHILD_TYPES; c++) 4189e14bb325SJeff Bonwick for (int w = 0; w < ZIO_WAIT_TYPES; w++) 4190e14bb325SJeff Bonwick ASSERT(zio->io_children[c][w] == 0); 4191e14bb325SJeff Bonwick 41925d7b4d43SMatthew Ahrens if (bp != NULL && !BP_IS_EMBEDDED(bp)) { 4193e14bb325SJeff Bonwick ASSERT(bp->blk_pad[0] == 0); 4194e14bb325SJeff Bonwick ASSERT(bp->blk_pad[1] == 0); 4195e14bb325SJeff Bonwick ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 4196a3f829aeSBill Moore (bp == zio_unique_parent(zio)->io_bp)); 4197e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 4198b24ab676SJeff Bonwick zio->io_bp_override == NULL && 4199e14bb325SJeff Bonwick !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 4200b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 4201e14bb325SJeff Bonwick ASSERT(BP_COUNT_GANG(bp) == 0 || 4202e14bb325SJeff Bonwick (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 4203e14bb325SJeff Bonwick } 420480901aeaSGeorge Wilson if (zio->io_flags & ZIO_FLAG_NOPWRITE) 420580901aeaSGeorge Wilson VERIFY(BP_EQUAL(bp, &zio->io_bp_orig)); 4206e14bb325SJeff Bonwick } 4207fa9e4066Sahrens 4208e14bb325SJeff Bonwick /* 4209b24ab676SJeff Bonwick * If there were child vdev/gang/ddt errors, they apply to us now. 4210e14bb325SJeff Bonwick */ 4211e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 4212e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 4213b24ab676SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 4214b24ab676SJeff Bonwick 4215b24ab676SJeff Bonwick /* 4216b24ab676SJeff Bonwick * If the I/O on the transformed data was successful, generate any 4217b24ab676SJeff Bonwick * checksum reports now while we still have the transformed data. 
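 * If the data was padded out for the device (asize != psize), the
 * same zero padding is reapplied below so that zcr_finish() sees
 * exactly the bytes that were checksummed on disk.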
4218b24ab676SJeff Bonwick */ 4219b24ab676SJeff Bonwick if (zio->io_error == 0) { 4220b24ab676SJeff Bonwick while (zio->io_cksum_report != NULL) { 4221b24ab676SJeff Bonwick zio_cksum_report_t *zcr = zio->io_cksum_report; 4222b24ab676SJeff Bonwick uint64_t align = zcr->zcr_align; 4223b24ab676SJeff Bonwick uint64_t asize = P2ROUNDUP(psize, align); 4224770499e1SDan Kimmel abd_t *adata = zio->io_abd; 4225b24ab676SJeff Bonwick 4226b24ab676SJeff Bonwick if (asize != psize) { 4227eb633035STom Caputi adata = abd_alloc(asize, B_TRUE); 4228770499e1SDan Kimmel abd_copy(adata, zio->io_abd, psize); 4229770499e1SDan Kimmel abd_zero_off(adata, psize, asize - psize); 4230b24ab676SJeff Bonwick } 4231b24ab676SJeff Bonwick 4232b24ab676SJeff Bonwick zio->io_cksum_report = zcr->zcr_next; 4233b24ab676SJeff Bonwick zcr->zcr_next = NULL; 4234eb633035STom Caputi zcr->zcr_finish(zcr, adata); 4235b24ab676SJeff Bonwick zfs_ereport_free_checksum(zcr); 4236b24ab676SJeff Bonwick 4237b24ab676SJeff Bonwick if (asize != psize) 4238770499e1SDan Kimmel abd_free(adata); 4239b24ab676SJeff Bonwick } 4240b24ab676SJeff Bonwick } 4241e14bb325SJeff Bonwick 4242e14bb325SJeff Bonwick zio_pop_transforms(zio); /* note: may set zio->io_error */ 4243e14bb325SJeff Bonwick 4244e14bb325SJeff Bonwick vdev_stat_update(zio, psize); 4245e14bb325SJeff Bonwick 4246dd50e0ccSTony Hutter if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) { 4247dd50e0ccSTony Hutter if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) { 4248dd50e0ccSTony Hutter /* 4249dd50e0ccSTony Hutter * We want to only increment our slow IO counters if 4250dd50e0ccSTony Hutter * the IO is valid (i.e. not if the drive is removed). 4251dd50e0ccSTony Hutter * 4252dd50e0ccSTony Hutter * zfs_ereport_post() will also do these checks, but 4253dd50e0ccSTony Hutter * it can also have other failures, so we need to 4254dd50e0ccSTony Hutter * increment the slow_io counters independent of it. 4255dd50e0ccSTony Hutter */ 4256dd50e0ccSTony Hutter if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY, 4257dd50e0ccSTony Hutter zio->io_spa, zio->io_vd, zio)) { 4258dd50e0ccSTony Hutter mutex_enter(&zio->io_vd->vdev_stat_lock); 4259dd50e0ccSTony Hutter zio->io_vd->vdev_stat.vs_slow_ios++; 4260dd50e0ccSTony Hutter mutex_exit(&zio->io_vd->vdev_stat_lock); 4261dd50e0ccSTony Hutter 4262dd50e0ccSTony Hutter zfs_ereport_post(FM_EREPORT_ZFS_DELAY, 4263dd50e0ccSTony Hutter zio->io_spa, zio->io_vd, &zio->io_bookmark, 4264dd50e0ccSTony Hutter zio, 0, 0); 4265dd50e0ccSTony Hutter } 4266dd50e0ccSTony Hutter } 4267dd50e0ccSTony Hutter } 4268dd50e0ccSTony Hutter 4269e14bb325SJeff Bonwick if (zio->io_error) { 4270e14bb325SJeff Bonwick /* 4271e14bb325SJeff Bonwick * If this I/O is attached to a particular vdev, 4272e14bb325SJeff Bonwick * generate an error message describing the I/O failure 4273e14bb325SJeff Bonwick * at the block level. We ignore these errors if the 4274e14bb325SJeff Bonwick * device is currently unavailable. 
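 * (ECKSUM is excluded below because checksum failures are reported
 * separately, through the zio_cksum_report path set up in
 * zio_checksum_verify().)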
4275e14bb325SJeff Bonwick */ 4276e14bb325SJeff Bonwick if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 4277eb633035STom Caputi zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, 4278eb633035STom Caputi &zio->io_bookmark, zio, 0, 0); 4279e14bb325SJeff Bonwick 42808f18d1faSGeorge Wilson if ((zio->io_error == EIO || !(zio->io_flags & 42818f18d1faSGeorge Wilson (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 42828f18d1faSGeorge Wilson zio == lio) { 4283e14bb325SJeff Bonwick /* 4284e14bb325SJeff Bonwick * For logical I/O requests, tell the SPA to log the 4285e14bb325SJeff Bonwick * error and generate a logical data ereport. 4286e14bb325SJeff Bonwick */ 4287eb633035STom Caputi spa_log_error(spa, &zio->io_bookmark); 4288eb633035STom Caputi zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, 4289eb633035STom Caputi &zio->io_bookmark, zio, 0, 0); 4290e14bb325SJeff Bonwick } 4291e14bb325SJeff Bonwick } 4292fa9e4066Sahrens 4293e14bb325SJeff Bonwick if (zio->io_error && zio == lio) { 4294e14bb325SJeff Bonwick /* 4295e14bb325SJeff Bonwick * Determine whether zio should be reexecuted. This will 4296e14bb325SJeff Bonwick * propagate all the way to the root via zio_notify_parent(). 4297e14bb325SJeff Bonwick */ 4298e14bb325SJeff Bonwick ASSERT(vd == NULL && bp != NULL); 4299b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4300e14bb325SJeff Bonwick 4301b24ab676SJeff Bonwick if (IO_IS_ALLOCATING(zio) && 4302b24ab676SJeff Bonwick !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 4303e14bb325SJeff Bonwick if (zio->io_error != ENOSPC) 4304e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_NOW; 4305e14bb325SJeff Bonwick else 4306e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4307b24ab676SJeff Bonwick } 4308e14bb325SJeff Bonwick 4309e14bb325SJeff Bonwick if ((zio->io_type == ZIO_TYPE_READ || 4310e14bb325SJeff Bonwick zio->io_type == ZIO_TYPE_FREE) && 431144ecc532SGeorge Wilson !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 4312e14bb325SJeff Bonwick zio->io_error == ENXIO && 4313b16da2e2SGeorge Wilson spa_load_state(spa) == SPA_LOAD_NONE && 4314e14bb325SJeff Bonwick spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 4315e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4316e14bb325SJeff Bonwick 4317e14bb325SJeff Bonwick if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 4318e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 431922fe2c88SJonathan Adams 432022fe2c88SJonathan Adams /* 432122fe2c88SJonathan Adams * Here is a possibly good place to attempt to do 432222fe2c88SJonathan Adams * either combinatorial reconstruction or error correction 432322fe2c88SJonathan Adams * based on checksums. It also might be a good place 432422fe2c88SJonathan Adams * to send out preliminary ereports before we suspend 432522fe2c88SJonathan Adams * processing. 432622fe2c88SJonathan Adams */ 4327d63d470bSgw } 4328d63d470bSgw 432967bd71c6Sperrin /* 4330e14bb325SJeff Bonwick * If there were logical child errors, they apply to us now. 4331e14bb325SJeff Bonwick * We defer this until now to avoid conflating logical child 4332e14bb325SJeff Bonwick * errors with errors that happened to the zio itself when 4333e14bb325SJeff Bonwick * updating vdev stats and reporting FMA events above. 
433467bd71c6Sperrin */ 4335e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 43368654d025Sperrin 4337b24ab676SJeff Bonwick if ((zio->io_error || zio->io_reexecute) && 4338b24ab676SJeff Bonwick IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 433980901aeaSGeorge Wilson !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 4340f5383399SBill Moore zio_dva_unallocate(zio, zio->io_gang_tree, bp); 4341f5383399SBill Moore 4342f5383399SBill Moore zio_gang_tree_free(&zio->io_gang_tree); 4343f5383399SBill Moore 434433a372edSGeorge Wilson /* 434533a372edSGeorge Wilson * Godfather I/Os should never suspend. 434633a372edSGeorge Wilson */ 434733a372edSGeorge Wilson if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 434833a372edSGeorge Wilson (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 434933a372edSGeorge Wilson zio->io_reexecute = 0; 435033a372edSGeorge Wilson 435133a372edSGeorge Wilson if (zio->io_reexecute) { 4352e14bb325SJeff Bonwick /* 4353e14bb325SJeff Bonwick * This is a logical I/O that wants to reexecute. 4354e14bb325SJeff Bonwick * 4355e14bb325SJeff Bonwick * Reexecute is top-down. When an i/o fails, if it's not 4356e14bb325SJeff Bonwick * the root, it simply notifies its parent and sticks around. 4357e14bb325SJeff Bonwick * The parent, seeing that it still has children in zio_done(), 4358e14bb325SJeff Bonwick * does the same. This percolates all the way up to the root. 4359e14bb325SJeff Bonwick * The root i/o will reexecute or suspend the entire tree. 4360e14bb325SJeff Bonwick * 4361e14bb325SJeff Bonwick * This approach ensures that zio_reexecute() honors 4362e14bb325SJeff Bonwick * all the original i/o dependency relationships, e.g. 4363e14bb325SJeff Bonwick * parents not executing until children are ready. 4364e14bb325SJeff Bonwick */ 4365e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4366fa9e4066Sahrens 4367f5383399SBill Moore zio->io_gang_leader = NULL; 4368e14bb325SJeff Bonwick 4369a3f829aeSBill Moore mutex_enter(&zio->io_lock); 4370a3f829aeSBill Moore zio->io_state[ZIO_WAIT_DONE] = 1; 4371a3f829aeSBill Moore mutex_exit(&zio->io_lock); 4372a3f829aeSBill Moore 437354d692b7SGeorge Wilson /* 437454d692b7SGeorge Wilson * "The Godfather" I/O monitors its children but is 437554d692b7SGeorge Wilson * not a true parent to them. It will track them through 437654d692b7SGeorge Wilson * the pipeline but severs its ties whenever they get into 437754d692b7SGeorge Wilson * trouble (e.g. suspended). This allows "The Godfather" 437854d692b7SGeorge Wilson * I/O to return status without blocking. 437954d692b7SGeorge Wilson */ 43800f7643c7SGeorge Wilson zl = NULL; 43810f7643c7SGeorge Wilson for (pio = zio_walk_parents(zio, &zl); pio != NULL; 43820f7643c7SGeorge Wilson pio = pio_next) { 43830f7643c7SGeorge Wilson zio_link_t *remove_zl = zl; 43840f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 438554d692b7SGeorge Wilson 438654d692b7SGeorge Wilson if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 438754d692b7SGeorge Wilson (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 43880f7643c7SGeorge Wilson zio_remove_child(pio, zio, remove_zl); 438954d692b7SGeorge Wilson zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 439054d692b7SGeorge Wilson } 439154d692b7SGeorge Wilson } 439254d692b7SGeorge Wilson 4393a3f829aeSBill Moore if ((pio = zio_unique_parent(zio)) != NULL) { 4394e14bb325SJeff Bonwick /* 4395e14bb325SJeff Bonwick * We're not a root i/o, so there's nothing to do 4396e14bb325SJeff Bonwick * but notify our parent. 
Don't propagate errors 4397e14bb325SJeff Bonwick * upward since we haven't permanently failed yet. 4398e14bb325SJeff Bonwick */ 439933a372edSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 4400e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 4401e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 4402e14bb325SJeff Bonwick } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 4403e14bb325SJeff Bonwick /* 4404e14bb325SJeff Bonwick * We'd fail again if we reexecuted now, so suspend 4405e14bb325SJeff Bonwick * until conditions improve (e.g. device comes online). 4406e14bb325SJeff Bonwick */ 4407e0f1c0afSOlaf Faaland zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR); 4408e14bb325SJeff Bonwick } else { 4409e14bb325SJeff Bonwick /* 4410e14bb325SJeff Bonwick * Reexecution is potentially a huge amount of work. 4411e14bb325SJeff Bonwick * Hand it off to the otherwise-unused claim taskq. 4412e14bb325SJeff Bonwick */ 44135aeb9474SGarrett D'Amore ASSERT(zio->io_tqent.tqent_next == NULL); 4414ec94d322SAdam Leventhal spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM, 4415ec94d322SAdam Leventhal ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio, 4416ec94d322SAdam Leventhal 0, &zio->io_tqent); 4417e14bb325SJeff Bonwick } 4418e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 4419fa9e4066Sahrens } 4420fa9e4066Sahrens 4421b24ab676SJeff Bonwick ASSERT(zio->io_child_count == 0); 442233a372edSGeorge Wilson ASSERT(zio->io_reexecute == 0); 4423e14bb325SJeff Bonwick ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 4424fa9e4066Sahrens 4425b24ab676SJeff Bonwick /* 4426b24ab676SJeff Bonwick * Report any checksum errors, since the I/O is complete. 4427b24ab676SJeff Bonwick */ 442822fe2c88SJonathan Adams while (zio->io_cksum_report != NULL) { 4429b24ab676SJeff Bonwick zio_cksum_report_t *zcr = zio->io_cksum_report; 4430b24ab676SJeff Bonwick zio->io_cksum_report = zcr->zcr_next; 4431b24ab676SJeff Bonwick zcr->zcr_next = NULL; 4432b24ab676SJeff Bonwick zcr->zcr_finish(zcr, NULL); 4433b24ab676SJeff Bonwick zfs_ereport_free_checksum(zcr); 443422fe2c88SJonathan Adams } 443522fe2c88SJonathan Adams 4436a3f829aeSBill Moore /* 4437a3f829aeSBill Moore * It is the responsibility of the done callback to ensure that this 4438a3f829aeSBill Moore * particular zio is no longer discoverable for adoption, and as 4439a3f829aeSBill Moore * such, cannot acquire any new parents. 
4440a3f829aeSBill Moore */ 4441e14bb325SJeff Bonwick if (zio->io_done) 4442e14bb325SJeff Bonwick zio->io_done(zio); 4443fa9e4066Sahrens 4444a3f829aeSBill Moore mutex_enter(&zio->io_lock); 4445a3f829aeSBill Moore zio->io_state[ZIO_WAIT_DONE] = 1; 4446a3f829aeSBill Moore mutex_exit(&zio->io_lock); 4447fa9e4066Sahrens 44480f7643c7SGeorge Wilson zl = NULL; 44490f7643c7SGeorge Wilson for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 44500f7643c7SGeorge Wilson zio_link_t *remove_zl = zl; 44510f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 44520f7643c7SGeorge Wilson zio_remove_child(pio, zio, remove_zl); 4453e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 4454e14bb325SJeff Bonwick } 4455fa9e4066Sahrens 4456e14bb325SJeff Bonwick if (zio->io_waiter != NULL) { 4457e14bb325SJeff Bonwick mutex_enter(&zio->io_lock); 4458e14bb325SJeff Bonwick zio->io_executor = NULL; 4459e14bb325SJeff Bonwick cv_broadcast(&zio->io_cv); 4460e14bb325SJeff Bonwick mutex_exit(&zio->io_lock); 4461e14bb325SJeff Bonwick } else { 4462e14bb325SJeff Bonwick zio_destroy(zio); 4463e14bb325SJeff Bonwick } 4464fa9e4066Sahrens 4465e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 4466fa9e4066Sahrens } 446746341222Sperrin 446846341222Sperrin /* 4469e14bb325SJeff Bonwick * ========================================================================== 4470e14bb325SJeff Bonwick * I/O pipeline definition 4471e14bb325SJeff Bonwick * ========================================================================== 447246341222Sperrin */ 4473b24ab676SJeff Bonwick static zio_pipe_stage_t *zio_pipeline[] = { 4474e14bb325SJeff Bonwick NULL, 4475e14bb325SJeff Bonwick zio_read_bp_init, 44760f7643c7SGeorge Wilson zio_write_bp_init, 4477b24ab676SJeff Bonwick zio_free_bp_init, 4478b24ab676SJeff Bonwick zio_issue_async, 44790f7643c7SGeorge Wilson zio_write_compress, 4480eb633035STom Caputi zio_encrypt, 4481e14bb325SJeff Bonwick zio_checksum_generate, 448280901aeaSGeorge Wilson zio_nop_write, 4483b24ab676SJeff Bonwick zio_ddt_read_start, 4484b24ab676SJeff Bonwick zio_ddt_read_done, 4485b24ab676SJeff Bonwick zio_ddt_write, 4486b24ab676SJeff Bonwick zio_ddt_free, 4487e14bb325SJeff Bonwick zio_gang_assemble, 4488e14bb325SJeff Bonwick zio_gang_issue, 44890f7643c7SGeorge Wilson zio_dva_throttle, 4490e14bb325SJeff Bonwick zio_dva_allocate, 4491e14bb325SJeff Bonwick zio_dva_free, 4492e14bb325SJeff Bonwick zio_dva_claim, 4493e14bb325SJeff Bonwick zio_ready, 4494e14bb325SJeff Bonwick zio_vdev_io_start, 4495e14bb325SJeff Bonwick zio_vdev_io_done, 4496e14bb325SJeff Bonwick zio_vdev_io_assess, 4497e14bb325SJeff Bonwick zio_checksum_verify, 4498e14bb325SJeff Bonwick zio_done 4499e14bb325SJeff Bonwick }; 4500ad135b5dSChristopher Siden 4501ad135b5dSChristopher Siden 4502ad135b5dSChristopher Siden 4503ad135b5dSChristopher Siden 4504a2cdcdd2SPaul Dagnelie /* 4505a2cdcdd2SPaul Dagnelie * Compare two zbookmark_phys_t's to see which we would reach first in a 4506a2cdcdd2SPaul Dagnelie * pre-order traversal of the object tree. 4507a2cdcdd2SPaul Dagnelie * 4508a2cdcdd2SPaul Dagnelie * This is simple in every case aside from the meta-dnode object. For all other 4509a2cdcdd2SPaul Dagnelie * objects, we traverse them in order (object 1 before object 2, and so on). 4510a2cdcdd2SPaul Dagnelie * However, all of these objects are traversed while traversing object 0, since 4511a2cdcdd2SPaul Dagnelie * the data it points to is the list of objects. 
/*
 * Compare two zbookmark_phys_t's to see which we would reach first in a
 * pre-order traversal of the object tree.
 *
 * This is simple in every case aside from the meta-dnode object. For all other
 * objects, we traverse them in order (object 1 before object 2, and so on).
 * However, all of these objects are traversed while traversing object 0, since
 * the data it points to is the list of objects. Thus, we need to convert to a
 * canonical representation so we can compare meta-dnode bookmarks to
 * non-meta-dnode bookmarks.
 *
 * We do this by calculating "equivalents" for each field of the zbookmark.
 * zbookmarks outside of the meta-dnode use their own object and level, and
 * calculate the level 0 equivalent (the first L0 blkid that is contained in the
 * blocks this bookmark refers to) by multiplying their blkid by their span
 * (the number of L0 blocks contained within one block at their level).
 * zbookmarks inside the meta-dnode calculate their object equivalent
 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 * level + 1<<31 (a value larger than any level could ever be) for their level.
 * This causes them to always sort before any bookmark within their object
 * equivalent, to sort correctly relative to bookmarks in other objects, and to
 * sort correctly relative to other bookmarks in the meta-dnode.
 */
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
    const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
	/*
	 * These variables represent the "equivalent" values for the zbookmark,
	 * after converting zbookmarks inside the meta dnode to their
	 * normal-object equivalents.
	 */
	uint64_t zb1obj, zb2obj;
	uint64_t zb1L0, zb2L0;
	uint64_t zb1level, zb2level;

	if (zb1->zb_object == zb2->zb_object &&
	    zb1->zb_level == zb2->zb_level &&
	    zb1->zb_blkid == zb2->zb_blkid)
		return (0);

	/*
	 * BP_SPANB calculates the span in blocks.
	 */
	zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
	zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);

	if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
		zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb1L0 = 0;
		zb1level = zb1->zb_level + COMPARE_META_LEVEL;
	} else {
		zb1obj = zb1->zb_object;
		zb1level = zb1->zb_level;
	}

	if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
		zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		zb2L0 = 0;
		zb2level = zb2->zb_level + COMPARE_META_LEVEL;
	} else {
		zb2obj = zb2->zb_object;
		zb2level = zb2->zb_level;
	}

	/* Now that we have a canonical representation, do the comparison. */
	if (zb1obj != zb2obj)
		return (zb1obj < zb2obj ? -1 : 1);
	else if (zb1L0 != zb2L0)
		return (zb1L0 < zb2L0 ? -1 : 1);
	else if (zb1level != zb2level)
		return (zb1level > zb2level ? -1 : 1);

	/*
	 * This can (theoretically) happen if the bookmarks have the same object
	 * and level, but different blkids, if the block sizes are not the same.
	 * There is presently no way to change the indirect block sizes.
	 */
	return (0);
}
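/*
 * Worked example (illustrative; the concrete geometry below is an
 * assumption, not taken from this file). With 128K indirect blocks
 * (ibs = 17) and 128-byte block pointers (SPA_BLKPTRSHIFT = 7), each
 * indirect block maps 2^10 = 1024 child blocks, so
 * BP_SPANB(17, level) == 1ULL << (10 * level). Comparing, within one
 * ordinary object,
 *
 *	zb1 = { .zb_object = 5, .zb_level = 0, .zb_blkid = 2048 }
 *	zb2 = { .zb_object = 5, .zb_level = 1, .zb_blkid = 1 }
 *
 * yields zb1L0 = 2048 * 1 = 2048 and zb2L0 = 1 * 1024 = 1024, so
 * zbookmark_compare() returns 1: a pre-order walk reaches the L1 block
 * spanning L0 blkids [1024, 2048) before it reaches L0 blkid 2048.
 *
 * For the meta-dnode, assume 16K dnode blocks (dbss = 32 sectors), i.e.
 * 32 dnodes per block. The bookmark { DMU_META_DNODE_OBJECT, 0, 3 }
 * covers dnodes 96 through 127 and canonicalizes to zb1obj = 3 * 32 = 96,
 * zb1L0 = 0, zb1level = 0 + COMPARE_META_LEVEL, so it sorts after every
 * bookmark in objects 0..95 but before every bookmark in object 96 and
 * beyond, which matches where that dnode block falls in the traversal.
 */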

/*
 * This function checks the following: given that last_block is the place that
 * our traversal stopped last time, does that guarantee that we've visited
 * every node under subtree_root? A raw zbookmark_compare of the two bookmarks
 * doesn't answer that question, so we pass in a modified version of
 * subtree_root: by incrementing its block id, and then checking whether the
 * result compares before or equal to last_block, we can tell whether or not
 * having visited last_block implies that all of subtree_root's children have
 * been visited.
 */
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT(last_block->zb_level == 0);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode. Since we don't
	 * know without examining it what object it refers to, and there's no
	 * harm in passing in this value in other cases, we always pass it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must be
	 * level 0. The indirect block size is only used to calculate the span
	 * of the bookmark, but since the bookmark must be level 0, the span is
	 * always 1, so the math works out.
	 *
	 * If you make changes to how the zbookmark_compare code works, be sure
	 * that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}
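/*
 * Illustrative usage sketch (the caller shown is an assumption modeled on
 * the resumable scan logic in dsl_scan.c, not code from this file): a
 * traversal that was previously interrupted at scn_bookmark can prune an
 * entire subtree it has already covered:
 *
 *	if (zbookmark_subtree_completed(dnp, zb, &scn->scn_phys.scn_bookmark)) {
 *		... every block under zb was visited last time; skip it ...
 *		return (B_TRUE);
 *	}
 *
 * The ASSERT above encodes the matching requirement: the recorded stopping
 * point must be a level-0 (leaf) bookmark, so traversals resume only at
 * leaf granularity.
 */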