/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>
#include <sys/cityhash.h>
#include <sys/dsl_crypt.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
	"zio_ioctl"
};

boolean_t zio_dva_throttle_enabled = B_TRUE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

#define	ZIO_PIPELINE_CONTINUE		0x100
#define	ZIO_PIPELINE_STOP		0x101

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
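/*
 * Illustrative example (added note, not in the original source): with the
 * defaults above, any free issued while spa_sync() is in pass 2 or later
 * is deferred to the next txg (zfs_sync_pass_deferred_free), and if a txg
 * takes five or more passes to converge, writes issued in those later
 * passes skip compression (zfs_sync_pass_dont_compress) so the txg can
 * finish sooner.
 */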
/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

boolean_t	zio_requeue_io_start_cut_in_line = B_TRUE;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
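	/*
	 * Illustrative example (added note): with SPA_MINBLOCKSIZE of 512,
	 * the loop below creates a cache at every multiple of 512 bytes up
	 * through 2K, and above that only at sizes that are multiples of a
	 * quarter of their floor power of two (e.g. 8K, 10K, 12K, 14K, 16K,
	 * 20K, ...).  The fix-up loop that follows aliases each size class
	 * without its own cache to the next larger cache.
	 */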
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own
		 * page, to eliminate the performance overhead of trapping
		 * to the kernel when modifying a non-watched buffer that
		 * shares the page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			/*
			 * Since zio_data bufs do not appear in crash dumps, we
			 * pass KMC_NOTOUCH so that no allocator metadata is
			 * stored with the buffers.
			 */
			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    cflags | KMC_NOTOUCH);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
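/*
 * Illustrative example (added note): a request for 4,500 bytes computes
 * c = (4500 - 1) >> SPA_MINBLOCKSHIFT = 8, i.e. the 4,608-byte size class.
 * zio_init() created no cache at exactly that size (4,608 is not a
 * quarter-power-of-2 boundary), so its fix-up loop aliased zio_buf_cache[8]
 * to the 5,120-byte cache and the allocation is satisfied from there.
 */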
/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump, thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics.
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

/* ARGSUSED */
static void
zio_abd_free(void *abd, size_t size)
{
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	/*
	 * Ensure that anyone expecting this zio to contain a linear ABD isn't
	 * going to get a nasty surprise when they try to access the data.
	 */
	IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
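/*
 * Sketch of how the transform stack is used (added note): a write that
 * compresses its data pushes the compressed buffer with a NULL transform,
 * so popping it merely restores io_abd/io_size and frees the temporary
 * buffer.  A read of a compressed block instead pushes a buffer with
 * zio_decompress() as the callback, so that when the smaller physical
 * read completes, zio_pop_transforms() decompresses the data back into
 * the buffer the caller originally supplied.
 */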
/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size);
		abd_return_buf_copy(data, tmp, size);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp.  It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block. We decompress it now and
			 * throw away the result after we are finished.
			 */
			tmp = zio_buf_alloc(lsize);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, tmp, zio->io_size, lsize);
			if (ret != 0) {
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
			    tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			zio_buf_free(tmp, lsize);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC.  It would be
	 * nice to separate this out into its own flag, but for the moment
	 * enum zio_flag is out of bits.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error return EIO as
	 * the io_error.  If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark);
			zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
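/*
 * Example of the parent/child interfaces below (illustrative sketch, added;
 * not part of the original file).  A caller typically creates a root zio,
 * hangs children off it with zio_nowait(), and then waits for the whole
 * tree; the root's error reflects the worst error among its children:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (int c = 0; c < nblks; c++)
 *		zio_nowait(zio_read(rio, spa, &bp[c], abd[c],
 *		    BP_GET_LSIZE(&bp[c]), NULL, NULL,
 *		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *	int error = zio_wait(rio);
 *
 * Here nblks, bp[], abd[], and zb are hypothetical caller-supplied values.
 */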
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}
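/*
 * Note on the stall/notify handshake (added): when zio_wait_for_children()
 * finds outstanding children it backs io_stage up by one bit
 * (io_stage >>= 1) and records which counter it stalled on.  When the last
 * child of that type completes, zio_notify_parent() below sees *countp
 * drop to zero with io_stall pointing at it, clears the stall, and
 * re-dispatches the parent; the pipeline then advances io_stage again and
 * re-runs the stage that had stalled.
 */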
static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		/*
		 * Dispatch the parent zio in its own taskq so that
		 * the child can continue to make progress. This also
		 * prevents overflowing the stack when we have deeply nested
		 * parent-child relationships.
		 */
		zio_taskq_dispatch(pio, type, B_FALSE);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
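/*
 * Note (added): the comparator above sorts first by bookmark (objset,
 * object, level, blkid) and falls back to the zios' memory addresses, so
 * two distinct zios never compare as equal even when they share the same
 * bookmark.
 */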
/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    enum zio_flag flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_metaslab_class == NULL)
			zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return;

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}
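/*
 * Note (added): zfs_panic_recover() only panics when the zfs_recover
 * tunable is unset; with zfs_recover set, the checks above merely log the
 * damage, which allows a pool containing corrupt block pointers to be
 * imported far enough to salvage data from it.
 */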
boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (BP_IS_GANG(bp))
		asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).  Encrypted
	 * dedup blocks need data as well, so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync(), keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}
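/*
 * Note (added): an override write supplies a block pointer that was
 * already allocated and written (by dmu_sync(), per the comment above),
 * letting the write pipeline reuse io_bp_override rather than allocating
 * a fresh DVA, subject to the nopwrite/dedup rules reset above.
 */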
void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	zfs_blkptr_verify(spa, bp);

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
	}
}
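/*
 * Illustrative example (added note): a plain (non-gang, non-dedup) block
 * freed for the currently-syncing txg during an early sync pass is freed
 * synchronously right here; the same free issued for any other txg, in a
 * late sync pass, or for a gang/dedup block is instead appended to
 * spa_free_bplist[] above and processed later by spa_sync().
 */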
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
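	/*
	 * Illustrative example (added note): after a crash, the ZIL's claim
	 * pass during pool open issues zio_claim() with
	 * txg == spa_min_claim_txg(spa) for every block referenced by a
	 * dataset's intent log, so those immediate-write blocks are
	 * accounted as allocated before any new allocations happen.
	 */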
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}
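/*
 * Note (added): the "phys" routines above and below bypass block pointers
 * entirely -- they read or write a caller-specified range of a leaf vdev
 * with a caller-chosen checksum.  When the 'labels' flag is set, the
 * asserts require the range to fall entirely within the vdev label areas
 * at the front or back of the device.
 */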
1172e14bb325SJeff Bonwick ASSERT3U(offset + size, <=, vd->vdev_psize); 1173fa9e4066Sahrens 11745602294fSDan Kimmel zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done, 11755602294fSDan Kimmel private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, 11765602294fSDan Kimmel offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE); 1177fa9e4066Sahrens 1178e14bb325SJeff Bonwick zio->io_prop.zp_checksum = checksum; 1179fa9e4066Sahrens 118045818ee1SMatthew Ahrens if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) { 1181fa9e4066Sahrens /* 11826e1f5caaSNeil Perrin * zec checksums are necessarily destructive -- they modify 1183e14bb325SJeff Bonwick * the end of the write buffer to hold the verifier/checksum. 1184fa9e4066Sahrens * Therefore, we must make a local copy in case the data is 1185e14bb325SJeff Bonwick * being written to multiple places in parallel. 1186fa9e4066Sahrens */ 1187770499e1SDan Kimmel abd_t *wbuf = abd_alloc_sametype(data, size); 1188770499e1SDan Kimmel abd_copy(wbuf, data, size); 1189770499e1SDan Kimmel 1190e14bb325SJeff Bonwick zio_push_transform(zio, wbuf, size, size, NULL); 1191fa9e4066Sahrens } 1192fa9e4066Sahrens 1193fa9e4066Sahrens return (zio); 1194fa9e4066Sahrens } 1195fa9e4066Sahrens 1196fa9e4066Sahrens /* 1197e14bb325SJeff Bonwick * Create a child I/O to do some work for us. 1198fa9e4066Sahrens */ 1199fa9e4066Sahrens zio_t * 1200e14bb325SJeff Bonwick zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset, 1201770499e1SDan Kimmel abd_t *data, uint64_t size, int type, zio_priority_t priority, 1202dcbf3bd6SGeorge Wilson enum zio_flag flags, zio_done_func_t *done, void *private) 1203fa9e4066Sahrens { 1204b24ab676SJeff Bonwick enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE; 1205e14bb325SJeff Bonwick zio_t *zio; 1206e14bb325SJeff Bonwick 12075cabbc6bSPrashanth Sreenivasa /* 12085cabbc6bSPrashanth Sreenivasa * vdev child I/Os do not propagate their error to the parent. 12095cabbc6bSPrashanth Sreenivasa * Therefore, for correct operation the caller *must* check for 12105cabbc6bSPrashanth Sreenivasa * and handle the error in the child i/o's done callback. 12115cabbc6bSPrashanth Sreenivasa * The only exceptions are i/os that we don't care about 12125cabbc6bSPrashanth Sreenivasa * (OPTIONAL or REPAIR). 12135cabbc6bSPrashanth Sreenivasa */ 12145cabbc6bSPrashanth Sreenivasa ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) || 12155cabbc6bSPrashanth Sreenivasa done != NULL); 12165cabbc6bSPrashanth Sreenivasa 1217fa9e4066Sahrens if (type == ZIO_TYPE_READ && bp != NULL) { 1218fa9e4066Sahrens /* 1219fa9e4066Sahrens * If we have the bp, then the child should perform the 1220fa9e4066Sahrens * checksum and the parent need not. This pushes error 1221fa9e4066Sahrens * detection as close to the leaves as possible and 1222fa9e4066Sahrens * eliminates redundant checksums in the interior nodes. 
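 *
 * An illustrative scenario (not tied to any particular vdev type):
 * a read of a mirrored block is sent to one child; that child runs
 * ZIO_STAGE_CHECKSUM_VERIFY, and only if it reports an error does the
 * mirror try another copy. The parent never re-verifies data that a
 * child has already checked.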
1223fa9e4066Sahrens */ 1224b24ab676SJeff Bonwick pipeline |= ZIO_STAGE_CHECKSUM_VERIFY; 1225b24ab676SJeff Bonwick pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 1226fa9e4066Sahrens } 1227fa9e4066Sahrens 12285cabbc6bSPrashanth Sreenivasa if (vd->vdev_ops->vdev_op_leaf) { 12295cabbc6bSPrashanth Sreenivasa ASSERT0(vd->vdev_children); 1230e14bb325SJeff Bonwick offset += VDEV_LABEL_START_SIZE; 12315cabbc6bSPrashanth Sreenivasa } 1232e14bb325SJeff Bonwick 12335cabbc6bSPrashanth Sreenivasa flags |= ZIO_VDEV_CHILD_FLAGS(pio); 1234b24ab676SJeff Bonwick 1235b24ab676SJeff Bonwick /* 1236b24ab676SJeff Bonwick * If we've decided to do a repair, the write is not speculative -- 1237b24ab676SJeff Bonwick * even if the original read was. 1238b24ab676SJeff Bonwick */ 1239b24ab676SJeff Bonwick if (flags & ZIO_FLAG_IO_REPAIR) 1240b24ab676SJeff Bonwick flags &= ~ZIO_FLAG_SPECULATIVE; 1241b24ab676SJeff Bonwick 12420f7643c7SGeorge Wilson /* 12430f7643c7SGeorge Wilson * If we're creating a child I/O that is not associated with a 12440f7643c7SGeorge Wilson * top-level vdev, then the child zio is not an allocating I/O. 12450f7643c7SGeorge Wilson * If this is a retried I/O then we ignore it since we will 12460f7643c7SGeorge Wilson * have already processed the original allocating I/O. 12470f7643c7SGeorge Wilson */ 12480f7643c7SGeorge Wilson if (flags & ZIO_FLAG_IO_ALLOCATING && 12490f7643c7SGeorge Wilson (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) { 1250663207adSDon Brady ASSERT(pio->io_metaslab_class != NULL); 1251663207adSDon Brady ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled); 12520f7643c7SGeorge Wilson ASSERT(type == ZIO_TYPE_WRITE); 12530f7643c7SGeorge Wilson ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE); 12540f7643c7SGeorge Wilson ASSERT(!(flags & ZIO_FLAG_IO_REPAIR)); 12550f7643c7SGeorge Wilson ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) || 12560f7643c7SGeorge Wilson pio->io_child_type == ZIO_CHILD_GANG); 12570f7643c7SGeorge Wilson 12580f7643c7SGeorge Wilson flags &= ~ZIO_FLAG_IO_ALLOCATING; 12590f7643c7SGeorge Wilson } 12600f7643c7SGeorge Wilson 12615602294fSDan Kimmel zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size, 1262b24ab676SJeff Bonwick done, private, type, priority, flags, vd, offset, &pio->io_bookmark, 1263b24ab676SJeff Bonwick ZIO_STAGE_VDEV_IO_START >> 1, pipeline); 12640f7643c7SGeorge Wilson ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 1265fa9e4066Sahrens 126669962b56SMatthew Ahrens zio->io_physdone = pio->io_physdone; 126769962b56SMatthew Ahrens if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL) 126869962b56SMatthew Ahrens zio->io_logical->io_phys_children++; 126969962b56SMatthew Ahrens 1270e14bb325SJeff Bonwick return (zio); 127132b87932Sek } 127232b87932Sek 1273e14bb325SJeff Bonwick zio_t * 1274770499e1SDan Kimmel zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size, 12753a4b1be9SMatthew Ahrens zio_type_t type, zio_priority_t priority, enum zio_flag flags, 12769a686fbcSPaul Dagnelie zio_done_func_t *done, void *private) 1277fa9e4066Sahrens { 1278e14bb325SJeff Bonwick zio_t *zio; 1279fa9e4066Sahrens 1280e14bb325SJeff Bonwick ASSERT(vd->vdev_ops->vdev_op_leaf); 1281fa9e4066Sahrens 1282e14bb325SJeff Bonwick zio = zio_create(NULL, vd->vdev_spa, 0, NULL, 12835602294fSDan Kimmel data, size, size, done, private, type, priority, 128469962b56SMatthew Ahrens flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED, 1285e14bb325SJeff Bonwick vd, offset, NULL, 1286b24ab676SJeff Bonwick ZIO_STAGE_VDEV_IO_START >> 1, 
ZIO_VDEV_CHILD_PIPELINE); 1287fa9e4066Sahrens 1288e14bb325SJeff Bonwick return (zio); 1289e05725b1Sbonwick } 1290e05725b1Sbonwick 1291e05725b1Sbonwick void 1292e14bb325SJeff Bonwick zio_flush(zio_t *zio, vdev_t *vd) 1293e05725b1Sbonwick { 1294e14bb325SJeff Bonwick zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 129569962b56SMatthew Ahrens NULL, NULL, 1296e14bb325SJeff Bonwick ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY)); 1297fa9e4066Sahrens } 1298fa9e4066Sahrens 12996e1f5caaSNeil Perrin void 13006e1f5caaSNeil Perrin zio_shrink(zio_t *zio, uint64_t size) 13016e1f5caaSNeil Perrin { 13021271e4b1SPrakash Surya ASSERT3P(zio->io_executor, ==, NULL); 13031271e4b1SPrakash Surya ASSERT3P(zio->io_orig_size, ==, zio->io_size); 13041271e4b1SPrakash Surya ASSERT3U(size, <=, zio->io_size); 13056e1f5caaSNeil Perrin 13066e1f5caaSNeil Perrin /* 13076e1f5caaSNeil Perrin * We don't shrink for raidz because of problems with the 13086e1f5caaSNeil Perrin * reconstruction when reading back less than the block size. 13096e1f5caaSNeil Perrin * Note, BP_IS_RAIDZ() assumes no compression. 13106e1f5caaSNeil Perrin */ 13116e1f5caaSNeil Perrin ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 13125602294fSDan Kimmel if (!BP_IS_RAIDZ(zio->io_bp)) { 13135602294fSDan Kimmel /* we are not doing a raw write */ 13145602294fSDan Kimmel ASSERT3U(zio->io_size, ==, zio->io_lsize); 13155602294fSDan Kimmel zio->io_orig_size = zio->io_size = zio->io_lsize = size; 13165602294fSDan Kimmel } 13176e1f5caaSNeil Perrin } 13186e1f5caaSNeil Perrin 1319fa9e4066Sahrens /* 1320fa9e4066Sahrens * ========================================================================== 1321e14bb325SJeff Bonwick * Prepare to read and write logical blocks 1322fa9e4066Sahrens * ========================================================================== 1323fa9e4066Sahrens */ 1324e14bb325SJeff Bonwick 1325e05725b1Sbonwick static int 1326e14bb325SJeff Bonwick zio_read_bp_init(zio_t *zio) 1327fa9e4066Sahrens { 1328e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 1329*eb633035STom Caputi uint64_t psize = 1330*eb633035STom Caputi BP_IS_EMBEDDED(bp) ? 
BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp); 1331e05725b1Sbonwick 13325cabbc6bSPrashanth Sreenivasa ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 13335cabbc6bSPrashanth Sreenivasa 133403361682SJeff Bonwick if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && 1335f5383399SBill Moore zio->io_child_type == ZIO_CHILD_LOGICAL && 1336*eb633035STom Caputi !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { 1337770499e1SDan Kimmel zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1338770499e1SDan Kimmel psize, psize, zio_decompress); 1339e14bb325SJeff Bonwick } 1340fa9e4066Sahrens 1341*eb633035STom Caputi if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) || 1342*eb633035STom Caputi BP_HAS_INDIRECT_MAC_CKSUM(bp)) && 1343*eb633035STom Caputi zio->io_child_type == ZIO_CHILD_LOGICAL) { 1344*eb633035STom Caputi zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1345*eb633035STom Caputi psize, psize, zio_decrypt); 1346*eb633035STom Caputi } 1347770499e1SDan Kimmel 1348*eb633035STom Caputi if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) { 1349770499e1SDan Kimmel int psize = BPE_GET_PSIZE(bp); 1350770499e1SDan Kimmel void *data = abd_borrow_buf(zio->io_abd, psize); 1351*eb633035STom Caputi 1352*eb633035STom Caputi zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1353770499e1SDan Kimmel decode_embedded_bp_compressed(bp, data); 1354770499e1SDan Kimmel abd_return_buf_copy(zio->io_abd, data, psize); 13555d7b4d43SMatthew Ahrens } else { 13565d7b4d43SMatthew Ahrens ASSERT(!BP_IS_EMBEDDED(bp)); 13575cabbc6bSPrashanth Sreenivasa ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 13585d7b4d43SMatthew Ahrens } 13595d7b4d43SMatthew Ahrens 1360ad135b5dSChristopher Siden if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0) 1361e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_CACHE; 1362fa9e4066Sahrens 1363bbfd46c4SJeff Bonwick if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP) 1364bbfd46c4SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_CACHE; 1365bbfd46c4SJeff Bonwick 1366b24ab676SJeff Bonwick if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) 1367b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_READ_PIPELINE; 1368b24ab676SJeff Bonwick 1369e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1370fa9e4066Sahrens } 1371fa9e4066Sahrens 1372e05725b1Sbonwick static int 1373e14bb325SJeff Bonwick zio_write_bp_init(zio_t *zio) 13740a4e9518Sgw { 1375e14bb325SJeff Bonwick if (!IO_IS_ALLOCATING(zio)) 1376e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 13770a4e9518Sgw 1378b24ab676SJeff Bonwick ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1379b24ab676SJeff Bonwick 1380b24ab676SJeff Bonwick if (zio->io_bp_override) { 13810f7643c7SGeorge Wilson blkptr_t *bp = zio->io_bp; 13820f7643c7SGeorge Wilson zio_prop_t *zp = &zio->io_prop; 13830f7643c7SGeorge Wilson 1384b24ab676SJeff Bonwick ASSERT(bp->blk_birth != zio->io_txg); 1385b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0); 1386b24ab676SJeff Bonwick 1387b24ab676SJeff Bonwick *bp = *zio->io_bp_override; 1388b24ab676SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1389b24ab676SJeff Bonwick 13905d7b4d43SMatthew Ahrens if (BP_IS_EMBEDDED(bp)) 13915d7b4d43SMatthew Ahrens return (ZIO_PIPELINE_CONTINUE); 13925d7b4d43SMatthew Ahrens 139380901aeaSGeorge Wilson /* 139480901aeaSGeorge Wilson * If we've been overridden and nopwrite is set then 139580901aeaSGeorge Wilson * set the flag accordingly to indicate that a nopwrite 139680901aeaSGeorge Wilson * has already occurred. 
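 *
 * (ZIO_FLAG_NOPWRITE signals the rest of the pipeline that the
 * existing block pointer is being reused, so no new allocation is
 * required for this write.)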
139780901aeaSGeorge Wilson */ 139880901aeaSGeorge Wilson if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) { 139980901aeaSGeorge Wilson ASSERT(!zp->zp_dedup); 14000f7643c7SGeorge Wilson ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum); 140180901aeaSGeorge Wilson zio->io_flags |= ZIO_FLAG_NOPWRITE; 140280901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 140380901aeaSGeorge Wilson } 140480901aeaSGeorge Wilson 140580901aeaSGeorge Wilson ASSERT(!zp->zp_nopwrite); 140680901aeaSGeorge Wilson 1407b24ab676SJeff Bonwick if (BP_IS_HOLE(bp) || !zp->zp_dedup) 1408b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1409b24ab676SJeff Bonwick 141045818ee1SMatthew Ahrens ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags & 141145818ee1SMatthew Ahrens ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify); 1412b24ab676SJeff Bonwick 1413*eb633035STom Caputi if (BP_GET_CHECKSUM(bp) == zp->zp_checksum && 1414*eb633035STom Caputi !zp->zp_encrypt) { 1415b24ab676SJeff Bonwick BP_SET_DEDUP(bp, 1); 1416b24ab676SJeff Bonwick zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; 1417b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1418b24ab676SJeff Bonwick } 14190f7643c7SGeorge Wilson 14200f7643c7SGeorge Wilson /* 14210f7643c7SGeorge Wilson * We were unable to handle this as an override bp, treat 14220f7643c7SGeorge Wilson * it as a regular write I/O. 14230f7643c7SGeorge Wilson */ 1424b39b744bSMatthew Ahrens zio->io_bp_override = NULL; 14250f7643c7SGeorge Wilson *bp = zio->io_bp_orig; 14260f7643c7SGeorge Wilson zio->io_pipeline = zio->io_orig_pipeline; 1427b24ab676SJeff Bonwick } 14280a4e9518Sgw 14290f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 14300f7643c7SGeorge Wilson } 14310f7643c7SGeorge Wilson 14320f7643c7SGeorge Wilson static int 14330f7643c7SGeorge Wilson zio_write_compress(zio_t *zio) 14340f7643c7SGeorge Wilson { 14350f7643c7SGeorge Wilson spa_t *spa = zio->io_spa; 14360f7643c7SGeorge Wilson zio_prop_t *zp = &zio->io_prop; 14370f7643c7SGeorge Wilson enum zio_compress compress = zp->zp_compress; 14380f7643c7SGeorge Wilson blkptr_t *bp = zio->io_bp; 14395602294fSDan Kimmel uint64_t lsize = zio->io_lsize; 14405602294fSDan Kimmel uint64_t psize = zio->io_size; 14410f7643c7SGeorge Wilson int pass = 1; 14420f7643c7SGeorge Wilson 14430f7643c7SGeorge Wilson /* 14440f7643c7SGeorge Wilson * If our children haven't all reached the ready stage, 14450f7643c7SGeorge Wilson * wait for them and then repeat this pipeline stage. 14460f7643c7SGeorge Wilson */ 1447d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT | 1448d6e1c446SGeorge Wilson ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) { 14490f7643c7SGeorge Wilson return (ZIO_PIPELINE_STOP); 1450d6e1c446SGeorge Wilson } 14510f7643c7SGeorge Wilson 14520f7643c7SGeorge Wilson if (!IO_IS_ALLOCATING(zio)) 14530f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 14540f7643c7SGeorge Wilson 14550f7643c7SGeorge Wilson if (zio->io_children_ready != NULL) { 14560f7643c7SGeorge Wilson /* 14570f7643c7SGeorge Wilson * Now that all our children are ready, run the callback 14580f7643c7SGeorge Wilson * associated with this zio in case it wants to modify the 14590f7643c7SGeorge Wilson * data to be written. 
14600f7643c7SGeorge Wilson */ 14610f7643c7SGeorge Wilson ASSERT3U(zp->zp_level, >, 0); 14620f7643c7SGeorge Wilson zio->io_children_ready(zio); 14630f7643c7SGeorge Wilson } 14640f7643c7SGeorge Wilson 14650f7643c7SGeorge Wilson ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 14660f7643c7SGeorge Wilson ASSERT(zio->io_bp_override == NULL); 14670f7643c7SGeorge Wilson 146843466aaeSMax Grossman if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) { 1469e14bb325SJeff Bonwick /* 1470e14bb325SJeff Bonwick * We're rewriting an existing block, which means we're 1471e14bb325SJeff Bonwick * working on behalf of spa_sync(). For spa_sync() to 1472e14bb325SJeff Bonwick * converge, it must eventually be the case that we don't 1473e14bb325SJeff Bonwick * have to allocate new blocks. But compression changes 1474e14bb325SJeff Bonwick * the blocksize, which forces a reallocate, and makes 1475e14bb325SJeff Bonwick * convergence take longer. Therefore, after the first 1476e14bb325SJeff Bonwick * few passes, stop compressing to ensure convergence. 1477e14bb325SJeff Bonwick */ 1478b24ab676SJeff Bonwick pass = spa_sync_pass(spa); 1479b24ab676SJeff Bonwick 1480b24ab676SJeff Bonwick ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1481b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1482b24ab676SJeff Bonwick ASSERT(!BP_GET_DEDUP(bp)); 1483e05725b1Sbonwick 148401f55e48SGeorge Wilson if (pass >= zfs_sync_pass_dont_compress) 1485e14bb325SJeff Bonwick compress = ZIO_COMPRESS_OFF; 1486e05725b1Sbonwick 1487e14bb325SJeff Bonwick /* Make sure someone doesn't change their mind on overwrites */ 14885d7b4d43SMatthew Ahrens ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp), 1489b24ab676SJeff Bonwick spa_max_replication(spa)) == BP_GET_NDVAS(bp)); 1490e14bb325SJeff Bonwick } 1491fa9e4066Sahrens 14925602294fSDan Kimmel /* If it's a compressed write that is not raw, compress the buffer. 
*/
1493*eb633035STom Caputi 	if (compress != ZIO_COMPRESS_OFF &&
1494*eb633035STom Caputi 	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
1495b24ab676SJeff Bonwick 		void *cbuf = zio_buf_alloc(lsize);
1496770499e1SDan Kimmel 		psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
1497b24ab676SJeff Bonwick 		if (psize == 0 || psize == lsize) {
1498e14bb325SJeff Bonwick 			compress = ZIO_COMPRESS_OFF;
1499b24ab676SJeff Bonwick 			zio_buf_free(cbuf, lsize);
1500*eb633035STom Caputi 		} else if (!zp->zp_dedup && !zp->zp_encrypt &&
1501*eb633035STom Caputi 		    psize <= BPE_PAYLOAD_SIZE &&
15025d7b4d43SMatthew Ahrens 		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
15035d7b4d43SMatthew Ahrens 		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
15045d7b4d43SMatthew Ahrens 			encode_embedded_bp_compressed(bp,
15055d7b4d43SMatthew Ahrens 			    cbuf, compress, lsize, psize);
15065d7b4d43SMatthew Ahrens 			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
15075d7b4d43SMatthew Ahrens 			BP_SET_TYPE(bp, zio->io_prop.zp_type);
15085d7b4d43SMatthew Ahrens 			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
15095d7b4d43SMatthew Ahrens 			zio_buf_free(cbuf, lsize);
15105d7b4d43SMatthew Ahrens 			bp->blk_birth = zio->io_txg;
15115d7b4d43SMatthew Ahrens 			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
15125d7b4d43SMatthew Ahrens 			ASSERT(spa_feature_is_active(spa,
15135d7b4d43SMatthew Ahrens 			    SPA_FEATURE_EMBEDDED_DATA));
15145d7b4d43SMatthew Ahrens 			return (ZIO_PIPELINE_CONTINUE);
1515b24ab676SJeff Bonwick 		} else {
15165d7b4d43SMatthew Ahrens 			/*
151781cd5c55SMatthew Ahrens 			 * Round the compressed size up to the ashift
151881cd5c55SMatthew Ahrens 			 * of the smallest-ashift device, and zero the tail.
151981cd5c55SMatthew Ahrens 			 * This ensures that the compressed size of the BP
152081cd5c55SMatthew Ahrens 			 * (and thus the compressratio property) is correct,
152181cd5c55SMatthew Ahrens 			 * in that we charge for the padding used to fill out
152281cd5c55SMatthew Ahrens 			 * the last sector.
15235d7b4d43SMatthew Ahrens 			 */
152481cd5c55SMatthew Ahrens 			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
152581cd5c55SMatthew Ahrens 			size_t rounded = (size_t)P2ROUNDUP(psize,
152681cd5c55SMatthew Ahrens 			    1ULL << spa->spa_min_ashift);
152781cd5c55SMatthew Ahrens 			if (rounded >= lsize) {
15285d7b4d43SMatthew Ahrens 				compress = ZIO_COMPRESS_OFF;
15295d7b4d43SMatthew Ahrens 				zio_buf_free(cbuf, lsize);
153081cd5c55SMatthew Ahrens 				psize = lsize;
15315d7b4d43SMatthew Ahrens 			} else {
1532770499e1SDan Kimmel 				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
1533770499e1SDan Kimmel 				abd_take_ownership_of_buf(cdata, B_TRUE);
1534770499e1SDan Kimmel 				abd_zero_off(cdata, psize, rounded - psize);
153581cd5c55SMatthew Ahrens 				psize = rounded;
1536770499e1SDan Kimmel 				zio_push_transform(zio, cdata,
15375d7b4d43SMatthew Ahrens 				    psize, lsize, NULL);
15385d7b4d43SMatthew Ahrens 			}
1539e14bb325SJeff Bonwick 		}
15400f7643c7SGeorge Wilson
15410f7643c7SGeorge Wilson 		/*
15420f7643c7SGeorge Wilson 		 * We were unable to handle this as an override bp, treat
15430f7643c7SGeorge Wilson 		 * it as a regular write I/O.
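 *
 * (A worked example of the rounding above, with made-up numbers:
 * lsize = 8192, psize = 3000 after compression, spa_min_ashift = 9.
 * P2ROUNDUP(3000, 512) = 3072, so bytes 3000..3071 of the buffer are
 * zeroed and psize becomes 3072. Had the rounded size reached 8192,
 * compression would have been abandoned instead.)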
15440f7643c7SGeorge Wilson */ 15450f7643c7SGeorge Wilson zio->io_bp_override = NULL; 15460f7643c7SGeorge Wilson *bp = zio->io_bp_orig; 15470f7643c7SGeorge Wilson zio->io_pipeline = zio->io_orig_pipeline; 1548*eb633035STom Caputi 1549*eb633035STom Caputi } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 && 1550*eb633035STom Caputi zp->zp_type == DMU_OT_DNODE) { 1551*eb633035STom Caputi /* 1552*eb633035STom Caputi * The DMU actually relies on the zio layer's compression 1553*eb633035STom Caputi * to free metadnode blocks that have had all contained 1554*eb633035STom Caputi * dnodes freed. As a result, even when doing a raw 1555*eb633035STom Caputi * receive, we must check whether the block can be compressed 1556*eb633035STom Caputi * to a hole. 1557*eb633035STom Caputi */ 1558*eb633035STom Caputi psize = zio_compress_data(ZIO_COMPRESS_EMPTY, 1559*eb633035STom Caputi zio->io_abd, NULL, lsize); 1560*eb633035STom Caputi if (psize == 0) 1561*eb633035STom Caputi compress = ZIO_COMPRESS_OFF; 15625602294fSDan Kimmel } else { 15635602294fSDan Kimmel ASSERT3U(psize, !=, 0); 1564e14bb325SJeff Bonwick } 1565c717a561Smaybee 1566e14bb325SJeff Bonwick /* 1567e14bb325SJeff Bonwick * The final pass of spa_sync() must be all rewrites, but the first 1568e14bb325SJeff Bonwick * few passes offer a trade-off: allocating blocks defers convergence, 1569e14bb325SJeff Bonwick * but newly allocated blocks are sequential, so they can be written 1570e14bb325SJeff Bonwick * to disk faster. Therefore, we allow the first few passes of 1571e14bb325SJeff Bonwick * spa_sync() to allocate new blocks, but force rewrites after that. 1572e14bb325SJeff Bonwick * There should only be a handful of blocks after pass 1 in any case. 1573e14bb325SJeff Bonwick */ 157443466aaeSMax Grossman if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg && 157543466aaeSMax Grossman BP_GET_PSIZE(bp) == psize && 157601f55e48SGeorge Wilson pass >= zfs_sync_pass_rewrite) { 1577663207adSDon Brady VERIFY3U(psize, !=, 0); 1578b24ab676SJeff Bonwick enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 1579e14bb325SJeff Bonwick zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 1580e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_IO_REWRITE; 1581e14bb325SJeff Bonwick } else { 1582e14bb325SJeff Bonwick BP_ZERO(bp); 1583e14bb325SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 1584e14bb325SJeff Bonwick } 1585fa9e4066Sahrens 1586b24ab676SJeff Bonwick if (psize == 0) { 158743466aaeSMax Grossman if (zio->io_bp_orig.blk_birth != 0 && 158843466aaeSMax Grossman spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { 158943466aaeSMax Grossman BP_SET_LSIZE(bp, lsize); 159043466aaeSMax Grossman BP_SET_TYPE(bp, zp->zp_type); 159143466aaeSMax Grossman BP_SET_LEVEL(bp, zp->zp_level); 159243466aaeSMax Grossman BP_SET_BIRTH(bp, zio->io_txg, 0); 159343466aaeSMax Grossman } 1594e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1595e14bb325SJeff Bonwick } else { 1596e14bb325SJeff Bonwick ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 1597e14bb325SJeff Bonwick BP_SET_LSIZE(bp, lsize); 159843466aaeSMax Grossman BP_SET_TYPE(bp, zp->zp_type); 159943466aaeSMax Grossman BP_SET_LEVEL(bp, zp->zp_level); 1600b24ab676SJeff Bonwick BP_SET_PSIZE(bp, psize); 1601e14bb325SJeff Bonwick BP_SET_COMPRESS(bp, compress); 1602e14bb325SJeff Bonwick BP_SET_CHECKSUM(bp, zp->zp_checksum); 1603b24ab676SJeff Bonwick BP_SET_DEDUP(bp, zp->zp_dedup); 1604e14bb325SJeff Bonwick BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 1605b24ab676SJeff Bonwick if (zp->zp_dedup) { 
1606b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1607b24ab676SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1608*eb633035STom Caputi ASSERT(!zp->zp_encrypt || 1609*eb633035STom Caputi DMU_OT_IS_ENCRYPTED(zp->zp_type)); 1610b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 1611b24ab676SJeff Bonwick } 161280901aeaSGeorge Wilson if (zp->zp_nopwrite) { 161380901aeaSGeorge Wilson ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 161480901aeaSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 161580901aeaSGeorge Wilson zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; 161680901aeaSGeorge Wilson } 1617b24ab676SJeff Bonwick } 1618b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1619b24ab676SJeff Bonwick } 1620b24ab676SJeff Bonwick 1621b24ab676SJeff Bonwick static int 1622b24ab676SJeff Bonwick zio_free_bp_init(zio_t *zio) 1623b24ab676SJeff Bonwick { 1624b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 1625b24ab676SJeff Bonwick 1626b24ab676SJeff Bonwick if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 1627b24ab676SJeff Bonwick if (BP_GET_DEDUP(bp)) 1628b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 1629e14bb325SJeff Bonwick } 1630fa9e4066Sahrens 16315cabbc6bSPrashanth Sreenivasa ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); 16325cabbc6bSPrashanth Sreenivasa 1633e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 1634fa9e4066Sahrens } 1635fa9e4066Sahrens 1636e14bb325SJeff Bonwick /* 1637e14bb325SJeff Bonwick * ========================================================================== 1638e14bb325SJeff Bonwick * Execute the I/O pipeline 1639e14bb325SJeff Bonwick * ========================================================================== 1640e14bb325SJeff Bonwick */ 1641e14bb325SJeff Bonwick 1642e14bb325SJeff Bonwick static void 1643ec94d322SAdam Leventhal zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) 1644fa9e4066Sahrens { 164580eb36f2SGeorge Wilson spa_t *spa = zio->io_spa; 1646e14bb325SJeff Bonwick zio_type_t t = zio->io_type; 16475aeb9474SGarrett D'Amore int flags = (cutinline ? TQ_FRONT : 0); 16480a4e9518Sgw 16490a4e9518Sgw /* 1650bbe36defSGeorge Wilson * If we're a config writer or a probe, the normal issue and 1651bbe36defSGeorge Wilson * interrupt threads may all be blocked waiting for the config lock. 1652bbe36defSGeorge Wilson * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 16530a4e9518Sgw */ 1654bbe36defSGeorge Wilson if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 1655e14bb325SJeff Bonwick t = ZIO_TYPE_NULL; 16560a4e9518Sgw 16570a4e9518Sgw /* 1658e14bb325SJeff Bonwick * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 16590a4e9518Sgw */ 1660e14bb325SJeff Bonwick if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 1661e14bb325SJeff Bonwick t = ZIO_TYPE_NULL; 16620a4e9518Sgw 166380eb36f2SGeorge Wilson /* 1664ec94d322SAdam Leventhal * If this is a high priority I/O, then use the high priority taskq if 1665ec94d322SAdam Leventhal * available. 
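 *
 * (Here q + 1 is assumed to select the _HIGH variant of the current
 * taskq type, e.g. ZIO_TASKQ_ISSUE -> ZIO_TASKQ_ISSUE_HIGH.)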
166680eb36f2SGeorge Wilson */ 16672258ad0bSGeorge Wilson if ((zio->io_priority == ZIO_PRIORITY_NOW || 16682258ad0bSGeorge Wilson zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) && 1669ec94d322SAdam Leventhal spa->spa_zio_taskq[t][q + 1].stqs_count != 0) 167080eb36f2SGeorge Wilson q++; 167180eb36f2SGeorge Wilson 167280eb36f2SGeorge Wilson ASSERT3U(q, <, ZIO_TASKQ_TYPES); 16735aeb9474SGarrett D'Amore 16745aeb9474SGarrett D'Amore /* 16755aeb9474SGarrett D'Amore * NB: We are assuming that the zio can only be dispatched 16765aeb9474SGarrett D'Amore * to a single taskq at a time. It would be a grievous error 16775aeb9474SGarrett D'Amore * to dispatch the zio to another taskq at the same time. 16785aeb9474SGarrett D'Amore */ 16795aeb9474SGarrett D'Amore ASSERT(zio->io_tqent.tqent_next == NULL); 1680ec94d322SAdam Leventhal spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio, 1681ec94d322SAdam Leventhal flags, &zio->io_tqent); 1682e14bb325SJeff Bonwick } 16830a4e9518Sgw 1684e14bb325SJeff Bonwick static boolean_t 1685ec94d322SAdam Leventhal zio_taskq_member(zio_t *zio, zio_taskq_type_t q) 1686e14bb325SJeff Bonwick { 1687e14bb325SJeff Bonwick kthread_t *executor = zio->io_executor; 1688e14bb325SJeff Bonwick spa_t *spa = zio->io_spa; 16890a4e9518Sgw 1690ec94d322SAdam Leventhal for (zio_type_t t = 0; t < ZIO_TYPES; t++) { 1691ec94d322SAdam Leventhal spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1692ec94d322SAdam Leventhal uint_t i; 1693ec94d322SAdam Leventhal for (i = 0; i < tqs->stqs_count; i++) { 1694ec94d322SAdam Leventhal if (taskq_member(tqs->stqs_taskq[i], executor)) 1695ec94d322SAdam Leventhal return (B_TRUE); 1696ec94d322SAdam Leventhal } 1697ec94d322SAdam Leventhal } 16980a4e9518Sgw 1699e14bb325SJeff Bonwick return (B_FALSE); 1700e14bb325SJeff Bonwick } 1701e05725b1Sbonwick 1702e14bb325SJeff Bonwick static int 1703e14bb325SJeff Bonwick zio_issue_async(zio_t *zio) 1704e14bb325SJeff Bonwick { 170535a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 1706e14bb325SJeff Bonwick 1707e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 17080a4e9518Sgw } 17090a4e9518Sgw 1710e14bb325SJeff Bonwick void 1711e14bb325SJeff Bonwick zio_interrupt(zio_t *zio) 17120a4e9518Sgw { 171335a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); 1714e14bb325SJeff Bonwick } 17150a4e9518Sgw 171697e81309SPrakash Surya void 171797e81309SPrakash Surya zio_delay_interrupt(zio_t *zio) 171897e81309SPrakash Surya { 171997e81309SPrakash Surya /* 172097e81309SPrakash Surya * The timeout_generic() function isn't defined in userspace, so 172197e81309SPrakash Surya * rather than trying to implement the function, the zio delay 172297e81309SPrakash Surya * functionality has been disabled for userspace builds. 172397e81309SPrakash Surya */ 172497e81309SPrakash Surya 172597e81309SPrakash Surya #ifdef _KERNEL 172697e81309SPrakash Surya /* 172797e81309SPrakash Surya * If io_target_timestamp is zero, then no delay has been registered 172897e81309SPrakash Surya * for this IO, thus jump to the end of this function and "skip" the 172997e81309SPrakash Surya * delay; issuing it directly to the zio layer. 
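 *
 * (An illustrative timeline with made-up numbers: suppose an injected
 * delay set io_target_timestamp to issue time + 10ms. If the device
 * finishes after 4ms, timeout_generic() defers zio_interrupt() for
 * the remaining 6ms; if it finishes after 12ms, the deadline has
 * already passed and the "miss" path calls zio_interrupt() at once.)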
173097e81309SPrakash Surya */ 173197e81309SPrakash Surya if (zio->io_target_timestamp != 0) { 173297e81309SPrakash Surya hrtime_t now = gethrtime(); 173397e81309SPrakash Surya 173497e81309SPrakash Surya if (now >= zio->io_target_timestamp) { 173597e81309SPrakash Surya /* 173697e81309SPrakash Surya * This IO has already taken longer than the target 173797e81309SPrakash Surya * delay to complete, so we don't want to delay it 173897e81309SPrakash Surya * any longer; we "miss" the delay and issue it 173997e81309SPrakash Surya * directly to the zio layer. This is likely due to 174097e81309SPrakash Surya * the target latency being set to a value less than 174197e81309SPrakash Surya * the underlying hardware can satisfy (e.g. delay 174297e81309SPrakash Surya * set to 1ms, but the disks take 10ms to complete an 174397e81309SPrakash Surya * IO request). 174497e81309SPrakash Surya */ 174597e81309SPrakash Surya 174697e81309SPrakash Surya DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, 174797e81309SPrakash Surya hrtime_t, now); 174897e81309SPrakash Surya 174997e81309SPrakash Surya zio_interrupt(zio); 175097e81309SPrakash Surya } else { 175197e81309SPrakash Surya hrtime_t diff = zio->io_target_timestamp - now; 175297e81309SPrakash Surya 175397e81309SPrakash Surya DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, 175497e81309SPrakash Surya hrtime_t, now, hrtime_t, diff); 175597e81309SPrakash Surya 175697e81309SPrakash Surya (void) timeout_generic(CALLOUT_NORMAL, 175797e81309SPrakash Surya (void (*)(void *))zio_interrupt, zio, diff, 1, 0); 175897e81309SPrakash Surya } 175997e81309SPrakash Surya 176097e81309SPrakash Surya return; 176197e81309SPrakash Surya } 176297e81309SPrakash Surya #endif 176397e81309SPrakash Surya 176497e81309SPrakash Surya DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); 176597e81309SPrakash Surya zio_interrupt(zio); 176697e81309SPrakash Surya } 176797e81309SPrakash Surya 1768e14bb325SJeff Bonwick /* 1769e14bb325SJeff Bonwick * Execute the I/O pipeline until one of the following occurs: 1770f7170741SWill Andrews * 1771f7170741SWill Andrews * (1) the I/O completes 1772f7170741SWill Andrews * (2) the pipeline stalls waiting for dependent child I/Os 1773f7170741SWill Andrews * (3) the I/O issues, so we're waiting for an I/O completion interrupt 1774f7170741SWill Andrews * (4) the I/O is delegated by vdev-level caching or aggregation 1775f7170741SWill Andrews * (5) the I/O is deferred due to vdev-level queueing 1776f7170741SWill Andrews * (6) the I/O is handed off to another thread. 1777f7170741SWill Andrews * 1778f7170741SWill Andrews * In all cases, the pipeline stops whenever there's no CPU work; it never 1779f7170741SWill Andrews * burns a thread in cv_wait(). 1780e14bb325SJeff Bonwick * 1781e14bb325SJeff Bonwick * There's no locking on io_stage because there's no legitimate way 1782e14bb325SJeff Bonwick * for multiple threads to be attempting to process the same I/O. 
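 *
 * A worked example of the stage-advance loop in zio_execute() below,
 * using made-up bit values rather than real ZIO_STAGE_* constants:
 * with stage = 0x0008 and pipeline = 0x8061, the loop
 *
 *	do {
 *		stage <<= 1;
 *	} while ((stage & pipeline) == 0);
 *
 * shifts 0x0008 -> 0x0010 -> 0x0020 and stops at 0x0020, the next
 * stage bit that is actually present in the pipeline mask.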
1783e14bb325SJeff Bonwick */ 1784b24ab676SJeff Bonwick static zio_pipe_stage_t *zio_pipeline[]; 17850a4e9518Sgw 1786e14bb325SJeff Bonwick void 1787e14bb325SJeff Bonwick zio_execute(zio_t *zio) 1788e14bb325SJeff Bonwick { 1789e14bb325SJeff Bonwick zio->io_executor = curthread; 17900a4e9518Sgw 17910f7643c7SGeorge Wilson ASSERT3U(zio->io_queued_timestamp, >, 0); 17920f7643c7SGeorge Wilson 1793e14bb325SJeff Bonwick while (zio->io_stage < ZIO_STAGE_DONE) { 1794b24ab676SJeff Bonwick enum zio_stage pipeline = zio->io_pipeline; 1795b24ab676SJeff Bonwick enum zio_stage stage = zio->io_stage; 1796e14bb325SJeff Bonwick int rv; 17970a4e9518Sgw 1798e14bb325SJeff Bonwick ASSERT(!MUTEX_HELD(&zio->io_lock)); 1799b24ab676SJeff Bonwick ASSERT(ISP2(stage)); 1800b24ab676SJeff Bonwick ASSERT(zio->io_stall == NULL); 18010a4e9518Sgw 1802b24ab676SJeff Bonwick do { 1803b24ab676SJeff Bonwick stage <<= 1; 1804b24ab676SJeff Bonwick } while ((stage & pipeline) == 0); 1805e14bb325SJeff Bonwick 1806e14bb325SJeff Bonwick ASSERT(stage <= ZIO_STAGE_DONE); 18070a4e9518Sgw 18080a4e9518Sgw /* 1809e14bb325SJeff Bonwick * If we are in interrupt context and this pipeline stage 1810e14bb325SJeff Bonwick * will grab a config lock that is held across I/O, 1811b24ab676SJeff Bonwick * or may wait for an I/O that needs an interrupt thread 1812b24ab676SJeff Bonwick * to complete, issue async to avoid deadlock. 181335a5a358SJonathan Adams * 181435a5a358SJonathan Adams * For VDEV_IO_START, we cut in line so that the io will 181535a5a358SJonathan Adams * be sent to disk promptly. 18160a4e9518Sgw */ 1817b24ab676SJeff Bonwick if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 1818e14bb325SJeff Bonwick zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 181935a5a358SJonathan Adams boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 
182035a5a358SJonathan Adams 			    zio_requeue_io_start_cut_in_line : B_FALSE;
182135a5a358SJonathan Adams 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
1822e14bb325SJeff Bonwick 			return;
18230a4e9518Sgw 		}
18240a4e9518Sgw
1825e14bb325SJeff Bonwick 		zio->io_stage = stage;
18260f7643c7SGeorge Wilson 		zio->io_pipeline_trace |= zio->io_stage;
1827bf16b11eSMatthew Ahrens 		rv = zio_pipeline[highbit64(stage) - 1](zio);
18280a4e9518Sgw
1829e14bb325SJeff Bonwick 		if (rv == ZIO_PIPELINE_STOP)
1830e14bb325SJeff Bonwick 			return;
18310a4e9518Sgw
1832e14bb325SJeff Bonwick 		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
1833e14bb325SJeff Bonwick 	}
18340a4e9518Sgw }
18350a4e9518Sgw
1836e14bb325SJeff Bonwick /*
1837e14bb325SJeff Bonwick  * ==========================================================================
1838e14bb325SJeff Bonwick  * Initiate I/O, either sync or async
1839e14bb325SJeff Bonwick  * ==========================================================================
1840e14bb325SJeff Bonwick  */
1841e14bb325SJeff Bonwick int
1842e14bb325SJeff Bonwick zio_wait(zio_t *zio)
18430a4e9518Sgw {
1844e14bb325SJeff Bonwick 	int error;
18450a4e9518Sgw
18461271e4b1SPrakash Surya 	ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN);
18471271e4b1SPrakash Surya 	ASSERT3P(zio->io_executor, ==, NULL);
18480a4e9518Sgw
1849e14bb325SJeff Bonwick 	zio->io_waiter = curthread;
18500f7643c7SGeorge Wilson 	ASSERT0(zio->io_queued_timestamp);
18510f7643c7SGeorge Wilson 	zio->io_queued_timestamp = gethrtime();
1852e05725b1Sbonwick
1853e14bb325SJeff Bonwick 	zio_execute(zio);
18540a4e9518Sgw
1855e14bb325SJeff Bonwick 	mutex_enter(&zio->io_lock);
1856e14bb325SJeff Bonwick 	while (zio->io_executor != NULL)
1857e14bb325SJeff Bonwick 		cv_wait(&zio->io_cv, &zio->io_lock);
1858e14bb325SJeff Bonwick 	mutex_exit(&zio->io_lock);
185932b87932Sek
1860e14bb325SJeff Bonwick 	error = zio->io_error;
1861e14bb325SJeff Bonwick 	zio_destroy(zio);
186232b87932Sek
1863e14bb325SJeff Bonwick 	return (error);
186432b87932Sek }
186532b87932Sek
1866e14bb325SJeff Bonwick void
1867e14bb325SJeff Bonwick zio_nowait(zio_t *zio)
18680a4e9518Sgw {
18691271e4b1SPrakash Surya 	ASSERT3P(zio->io_executor, ==, NULL);
1870fa9e4066Sahrens
1871a3f829aeSBill Moore 	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
1872a3f829aeSBill Moore 	    zio_unique_parent(zio) == NULL) {
1873ea8dc4b6Seschrock 		/*
1874e14bb325SJeff Bonwick 		 * This is a logical async I/O with no parent to wait for it.
187554d692b7SGeorge Wilson 		 * We add it to the spa_async_zio_root "Godfather" I/O, which
187654d692b7SGeorge Wilson 		 * will ensure it completes prior to unloading the pool.
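 *
 * (For contrast, a hypothetical sketch of the two issue models:
 *
 *	error = zio_wait(zio);	caller blocks until the zio completes
 *	zio_nowait(zio);	caller continues; a parentless logical
 *				zio like this one is tracked by the
 *				godfather I/O instead.)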
1877ea8dc4b6Seschrock */ 1878e14bb325SJeff Bonwick spa_t *spa = zio->io_spa; 187954d692b7SGeorge Wilson 18806f834bc1SMatthew Ahrens zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio); 1881e14bb325SJeff Bonwick } 1882ea8dc4b6Seschrock 18830f7643c7SGeorge Wilson ASSERT0(zio->io_queued_timestamp); 18840f7643c7SGeorge Wilson zio->io_queued_timestamp = gethrtime(); 1885e14bb325SJeff Bonwick zio_execute(zio); 1886e14bb325SJeff Bonwick } 1887ea8dc4b6Seschrock 1888e14bb325SJeff Bonwick /* 1889e14bb325SJeff Bonwick * ========================================================================== 18901271e4b1SPrakash Surya * Reexecute, cancel, or suspend/resume failed I/O 1891e14bb325SJeff Bonwick * ========================================================================== 1892e14bb325SJeff Bonwick */ 1893fa9e4066Sahrens 1894e14bb325SJeff Bonwick static void 1895e14bb325SJeff Bonwick zio_reexecute(zio_t *pio) 1896e14bb325SJeff Bonwick { 1897a3f829aeSBill Moore zio_t *cio, *cio_next; 1898a3f829aeSBill Moore 1899a3f829aeSBill Moore ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 1900a3f829aeSBill Moore ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 1901f5383399SBill Moore ASSERT(pio->io_gang_leader == NULL); 1902f5383399SBill Moore ASSERT(pio->io_gang_tree == NULL); 1903e05725b1Sbonwick 1904e14bb325SJeff Bonwick pio->io_flags = pio->io_orig_flags; 1905e14bb325SJeff Bonwick pio->io_stage = pio->io_orig_stage; 1906e14bb325SJeff Bonwick pio->io_pipeline = pio->io_orig_pipeline; 1907e14bb325SJeff Bonwick pio->io_reexecute = 0; 190880901aeaSGeorge Wilson pio->io_flags |= ZIO_FLAG_REEXECUTED; 19090f7643c7SGeorge Wilson pio->io_pipeline_trace = 0; 1910e14bb325SJeff Bonwick pio->io_error = 0; 1911a3f829aeSBill Moore for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1912a3f829aeSBill Moore pio->io_state[w] = 0; 1913e14bb325SJeff Bonwick for (int c = 0; c < ZIO_CHILD_TYPES; c++) 1914e14bb325SJeff Bonwick pio->io_child_error[c] = 0; 19150a4e9518Sgw 1916b24ab676SJeff Bonwick if (IO_IS_ALLOCATING(pio)) 1917b24ab676SJeff Bonwick BP_ZERO(pio->io_bp); 1918d58459f4Sek 1919e14bb325SJeff Bonwick /* 1920e14bb325SJeff Bonwick * As we reexecute pio's children, new children could be created. 1921a3f829aeSBill Moore * New children go to the head of pio's io_child_list, however, 1922e14bb325SJeff Bonwick * so we will (correctly) not reexecute them. The key is that 1923a3f829aeSBill Moore * the remainder of pio's io_child_list, from 'cio_next' onward, 1924a3f829aeSBill Moore * cannot be affected by any side effects of reexecuting 'cio'. 1925e14bb325SJeff Bonwick */ 19260f7643c7SGeorge Wilson zio_link_t *zl = NULL; 1927a3874b8bSToomas Soome mutex_enter(&pio->io_lock); 19280f7643c7SGeorge Wilson for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 19290f7643c7SGeorge Wilson cio_next = zio_walk_children(pio, &zl); 1930a3f829aeSBill Moore for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1931a3f829aeSBill Moore pio->io_children[cio->io_child_type][w]++; 1932e14bb325SJeff Bonwick mutex_exit(&pio->io_lock); 1933a3f829aeSBill Moore zio_reexecute(cio); 1934a3874b8bSToomas Soome mutex_enter(&pio->io_lock); 1935fa9e4066Sahrens } 1936a3874b8bSToomas Soome mutex_exit(&pio->io_lock); 1937e05725b1Sbonwick 1938e14bb325SJeff Bonwick /* 1939e14bb325SJeff Bonwick * Now that all children have been reexecuted, execute the parent. 194054d692b7SGeorge Wilson * We don't reexecute "The Godfather" I/O here as it's the 194148bbca81SDaniel Hoffman * responsibility of the caller to wait on it. 
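 *
 * (zio_resume() below is one such caller: it reexecutes the suspended
 * godfather zio directly and then zio_wait()s on it.)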
1942e14bb325SJeff Bonwick */ 19430f7643c7SGeorge Wilson if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 19440f7643c7SGeorge Wilson pio->io_queued_timestamp = gethrtime(); 194554d692b7SGeorge Wilson zio_execute(pio); 19460f7643c7SGeorge Wilson } 19470a4e9518Sgw } 19480a4e9518Sgw 1949e14bb325SJeff Bonwick void 1950e0f1c0afSOlaf Faaland zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason) 19510a4e9518Sgw { 1952e14bb325SJeff Bonwick if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 1953e14bb325SJeff Bonwick fm_panic("Pool '%s' has encountered an uncorrectable I/O " 1954e14bb325SJeff Bonwick "failure and the failure mode property for this pool " 1955e14bb325SJeff Bonwick "is set to panic.", spa_name(spa)); 19560a4e9518Sgw 1957*eb633035STom Caputi zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, 1958*eb633035STom Caputi NULL, NULL, 0, 0); 19590a4e9518Sgw 1960e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 1961fa9e4066Sahrens 1962e14bb325SJeff Bonwick if (spa->spa_suspend_zio_root == NULL) 196354d692b7SGeorge Wilson spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 196454d692b7SGeorge Wilson ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 196554d692b7SGeorge Wilson ZIO_FLAG_GODFATHER); 1966fa9e4066Sahrens 1967e0f1c0afSOlaf Faaland spa->spa_suspended = reason; 1968fa9e4066Sahrens 1969e14bb325SJeff Bonwick if (zio != NULL) { 197054d692b7SGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 1971e14bb325SJeff Bonwick ASSERT(zio != spa->spa_suspend_zio_root); 1972e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1973a3f829aeSBill Moore ASSERT(zio_unique_parent(zio) == NULL); 1974e14bb325SJeff Bonwick ASSERT(zio->io_stage == ZIO_STAGE_DONE); 1975e14bb325SJeff Bonwick zio_add_child(spa->spa_suspend_zio_root, zio); 1976e14bb325SJeff Bonwick } 1977fa9e4066Sahrens 1978e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 1979e14bb325SJeff Bonwick } 1980fa9e4066Sahrens 198154d692b7SGeorge Wilson int 1982e14bb325SJeff Bonwick zio_resume(spa_t *spa) 1983e14bb325SJeff Bonwick { 198454d692b7SGeorge Wilson zio_t *pio; 1985fa9e4066Sahrens 1986b3995adbSahrens /* 1987e14bb325SJeff Bonwick * Reexecute all previously suspended i/o. 1988b3995adbSahrens */ 1989e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 1990e0f1c0afSOlaf Faaland spa->spa_suspended = ZIO_SUSPEND_NONE; 1991e14bb325SJeff Bonwick cv_broadcast(&spa->spa_suspend_cv); 1992e14bb325SJeff Bonwick pio = spa->spa_suspend_zio_root; 1993e14bb325SJeff Bonwick spa->spa_suspend_zio_root = NULL; 1994e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 1995e14bb325SJeff Bonwick 1996e14bb325SJeff Bonwick if (pio == NULL) 199754d692b7SGeorge Wilson return (0); 1998e14bb325SJeff Bonwick 199954d692b7SGeorge Wilson zio_reexecute(pio); 200054d692b7SGeorge Wilson return (zio_wait(pio)); 2001e14bb325SJeff Bonwick } 2002e14bb325SJeff Bonwick 2003e14bb325SJeff Bonwick void 2004e14bb325SJeff Bonwick zio_resume_wait(spa_t *spa) 2005e14bb325SJeff Bonwick { 2006e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 2007e14bb325SJeff Bonwick while (spa_suspended(spa)) 2008e14bb325SJeff Bonwick cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 2009e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 2010fa9e4066Sahrens } 2011fa9e4066Sahrens 2012fa9e4066Sahrens /* 2013fa9e4066Sahrens * ========================================================================== 2014e14bb325SJeff Bonwick * Gang blocks. 
2015e14bb325SJeff Bonwick * 2016e14bb325SJeff Bonwick * A gang block is a collection of small blocks that looks to the DMU 2017e14bb325SJeff Bonwick * like one large block. When zio_dva_allocate() cannot find a block 2018e14bb325SJeff Bonwick * of the requested size, due to either severe fragmentation or the pool 2019e14bb325SJeff Bonwick * being nearly full, it calls zio_write_gang_block() to construct the 2020e14bb325SJeff Bonwick * block from smaller fragments. 2021e14bb325SJeff Bonwick * 2022e14bb325SJeff Bonwick * A gang block consists of a gang header (zio_gbh_phys_t) and up to 2023e14bb325SJeff Bonwick * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 2024e14bb325SJeff Bonwick * an indirect block: it's an array of block pointers. It consumes 2025e14bb325SJeff Bonwick * only one sector and hence is allocatable regardless of fragmentation. 2026e14bb325SJeff Bonwick * The gang header's bps point to its gang members, which hold the data. 2027e14bb325SJeff Bonwick * 2028e14bb325SJeff Bonwick * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 2029e14bb325SJeff Bonwick * as the verifier to ensure uniqueness of the SHA256 checksum. 2030e14bb325SJeff Bonwick * Critically, the gang block bp's blk_cksum is the checksum of the data, 2031e14bb325SJeff Bonwick * not the gang header. This ensures that data block signatures (needed for 2032e14bb325SJeff Bonwick * deduplication) are independent of how the block is physically stored. 2033e14bb325SJeff Bonwick * 2034e14bb325SJeff Bonwick * Gang blocks can be nested: a gang member may itself be a gang block. 2035e14bb325SJeff Bonwick * Thus every gang block is a tree in which root and all interior nodes are 2036e14bb325SJeff Bonwick * gang headers, and the leaves are normal blocks that contain user data. 2037e14bb325SJeff Bonwick * The root of the gang tree is called the gang leader. 2038e14bb325SJeff Bonwick * 2039e14bb325SJeff Bonwick * To perform any operation (read, rewrite, free, claim) on a gang block, 2040e14bb325SJeff Bonwick * zio_gang_assemble() first assembles the gang tree (minus data leaves) 2041e14bb325SJeff Bonwick * in the io_gang_tree field of the original logical i/o by recursively 2042e14bb325SJeff Bonwick * reading the gang leader and all gang headers below it. This yields 2043e14bb325SJeff Bonwick * an in-core tree containing the contents of every gang header and the 2044e14bb325SJeff Bonwick * bps for every constituent of the gang block. 2045e14bb325SJeff Bonwick * 2046e14bb325SJeff Bonwick * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 2047e14bb325SJeff Bonwick * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 2048e14bb325SJeff Bonwick * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 2049e14bb325SJeff Bonwick * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 2050e14bb325SJeff Bonwick * zio_read_gang() is a wrapper around zio_read() that omits reading gang 2051e14bb325SJeff Bonwick * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 2052e14bb325SJeff Bonwick * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 2053e14bb325SJeff Bonwick * of the gang header plus zio_checksum_compute() of the data to update the 2054e14bb325SJeff Bonwick * gang header's blk_cksum as described above. 
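 *
 * A rough structural sketch (field names as used by the code below;
 * the layout is abridged, not a verbatim struct definition):
 *
 *	zio_gbh_phys_t (one sector)
 *		zg_blkptr[SPA_GBH_NBLKPTRS]	bps of up to three members
 *		zg_tail.zec_magic		embedded checksum tail
 *						(must equal ZEC_MAGIC)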
2055e14bb325SJeff Bonwick * 2056e14bb325SJeff Bonwick * The two-phase assemble/issue model solves the problem of partial failure -- 2057e14bb325SJeff Bonwick * what if you'd freed part of a gang block but then couldn't read the 2058e14bb325SJeff Bonwick * gang header for another part? Assembling the entire gang tree first 2059e14bb325SJeff Bonwick * ensures that all the necessary gang header I/O has succeeded before 2060e14bb325SJeff Bonwick * starting the actual work of free, claim, or write. Once the gang tree 2061e14bb325SJeff Bonwick * is assembled, free and claim are in-memory operations that cannot fail. 2062e14bb325SJeff Bonwick * 2063e14bb325SJeff Bonwick * In the event that a gang write fails, zio_dva_unallocate() walks the 2064e14bb325SJeff Bonwick * gang tree to immediately free (i.e. insert back into the space map) 2065e14bb325SJeff Bonwick * everything we've allocated. This ensures that we don't get ENOSPC 2066e14bb325SJeff Bonwick * errors during repeated suspend/resume cycles due to a flaky device. 2067e14bb325SJeff Bonwick * 2068e14bb325SJeff Bonwick * Gang rewrites only happen during sync-to-convergence. If we can't assemble 2069e14bb325SJeff Bonwick * the gang tree, we won't modify the block, so we can safely defer the free 2070e14bb325SJeff Bonwick * (knowing that the block is still intact). If we *can* assemble the gang 2071e14bb325SJeff Bonwick * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 2072e14bb325SJeff Bonwick * each constituent bp and we can allocate a new block on the next sync pass. 2073e14bb325SJeff Bonwick * 2074e14bb325SJeff Bonwick * In all cases, the gang tree allows complete recovery from partial failure. 2075fa9e4066Sahrens * ========================================================================== 2076fa9e4066Sahrens */ 2077e14bb325SJeff Bonwick 2078770499e1SDan Kimmel static void 2079770499e1SDan Kimmel zio_gang_issue_func_done(zio_t *zio) 2080770499e1SDan Kimmel { 2081770499e1SDan Kimmel abd_put(zio->io_abd); 2082770499e1SDan Kimmel } 2083770499e1SDan Kimmel 2084e14bb325SJeff Bonwick static zio_t * 2085770499e1SDan Kimmel zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2086770499e1SDan Kimmel uint64_t offset) 2087fa9e4066Sahrens { 2088e14bb325SJeff Bonwick if (gn != NULL) 2089e14bb325SJeff Bonwick return (pio); 2090fa9e4066Sahrens 2091770499e1SDan Kimmel return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 2092770499e1SDan Kimmel BP_GET_PSIZE(bp), zio_gang_issue_func_done, 2093770499e1SDan Kimmel NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2094e14bb325SJeff Bonwick &pio->io_bookmark)); 2095e14bb325SJeff Bonwick } 2096e14bb325SJeff Bonwick 2097770499e1SDan Kimmel static zio_t * 2098770499e1SDan Kimmel zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2099770499e1SDan Kimmel uint64_t offset) 2100e14bb325SJeff Bonwick { 2101e14bb325SJeff Bonwick zio_t *zio; 2102e14bb325SJeff Bonwick 2103e14bb325SJeff Bonwick if (gn != NULL) { 2104770499e1SDan Kimmel abd_t *gbh_abd = 2105770499e1SDan Kimmel abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2106e14bb325SJeff Bonwick zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2107770499e1SDan Kimmel gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 2108770499e1SDan Kimmel pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 2109770499e1SDan Kimmel &pio->io_bookmark); 2110fa9e4066Sahrens /* 2111e14bb325SJeff Bonwick * As we rewrite each gang header, the pipeline will compute 2112e14bb325SJeff Bonwick * a 
new gang block header checksum for it; but no one will 2113e14bb325SJeff Bonwick * compute a new data checksum, so we do that here. The one 2114e14bb325SJeff Bonwick * exception is the gang leader: the pipeline already computed 2115e14bb325SJeff Bonwick * its data checksum because that stage precedes gang assembly. 2116e14bb325SJeff Bonwick * (Presently, nothing actually uses interior data checksums; 2117e14bb325SJeff Bonwick * this is just good hygiene.) 2118fa9e4066Sahrens */ 2119f5383399SBill Moore if (gn != pio->io_gang_leader->io_gang_tree) { 2120770499e1SDan Kimmel abd_t *buf = abd_get_offset(data, offset); 2121770499e1SDan Kimmel 2122e14bb325SJeff Bonwick zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 2123770499e1SDan Kimmel buf, BP_GET_PSIZE(bp)); 2124770499e1SDan Kimmel 2125770499e1SDan Kimmel abd_put(buf); 2126e14bb325SJeff Bonwick } 2127b24ab676SJeff Bonwick /* 2128b24ab676SJeff Bonwick * If we are here to damage data for testing purposes, 2129b24ab676SJeff Bonwick * leave the GBH alone so that we can detect the damage. 2130b24ab676SJeff Bonwick */ 2131b24ab676SJeff Bonwick if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 2132b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 2133fa9e4066Sahrens } else { 2134e14bb325SJeff Bonwick zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 2135770499e1SDan Kimmel abd_get_offset(data, offset), BP_GET_PSIZE(bp), 2136770499e1SDan Kimmel zio_gang_issue_func_done, NULL, pio->io_priority, 2137e14bb325SJeff Bonwick ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2138fa9e4066Sahrens } 2139fa9e4066Sahrens 2140e14bb325SJeff Bonwick return (zio); 2141e14bb325SJeff Bonwick } 2142fa9e4066Sahrens 2143e14bb325SJeff Bonwick /* ARGSUSED */ 2144770499e1SDan Kimmel static zio_t * 2145770499e1SDan Kimmel zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2146770499e1SDan Kimmel uint64_t offset) 2147e14bb325SJeff Bonwick { 2148b24ab676SJeff Bonwick return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 2149b24ab676SJeff Bonwick ZIO_GANG_CHILD_FLAGS(pio))); 2150fa9e4066Sahrens } 2151fa9e4066Sahrens 2152e14bb325SJeff Bonwick /* ARGSUSED */ 2153770499e1SDan Kimmel static zio_t * 2154770499e1SDan Kimmel zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 2155770499e1SDan Kimmel uint64_t offset) 2156fa9e4066Sahrens { 2157e14bb325SJeff Bonwick return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 2158e14bb325SJeff Bonwick NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 2159e14bb325SJeff Bonwick } 2160fa9e4066Sahrens 2161e14bb325SJeff Bonwick static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 2162e14bb325SJeff Bonwick NULL, 2163e14bb325SJeff Bonwick zio_read_gang, 2164e14bb325SJeff Bonwick zio_rewrite_gang, 2165e14bb325SJeff Bonwick zio_free_gang, 2166e14bb325SJeff Bonwick zio_claim_gang, 2167e14bb325SJeff Bonwick NULL 2168e14bb325SJeff Bonwick }; 2169fa9e4066Sahrens 2170e14bb325SJeff Bonwick static void zio_gang_tree_assemble_done(zio_t *zio); 2171fa9e4066Sahrens 2172e14bb325SJeff Bonwick static zio_gang_node_t * 2173e14bb325SJeff Bonwick zio_gang_node_alloc(zio_gang_node_t **gnpp) 2174e14bb325SJeff Bonwick { 2175e14bb325SJeff Bonwick zio_gang_node_t *gn; 2176fa9e4066Sahrens 2177e14bb325SJeff Bonwick ASSERT(*gnpp == NULL); 2178fa9e4066Sahrens 2179e14bb325SJeff Bonwick gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 2180e14bb325SJeff Bonwick gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 2181e14bb325SJeff Bonwick *gnpp = gn; 2182e14bb325SJeff Bonwick 2183e14bb325SJeff Bonwick return (gn); 
2184fa9e4066Sahrens } 2185fa9e4066Sahrens 2186fa9e4066Sahrens static void 2187e14bb325SJeff Bonwick zio_gang_node_free(zio_gang_node_t **gnpp) 2188fa9e4066Sahrens { 2189e14bb325SJeff Bonwick zio_gang_node_t *gn = *gnpp; 2190fa9e4066Sahrens 2191e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2192e14bb325SJeff Bonwick ASSERT(gn->gn_child[g] == NULL); 2193e14bb325SJeff Bonwick 2194e14bb325SJeff Bonwick zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2195e14bb325SJeff Bonwick kmem_free(gn, sizeof (*gn)); 2196e14bb325SJeff Bonwick *gnpp = NULL; 2197fa9e4066Sahrens } 2198fa9e4066Sahrens 2199e14bb325SJeff Bonwick static void 2200e14bb325SJeff Bonwick zio_gang_tree_free(zio_gang_node_t **gnpp) 2201fa9e4066Sahrens { 2202e14bb325SJeff Bonwick zio_gang_node_t *gn = *gnpp; 2203fa9e4066Sahrens 2204e14bb325SJeff Bonwick if (gn == NULL) 2205e14bb325SJeff Bonwick return; 2206fa9e4066Sahrens 2207e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 2208e14bb325SJeff Bonwick zio_gang_tree_free(&gn->gn_child[g]); 2209fa9e4066Sahrens 2210e14bb325SJeff Bonwick zio_gang_node_free(gnpp); 2211fa9e4066Sahrens } 2212fa9e4066Sahrens 2213e14bb325SJeff Bonwick static void 2214f5383399SBill Moore zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 2215fa9e4066Sahrens { 2216e14bb325SJeff Bonwick zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 2217770499e1SDan Kimmel abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2218e14bb325SJeff Bonwick 2219f5383399SBill Moore ASSERT(gio->io_gang_leader == gio); 2220e14bb325SJeff Bonwick ASSERT(BP_IS_GANG(bp)); 2221fa9e4066Sahrens 2222770499e1SDan Kimmel zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2223770499e1SDan Kimmel zio_gang_tree_assemble_done, gn, gio->io_priority, 2224770499e1SDan Kimmel ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2225e14bb325SJeff Bonwick } 2226fa9e4066Sahrens 2227e14bb325SJeff Bonwick static void 2228e14bb325SJeff Bonwick zio_gang_tree_assemble_done(zio_t *zio) 2229e14bb325SJeff Bonwick { 2230f5383399SBill Moore zio_t *gio = zio->io_gang_leader; 2231e14bb325SJeff Bonwick zio_gang_node_t *gn = zio->io_private; 2232e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2233fa9e4066Sahrens 2234f5383399SBill Moore ASSERT(gio == zio_unique_parent(zio)); 2235b24ab676SJeff Bonwick ASSERT(zio->io_child_count == 0); 2236fa9e4066Sahrens 2237e14bb325SJeff Bonwick if (zio->io_error) 2238e14bb325SJeff Bonwick return; 2239fa9e4066Sahrens 2240770499e1SDan Kimmel /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2241e14bb325SJeff Bonwick if (BP_SHOULD_BYTESWAP(bp)) 2242770499e1SDan Kimmel byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2243fa9e4066Sahrens 2244770499e1SDan Kimmel ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2245e14bb325SJeff Bonwick ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 22466e1f5caaSNeil Perrin ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2247e05725b1Sbonwick 2248770499e1SDan Kimmel abd_put(zio->io_abd); 2249770499e1SDan Kimmel 2250e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2251e14bb325SJeff Bonwick blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2252e14bb325SJeff Bonwick if (!BP_IS_GANG(gbp)) 2253e14bb325SJeff Bonwick continue; 2254f5383399SBill Moore zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2255e14bb325SJeff Bonwick } 2256fa9e4066Sahrens } 2257fa9e4066Sahrens 2258e14bb325SJeff Bonwick static void 2259770499e1SDan Kimmel zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, 
abd_t *data, 2260770499e1SDan Kimmel uint64_t offset) 2261fa9e4066Sahrens { 2262f5383399SBill Moore zio_t *gio = pio->io_gang_leader; 2263e14bb325SJeff Bonwick zio_t *zio; 2264fa9e4066Sahrens 2265e14bb325SJeff Bonwick ASSERT(BP_IS_GANG(bp) == !!gn); 2266f5383399SBill Moore ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2267f5383399SBill Moore ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2268fa9e4066Sahrens 2269e14bb325SJeff Bonwick /* 2270e14bb325SJeff Bonwick * If you're a gang header, your data is in gn->gn_gbh. 2271e14bb325SJeff Bonwick * If you're a gang member, your data is in 'data' and gn == NULL. 2272e14bb325SJeff Bonwick */ 2273770499e1SDan Kimmel zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2274fa9e4066Sahrens 2275e14bb325SJeff Bonwick if (gn != NULL) { 22766e1f5caaSNeil Perrin ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2277fa9e4066Sahrens 2278e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2279e14bb325SJeff Bonwick blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2280e14bb325SJeff Bonwick if (BP_IS_HOLE(gbp)) 2281e14bb325SJeff Bonwick continue; 2282770499e1SDan Kimmel zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2283770499e1SDan Kimmel offset); 2284770499e1SDan Kimmel offset += BP_GET_PSIZE(gbp); 2285e14bb325SJeff Bonwick } 2286fa9e4066Sahrens } 2287fa9e4066Sahrens 2288f5383399SBill Moore if (gn == gio->io_gang_tree) 2289770499e1SDan Kimmel ASSERT3U(gio->io_size, ==, offset); 2290e05725b1Sbonwick 2291e14bb325SJeff Bonwick if (zio != pio) 2292e14bb325SJeff Bonwick zio_nowait(zio); 2293fa9e4066Sahrens } 2294fa9e4066Sahrens 2295e05725b1Sbonwick static int 2296e14bb325SJeff Bonwick zio_gang_assemble(zio_t *zio) 2297fa9e4066Sahrens { 2298e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2299fa9e4066Sahrens 2300f5383399SBill Moore ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2301f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2302f5383399SBill Moore 2303f5383399SBill Moore zio->io_gang_leader = zio; 2304fa9e4066Sahrens 2305e14bb325SJeff Bonwick zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2306e05725b1Sbonwick 2307e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2308fa9e4066Sahrens } 2309fa9e4066Sahrens 2310e05725b1Sbonwick static int 2311e14bb325SJeff Bonwick zio_gang_issue(zio_t *zio) 2312fa9e4066Sahrens { 2313e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2314fa9e4066Sahrens 2315d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { 2316e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 2317d6e1c446SGeorge Wilson } 2318fa9e4066Sahrens 2319f5383399SBill Moore ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2320f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2321fa9e4066Sahrens 2322e14bb325SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2323770499e1SDan Kimmel zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2324770499e1SDan Kimmel 0); 2325e14bb325SJeff Bonwick else 2326f5383399SBill Moore zio_gang_tree_free(&zio->io_gang_tree); 2327fa9e4066Sahrens 2328e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2329e05725b1Sbonwick 2330e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2331fa9e4066Sahrens } 2332fa9e4066Sahrens 2333fa9e4066Sahrens static void 2334e14bb325SJeff Bonwick zio_write_gang_member_ready(zio_t *zio) 2335fa9e4066Sahrens { 2336a3f829aeSBill Moore zio_t *pio = zio_unique_parent(zio); 2337f5383399SBill Moore zio_t *gio = zio->io_gang_leader; 
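
	/*
	 * Each DVA of the parent's gang header must account for the space
	 * consumed by the corresponding DVA of every member below it, so
	 * as members become ready we fold their asizes into the parent's
	 * DVAs under io_lock.
	 */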
233844cd46caSbillm dva_t *cdva = zio->io_bp->blk_dva; 233944cd46caSbillm dva_t *pdva = pio->io_bp->blk_dva; 2340fa9e4066Sahrens uint64_t asize; 2341fa9e4066Sahrens 2342e14bb325SJeff Bonwick if (BP_IS_HOLE(zio->io_bp)) 2343e14bb325SJeff Bonwick return; 2344e14bb325SJeff Bonwick 2345e14bb325SJeff Bonwick ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2346e14bb325SJeff Bonwick 2347e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2348b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2349b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2350b24ab676SJeff Bonwick ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 235144cd46caSbillm ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2352fa9e4066Sahrens 2353fa9e4066Sahrens mutex_enter(&pio->io_lock); 2354e14bb325SJeff Bonwick for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 235544cd46caSbillm ASSERT(DVA_GET_GANG(&pdva[d])); 235644cd46caSbillm asize = DVA_GET_ASIZE(&pdva[d]); 235744cd46caSbillm asize += DVA_GET_ASIZE(&cdva[d]); 235844cd46caSbillm DVA_SET_ASIZE(&pdva[d], asize); 235944cd46caSbillm } 2360fa9e4066Sahrens mutex_exit(&pio->io_lock); 2361fa9e4066Sahrens } 2362fa9e4066Sahrens 2363770499e1SDan Kimmel static void 2364770499e1SDan Kimmel zio_write_gang_done(zio_t *zio) 2365770499e1SDan Kimmel { 23667341a7deSBrad Lewis /* 23677341a7deSBrad Lewis * The io_abd field will be NULL for a zio with no data. The io_flags 23687341a7deSBrad Lewis * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't 23697341a7deSBrad Lewis * check for it here as it is cleared in zio_ready. 23707341a7deSBrad Lewis */ 23717341a7deSBrad Lewis if (zio->io_abd != NULL) 23727341a7deSBrad Lewis abd_put(zio->io_abd); 2373770499e1SDan Kimmel } 2374770499e1SDan Kimmel 23750a4e9518Sgw static int 2376e14bb325SJeff Bonwick zio_write_gang_block(zio_t *pio) 2377fa9e4066Sahrens { 2378e14bb325SJeff Bonwick spa_t *spa = pio->io_spa; 23790f7643c7SGeorge Wilson metaslab_class_t *mc = spa_normal_class(spa); 2380e14bb325SJeff Bonwick blkptr_t *bp = pio->io_bp; 2381f5383399SBill Moore zio_t *gio = pio->io_gang_leader; 2382e14bb325SJeff Bonwick zio_t *zio; 2383e14bb325SJeff Bonwick zio_gang_node_t *gn, **gnpp; 2384fa9e4066Sahrens zio_gbh_phys_t *gbh; 2385770499e1SDan Kimmel abd_t *gbh_abd; 2386e14bb325SJeff Bonwick uint64_t txg = pio->io_txg; 2387e14bb325SJeff Bonwick uint64_t resid = pio->io_size; 2388e14bb325SJeff Bonwick uint64_t lsize; 2389b24ab676SJeff Bonwick int copies = gio->io_prop.zp_copies; 2390b24ab676SJeff Bonwick int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 2391e14bb325SJeff Bonwick zio_prop_t zp; 2392fa9e4066Sahrens int error; 23937341a7deSBrad Lewis boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); 2394fa9e4066Sahrens 2395*eb633035STom Caputi /* 2396*eb633035STom Caputi * encrypted blocks need DVA[2] free so encrypted gang headers can't 2397*eb633035STom Caputi * have a third copy. 
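 * (The crypt parameters that zio_crypt_encode_params_bp() stores in
 * the bp -- the salt and IV -- live in DVA[2], so at most
 * SPA_DVAS_PER_BP - 1, i.e. two, DVAs are usable for the header.)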
2398*eb633035STom Caputi */ 2399*eb633035STom Caputi if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP) 2400*eb633035STom Caputi gbh_copies = SPA_DVAS_PER_BP - 1; 2401*eb633035STom Caputi 24020f7643c7SGeorge Wilson int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 24030f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 24040f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 24057341a7deSBrad Lewis ASSERT(has_data); 24060f7643c7SGeorge Wilson 24070f7643c7SGeorge Wilson flags |= METASLAB_ASYNC_ALLOC; 2408e914ace2STim Schumacher VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator], 2409f78cdc34SPaul Dagnelie pio)); 24100f7643c7SGeorge Wilson 24110f7643c7SGeorge Wilson /* 24120f7643c7SGeorge Wilson * The logical zio has already placed a reservation for 24130f7643c7SGeorge Wilson * 'copies' allocation slots but gang blocks may require 24140f7643c7SGeorge Wilson * additional copies. These additional copies 24150f7643c7SGeorge Wilson * (i.e. gbh_copies - copies) are guaranteed to succeed 24160f7643c7SGeorge Wilson * since metaslab_class_throttle_reserve() always allows 24170f7643c7SGeorge Wilson * additional reservations for gang blocks. 24180f7643c7SGeorge Wilson */ 24190f7643c7SGeorge Wilson VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 2420f78cdc34SPaul Dagnelie pio->io_allocator, pio, flags)); 24210f7643c7SGeorge Wilson } 24220f7643c7SGeorge Wilson 24230f7643c7SGeorge Wilson error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 24248363e80aSGeorge Wilson bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 2425f78cdc34SPaul Dagnelie &pio->io_alloc_list, pio, pio->io_allocator); 2426e05725b1Sbonwick if (error) { 24270f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 24280f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 24297341a7deSBrad Lewis ASSERT(has_data); 24300f7643c7SGeorge Wilson 24310f7643c7SGeorge Wilson /* 24320f7643c7SGeorge Wilson * If we failed to allocate the gang block header then 24330f7643c7SGeorge Wilson * we remove any additional allocation reservations that 24340f7643c7SGeorge Wilson * we placed here. The original reservation will 24350f7643c7SGeorge Wilson * be removed when the logical I/O goes to the ready 24360f7643c7SGeorge Wilson * stage. 24370f7643c7SGeorge Wilson */ 24380f7643c7SGeorge Wilson metaslab_class_throttle_unreserve(mc, 2439f78cdc34SPaul Dagnelie gbh_copies - copies, pio->io_allocator, pio); 24400f7643c7SGeorge Wilson } 2441e14bb325SJeff Bonwick pio->io_error = error; 2442e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2443e05725b1Sbonwick } 2444fa9e4066Sahrens 2445f5383399SBill Moore if (pio == gio) { 2446f5383399SBill Moore gnpp = &gio->io_gang_tree; 2447e14bb325SJeff Bonwick } else { 2448e14bb325SJeff Bonwick gnpp = pio->io_private; 2449e14bb325SJeff Bonwick ASSERT(pio->io_ready == zio_write_gang_member_ready); 2450fa9e4066Sahrens } 2451fa9e4066Sahrens 2452e14bb325SJeff Bonwick gn = zio_gang_node_alloc(gnpp); 2453e14bb325SJeff Bonwick gbh = gn->gn_gbh; 2454e14bb325SJeff Bonwick bzero(gbh, SPA_GANGBLOCKSIZE); 2455770499e1SDan Kimmel gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2456fa9e4066Sahrens 2457e14bb325SJeff Bonwick /* 2458e14bb325SJeff Bonwick * Create the gang header. 
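 * Its DVAs were already allocated by metaslab_alloc() above, so the
 * header is issued as a rewrite of bp rather than as another
 * allocating write.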
2459e14bb325SJeff Bonwick */ 2460770499e1SDan Kimmel zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2461770499e1SDan Kimmel zio_write_gang_done, NULL, pio->io_priority, 2462770499e1SDan Kimmel ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2463fa9e4066Sahrens 2464e14bb325SJeff Bonwick /* 2465e14bb325SJeff Bonwick * Create and nowait the gang children. 2466e14bb325SJeff Bonwick */ 2467e14bb325SJeff Bonwick for (int g = 0; resid != 0; resid -= lsize, g++) { 2468e14bb325SJeff Bonwick lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2469e14bb325SJeff Bonwick SPA_MINBLOCKSIZE); 2470e14bb325SJeff Bonwick ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2471e14bb325SJeff Bonwick 2472f5383399SBill Moore zp.zp_checksum = gio->io_prop.zp_checksum; 2473e14bb325SJeff Bonwick zp.zp_compress = ZIO_COMPRESS_OFF; 2474e14bb325SJeff Bonwick zp.zp_type = DMU_OT_NONE; 2475e14bb325SJeff Bonwick zp.zp_level = 0; 2476b24ab676SJeff Bonwick zp.zp_copies = gio->io_prop.zp_copies; 247780901aeaSGeorge Wilson zp.zp_dedup = B_FALSE; 247880901aeaSGeorge Wilson zp.zp_dedup_verify = B_FALSE; 247980901aeaSGeorge Wilson zp.zp_nopwrite = B_FALSE; 2480*eb633035STom Caputi zp.zp_encrypt = gio->io_prop.zp_encrypt; 2481*eb633035STom Caputi zp.zp_byteorder = gio->io_prop.zp_byteorder; 2482*eb633035STom Caputi bzero(zp.zp_salt, ZIO_DATA_SALT_LEN); 2483*eb633035STom Caputi bzero(zp.zp_iv, ZIO_DATA_IV_LEN); 2484*eb633035STom Caputi bzero(zp.zp_mac, ZIO_DATA_MAC_LEN); 2485e14bb325SJeff Bonwick 24860f7643c7SGeorge Wilson zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 24877341a7deSBrad Lewis has_data ? abd_get_offset(pio->io_abd, pio->io_size - 24887341a7deSBrad Lewis resid) : NULL, lsize, lsize, &zp, 24897341a7deSBrad Lewis zio_write_gang_member_ready, NULL, NULL, 2490770499e1SDan Kimmel zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 24910f7643c7SGeorge Wilson ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 24920f7643c7SGeorge Wilson 24930f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 24940f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 24957341a7deSBrad Lewis ASSERT(has_data); 24960f7643c7SGeorge Wilson 24970f7643c7SGeorge Wilson /* 24980f7643c7SGeorge Wilson * Gang children won't throttle but we should 24990f7643c7SGeorge Wilson * account for their work, so reserve an allocation 25000f7643c7SGeorge Wilson * slot for them here. 25010f7643c7SGeorge Wilson */ 25020f7643c7SGeorge Wilson VERIFY(metaslab_class_throttle_reserve(mc, 2503f78cdc34SPaul Dagnelie zp.zp_copies, cio->io_allocator, cio, flags)); 25040f7643c7SGeorge Wilson } 25050f7643c7SGeorge Wilson zio_nowait(cio); 2506e14bb325SJeff Bonwick } 2507e05725b1Sbonwick 250844cd46caSbillm /* 2509e14bb325SJeff Bonwick * Set pio's pipeline to just wait for zio to finish. 251044cd46caSbillm */ 2511e14bb325SJeff Bonwick pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2512e14bb325SJeff Bonwick 2513e14bb325SJeff Bonwick zio_nowait(zio); 2514e14bb325SJeff Bonwick 2515e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2516fa9e4066Sahrens } 2517fa9e4066Sahrens 251880901aeaSGeorge Wilson /* 251945818ee1SMatthew Ahrens * The zio_nop_write stage in the pipeline determines if allocating a 252045818ee1SMatthew Ahrens * new bp is necessary. The nopwrite feature can handle writes in 252145818ee1SMatthew Ahrens * either syncing or open context (i.e. zil writes) and as a result is 252245818ee1SMatthew Ahrens * mutually exclusive with dedup. 
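 * (Dedup, by contrast, is only resolved in syncing context via the
 * DDT; hence the ASSERT(!zp->zp_dedup) below.)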
252345818ee1SMatthew Ahrens * 252445818ee1SMatthew Ahrens * By leveraging a cryptographically secure checksum, such as SHA256, we 252545818ee1SMatthew Ahrens * can compare the checksums of the new data and the old to determine if 252645818ee1SMatthew Ahrens * allocating a new block is required. Note that our requirements for 252745818ee1SMatthew Ahrens * cryptographic strength are fairly weak: there can't be any accidental 252845818ee1SMatthew Ahrens * hash collisions, but we don't need to be secure against intentional 252945818ee1SMatthew Ahrens * (malicious) collisions. To trigger a nopwrite, you have to be able 253045818ee1SMatthew Ahrens * to write the file to begin with, and triggering an incorrect (hash 253145818ee1SMatthew Ahrens * collision) nopwrite is no worse than simply writing to the file. 253245818ee1SMatthew Ahrens * That said, there are no known attacks against the checksum algorithms 253345818ee1SMatthew Ahrens * used for nopwrite, assuming that the salt and the checksums 253445818ee1SMatthew Ahrens * themselves remain secret. 253580901aeaSGeorge Wilson */ 253680901aeaSGeorge Wilson static int 253780901aeaSGeorge Wilson zio_nop_write(zio_t *zio) 253880901aeaSGeorge Wilson { 253980901aeaSGeorge Wilson blkptr_t *bp = zio->io_bp; 254080901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 254180901aeaSGeorge Wilson zio_prop_t *zp = &zio->io_prop; 254280901aeaSGeorge Wilson 254380901aeaSGeorge Wilson ASSERT(BP_GET_LEVEL(bp) == 0); 254480901aeaSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 254580901aeaSGeorge Wilson ASSERT(zp->zp_nopwrite); 254680901aeaSGeorge Wilson ASSERT(!zp->zp_dedup); 254780901aeaSGeorge Wilson ASSERT(zio->io_bp_override == NULL); 254880901aeaSGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 254980901aeaSGeorge Wilson 255080901aeaSGeorge Wilson /* 255180901aeaSGeorge Wilson * Check to see if the original bp and the new bp have matching 255280901aeaSGeorge Wilson * characteristics (i.e. same checksum, compression algorithms, etc). 255380901aeaSGeorge Wilson * If they don't then just continue with the pipeline which will 255480901aeaSGeorge Wilson * allocate a new bp. 255580901aeaSGeorge Wilson */ 255680901aeaSGeorge Wilson if (BP_IS_HOLE(bp_orig) || 255745818ee1SMatthew Ahrens !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 255845818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE) || 2559*eb633035STom Caputi BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) || 256080901aeaSGeorge Wilson BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 256180901aeaSGeorge Wilson BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 256280901aeaSGeorge Wilson BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 256380901aeaSGeorge Wilson zp->zp_copies != BP_GET_NDVAS(bp_orig)) 256480901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 256580901aeaSGeorge Wilson 256680901aeaSGeorge Wilson /* 256780901aeaSGeorge Wilson * If the checksums match then reset the pipeline so that we 256880901aeaSGeorge Wilson * avoid allocating a new bp and issuing any I/O. 
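 * All we do on a hit is copy the old bp back over the new one and tag
 * the zio with ZIO_FLAG_NOPWRITE so that later stages know no new
 * block was allocated.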
256980901aeaSGeorge Wilson */ 257080901aeaSGeorge Wilson if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 257145818ee1SMatthew Ahrens ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 257245818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE); 257380901aeaSGeorge Wilson ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 257480901aeaSGeorge Wilson ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 257580901aeaSGeorge Wilson ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 257680901aeaSGeorge Wilson ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 257780901aeaSGeorge Wilson sizeof (uint64_t)) == 0); 257880901aeaSGeorge Wilson 257980901aeaSGeorge Wilson *bp = *bp_orig; 258080901aeaSGeorge Wilson zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 258180901aeaSGeorge Wilson zio->io_flags |= ZIO_FLAG_NOPWRITE; 258280901aeaSGeorge Wilson } 258380901aeaSGeorge Wilson 258480901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 258580901aeaSGeorge Wilson } 258680901aeaSGeorge Wilson 2587fa9e4066Sahrens /* 2588fa9e4066Sahrens * ========================================================================== 2589b24ab676SJeff Bonwick * Dedup 2590fa9e4066Sahrens * ========================================================================== 2591fa9e4066Sahrens */ 2592b24ab676SJeff Bonwick static void 2593b24ab676SJeff Bonwick zio_ddt_child_read_done(zio_t *zio) 2594b24ab676SJeff Bonwick { 2595b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2596b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2597b24ab676SJeff Bonwick ddt_phys_t *ddp; 2598b24ab676SJeff Bonwick zio_t *pio = zio_unique_parent(zio); 2599b24ab676SJeff Bonwick 2600b24ab676SJeff Bonwick mutex_enter(&pio->io_lock); 2601b24ab676SJeff Bonwick ddp = ddt_phys_select(dde, bp); 2602b24ab676SJeff Bonwick if (zio->io_error == 0) 2603b24ab676SJeff Bonwick ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2604770499e1SDan Kimmel 2605770499e1SDan Kimmel if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 2606770499e1SDan Kimmel dde->dde_repair_abd = zio->io_abd; 2607b24ab676SJeff Bonwick else 2608770499e1SDan Kimmel abd_free(zio->io_abd); 2609b24ab676SJeff Bonwick mutex_exit(&pio->io_lock); 2610b24ab676SJeff Bonwick } 2611b24ab676SJeff Bonwick 2612b24ab676SJeff Bonwick static int 2613b24ab676SJeff Bonwick zio_ddt_read_start(zio_t *zio) 2614b24ab676SJeff Bonwick { 2615b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2616b24ab676SJeff Bonwick 2617b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2618b24ab676SJeff Bonwick ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2619b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2620b24ab676SJeff Bonwick 2621b24ab676SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_DDT]) { 2622b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2623b24ab676SJeff Bonwick ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2624b24ab676SJeff Bonwick ddt_phys_t *ddp = dde->dde_phys; 2625b24ab676SJeff Bonwick ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2626b24ab676SJeff Bonwick blkptr_t blk; 2627b24ab676SJeff Bonwick 2628b24ab676SJeff Bonwick ASSERT(zio->io_vsd == NULL); 2629b24ab676SJeff Bonwick zio->io_vsd = dde; 2630b24ab676SJeff Bonwick 2631b24ab676SJeff Bonwick if (ddp_self == NULL) 2632b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2633b24ab676SJeff Bonwick 2634b24ab676SJeff Bonwick for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2635b24ab676SJeff Bonwick if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2636b24ab676SJeff Bonwick continue; 2637bbfd46c4SJeff Bonwick 
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2638bbfd46c4SJeff Bonwick &blk); 2639b24ab676SJeff Bonwick zio_nowait(zio_read(zio, zio->io_spa, &blk, 2640770499e1SDan Kimmel abd_alloc_for_io(zio->io_size, B_TRUE), 2641770499e1SDan Kimmel zio->io_size, zio_ddt_child_read_done, dde, 2642770499e1SDan Kimmel zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 2643770499e1SDan Kimmel ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 2644b24ab676SJeff Bonwick } 2645b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2646b24ab676SJeff Bonwick } 2647b24ab676SJeff Bonwick 2648b24ab676SJeff Bonwick zio_nowait(zio_read(zio, zio->io_spa, bp, 2649770499e1SDan Kimmel zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 2650b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2651b24ab676SJeff Bonwick 2652b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2653b24ab676SJeff Bonwick } 2654e14bb325SJeff Bonwick 2655b24ab676SJeff Bonwick static int 2656b24ab676SJeff Bonwick zio_ddt_read_done(zio_t *zio) 2657b24ab676SJeff Bonwick { 2658b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2659b24ab676SJeff Bonwick 2660d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { 2661b24ab676SJeff Bonwick return (ZIO_PIPELINE_STOP); 2662d6e1c446SGeorge Wilson } 2663b24ab676SJeff Bonwick 2664b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2665b24ab676SJeff Bonwick ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2666b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2667b24ab676SJeff Bonwick 2668b24ab676SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_DDT]) { 2669b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2670b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_vsd; 2671b24ab676SJeff Bonwick if (ddt == NULL) { 2672b16da2e2SGeorge Wilson ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2673b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2674b24ab676SJeff Bonwick } 2675b24ab676SJeff Bonwick if (dde == NULL) { 2676b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 267735a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2678b24ab676SJeff Bonwick return (ZIO_PIPELINE_STOP); 2679b24ab676SJeff Bonwick } 2680770499e1SDan Kimmel if (dde->dde_repair_abd != NULL) { 2681770499e1SDan Kimmel abd_copy(zio->io_abd, dde->dde_repair_abd, 2682770499e1SDan Kimmel zio->io_size); 2683b24ab676SJeff Bonwick zio->io_child_error[ZIO_CHILD_DDT] = 0; 2684b24ab676SJeff Bonwick } 2685b24ab676SJeff Bonwick ddt_repair_done(ddt, dde); 2686b24ab676SJeff Bonwick zio->io_vsd = NULL; 2687b24ab676SJeff Bonwick } 2688b24ab676SJeff Bonwick 2689b24ab676SJeff Bonwick ASSERT(zio->io_vsd == NULL); 2690b24ab676SJeff Bonwick 2691b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2692b24ab676SJeff Bonwick } 2693b24ab676SJeff Bonwick 2694b24ab676SJeff Bonwick static boolean_t 2695b24ab676SJeff Bonwick zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2696b24ab676SJeff Bonwick { 2697b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 2698*eb633035STom Caputi boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); 26995602294fSDan Kimmel 27005602294fSDan Kimmel /* We should never get a raw, override zio */ 27015602294fSDan Kimmel ASSERT(!(zio->io_bp_override && do_raw)); 2702b24ab676SJeff Bonwick 2703b24ab676SJeff Bonwick /* 2704b24ab676SJeff Bonwick * Note: we compare the original data, not the transformed data, 2705b24ab676SJeff Bonwick * because when zio->io_bp is an override bp, we will not have 2706b24ab676SJeff Bonwick * 
pushed the I/O transforms. That's an important optimization 2707b24ab676SJeff Bonwick * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2708*eb633035STom Caputi * However, we should never get a raw, override zio so in these 2709*eb633035STom Caputi * cases we can compare the io_data directly. This is useful because 2710*eb633035STom Caputi * it allows us to do dedup verification even if we don't have access 2711*eb633035STom Caputi * to the original data (for instance, if the encryption keys aren't 2712*eb633035STom Caputi * loaded). 2713b24ab676SJeff Bonwick */ 2714*eb633035STom Caputi 2715b24ab676SJeff Bonwick for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2716b24ab676SJeff Bonwick zio_t *lio = dde->dde_lead_zio[p]; 2717b24ab676SJeff Bonwick 2718*eb633035STom Caputi if (lio != NULL && do_raw) { 2719*eb633035STom Caputi return (lio->io_size != zio->io_size || 2720*eb633035STom Caputi abd_cmp(zio->io_abd, lio->io_abd, 2721*eb633035STom Caputi zio->io_size) != 0); 2722*eb633035STom Caputi } else if (lio != NULL) { 2723b24ab676SJeff Bonwick return (lio->io_orig_size != zio->io_orig_size || 2724770499e1SDan Kimmel abd_cmp(zio->io_orig_abd, lio->io_orig_abd, 2725b24ab676SJeff Bonwick zio->io_orig_size) != 0); 2726b24ab676SJeff Bonwick } 2727b24ab676SJeff Bonwick } 2728b24ab676SJeff Bonwick 2729b24ab676SJeff Bonwick for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2730b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2731b24ab676SJeff Bonwick 2732*eb633035STom Caputi if (ddp->ddp_phys_birth != 0 && do_raw) { 2733*eb633035STom Caputi blkptr_t blk = *zio->io_bp; 2734*eb633035STom Caputi uint64_t psize; 2735*eb633035STom Caputi abd_t *tmpabd; 2736*eb633035STom Caputi int error; 2737*eb633035STom Caputi 2738*eb633035STom Caputi ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2739*eb633035STom Caputi psize = BP_GET_PSIZE(&blk); 2740*eb633035STom Caputi 2741*eb633035STom Caputi if (psize != zio->io_size) 2742*eb633035STom Caputi return (B_TRUE); 2743*eb633035STom Caputi 2744*eb633035STom Caputi ddt_exit(ddt); 2745*eb633035STom Caputi 2746*eb633035STom Caputi tmpabd = abd_alloc_for_io(psize, B_TRUE); 2747*eb633035STom Caputi 2748*eb633035STom Caputi error = zio_wait(zio_read(NULL, spa, &blk, tmpabd, 2749*eb633035STom Caputi psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, 2750*eb633035STom Caputi ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 2751*eb633035STom Caputi ZIO_FLAG_RAW, &zio->io_bookmark)); 2752*eb633035STom Caputi 2753*eb633035STom Caputi if (error == 0) { 2754*eb633035STom Caputi if (abd_cmp(tmpabd, zio->io_abd, psize) != 0) 2755*eb633035STom Caputi error = SET_ERROR(ENOENT); 2756*eb633035STom Caputi } 2757*eb633035STom Caputi 2758*eb633035STom Caputi abd_free(tmpabd); 2759*eb633035STom Caputi ddt_enter(ddt); 2760*eb633035STom Caputi return (error != 0); 2761*eb633035STom Caputi } else if (ddp->ddp_phys_birth != 0) { 2762b24ab676SJeff Bonwick arc_buf_t *abuf = NULL; 27637adb730bSGeorge Wilson arc_flags_t aflags = ARC_FLAG_WAIT; 27645602294fSDan Kimmel int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2765b24ab676SJeff Bonwick blkptr_t blk = *zio->io_bp; 2766b24ab676SJeff Bonwick int error; 2767b24ab676SJeff Bonwick 2768b24ab676SJeff Bonwick ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2769b24ab676SJeff Bonwick 2770*eb633035STom Caputi if (BP_GET_LSIZE(&blk) != zio->io_orig_size) 2771*eb633035STom Caputi return (B_TRUE); 2772*eb633035STom Caputi 2773b24ab676SJeff Bonwick ddt_exit(ddt); 2774b24ab676SJeff Bonwick 27755602294fSDan Kimmel /* 
27765602294fSDan Kimmel * Intuitively, it would make more sense to compare 2777770499e1SDan Kimmel * io_abd than io_orig_abd in the raw case since you 27785602294fSDan Kimmel * don't want to look at any transformations that have 27795602294fSDan Kimmel * happened to the data. However, for raw I/Os the 2780770499e1SDan Kimmel * data will actually be the same in io_abd and 2781770499e1SDan Kimmel * io_orig_abd, so all we have to do is issue this as 27825602294fSDan Kimmel * a raw ARC read. 27835602294fSDan Kimmel */ 27845602294fSDan Kimmel if (do_raw) { 27855602294fSDan Kimmel zio_flags |= ZIO_FLAG_RAW; 27865602294fSDan Kimmel ASSERT3U(zio->io_size, ==, zio->io_orig_size); 2787770499e1SDan Kimmel ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd, 27885602294fSDan Kimmel zio->io_size)); 27895602294fSDan Kimmel ASSERT3P(zio->io_transform_stack, ==, NULL); 27905602294fSDan Kimmel } 27915602294fSDan Kimmel 27921b912ec7SGeorge Wilson error = arc_read(NULL, spa, &blk, 2793b24ab676SJeff Bonwick arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 27945602294fSDan Kimmel zio_flags, &aflags, &zio->io_bookmark); 2795b24ab676SJeff Bonwick 2796b24ab676SJeff Bonwick if (error == 0) { 2797*eb633035STom Caputi if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 2798b24ab676SJeff Bonwick zio->io_orig_size) != 0) 2799*eb633035STom Caputi error = SET_ERROR(ENOENT); 2800dcbf3bd6SGeorge Wilson arc_buf_destroy(abuf, &abuf); 2801b24ab676SJeff Bonwick } 2802b24ab676SJeff Bonwick 2803b24ab676SJeff Bonwick ddt_enter(ddt); 2804b24ab676SJeff Bonwick return (error != 0); 2805b24ab676SJeff Bonwick } 2806b24ab676SJeff Bonwick } 2807b24ab676SJeff Bonwick 2808b24ab676SJeff Bonwick return (B_FALSE); 2809b24ab676SJeff Bonwick } 2810b24ab676SJeff Bonwick 2811b24ab676SJeff Bonwick static void 2812b24ab676SJeff Bonwick zio_ddt_child_write_ready(zio_t *zio) 2813b24ab676SJeff Bonwick { 2814b24ab676SJeff Bonwick int p = zio->io_prop.zp_copies; 2815b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2816b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2817b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2818b24ab676SJeff Bonwick zio_t *pio; 2819b24ab676SJeff Bonwick 2820b24ab676SJeff Bonwick if (zio->io_error) 2821b24ab676SJeff Bonwick return; 2822b24ab676SJeff Bonwick 2823b24ab676SJeff Bonwick ddt_enter(ddt); 2824b24ab676SJeff Bonwick 2825b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2826b24ab676SJeff Bonwick 2827b24ab676SJeff Bonwick ddt_phys_fill(ddp, zio->io_bp); 2828b24ab676SJeff Bonwick 28290f7643c7SGeorge Wilson zio_link_t *zl = NULL; 28300f7643c7SGeorge Wilson while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2831b24ab676SJeff Bonwick ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2832b24ab676SJeff Bonwick 2833b24ab676SJeff Bonwick ddt_exit(ddt); 2834b24ab676SJeff Bonwick } 2835b24ab676SJeff Bonwick 2836b24ab676SJeff Bonwick static void 2837b24ab676SJeff Bonwick zio_ddt_child_write_done(zio_t *zio) 2838b24ab676SJeff Bonwick { 2839b24ab676SJeff Bonwick int p = zio->io_prop.zp_copies; 2840b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2841b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2842b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2843b24ab676SJeff Bonwick 2844b24ab676SJeff Bonwick ddt_enter(ddt); 2845b24ab676SJeff Bonwick 2846b24ab676SJeff Bonwick ASSERT(ddp->ddp_refcnt == 0); 2847b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2848b24ab676SJeff Bonwick dde->dde_lead_zio[p] = NULL; 2849b24ab676SJeff Bonwick 2850b24ab676SJeff 
Bonwick if (zio->io_error == 0) { 28510f7643c7SGeorge Wilson zio_link_t *zl = NULL; 28520f7643c7SGeorge Wilson while (zio_walk_parents(zio, &zl) != NULL) 2853b24ab676SJeff Bonwick ddt_phys_addref(ddp); 2854b24ab676SJeff Bonwick } else { 2855b24ab676SJeff Bonwick ddt_phys_clear(ddp); 2856b24ab676SJeff Bonwick } 2857b24ab676SJeff Bonwick 2858b24ab676SJeff Bonwick ddt_exit(ddt); 2859b24ab676SJeff Bonwick } 2860b24ab676SJeff Bonwick 2861b24ab676SJeff Bonwick static void 2862b24ab676SJeff Bonwick zio_ddt_ditto_write_done(zio_t *zio) 2863b24ab676SJeff Bonwick { 2864b24ab676SJeff Bonwick int p = DDT_PHYS_DITTO; 2865b24ab676SJeff Bonwick zio_prop_t *zp = &zio->io_prop; 2866b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2867b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2868b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2869b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2870b24ab676SJeff Bonwick ddt_key_t *ddk = &dde->dde_key; 2871b24ab676SJeff Bonwick 2872b24ab676SJeff Bonwick ddt_enter(ddt); 2873b24ab676SJeff Bonwick 2874b24ab676SJeff Bonwick ASSERT(ddp->ddp_refcnt == 0); 2875b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2876b24ab676SJeff Bonwick dde->dde_lead_zio[p] = NULL; 2877b24ab676SJeff Bonwick 2878b24ab676SJeff Bonwick if (zio->io_error == 0) { 2879b24ab676SJeff Bonwick ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2880b24ab676SJeff Bonwick ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2881b24ab676SJeff Bonwick ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2882b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0) 2883b24ab676SJeff Bonwick ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2884b24ab676SJeff Bonwick ddt_phys_fill(ddp, bp); 2885b24ab676SJeff Bonwick } 2886b24ab676SJeff Bonwick 2887b24ab676SJeff Bonwick ddt_exit(ddt); 2888b24ab676SJeff Bonwick } 2889b24ab676SJeff Bonwick 2890b24ab676SJeff Bonwick static int 2891b24ab676SJeff Bonwick zio_ddt_write(zio_t *zio) 2892b24ab676SJeff Bonwick { 2893b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 2894b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2895b24ab676SJeff Bonwick uint64_t txg = zio->io_txg; 2896b24ab676SJeff Bonwick zio_prop_t *zp = &zio->io_prop; 2897b24ab676SJeff Bonwick int p = zp->zp_copies; 2898b24ab676SJeff Bonwick int ditto_copies; 2899b24ab676SJeff Bonwick zio_t *cio = NULL; 2900b24ab676SJeff Bonwick zio_t *dio = NULL; 2901b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(spa, bp); 2902b24ab676SJeff Bonwick ddt_entry_t *dde; 2903b24ab676SJeff Bonwick ddt_phys_t *ddp; 2904b24ab676SJeff Bonwick 2905b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2906b24ab676SJeff Bonwick ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2907b24ab676SJeff Bonwick ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 29085602294fSDan Kimmel ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 2909b24ab676SJeff Bonwick 2910b24ab676SJeff Bonwick ddt_enter(ddt); 2911b24ab676SJeff Bonwick dde = ddt_lookup(ddt, bp, B_TRUE); 2912b24ab676SJeff Bonwick ddp = &dde->dde_phys[p]; 2913b24ab676SJeff Bonwick 2914b24ab676SJeff Bonwick if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2915b24ab676SJeff Bonwick /* 2916b24ab676SJeff Bonwick * If we're using a weak checksum, upgrade to a strong checksum 2917b24ab676SJeff Bonwick * and try again. If we're already using a strong checksum, 2918b24ab676SJeff Bonwick * we can't resolve it, so just convert to an ordinary write. 2919b24ab676SJeff Bonwick * (And automatically e-mail a paper to Nature?) 
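 * The upgrade path rewinds the pipeline to ZIO_STAGE_OPEN with a
 * zeroed bp so the write is redone from scratch with the stronger
 * checksum; the fallback path just clears the dedup bit and proceeds
 * through the ordinary write pipeline.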
2920b24ab676SJeff Bonwick */ 292145818ee1SMatthew Ahrens if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 292245818ee1SMatthew Ahrens ZCHECKSUM_FLAG_DEDUP)) { 2923b24ab676SJeff Bonwick zp->zp_checksum = spa_dedup_checksum(spa); 2924b24ab676SJeff Bonwick zio_pop_transforms(zio); 2925b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_OPEN; 2926b24ab676SJeff Bonwick BP_ZERO(bp); 2927b24ab676SJeff Bonwick } else { 292880901aeaSGeorge Wilson zp->zp_dedup = B_FALSE; 29295602294fSDan Kimmel BP_SET_DEDUP(bp, B_FALSE); 2930b24ab676SJeff Bonwick } 29315602294fSDan Kimmel ASSERT(!BP_GET_DEDUP(bp)); 2932b24ab676SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 2933b24ab676SJeff Bonwick ddt_exit(ddt); 2934b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2935b24ab676SJeff Bonwick } 2936b24ab676SJeff Bonwick 2937b24ab676SJeff Bonwick ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2938b24ab676SJeff Bonwick ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2939b24ab676SJeff Bonwick 2940b24ab676SJeff Bonwick if (ditto_copies > ddt_ditto_copies_present(dde) && 2941b24ab676SJeff Bonwick dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2942b24ab676SJeff Bonwick zio_prop_t czp = *zp; 2943b24ab676SJeff Bonwick 2944b24ab676SJeff Bonwick czp.zp_copies = ditto_copies; 2945b24ab676SJeff Bonwick 2946b24ab676SJeff Bonwick /* 2947b24ab676SJeff Bonwick * If we arrived here with an override bp, we won't have run 2948b24ab676SJeff Bonwick * the transform stack, so we won't have the data we need to 2949b24ab676SJeff Bonwick * generate a child i/o. So, toss the override bp and restart. 2950b24ab676SJeff Bonwick * This is safe, because using the override bp is just an 2951b24ab676SJeff Bonwick * optimization; and it's rare, so the cost doesn't matter. 2952b24ab676SJeff Bonwick */ 2953b24ab676SJeff Bonwick if (zio->io_bp_override) { 2954b24ab676SJeff Bonwick zio_pop_transforms(zio); 2955b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_OPEN; 2956b24ab676SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 2957b24ab676SJeff Bonwick zio->io_bp_override = NULL; 2958b24ab676SJeff Bonwick BP_ZERO(bp); 2959b24ab676SJeff Bonwick ddt_exit(ddt); 2960b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2961b24ab676SJeff Bonwick } 2962b24ab676SJeff Bonwick 2963770499e1SDan Kimmel dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 29645602294fSDan Kimmel zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL, 29658df0bcf0SPaul Dagnelie NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 2966b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2967b24ab676SJeff Bonwick 2968770499e1SDan Kimmel zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL); 2969b24ab676SJeff Bonwick dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2970b24ab676SJeff Bonwick } 2971b24ab676SJeff Bonwick 2972b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2973b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0) 2974b24ab676SJeff Bonwick ddt_bp_fill(ddp, bp, txg); 2975b24ab676SJeff Bonwick if (dde->dde_lead_zio[p] != NULL) 2976b24ab676SJeff Bonwick zio_add_child(zio, dde->dde_lead_zio[p]); 2977b24ab676SJeff Bonwick else 2978b24ab676SJeff Bonwick ddt_phys_addref(ddp); 2979b24ab676SJeff Bonwick } else if (zio->io_bp_override) { 2980b24ab676SJeff Bonwick ASSERT(bp->blk_birth == txg); 2981b24ab676SJeff Bonwick ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2982b24ab676SJeff Bonwick ddt_phys_fill(ddp, bp); 2983b24ab676SJeff Bonwick ddt_phys_addref(ddp); 2984b24ab676SJeff Bonwick } else { 2985770499e1SDan Kimmel cio = 
zio_write(zio, spa, txg, bp, zio->io_orig_abd, 29865602294fSDan Kimmel zio->io_orig_size, zio->io_orig_size, zp, 29878df0bcf0SPaul Dagnelie zio_ddt_child_write_ready, NULL, NULL, 2988b24ab676SJeff Bonwick zio_ddt_child_write_done, dde, zio->io_priority, 2989b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2990b24ab676SJeff Bonwick 2991770499e1SDan Kimmel zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 2992b24ab676SJeff Bonwick dde->dde_lead_zio[p] = cio; 2993b24ab676SJeff Bonwick } 2994b24ab676SJeff Bonwick 2995b24ab676SJeff Bonwick ddt_exit(ddt); 2996b24ab676SJeff Bonwick 2997b24ab676SJeff Bonwick if (cio) 2998b24ab676SJeff Bonwick zio_nowait(cio); 2999b24ab676SJeff Bonwick if (dio) 3000b24ab676SJeff Bonwick zio_nowait(dio); 3001b24ab676SJeff Bonwick 3002b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3003b24ab676SJeff Bonwick } 3004b24ab676SJeff Bonwick 30053f9d6ad7SLin Ling ddt_entry_t *freedde; /* for debugging */ 30063f9d6ad7SLin Ling 3007b24ab676SJeff Bonwick static int 3008b24ab676SJeff Bonwick zio_ddt_free(zio_t *zio) 3009b24ab676SJeff Bonwick { 3010b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 3011b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 3012b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(spa, bp); 3013b24ab676SJeff Bonwick ddt_entry_t *dde; 3014b24ab676SJeff Bonwick ddt_phys_t *ddp; 3015b24ab676SJeff Bonwick 3016b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 3017b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3018b24ab676SJeff Bonwick 3019b24ab676SJeff Bonwick ddt_enter(ddt); 30203f9d6ad7SLin Ling freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 3021b24ab676SJeff Bonwick ddp = ddt_phys_select(dde, bp); 3022b24ab676SJeff Bonwick ddt_phys_decref(ddp); 3023b24ab676SJeff Bonwick ddt_exit(ddt); 3024b24ab676SJeff Bonwick 3025b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3026b24ab676SJeff Bonwick } 3027b24ab676SJeff Bonwick 3028b24ab676SJeff Bonwick /* 3029b24ab676SJeff Bonwick * ========================================================================== 3030b24ab676SJeff Bonwick * Allocate and free blocks 3031b24ab676SJeff Bonwick * ========================================================================== 3032b24ab676SJeff Bonwick */ 30330f7643c7SGeorge Wilson 30340f7643c7SGeorge Wilson static zio_t * 3035f78cdc34SPaul Dagnelie zio_io_to_allocate(spa_t *spa, int allocator) 30360f7643c7SGeorge Wilson { 30370f7643c7SGeorge Wilson zio_t *zio; 30380f7643c7SGeorge Wilson 3039f78cdc34SPaul Dagnelie ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator])); 30400f7643c7SGeorge Wilson 3041f78cdc34SPaul Dagnelie zio = avl_first(&spa->spa_alloc_trees[allocator]); 30420f7643c7SGeorge Wilson if (zio == NULL) 30430f7643c7SGeorge Wilson return (NULL); 30440f7643c7SGeorge Wilson 30450f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 30460f7643c7SGeorge Wilson 30470f7643c7SGeorge Wilson /* 30480f7643c7SGeorge Wilson * Try to place a reservation for this zio. If we're unable to 30490f7643c7SGeorge Wilson * reserve then we throttle. 
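 * On failure we return NULL and leave the zio queued in
 * spa_alloc_trees[allocator]; a later zio_allocate_dispatch(), run
 * when other I/Os release their reservations, will pick it up again.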
30500f7643c7SGeorge Wilson */ 3051f78cdc34SPaul Dagnelie ASSERT3U(zio->io_allocator, ==, allocator); 3052663207adSDon Brady if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, 3053f78cdc34SPaul Dagnelie zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) { 30540f7643c7SGeorge Wilson return (NULL); 30550f7643c7SGeorge Wilson } 30560f7643c7SGeorge Wilson 3057f78cdc34SPaul Dagnelie avl_remove(&spa->spa_alloc_trees[allocator], zio); 30580f7643c7SGeorge Wilson ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 30590f7643c7SGeorge Wilson 30600f7643c7SGeorge Wilson return (zio); 30610f7643c7SGeorge Wilson } 30620f7643c7SGeorge Wilson 30630f7643c7SGeorge Wilson static int 30640f7643c7SGeorge Wilson zio_dva_throttle(zio_t *zio) 30650f7643c7SGeorge Wilson { 30660f7643c7SGeorge Wilson spa_t *spa = zio->io_spa; 30670f7643c7SGeorge Wilson zio_t *nio; 3068663207adSDon Brady metaslab_class_t *mc; 3069663207adSDon Brady 3070663207adSDon Brady /* locate an appropriate allocation class */ 3071663207adSDon Brady mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type, 3072663207adSDon Brady zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk); 30730f7643c7SGeorge Wilson 30740f7643c7SGeorge Wilson if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 3075663207adSDon Brady !mc->mc_alloc_throttle_enabled || 30760f7643c7SGeorge Wilson zio->io_child_type == ZIO_CHILD_GANG || 30770f7643c7SGeorge Wilson zio->io_flags & ZIO_FLAG_NODATA) { 30780f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 30790f7643c7SGeorge Wilson } 30800f7643c7SGeorge Wilson 30810f7643c7SGeorge Wilson ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 30820f7643c7SGeorge Wilson 30830f7643c7SGeorge Wilson ASSERT3U(zio->io_queued_timestamp, >, 0); 30840f7643c7SGeorge Wilson ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 30850f7643c7SGeorge Wilson 3086f78cdc34SPaul Dagnelie zbookmark_phys_t *bm = &zio->io_bookmark; 3087f78cdc34SPaul Dagnelie /* 3088f78cdc34SPaul Dagnelie * We want to try to use as many allocators as possible to help improve 3089f78cdc34SPaul Dagnelie * performance, but we also want logically adjacent IOs to be physically 3090f78cdc34SPaul Dagnelie * adjacent to improve sequential read performance. We chunk each object 3091f78cdc34SPaul Dagnelie * into 2^20 block regions, and then hash based on the objset, object, 3092f78cdc34SPaul Dagnelie * level, and region to accomplish both of these goals. 3093f78cdc34SPaul Dagnelie */ 3094f78cdc34SPaul Dagnelie zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object, 3095f78cdc34SPaul Dagnelie bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; 3096f78cdc34SPaul Dagnelie mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]); 30970f7643c7SGeorge Wilson ASSERT(zio->io_type == ZIO_TYPE_WRITE); 3098663207adSDon Brady zio->io_metaslab_class = mc; 3099f78cdc34SPaul Dagnelie avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio); 3100663207adSDon Brady nio = zio_io_to_allocate(spa, zio->io_allocator); 3101f78cdc34SPaul Dagnelie mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]); 31020f7643c7SGeorge Wilson 31030f7643c7SGeorge Wilson if (nio == zio) 31040f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 31050f7643c7SGeorge Wilson 31060f7643c7SGeorge Wilson if (nio != NULL) { 31070f7643c7SGeorge Wilson ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 31080f7643c7SGeorge Wilson /* 31090f7643c7SGeorge Wilson * We are passing control to a new zio so make sure that 31100f7643c7SGeorge Wilson * it is processed by a different thread. 
We do this to 31110f7643c7SGeorge Wilson * avoid stack overflows that can occur when parents are 31120f7643c7SGeorge Wilson * throttled and children are making progress. We allow 31130f7643c7SGeorge Wilson * it to go to the head of the taskq since it's already 31140f7643c7SGeorge Wilson * been waiting. 31150f7643c7SGeorge Wilson */ 31160f7643c7SGeorge Wilson zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 31170f7643c7SGeorge Wilson } 31180f7643c7SGeorge Wilson return (ZIO_PIPELINE_STOP); 31190f7643c7SGeorge Wilson } 31200f7643c7SGeorge Wilson 3121663207adSDon Brady static void 3122f78cdc34SPaul Dagnelie zio_allocate_dispatch(spa_t *spa, int allocator) 31230f7643c7SGeorge Wilson { 31240f7643c7SGeorge Wilson zio_t *zio; 31250f7643c7SGeorge Wilson 3126f78cdc34SPaul Dagnelie mutex_enter(&spa->spa_alloc_locks[allocator]); 3127f78cdc34SPaul Dagnelie zio = zio_io_to_allocate(spa, allocator); 3128f78cdc34SPaul Dagnelie mutex_exit(&spa->spa_alloc_locks[allocator]); 31290f7643c7SGeorge Wilson if (zio == NULL) 31300f7643c7SGeorge Wilson return; 31310f7643c7SGeorge Wilson 31320f7643c7SGeorge Wilson ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 31330f7643c7SGeorge Wilson ASSERT0(zio->io_error); 31340f7643c7SGeorge Wilson zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 31350f7643c7SGeorge Wilson } 31360f7643c7SGeorge Wilson 3137e05725b1Sbonwick static int 3138fa9e4066Sahrens zio_dva_allocate(zio_t *zio) 3139fa9e4066Sahrens { 31408654d025Sperrin spa_t *spa = zio->io_spa; 3141663207adSDon Brady metaslab_class_t *mc; 3142fa9e4066Sahrens blkptr_t *bp = zio->io_bp; 3143fa9e4066Sahrens int error; 314409c9d376SGeorge Wilson int flags = 0; 3145fa9e4066Sahrens 3146f5383399SBill Moore if (zio->io_gang_leader == NULL) { 3147f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 3148f5383399SBill Moore zio->io_gang_leader = zio; 3149f5383399SBill Moore } 3150f5383399SBill Moore 3151fa9e4066Sahrens ASSERT(BP_IS_HOLE(bp)); 3152fb09f5aaSMadhav Suresh ASSERT0(BP_GET_NDVAS(bp)); 3153b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, >, 0); 3154b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 3155fa9e4066Sahrens ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 3156fa9e4066Sahrens 3157663207adSDon Brady if (zio->io_flags & ZIO_FLAG_NODATA) 31580f7643c7SGeorge Wilson flags |= METASLAB_DONT_THROTTLE; 3159663207adSDon Brady if (zio->io_flags & ZIO_FLAG_GANG_CHILD) 31600f7643c7SGeorge Wilson flags |= METASLAB_GANG_CHILD; 3161663207adSDon Brady if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) 31620f7643c7SGeorge Wilson flags |= METASLAB_ASYNC_ALLOC; 3163663207adSDon Brady 3164663207adSDon Brady /* 3165663207adSDon Brady * if not already chosen, locate an appropriate allocation class 3166663207adSDon Brady */ 3167663207adSDon Brady mc = zio->io_metaslab_class; 3168663207adSDon Brady if (mc == NULL) { 3169663207adSDon Brady mc = spa_preferred_class(spa, zio->io_size, 3170663207adSDon Brady zio->io_prop.zp_type, zio->io_prop.zp_level, 3171663207adSDon Brady zio->io_prop.zp_zpl_smallblk); 3172663207adSDon Brady zio->io_metaslab_class = mc; 31730f7643c7SGeorge Wilson } 31740f7643c7SGeorge Wilson 3175e14bb325SJeff Bonwick error = metaslab_alloc(spa, mc, zio->io_size, bp, 31768363e80aSGeorge Wilson zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3177f78cdc34SPaul Dagnelie &zio->io_alloc_list, zio, zio->io_allocator); 3178fa9e4066Sahrens 3179663207adSDon Brady /* 3180663207adSDon Brady * Fallback to normal class when an alloc class is full 3181663207adSDon Brady */ 
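	/*
	 * (This fallback is what lets a dedicated special or dedup class
	 * fill up gracefully: rather than failing the write, its overflow
	 * simply spills into the normal class.)
	 */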
3182663207adSDon Brady if (error == ENOSPC && mc != spa_normal_class(spa)) { 3183663207adSDon Brady /* 3184663207adSDon Brady * If throttling, transfer reservation over to normal class. 3185663207adSDon Brady * The io_allocator slot can remain the same even though we 3186663207adSDon Brady * are switching classes. 3187663207adSDon Brady */ 3188663207adSDon Brady if (mc->mc_alloc_throttle_enabled && 3189663207adSDon Brady (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { 3190663207adSDon Brady metaslab_class_throttle_unreserve(mc, 3191663207adSDon Brady zio->io_prop.zp_copies, zio->io_allocator, zio); 3192663207adSDon Brady zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; 3193663207adSDon Brady 3194663207adSDon Brady mc = spa_normal_class(spa); 3195663207adSDon Brady VERIFY(metaslab_class_throttle_reserve(mc, 3196663207adSDon Brady zio->io_prop.zp_copies, zio->io_allocator, zio, 3197663207adSDon Brady flags | METASLAB_MUST_RESERVE)); 3198663207adSDon Brady } else { 3199663207adSDon Brady mc = spa_normal_class(spa); 3200663207adSDon Brady } 3201663207adSDon Brady zio->io_metaslab_class = mc; 3202663207adSDon Brady 3203663207adSDon Brady error = metaslab_alloc(spa, mc, zio->io_size, bp, 3204663207adSDon Brady zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 3205663207adSDon Brady &zio->io_alloc_list, zio, zio->io_allocator); 3206663207adSDon Brady } 3207663207adSDon Brady 32080f7643c7SGeorge Wilson if (error != 0) { 320921f7c81cSMatthew Ahrens zfs_dbgmsg("%s: metaslab allocation failure: zio %p, " 321009c9d376SGeorge Wilson "size %llu, error %d", spa_name(spa), zio, zio->io_size, 321109c9d376SGeorge Wilson error); 3212e14bb325SJeff Bonwick if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 3213e14bb325SJeff Bonwick return (zio_write_gang_block(zio)); 3214fa9e4066Sahrens zio->io_error = error; 3215fa9e4066Sahrens } 3216e05725b1Sbonwick 3217e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3218fa9e4066Sahrens } 3219fa9e4066Sahrens 3220e05725b1Sbonwick static int 3221fa9e4066Sahrens zio_dva_free(zio_t *zio) 3222fa9e4066Sahrens { 3223e14bb325SJeff Bonwick metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 3224fa9e4066Sahrens 3225e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3226fa9e4066Sahrens } 3227fa9e4066Sahrens 3228e05725b1Sbonwick static int 3229fa9e4066Sahrens zio_dva_claim(zio_t *zio) 3230fa9e4066Sahrens { 3231e14bb325SJeff Bonwick int error; 3232e14bb325SJeff Bonwick 3233e14bb325SJeff Bonwick error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 3234e14bb325SJeff Bonwick if (error) 3235e14bb325SJeff Bonwick zio->io_error = error; 3236fa9e4066Sahrens 3237e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3238fa9e4066Sahrens } 3239fa9e4066Sahrens 3240e14bb325SJeff Bonwick /* 3241e14bb325SJeff Bonwick * Undo an allocation. This is used by zio_done() when an I/O fails 3242e14bb325SJeff Bonwick * and we want to give back the block we just allocated. 3243e14bb325SJeff Bonwick * This handles both normal blocks and gang blocks. 
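 * For a gang block we recurse through the whole gang tree, handing
 * back the header as well as every child bp. The frees pass
 * now == B_TRUE since these blocks were born in this txg and were
 * never visible outside this I/O.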
3244e14bb325SJeff Bonwick */ 3245e14bb325SJeff Bonwick static void 3246e14bb325SJeff Bonwick zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 3247e14bb325SJeff Bonwick { 3248e14bb325SJeff Bonwick ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 3249b24ab676SJeff Bonwick ASSERT(zio->io_bp_override == NULL); 3250e14bb325SJeff Bonwick 3251e14bb325SJeff Bonwick if (!BP_IS_HOLE(bp)) 3252b24ab676SJeff Bonwick metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 3253e14bb325SJeff Bonwick 3254e14bb325SJeff Bonwick if (gn != NULL) { 3255e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 3256e14bb325SJeff Bonwick zio_dva_unallocate(zio, gn->gn_child[g], 3257e14bb325SJeff Bonwick &gn->gn_gbh->zg_blkptr[g]); 3258e14bb325SJeff Bonwick } 3259e14bb325SJeff Bonwick } 3260e14bb325SJeff Bonwick } 3261e14bb325SJeff Bonwick 3262e14bb325SJeff Bonwick /* 3263e14bb325SJeff Bonwick * Try to allocate an intent log block. Return 0 on success, errno on failure. 3264e14bb325SJeff Bonwick */ 3265e14bb325SJeff Bonwick int 3266*eb633035STom Caputi zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp, 3267f78cdc34SPaul Dagnelie blkptr_t *old_bp, uint64_t size, boolean_t *slog) 3268e14bb325SJeff Bonwick { 3269e09fa4daSNeil Perrin int error = 1; 32708363e80aSGeorge Wilson zio_alloc_list_t io_alloc_list; 3271e14bb325SJeff Bonwick 3272b24ab676SJeff Bonwick ASSERT(txg > spa_syncing_txg(spa)); 3273b24ab676SJeff Bonwick 32748363e80aSGeorge Wilson metaslab_trace_init(&io_alloc_list); 3275663207adSDon Brady 3276663207adSDon Brady /* 3277663207adSDon Brady * Block pointer fields are useful to metaslabs for stats and debugging. 3278663207adSDon Brady * Fill in the obvious ones before calling into metaslab_alloc(). 3279663207adSDon Brady */ 3280663207adSDon Brady BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3281663207adSDon Brady BP_SET_PSIZE(new_bp, size); 3282663207adSDon Brady BP_SET_LEVEL(new_bp, 0); 3283663207adSDon Brady 3284f78cdc34SPaul Dagnelie /* 3285f78cdc34SPaul Dagnelie * When allocating a zil block, we don't have information about 3286f78cdc34SPaul Dagnelie * the final destination of the block except the objset it's part 3287f78cdc34SPaul Dagnelie * of, so we just hash the objset ID to pick the allocator to get 3288f78cdc34SPaul Dagnelie * some parallelism. 
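 * One consequence is that all ZIL blocks belonging to a given objset
 * land on the same allocator, while distinct objsets spread across
 * all spa_alloc_count allocators.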
3289f78cdc34SPaul Dagnelie */ 3290c5ee4681SAlexander Motin error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 3291f78cdc34SPaul Dagnelie txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL, 3292*eb633035STom Caputi cityhash4(0, 0, 0, 3293*eb633035STom Caputi os->os_dsl_dataset->ds_object) % spa->spa_alloc_count); 3294c5ee4681SAlexander Motin if (error == 0) { 3295c5ee4681SAlexander Motin *slog = TRUE; 3296c5ee4681SAlexander Motin } else { 3297b24ab676SJeff Bonwick error = metaslab_alloc(spa, spa_normal_class(spa), size, 32988363e80aSGeorge Wilson new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, 3299*eb633035STom Caputi &io_alloc_list, NULL, cityhash4(0, 0, 0, 3300*eb633035STom Caputi os->os_dsl_dataset->ds_object) % spa->spa_alloc_count); 3301c5ee4681SAlexander Motin if (error == 0) 3302c5ee4681SAlexander Motin *slog = FALSE; 3303840345f6SGeorge Wilson } 33048363e80aSGeorge Wilson metaslab_trace_fini(&io_alloc_list); 3305e14bb325SJeff Bonwick 3306e14bb325SJeff Bonwick if (error == 0) { 3307e14bb325SJeff Bonwick BP_SET_LSIZE(new_bp, size); 3308e14bb325SJeff Bonwick BP_SET_PSIZE(new_bp, size); 3309e14bb325SJeff Bonwick BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 33106e1f5caaSNeil Perrin BP_SET_CHECKSUM(new_bp, 33116e1f5caaSNeil Perrin spa_version(spa) >= SPA_VERSION_SLIM_ZIL 33126e1f5caaSNeil Perrin ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 3313e14bb325SJeff Bonwick BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 3314e14bb325SJeff Bonwick BP_SET_LEVEL(new_bp, 0); 3315b24ab676SJeff Bonwick BP_SET_DEDUP(new_bp, 0); 3316e14bb325SJeff Bonwick BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 3317*eb633035STom Caputi 3318*eb633035STom Caputi /* 3319*eb633035STom Caputi * encrypted blocks will require an IV and salt. We generate 3320*eb633035STom Caputi * these now since we will not be rewriting the bp at 3321*eb633035STom Caputi * rewrite time. 3322*eb633035STom Caputi */ 3323*eb633035STom Caputi if (os->os_encrypted) { 3324*eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 3325*eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 3326*eb633035STom Caputi 3327*eb633035STom Caputi BP_SET_CRYPT(new_bp, B_TRUE); 3328*eb633035STom Caputi VERIFY0(spa_crypt_get_salt(spa, 3329*eb633035STom Caputi dmu_objset_id(os), salt)); 3330*eb633035STom Caputi VERIFY0(zio_crypt_generate_iv(iv)); 3331*eb633035STom Caputi 3332*eb633035STom Caputi zio_crypt_encode_params_bp(new_bp, salt, iv); 3333*eb633035STom Caputi } 33341271e4b1SPrakash Surya } else { 33351271e4b1SPrakash Surya zfs_dbgmsg("%s: zil block allocation failure: " 33361271e4b1SPrakash Surya "size %llu, error %d", spa_name(spa), size, error); 3337e14bb325SJeff Bonwick } 3338e14bb325SJeff Bonwick 3339e14bb325SJeff Bonwick return (error); 3340e14bb325SJeff Bonwick } 3341e14bb325SJeff Bonwick 3342fa9e4066Sahrens /* 3343fa9e4066Sahrens * ========================================================================== 3344fa9e4066Sahrens * Read and write to physical devices 3345fa9e4066Sahrens * ========================================================================== 3346fa9e4066Sahrens */ 3347738f37bcSGeorge Wilson 3348738f37bcSGeorge Wilson 3349738f37bcSGeorge Wilson /* 3350738f37bcSGeorge Wilson * Issue an I/O to the underlying vdev. Typically the issue pipeline 3351738f37bcSGeorge Wilson * stops after this stage and will resume upon I/O completion. 3352738f37bcSGeorge Wilson * However, there are instances where the vdev layer may need to 3353738f37bcSGeorge Wilson * continue the pipeline when an I/O was not issued. 
Since the I/O 3354738f37bcSGeorge Wilson * that was sent to the vdev layer might be different than the one 3355738f37bcSGeorge Wilson * currently active in the pipeline (see vdev_queue_io()), we explicitly 3356738f37bcSGeorge Wilson * force the underlying vdev layers to call either zio_execute() or 3357738f37bcSGeorge Wilson * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3358738f37bcSGeorge Wilson */ 3359e05725b1Sbonwick static int 336044cd46caSbillm zio_vdev_io_start(zio_t *zio) 3361fa9e4066Sahrens { 3362fa9e4066Sahrens vdev_t *vd = zio->io_vd; 336344cd46caSbillm uint64_t align; 33640a4e9518Sgw spa_t *spa = zio->io_spa; 33650a4e9518Sgw 3366e14bb325SJeff Bonwick ASSERT(zio->io_error == 0); 3367e14bb325SJeff Bonwick ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3368fa9e4066Sahrens 3369e14bb325SJeff Bonwick if (vd == NULL) { 3370e14bb325SJeff Bonwick if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3371e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3372fa9e4066Sahrens 3373e14bb325SJeff Bonwick /* 3374e14bb325SJeff Bonwick * The mirror_ops handle multiple DVAs in a single BP. 3375e14bb325SJeff Bonwick */ 3376738f37bcSGeorge Wilson vdev_mirror_ops.vdev_op_io_start(zio); 3377738f37bcSGeorge Wilson return (ZIO_PIPELINE_STOP); 3378fa9e4066Sahrens } 3379fa9e4066Sahrens 33800f7643c7SGeorge Wilson ASSERT3P(zio->io_logical, !=, zio); 33816f793812SPavel Zakharov if (zio->io_type == ZIO_TYPE_WRITE) { 33826f793812SPavel Zakharov ASSERT(spa->spa_trust_config); 33836f793812SPavel Zakharov 3384a3874b8bSToomas Soome /* 3385a3874b8bSToomas Soome * Note: the code can handle other kinds of writes, 3386a3874b8bSToomas Soome * but we don't expect them. 3387a3874b8bSToomas Soome */ 33886f793812SPavel Zakharov if (zio->io_vd->vdev_removing) { 33896f793812SPavel Zakharov ASSERT(zio->io_flags & 33906f793812SPavel Zakharov (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | 33913a4b1be9SMatthew Ahrens ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); 33926f793812SPavel Zakharov } 33935cabbc6bSPrashanth Sreenivasa } 33940f7643c7SGeorge Wilson 3395e14bb325SJeff Bonwick align = 1ULL << vd->vdev_top->vdev_ashift; 3396e14bb325SJeff Bonwick 33972a104a52SAlex Reece if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 33982a104a52SAlex Reece P2PHASE(zio->io_size, align) != 0) { 33992a104a52SAlex Reece /* Transform logical writes to be a full physical block size. */ 3400ecc2d604Sbonwick uint64_t asize = P2ROUNDUP(zio->io_size, align); 3401770499e1SDan Kimmel abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3402e14bb325SJeff Bonwick ASSERT(vd == vd->vdev_top); 3403ecc2d604Sbonwick if (zio->io_type == ZIO_TYPE_WRITE) { 3404770499e1SDan Kimmel abd_copy(abuf, zio->io_abd, zio->io_size); 3405770499e1SDan Kimmel abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3406ecc2d604Sbonwick } 3407e14bb325SJeff Bonwick zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3408ecc2d604Sbonwick } 3409ecc2d604Sbonwick 34102a104a52SAlex Reece /* 34112a104a52SAlex Reece * If this is not a physical io, make sure that it is properly aligned 34122a104a52SAlex Reece * before proceeding. 
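 * ("Properly aligned" means aligned to the top-level vdev's ashift;
 * any logical I/O whose size was not already a multiple of that was
 * rounded up to it by the transform above.)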
34132a104a52SAlex Reece */ 34142a104a52SAlex Reece if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 34152a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_offset, align)); 34162a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_size, align)); 34172a104a52SAlex Reece } else { 34182a104a52SAlex Reece /* 34192a104a52SAlex Reece * For physical writes, we allow 512b aligned writes and assume 34202a104a52SAlex Reece * the device will perform a read-modify-write as necessary. 34212a104a52SAlex Reece */ 34222a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 34232a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 34242a104a52SAlex Reece } 34252a104a52SAlex Reece 3426f9af39baSGeorge Wilson VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 34278ad4d6ddSJeff Bonwick 34288ad4d6ddSJeff Bonwick /* 34298ad4d6ddSJeff Bonwick * If this is a repair I/O, and there's no self-healing involved -- 34308ad4d6ddSJeff Bonwick * that is, we're just resilvering what we expect to resilver -- 34318ad4d6ddSJeff Bonwick * then don't do the I/O unless zio's txg is actually in vd's DTL. 34323a4b1be9SMatthew Ahrens * This prevents spurious resilvering. 34333a4b1be9SMatthew Ahrens * 34343a4b1be9SMatthew Ahrens * There are a few ways that we can end up creating these spurious 34353a4b1be9SMatthew Ahrens * resilver i/os: 34363a4b1be9SMatthew Ahrens * 34373a4b1be9SMatthew Ahrens * 1. A resilver i/o will be issued if any DVA in the BP has a 34383a4b1be9SMatthew Ahrens * dirty DTL. The mirror code will issue resilver writes to 34393a4b1be9SMatthew Ahrens * each DVA, including the one(s) that are not on vdevs with dirty 34403a4b1be9SMatthew Ahrens * DTLs. 34413a4b1be9SMatthew Ahrens * 34423a4b1be9SMatthew Ahrens * 2. With nested replication, which happens when we have a 34433a4b1be9SMatthew Ahrens * "replacing" or "spare" vdev that's a child of a mirror or raidz. 34443a4b1be9SMatthew Ahrens * For example, given mirror(replacing(A+B), C), it's likely that 34453a4b1be9SMatthew Ahrens * only A is out of date (it's the new device). In this case, we'll 34463a4b1be9SMatthew Ahrens * read from C, then use the data to resilver A+B -- but we don't 34473a4b1be9SMatthew Ahrens * actually want to resilver B, just A. The top-level mirror has no 34483a4b1be9SMatthew Ahrens * way to know this, so instead we just discard unnecessary repairs 34493a4b1be9SMatthew Ahrens * as we work our way down the vdev tree. 34503a4b1be9SMatthew Ahrens * 34513a4b1be9SMatthew Ahrens * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. 34523a4b1be9SMatthew Ahrens * The same logic applies to any form of nested replication: ditto 34533a4b1be9SMatthew Ahrens * + mirror, RAID-Z + replacing, etc. 34543a4b1be9SMatthew Ahrens * 34553a4b1be9SMatthew Ahrens * However, indirect vdevs point off to other vdevs which may have 34563a4b1be9SMatthew Ahrens * DTL's, so we never bypass them. The child i/os on concrete vdevs 34573a4b1be9SMatthew Ahrens * will be properly bypassed instead. 
34588ad4d6ddSJeff Bonwick */ 34598ad4d6ddSJeff Bonwick if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 34608ad4d6ddSJeff Bonwick !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 34618ad4d6ddSJeff Bonwick zio->io_txg != 0 && /* not a delegated i/o */ 34623a4b1be9SMatthew Ahrens vd->vdev_ops != &vdev_indirect_ops && 34638ad4d6ddSJeff Bonwick !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 34648ad4d6ddSJeff Bonwick ASSERT(zio->io_type == ZIO_TYPE_WRITE); 34658ad4d6ddSJeff Bonwick zio_vdev_io_bypass(zio); 34668ad4d6ddSJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 34678ad4d6ddSJeff Bonwick } 3468fa9e4066Sahrens 3469e14bb325SJeff Bonwick if (vd->vdev_ops->vdev_op_leaf && 3470e14bb325SJeff Bonwick (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 3471e14bb325SJeff Bonwick 347243466aaeSMax Grossman if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) 3473a3f829aeSBill Moore return (ZIO_PIPELINE_CONTINUE); 3474e14bb325SJeff Bonwick 3475e14bb325SJeff Bonwick if ((zio = vdev_queue_io(zio)) == NULL) 3476e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3477e14bb325SJeff Bonwick 3478e14bb325SJeff Bonwick if (!vdev_accessible(vd, zio)) { 3479be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3480e14bb325SJeff Bonwick zio_interrupt(zio); 3481e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3482e14bb325SJeff Bonwick } 3483e14bb325SJeff Bonwick } 3484e14bb325SJeff Bonwick 3485738f37bcSGeorge Wilson vd->vdev_ops->vdev_op_io_start(zio); 3486738f37bcSGeorge Wilson return (ZIO_PIPELINE_STOP); 3487fa9e4066Sahrens } 3488fa9e4066Sahrens 3489e05725b1Sbonwick static int 3490fa9e4066Sahrens zio_vdev_io_done(zio_t *zio) 3491fa9e4066Sahrens { 3492e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd; 3493e14bb325SJeff Bonwick vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 3494e14bb325SJeff Bonwick boolean_t unexpected_error = B_FALSE; 3495e05725b1Sbonwick 3496d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3497e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3498d6e1c446SGeorge Wilson } 3499fa9e4066Sahrens 3500e14bb325SJeff Bonwick ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 3501e14bb325SJeff Bonwick 3502e14bb325SJeff Bonwick if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 3503e14bb325SJeff Bonwick 3504e14bb325SJeff Bonwick vdev_queue_io_done(zio); 3505fa9e4066Sahrens 3506e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_WRITE) 3507e14bb325SJeff Bonwick vdev_cache_write(zio); 3508e14bb325SJeff Bonwick 3509e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 35108956713aSEric Schrock zio->io_error = zio_handle_device_injection(vd, 35118956713aSEric Schrock zio, EIO); 3512e14bb325SJeff Bonwick 3513e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 3514e14bb325SJeff Bonwick zio->io_error = zio_handle_label_injection(zio, EIO); 3515e14bb325SJeff Bonwick 3516e14bb325SJeff Bonwick if (zio->io_error) { 3517e14bb325SJeff Bonwick if (!vdev_accessible(vd, zio)) { 3518be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3519e14bb325SJeff Bonwick } else { 3520e14bb325SJeff Bonwick unexpected_error = B_TRUE; 3521e14bb325SJeff Bonwick } 3522e14bb325SJeff Bonwick } 352351ece835Seschrock } 3524fa9e4066Sahrens 3525e14bb325SJeff Bonwick ops->vdev_op_io_done(zio); 3526e14bb325SJeff Bonwick 3527e14bb325SJeff Bonwick if (unexpected_error) 3528a3f829aeSBill Moore VERIFY(vdev_probe(vd, zio) == NULL); 3529e14bb325SJeff Bonwick 3530e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 
3531fa9e4066Sahrens } 3532fa9e4066Sahrens 3533a3874b8bSToomas Soome /* 3534a3874b8bSToomas Soome * This function is used to change the priority of an existing zio that is 3535a3874b8bSToomas Soome * currently in-flight. This is used by the arc to upgrade priority in the 3536a3874b8bSToomas Soome * event that a demand read is made for a block that is currently queued 3537a3874b8bSToomas Soome * as a scrub or async read IO. Otherwise, the high priority read request 3538a3874b8bSToomas Soome * would end up having to wait for the lower priority IO. 3539a3874b8bSToomas Soome */ 3540a3874b8bSToomas Soome void 3541a3874b8bSToomas Soome zio_change_priority(zio_t *pio, zio_priority_t priority) 3542a3874b8bSToomas Soome { 3543a3874b8bSToomas Soome zio_t *cio, *cio_next; 3544a3874b8bSToomas Soome zio_link_t *zl = NULL; 3545a3874b8bSToomas Soome 3546a3874b8bSToomas Soome ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); 3547a3874b8bSToomas Soome 3548a3874b8bSToomas Soome if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { 3549a3874b8bSToomas Soome vdev_queue_change_io_priority(pio, priority); 3550a3874b8bSToomas Soome } else { 3551a3874b8bSToomas Soome pio->io_priority = priority; 3552a3874b8bSToomas Soome } 3553a3874b8bSToomas Soome 3554a3874b8bSToomas Soome mutex_enter(&pio->io_lock); 3555a3874b8bSToomas Soome for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 3556a3874b8bSToomas Soome cio_next = zio_walk_children(pio, &zl); 3557a3874b8bSToomas Soome zio_change_priority(cio, priority); 3558a3874b8bSToomas Soome } 3559a3874b8bSToomas Soome mutex_exit(&pio->io_lock); 3560a3874b8bSToomas Soome } 3561a3874b8bSToomas Soome 356222fe2c88SJonathan Adams /* 356322fe2c88SJonathan Adams * For non-raidz ZIOs, we can just copy aside the bad data read from the 356422fe2c88SJonathan Adams * disk, and use that to finish the checksum ereport later. 
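 * (RAID-Z cannot use this default path; it supplies its own vsd ops, * since reproducing the bad data there involves multiple child columns.)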
356522fe2c88SJonathan Adams */ 356622fe2c88SJonathan Adams static void 356722fe2c88SJonathan Adams zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 3568*eb633035STom Caputi const abd_t *good_buf) 356922fe2c88SJonathan Adams { 357022fe2c88SJonathan Adams /* no processing needed */ 357122fe2c88SJonathan Adams zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 357222fe2c88SJonathan Adams } 357322fe2c88SJonathan Adams 357422fe2c88SJonathan Adams /*ARGSUSED*/ 357522fe2c88SJonathan Adams void 357622fe2c88SJonathan Adams zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 357722fe2c88SJonathan Adams { 3578*eb633035STom Caputi void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size); 357922fe2c88SJonathan Adams 3580*eb633035STom Caputi abd_copy(abd, zio->io_abd, zio->io_size); 358122fe2c88SJonathan Adams 358222fe2c88SJonathan Adams zcr->zcr_cbinfo = zio->io_size; 3583*eb633035STom Caputi zcr->zcr_cbdata = abd; 358422fe2c88SJonathan Adams zcr->zcr_finish = zio_vsd_default_cksum_finish; 3585*eb633035STom Caputi zcr->zcr_free = zio_abd_free; 358622fe2c88SJonathan Adams } 358722fe2c88SJonathan Adams 3588e05725b1Sbonwick static int 3589fa9e4066Sahrens zio_vdev_io_assess(zio_t *zio) 3590fa9e4066Sahrens { 3591fa9e4066Sahrens vdev_t *vd = zio->io_vd; 3592e14bb325SJeff Bonwick 3593d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { 3594e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3595d6e1c446SGeorge Wilson } 3596e14bb325SJeff Bonwick 3597e14bb325SJeff Bonwick if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3598e14bb325SJeff Bonwick spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3599e14bb325SJeff Bonwick 3600e14bb325SJeff Bonwick if (zio->io_vsd != NULL) { 360122fe2c88SJonathan Adams zio->io_vsd_ops->vsd_free(zio); 3602e14bb325SJeff Bonwick zio->io_vsd = NULL; 3603ecc2d604Sbonwick } 3604ecc2d604Sbonwick 3605e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 3606ea8dc4b6Seschrock zio->io_error = zio_handle_fault_injection(zio, EIO); 3607ea8dc4b6Seschrock 3608fa9e4066Sahrens /* 3609fa9e4066Sahrens * If the I/O failed, determine whether we should attempt to retry it. 361035a5a358SJonathan Adams * 361135a5a358SJonathan Adams * On retry, we cut in line in the issue queue, since we don't want 361235a5a358SJonathan Adams * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 3613fa9e4066Sahrens */ 3614e14bb325SJeff Bonwick if (zio->io_error && vd == NULL && 3615e14bb325SJeff Bonwick !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3616e14bb325SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3617e14bb325SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3618fa9e4066Sahrens zio->io_error = 0; 3619e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_IO_RETRY | 3620e14bb325SJeff Bonwick ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3621b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 362235a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 362335a5a358SJonathan Adams zio_requeue_io_start_cut_in_line); 3624e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3625ea8dc4b6Seschrock } 3626fa9e4066Sahrens 3627e14bb325SJeff Bonwick /* 3628e14bb325SJeff Bonwick * If we got an error on a leaf device, convert it to ENXIO 3629e14bb325SJeff Bonwick * if the device is not accessible at all. 
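 * This is the same vdev_accessible() test that zio_vdev_io_start() applies * before issue, so a dead device yields a consistent ENXIO.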
3630e14bb325SJeff Bonwick */ 3631e14bb325SJeff Bonwick if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3632e14bb325SJeff Bonwick !vdev_accessible(vd, zio)) 3633be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3634e14bb325SJeff Bonwick 3635e14bb325SJeff Bonwick /* 3636e14bb325SJeff Bonwick * If we can't write to an interior vdev (mirror or RAID-Z), 3637e14bb325SJeff Bonwick * set vdev_cant_write so that we stop trying to allocate from it. 3638e14bb325SJeff Bonwick */ 3639e14bb325SJeff Bonwick if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 36403b2aab18SMatthew Ahrens vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3641e14bb325SJeff Bonwick vd->vdev_cant_write = B_TRUE; 36423b2aab18SMatthew Ahrens } 3643e14bb325SJeff Bonwick 3644295438baSHans Rosenfeld /* 3645295438baSHans Rosenfeld * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3646295438baSHans Rosenfeld * attempts will ever succeed. In this case we set a persistent bit so 3647295438baSHans Rosenfeld * that we don't bother with it in the future. 3648295438baSHans Rosenfeld */ 3649295438baSHans Rosenfeld if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 3650295438baSHans Rosenfeld zio->io_type == ZIO_TYPE_IOCTL && 3651295438baSHans Rosenfeld zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 3652295438baSHans Rosenfeld vd->vdev_nowritecache = B_TRUE; 3653295438baSHans Rosenfeld 3654e14bb325SJeff Bonwick if (zio->io_error) 3655e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3656e14bb325SJeff Bonwick 365769962b56SMatthew Ahrens if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 365869962b56SMatthew Ahrens zio->io_physdone != NULL) { 365969962b56SMatthew Ahrens ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 366069962b56SMatthew Ahrens ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 366169962b56SMatthew Ahrens zio->io_physdone(zio->io_logical); 366269962b56SMatthew Ahrens } 366369962b56SMatthew Ahrens 3664e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3665fa9e4066Sahrens } 3666fa9e4066Sahrens 3667fa9e4066Sahrens void 3668fa9e4066Sahrens zio_vdev_io_reissue(zio_t *zio) 3669fa9e4066Sahrens { 3670fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3671fa9e4066Sahrens ASSERT(zio->io_error == 0); 3672fa9e4066Sahrens 3673b24ab676SJeff Bonwick zio->io_stage >>= 1; 3674fa9e4066Sahrens } 3675fa9e4066Sahrens 3676fa9e4066Sahrens void 3677fa9e4066Sahrens zio_vdev_io_redone(zio_t *zio) 3678fa9e4066Sahrens { 3679fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 3680fa9e4066Sahrens 3681b24ab676SJeff Bonwick zio->io_stage >>= 1; 3682fa9e4066Sahrens } 3683fa9e4066Sahrens 3684fa9e4066Sahrens void 3685fa9e4066Sahrens zio_vdev_io_bypass(zio_t *zio) 3686fa9e4066Sahrens { 3687fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3688fa9e4066Sahrens ASSERT(zio->io_error == 0); 3689fa9e4066Sahrens 3690fa9e4066Sahrens zio->io_flags |= ZIO_FLAG_IO_BYPASS; 3691b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 3692fa9e4066Sahrens } 3693fa9e4066Sahrens 3694*eb633035STom Caputi /* 3695*eb633035STom Caputi * ========================================================================== 3696*eb633035STom Caputi * Encrypt and store encryption parameters 3697*eb633035STom Caputi * ========================================================================== 3698*eb633035STom Caputi */ 3699*eb633035STom Caputi 3700*eb633035STom Caputi 3701*eb633035STom Caputi /* 3702*eb633035STom Caputi * This function is used for ZIO_STAGE_ENCRYPT. 
It is responsible for 3703*eb633035STom Caputi * managing the storage of encryption parameters and passing them to the 3704*eb633035STom Caputi * lower-level encryption functions. 3705*eb633035STom Caputi */ 3706*eb633035STom Caputi static int 3707*eb633035STom Caputi zio_encrypt(zio_t *zio) 3708*eb633035STom Caputi { 3709*eb633035STom Caputi zio_prop_t *zp = &zio->io_prop; 3710*eb633035STom Caputi spa_t *spa = zio->io_spa; 3711*eb633035STom Caputi blkptr_t *bp = zio->io_bp; 3712*eb633035STom Caputi uint64_t psize = BP_GET_PSIZE(bp); 3713*eb633035STom Caputi uint64_t dsobj = zio->io_bookmark.zb_objset; 3714*eb633035STom Caputi dmu_object_type_t ot = BP_GET_TYPE(bp); 3715*eb633035STom Caputi void *enc_buf = NULL; 3716*eb633035STom Caputi abd_t *eabd = NULL; 3717*eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 3718*eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 3719*eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 3720*eb633035STom Caputi boolean_t no_crypt = B_FALSE; 3721*eb633035STom Caputi 3722*eb633035STom Caputi /* the root zio already encrypted the data */ 3723*eb633035STom Caputi if (zio->io_child_type == ZIO_CHILD_GANG) 3724*eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3725*eb633035STom Caputi 3726*eb633035STom Caputi /* only ZIL blocks are re-encrypted on rewrite */ 3727*eb633035STom Caputi if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG) 3728*eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3729*eb633035STom Caputi 3730*eb633035STom Caputi if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) { 3731*eb633035STom Caputi BP_SET_CRYPT(bp, B_FALSE); 3732*eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3733*eb633035STom Caputi } 3734*eb633035STom Caputi 3735*eb633035STom Caputi /* if we are doing raw encryption set the provided encryption params */ 3736*eb633035STom Caputi if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) { 3737*eb633035STom Caputi ASSERT0(BP_GET_LEVEL(bp)); 3738*eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3739*eb633035STom Caputi BP_SET_BYTEORDER(bp, zp->zp_byteorder); 3740*eb633035STom Caputi if (ot != DMU_OT_OBJSET) 3741*eb633035STom Caputi zio_crypt_encode_mac_bp(bp, zp->zp_mac); 3742*eb633035STom Caputi 3743*eb633035STom Caputi /* dnode blocks must be written out in the provided byteorder */ 3744*eb633035STom Caputi if (zp->zp_byteorder != ZFS_HOST_BYTEORDER && 3745*eb633035STom Caputi ot == DMU_OT_DNODE) { 3746*eb633035STom Caputi void *bswap_buf = zio_buf_alloc(psize); 3747*eb633035STom Caputi abd_t *babd = abd_get_from_buf(bswap_buf, psize); 3748*eb633035STom Caputi 3749*eb633035STom Caputi ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 3750*eb633035STom Caputi abd_copy_to_buf(bswap_buf, zio->io_abd, psize); 3751*eb633035STom Caputi dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf, 3752*eb633035STom Caputi psize); 3753*eb633035STom Caputi 3754*eb633035STom Caputi abd_take_ownership_of_buf(babd, B_TRUE); 3755*eb633035STom Caputi zio_push_transform(zio, babd, psize, psize, NULL); 3756*eb633035STom Caputi } 3757*eb633035STom Caputi 3758*eb633035STom Caputi if (DMU_OT_IS_ENCRYPTED(ot)) 3759*eb633035STom Caputi zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv); 3760*eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3761*eb633035STom Caputi } 3762*eb633035STom Caputi 3763*eb633035STom Caputi /* indirect blocks only maintain a cksum of the lower level MACs */ 3764*eb633035STom Caputi if (BP_GET_LEVEL(bp) > 0) { 3765*eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3766*eb633035STom Caputi 
VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE, 3767*eb633035STom Caputi zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp), 3768*eb633035STom Caputi mac)); 3769*eb633035STom Caputi zio_crypt_encode_mac_bp(bp, mac); 3770*eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3771*eb633035STom Caputi } 3772*eb633035STom Caputi 3773*eb633035STom Caputi /* 3774*eb633035STom Caputi * Objset blocks are a special case since they have 2 256-bit MACs 3775*eb633035STom Caputi * embedded within them. 3776*eb633035STom Caputi */ 3777*eb633035STom Caputi if (ot == DMU_OT_OBJSET) { 3778*eb633035STom Caputi ASSERT0(DMU_OT_IS_ENCRYPTED(ot)); 3779*eb633035STom Caputi ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 3780*eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3781*eb633035STom Caputi VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj, 3782*eb633035STom Caputi zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp))); 3783*eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3784*eb633035STom Caputi } 3785*eb633035STom Caputi 3786*eb633035STom Caputi /* unencrypted object types are only authenticated with a MAC */ 3787*eb633035STom Caputi if (!DMU_OT_IS_ENCRYPTED(ot)) { 3788*eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3789*eb633035STom Caputi VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj, 3790*eb633035STom Caputi zio->io_abd, psize, mac)); 3791*eb633035STom Caputi zio_crypt_encode_mac_bp(bp, mac); 3792*eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3793*eb633035STom Caputi } 3794*eb633035STom Caputi 3795*eb633035STom Caputi /* 3796*eb633035STom Caputi * Later passes of sync-to-convergence may decide to rewrite data 3797*eb633035STom Caputi * in place to avoid more disk reallocations. This presents a problem 3798*eb633035STom Caputi * for encryption because this constitutes rewriting the new data with 3799*eb633035STom Caputi * the same encryption key and IV. However, this only applies to blocks 3800*eb633035STom Caputi * in the MOS (particularly the spacemaps) and we do not encrypt the 3801*eb633035STom Caputi * MOS. We assert that the zio is allocating or an intent log write 3802*eb633035STom Caputi * to enforce this. 3803*eb633035STom Caputi */ 3804*eb633035STom Caputi ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG); 3805*eb633035STom Caputi ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG); 3806*eb633035STom Caputi ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION)); 3807*eb633035STom Caputi ASSERT3U(psize, !=, 0); 3808*eb633035STom Caputi 3809*eb633035STom Caputi enc_buf = zio_buf_alloc(psize); 3810*eb633035STom Caputi eabd = abd_get_from_buf(enc_buf, psize); 3811*eb633035STom Caputi abd_take_ownership_of_buf(eabd, B_TRUE); 3812*eb633035STom Caputi 3813*eb633035STom Caputi /* 3814*eb633035STom Caputi * For an explanation of what encryption parameters are stored 3815*eb633035STom Caputi * where, see the block comment in zio_crypt.c. 3816*eb633035STom Caputi */ 3817*eb633035STom Caputi if (ot == DMU_OT_INTENT_LOG) { 3818*eb633035STom Caputi zio_crypt_decode_params_bp(bp, salt, iv); 3819*eb633035STom Caputi } else { 3820*eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3821*eb633035STom Caputi } 3822*eb633035STom Caputi 3823*eb633035STom Caputi /* Perform the encryption.
This should not fail */ 3824*eb633035STom Caputi VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark, 3825*eb633035STom Caputi BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), 3826*eb633035STom Caputi salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt)); 3827*eb633035STom Caputi 3828*eb633035STom Caputi /* encode encryption metadata into the bp */ 3829*eb633035STom Caputi if (ot == DMU_OT_INTENT_LOG) { 3830*eb633035STom Caputi /* 3831*eb633035STom Caputi * ZIL blocks store the MAC in the embedded checksum, so the 3832*eb633035STom Caputi * transform must always be applied. 3833*eb633035STom Caputi */ 3834*eb633035STom Caputi zio_crypt_encode_mac_zil(enc_buf, mac); 3835*eb633035STom Caputi zio_push_transform(zio, eabd, psize, psize, NULL); 3836*eb633035STom Caputi } else { 3837*eb633035STom Caputi BP_SET_CRYPT(bp, B_TRUE); 3838*eb633035STom Caputi zio_crypt_encode_params_bp(bp, salt, iv); 3839*eb633035STom Caputi zio_crypt_encode_mac_bp(bp, mac); 3840*eb633035STom Caputi 3841*eb633035STom Caputi if (no_crypt) { 3842*eb633035STom Caputi ASSERT3U(ot, ==, DMU_OT_DNODE); 3843*eb633035STom Caputi abd_free(eabd); 3844*eb633035STom Caputi } else { 3845*eb633035STom Caputi zio_push_transform(zio, eabd, psize, psize, NULL); 3846*eb633035STom Caputi } 3847*eb633035STom Caputi } 3848*eb633035STom Caputi 3849*eb633035STom Caputi return (ZIO_PIPELINE_CONTINUE); 3850*eb633035STom Caputi } 3851*eb633035STom Caputi 3852fa9e4066Sahrens /* 3853fa9e4066Sahrens * ========================================================================== 3854fa9e4066Sahrens * Generate and verify checksums 3855fa9e4066Sahrens * ========================================================================== 3856fa9e4066Sahrens */ 3857e05725b1Sbonwick static int 3858fa9e4066Sahrens zio_checksum_generate(zio_t *zio) 3859fa9e4066Sahrens { 3860fa9e4066Sahrens blkptr_t *bp = zio->io_bp; 3861e14bb325SJeff Bonwick enum zio_checksum checksum; 3862fa9e4066Sahrens 3863e14bb325SJeff Bonwick if (bp == NULL) { 3864e14bb325SJeff Bonwick /* 3865e14bb325SJeff Bonwick * This is zio_write_phys(). 3866e14bb325SJeff Bonwick * We're either generating a label checksum, or none at all. 
3867e14bb325SJeff Bonwick */ 3868e14bb325SJeff Bonwick checksum = zio->io_prop.zp_checksum; 3869e14bb325SJeff Bonwick 3870e14bb325SJeff Bonwick if (checksum == ZIO_CHECKSUM_OFF) 3871e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3872fa9e4066Sahrens 3873e14bb325SJeff Bonwick ASSERT(checksum == ZIO_CHECKSUM_LABEL); 3874e14bb325SJeff Bonwick } else { 3875e14bb325SJeff Bonwick if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 3876e14bb325SJeff Bonwick ASSERT(!IO_IS_ALLOCATING(zio)); 3877e14bb325SJeff Bonwick checksum = ZIO_CHECKSUM_GANG_HEADER; 3878e14bb325SJeff Bonwick } else { 3879e14bb325SJeff Bonwick checksum = BP_GET_CHECKSUM(bp); 3880e14bb325SJeff Bonwick } 3881e14bb325SJeff Bonwick } 3882fa9e4066Sahrens 3883770499e1SDan Kimmel zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size); 3884fa9e4066Sahrens 3885e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3886fa9e4066Sahrens } 3887fa9e4066Sahrens 3888e05725b1Sbonwick static int 3889e14bb325SJeff Bonwick zio_checksum_verify(zio_t *zio) 3890fa9e4066Sahrens { 389122fe2c88SJonathan Adams zio_bad_cksum_t info; 3892e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3893e14bb325SJeff Bonwick int error; 3894fa9e4066Sahrens 3895b24ab676SJeff Bonwick ASSERT(zio->io_vd != NULL); 3896b24ab676SJeff Bonwick 3897e14bb325SJeff Bonwick if (bp == NULL) { 3898e14bb325SJeff Bonwick /* 3899e14bb325SJeff Bonwick * This is zio_read_phys(). 3900e14bb325SJeff Bonwick * We're either verifying a label checksum, or nothing at all. 3901e14bb325SJeff Bonwick */ 3902e14bb325SJeff Bonwick if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 3903e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3904fa9e4066Sahrens 3905e14bb325SJeff Bonwick ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 3906e14bb325SJeff Bonwick } 3907fa9e4066Sahrens 390822fe2c88SJonathan Adams if ((error = zio_checksum_error(zio, &info)) != 0) { 3909e14bb325SJeff Bonwick zio->io_error = error; 3910373dc1cfSMatthew Ahrens if (error == ECKSUM && 3911373dc1cfSMatthew Ahrens !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 391222fe2c88SJonathan Adams zfs_ereport_start_checksum(zio->io_spa, 3913*eb633035STom Caputi zio->io_vd, &zio->io_bookmark, zio, 3914*eb633035STom Caputi zio->io_offset, zio->io_size, NULL, &info); 3915e14bb325SJeff Bonwick } 3916fa9e4066Sahrens } 3917fa9e4066Sahrens 3918e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3919fa9e4066Sahrens } 3920fa9e4066Sahrens 3921fa9e4066Sahrens /* 3922fa9e4066Sahrens * Called by RAID-Z to ensure we don't compute the checksum twice. 3923fa9e4066Sahrens */ 3924fa9e4066Sahrens void 3925fa9e4066Sahrens zio_checksum_verified(zio_t *zio) 3926fa9e4066Sahrens { 3927b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 3928fa9e4066Sahrens } 3929fa9e4066Sahrens 3930fa9e4066Sahrens /* 3931e14bb325SJeff Bonwick * ========================================================================== 3932e14bb325SJeff Bonwick * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 39335d7b4d43SMatthew Ahrens * An error of 0 indicates success. ENXIO indicates whole-device failure, 3934e14bb325SJeff Bonwick * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO 3935e14bb325SJeff Bonwick * indicate errors that are specific to one I/O, and most likely permanent. 3936e14bb325SJeff Bonwick * Any other error is presumed to be worse because we weren't expecting it.
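 * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and * zio_worst_error(EIO, ENOSPC) returns ENOSPC, because an error missing * from the rank table below is treated as worst of all.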
3937e14bb325SJeff Bonwick * ========================================================================== 3938fa9e4066Sahrens */ 3939e14bb325SJeff Bonwick int 3940e14bb325SJeff Bonwick zio_worst_error(int e1, int e2) 3941fa9e4066Sahrens { 3942e14bb325SJeff Bonwick static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 3943e14bb325SJeff Bonwick int r1, r2; 3944e14bb325SJeff Bonwick 3945e14bb325SJeff Bonwick for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 3946e14bb325SJeff Bonwick if (e1 == zio_error_rank[r1]) 3947e14bb325SJeff Bonwick break; 3948e14bb325SJeff Bonwick 3949e14bb325SJeff Bonwick for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 3950e14bb325SJeff Bonwick if (e2 == zio_error_rank[r2]) 3951e14bb325SJeff Bonwick break; 395244cd46caSbillm 3953e14bb325SJeff Bonwick return (r1 > r2 ? e1 : e2); 3954fa9e4066Sahrens } 3955fa9e4066Sahrens 3956fa9e4066Sahrens /* 3957fa9e4066Sahrens * ========================================================================== 3958e14bb325SJeff Bonwick * I/O completion 3959fa9e4066Sahrens * ========================================================================== 3960fa9e4066Sahrens */ 3961e14bb325SJeff Bonwick static int 3962e14bb325SJeff Bonwick zio_ready(zio_t *zio) 3963fa9e4066Sahrens { 3964e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3965a3f829aeSBill Moore zio_t *pio, *pio_next; 39660f7643c7SGeorge Wilson zio_link_t *zl = NULL; 3967fa9e4066Sahrens 3968d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, 3969d6e1c446SGeorge Wilson ZIO_WAIT_READY)) { 3970f5383399SBill Moore return (ZIO_PIPELINE_STOP); 3971d6e1c446SGeorge Wilson } 3972fa9e4066Sahrens 3973f5383399SBill Moore if (zio->io_ready) { 3974e14bb325SJeff Bonwick ASSERT(IO_IS_ALLOCATING(zio)); 397580901aeaSGeorge Wilson ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 397680901aeaSGeorge Wilson (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3977e14bb325SJeff Bonwick ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3978fa9e4066Sahrens 3979e14bb325SJeff Bonwick zio->io_ready(zio); 3980e14bb325SJeff Bonwick } 3981fa9e4066Sahrens 3982e14bb325SJeff Bonwick if (bp != NULL && bp != &zio->io_bp_copy) 3983e14bb325SJeff Bonwick zio->io_bp_copy = *bp; 3984fa9e4066Sahrens 39850f7643c7SGeorge Wilson if (zio->io_error != 0) { 3986e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3987fa9e4066Sahrens 39880f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 39890f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 39900f7643c7SGeorge Wilson ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 3991663207adSDon Brady ASSERT(zio->io_metaslab_class != NULL); 3992663207adSDon Brady 39930f7643c7SGeorge Wilson /* 39940f7643c7SGeorge Wilson * We were unable to allocate anything, unreserve and 39950f7643c7SGeorge Wilson * issue the next I/O to allocate. 
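 * (The slots being unreserved here were taken out in zio_dva_throttle(); * since no DVAs were actually allocated, only the reservation itself * needs to be unwound.)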
39960f7643c7SGeorge Wilson */ 39970f7643c7SGeorge Wilson metaslab_class_throttle_unreserve( 3998663207adSDon Brady zio->io_metaslab_class, zio->io_prop.zp_copies, 3999663207adSDon Brady zio->io_allocator, zio); 4000f78cdc34SPaul Dagnelie zio_allocate_dispatch(zio->io_spa, zio->io_allocator); 40010f7643c7SGeorge Wilson } 40020f7643c7SGeorge Wilson } 40030f7643c7SGeorge Wilson 4004a3f829aeSBill Moore mutex_enter(&zio->io_lock); 4005a3f829aeSBill Moore zio->io_state[ZIO_WAIT_READY] = 1; 40060f7643c7SGeorge Wilson pio = zio_walk_parents(zio, &zl); 4007a3f829aeSBill Moore mutex_exit(&zio->io_lock); 4008a3f829aeSBill Moore 4009a3f829aeSBill Moore /* 4010a3f829aeSBill Moore * As we notify zio's parents, new parents could be added. 4011a3f829aeSBill Moore * New parents go to the head of zio's io_parent_list, however, 4012a3f829aeSBill Moore * so we will (correctly) not notify them. The remainder of zio's 4013a3f829aeSBill Moore * io_parent_list, from 'pio_next' onward, cannot change because 4014a3f829aeSBill Moore * all parents must wait for us to be done before they can be done. 4015a3f829aeSBill Moore */ 4016a3f829aeSBill Moore for (; pio != NULL; pio = pio_next) { 40170f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 4018e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_READY); 4019a3f829aeSBill Moore } 4020fa9e4066Sahrens 4021b24ab676SJeff Bonwick if (zio->io_flags & ZIO_FLAG_NODATA) { 4022b24ab676SJeff Bonwick if (BP_IS_GANG(bp)) { 4023b24ab676SJeff Bonwick zio->io_flags &= ~ZIO_FLAG_NODATA; 4024b24ab676SJeff Bonwick } else { 4025770499e1SDan Kimmel ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 4026b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 4027b24ab676SJeff Bonwick } 4028b24ab676SJeff Bonwick } 4029b24ab676SJeff Bonwick 4030a33cae98STim Haley if (zio_injection_enabled && 4031a33cae98STim Haley zio->io_spa->spa_syncing_txg == zio->io_txg) 4032a33cae98STim Haley zio_handle_ignored_writes(zio); 4033a33cae98STim Haley 4034e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 4035fa9e4066Sahrens } 4036fa9e4066Sahrens 40370f7643c7SGeorge Wilson /* 40380f7643c7SGeorge Wilson * Update the allocation throttle accounting. 
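 * Each throttled allocating write reserved a slot through * metaslab_class_throttle_reserve(); once its vdev child write completes * we return the slot and wake the next queued allocation.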
40390f7643c7SGeorge Wilson */ 40400f7643c7SGeorge Wilson static void 40410f7643c7SGeorge Wilson zio_dva_throttle_done(zio_t *zio) 40420f7643c7SGeorge Wilson { 40430f7643c7SGeorge Wilson zio_t *lio = zio->io_logical; 40440f7643c7SGeorge Wilson zio_t *pio = zio_unique_parent(zio); 40450f7643c7SGeorge Wilson vdev_t *vd = zio->io_vd; 40460f7643c7SGeorge Wilson int flags = METASLAB_ASYNC_ALLOC; 40470f7643c7SGeorge Wilson 40480f7643c7SGeorge Wilson ASSERT3P(zio->io_bp, !=, NULL); 40490f7643c7SGeorge Wilson ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 40500f7643c7SGeorge Wilson ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 40510f7643c7SGeorge Wilson ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 40520f7643c7SGeorge Wilson ASSERT(vd != NULL); 40530f7643c7SGeorge Wilson ASSERT3P(vd, ==, vd->vdev_top); 40540f7643c7SGeorge Wilson ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY))); 40550f7643c7SGeorge Wilson ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 40560f7643c7SGeorge Wilson ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 40570f7643c7SGeorge Wilson ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 40580f7643c7SGeorge Wilson 40590f7643c7SGeorge Wilson /* 40600f7643c7SGeorge Wilson * Parents of gang children can have two flavors -- ones that 40610f7643c7SGeorge Wilson * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 40620f7643c7SGeorge Wilson * and ones that allocated the constituent blocks. The allocation 40630f7643c7SGeorge Wilson * throttle needs to know the allocating parent zio so we must find 40640f7643c7SGeorge Wilson * it here. 40650f7643c7SGeorge Wilson */ 40660f7643c7SGeorge Wilson if (pio->io_child_type == ZIO_CHILD_GANG) { 40670f7643c7SGeorge Wilson /* 40680f7643c7SGeorge Wilson * If our parent is a rewrite gang child then our grandparent 40690f7643c7SGeorge Wilson * would have been the one that performed the allocation. 40700f7643c7SGeorge Wilson */ 40710f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 40720f7643c7SGeorge Wilson pio = zio_unique_parent(pio); 40730f7643c7SGeorge Wilson flags |= METASLAB_GANG_CHILD; 40740f7643c7SGeorge Wilson } 40750f7643c7SGeorge Wilson 40760f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(pio)); 40770f7643c7SGeorge Wilson ASSERT3P(zio, !=, zio->io_logical); 40780f7643c7SGeorge Wilson ASSERT(zio->io_logical != NULL); 40790f7643c7SGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 40800f7643c7SGeorge Wilson ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 4081663207adSDon Brady ASSERT(zio->io_metaslab_class != NULL); 40820f7643c7SGeorge Wilson 40830f7643c7SGeorge Wilson mutex_enter(&pio->io_lock); 4084f78cdc34SPaul Dagnelie metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags, 4085f78cdc34SPaul Dagnelie pio->io_allocator, B_TRUE); 40860f7643c7SGeorge Wilson mutex_exit(&pio->io_lock); 40870f7643c7SGeorge Wilson 4088663207adSDon Brady metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, 4089663207adSDon Brady pio->io_allocator, pio); 40900f7643c7SGeorge Wilson 40910f7643c7SGeorge Wilson /* 40920f7643c7SGeorge Wilson * Call into the pipeline to see if there is more work that 40930f7643c7SGeorge Wilson * needs to be done. If there is work to be done it will be 40940f7643c7SGeorge Wilson * dispatched to another taskq thread. 
40950f7643c7SGeorge Wilson */ 4096f78cdc34SPaul Dagnelie zio_allocate_dispatch(zio->io_spa, pio->io_allocator); 40970f7643c7SGeorge Wilson } 40980f7643c7SGeorge Wilson 4099e14bb325SJeff Bonwick static int 4100e14bb325SJeff Bonwick zio_done(zio_t *zio) 4101d63d470bSgw { 4102e14bb325SJeff Bonwick spa_t *spa = zio->io_spa; 4103e14bb325SJeff Bonwick zio_t *lio = zio->io_logical; 4104e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 4105e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd; 4106e14bb325SJeff Bonwick uint64_t psize = zio->io_size; 4107a3f829aeSBill Moore zio_t *pio, *pio_next; 41080f7643c7SGeorge Wilson zio_link_t *zl = NULL; 4109d63d470bSgw 4110e14bb325SJeff Bonwick /* 4111f5383399SBill Moore * If our children haven't all completed, 4112e14bb325SJeff Bonwick * wait for them and then repeat this pipeline stage. 4113e14bb325SJeff Bonwick */ 4114d6e1c446SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) { 4115e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 4116d6e1c446SGeorge Wilson } 4117d63d470bSgw 41180f7643c7SGeorge Wilson /* 41190f7643c7SGeorge Wilson * If the allocation throttle is enabled, then update the accounting. 41200f7643c7SGeorge Wilson * We only track child I/Os that are part of an allocating async 41210f7643c7SGeorge Wilson * write. We must do this since the allocation is performed 41220f7643c7SGeorge Wilson * by the logical I/O but the actual write is done by child I/Os. 41230f7643c7SGeorge Wilson */ 41240f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 41250f7643c7SGeorge Wilson zio->io_child_type == ZIO_CHILD_VDEV) { 4126663207adSDon Brady ASSERT(zio->io_metaslab_class != NULL); 4127663207adSDon Brady ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled); 41280f7643c7SGeorge Wilson zio_dva_throttle_done(zio); 41290f7643c7SGeorge Wilson } 41300f7643c7SGeorge Wilson 41310f7643c7SGeorge Wilson /* 41320f7643c7SGeorge Wilson * If the allocation throttle is enabled, verify that 41330f7643c7SGeorge Wilson * we have decremented the refcounts for every I/O that was throttled. 
41340f7643c7SGeorge Wilson */ 41350f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 41360f7643c7SGeorge Wilson ASSERT(zio->io_type == ZIO_TYPE_WRITE); 41370f7643c7SGeorge Wilson ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 41380f7643c7SGeorge Wilson ASSERT(bp != NULL); 4139663207adSDon Brady 4140f78cdc34SPaul Dagnelie metaslab_group_alloc_verify(spa, zio->io_bp, zio, 4141f78cdc34SPaul Dagnelie zio->io_allocator); 4142e914ace2STim Schumacher VERIFY(zfs_refcount_not_held( 4143663207adSDon Brady &zio->io_metaslab_class->mc_alloc_slots[zio->io_allocator], 4144663207adSDon Brady zio)); 41450f7643c7SGeorge Wilson } 41460f7643c7SGeorge Wilson 4147e14bb325SJeff Bonwick for (int c = 0; c < ZIO_CHILD_TYPES; c++) 4148e14bb325SJeff Bonwick for (int w = 0; w < ZIO_WAIT_TYPES; w++) 4149e14bb325SJeff Bonwick ASSERT(zio->io_children[c][w] == 0); 4150e14bb325SJeff Bonwick 41515d7b4d43SMatthew Ahrens if (bp != NULL && !BP_IS_EMBEDDED(bp)) { 4152e14bb325SJeff Bonwick ASSERT(bp->blk_pad[0] == 0); 4153e14bb325SJeff Bonwick ASSERT(bp->blk_pad[1] == 0); 4154e14bb325SJeff Bonwick ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 4155a3f829aeSBill Moore (bp == zio_unique_parent(zio)->io_bp)); 4156e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 4157b24ab676SJeff Bonwick zio->io_bp_override == NULL && 4158e14bb325SJeff Bonwick !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 4159b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 4160e14bb325SJeff Bonwick ASSERT(BP_COUNT_GANG(bp) == 0 || 4161e14bb325SJeff Bonwick (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 4162e14bb325SJeff Bonwick } 416380901aeaSGeorge Wilson if (zio->io_flags & ZIO_FLAG_NOPWRITE) 416480901aeaSGeorge Wilson VERIFY(BP_EQUAL(bp, &zio->io_bp_orig)); 4165e14bb325SJeff Bonwick } 4166fa9e4066Sahrens 4167e14bb325SJeff Bonwick /* 4168b24ab676SJeff Bonwick * If there were child vdev/gang/ddt errors, they apply to us now. 4169e14bb325SJeff Bonwick */ 4170e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 4171e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 4172b24ab676SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 4173b24ab676SJeff Bonwick 4174b24ab676SJeff Bonwick /* 4175b24ab676SJeff Bonwick * If the I/O on the transformed data was successful, generate any 4176b24ab676SJeff Bonwick * checksum reports now while we still have the transformed data. 
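 * Once zio_pop_transforms() runs below, only the logical copy of the * data survives, so this is the last point at which the bytes as written * to disk are available.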
4177b24ab676SJeff Bonwick */ 4178b24ab676SJeff Bonwick if (zio->io_error == 0) { 4179b24ab676SJeff Bonwick while (zio->io_cksum_report != NULL) { 4180b24ab676SJeff Bonwick zio_cksum_report_t *zcr = zio->io_cksum_report; 4181b24ab676SJeff Bonwick uint64_t align = zcr->zcr_align; 4182b24ab676SJeff Bonwick uint64_t asize = P2ROUNDUP(psize, align); 4183770499e1SDan Kimmel abd_t *adata = zio->io_abd; 4184b24ab676SJeff Bonwick 4185b24ab676SJeff Bonwick if (asize != psize) { 4186*eb633035STom Caputi adata = abd_alloc(asize, B_TRUE); 4187770499e1SDan Kimmel abd_copy(adata, zio->io_abd, psize); 4188770499e1SDan Kimmel abd_zero_off(adata, psize, asize - psize); 4189b24ab676SJeff Bonwick } 4190b24ab676SJeff Bonwick 4191b24ab676SJeff Bonwick zio->io_cksum_report = zcr->zcr_next; 4192b24ab676SJeff Bonwick zcr->zcr_next = NULL; 4193*eb633035STom Caputi zcr->zcr_finish(zcr, adata); 4194b24ab676SJeff Bonwick zfs_ereport_free_checksum(zcr); 4195b24ab676SJeff Bonwick 4196b24ab676SJeff Bonwick if (asize != psize) 4197770499e1SDan Kimmel abd_free(adata); 4198b24ab676SJeff Bonwick } 4199b24ab676SJeff Bonwick } 4200e14bb325SJeff Bonwick 4201e14bb325SJeff Bonwick zio_pop_transforms(zio); /* note: may set zio->io_error */ 4202e14bb325SJeff Bonwick 4203e14bb325SJeff Bonwick vdev_stat_update(zio, psize); 4204e14bb325SJeff Bonwick 4205e14bb325SJeff Bonwick if (zio->io_error) { 4206e14bb325SJeff Bonwick /* 4207e14bb325SJeff Bonwick * If this I/O is attached to a particular vdev, 4208e14bb325SJeff Bonwick * generate an error message describing the I/O failure 4209e14bb325SJeff Bonwick * at the block level. We ignore these errors if the 4210e14bb325SJeff Bonwick * device is currently unavailable. 4211e14bb325SJeff Bonwick */ 4212e14bb325SJeff Bonwick if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 4213*eb633035STom Caputi zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, 4214*eb633035STom Caputi &zio->io_bookmark, zio, 0, 0); 4215e14bb325SJeff Bonwick 42168f18d1faSGeorge Wilson if ((zio->io_error == EIO || !(zio->io_flags & 42178f18d1faSGeorge Wilson (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 42188f18d1faSGeorge Wilson zio == lio) { 4219e14bb325SJeff Bonwick /* 4220e14bb325SJeff Bonwick * For logical I/O requests, tell the SPA to log the 4221e14bb325SJeff Bonwick * error and generate a logical data ereport. 4222e14bb325SJeff Bonwick */ 4223*eb633035STom Caputi spa_log_error(spa, &zio->io_bookmark); 4224*eb633035STom Caputi zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, 4225*eb633035STom Caputi &zio->io_bookmark, zio, 0, 0); 4226e14bb325SJeff Bonwick } 4227e14bb325SJeff Bonwick } 4228fa9e4066Sahrens 4229e14bb325SJeff Bonwick if (zio->io_error && zio == lio) { 4230e14bb325SJeff Bonwick /* 4231e14bb325SJeff Bonwick * Determine whether zio should be reexecuted. This will 4232e14bb325SJeff Bonwick * propagate all the way to the root via zio_notify_parent(). 
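 * (ZIO_REEXECUTE_NOW retries the I/O immediately, while * ZIO_REEXECUTE_SUSPEND parks it until the pool is resumed; see the * suspend logic further down.)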
4233e14bb325SJeff Bonwick */ 4234e14bb325SJeff Bonwick ASSERT(vd == NULL && bp != NULL); 4235b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4236e14bb325SJeff Bonwick 4237b24ab676SJeff Bonwick if (IO_IS_ALLOCATING(zio) && 4238b24ab676SJeff Bonwick !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 4239e14bb325SJeff Bonwick if (zio->io_error != ENOSPC) 4240e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_NOW; 4241e14bb325SJeff Bonwick else 4242e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4243b24ab676SJeff Bonwick } 4244e14bb325SJeff Bonwick 4245e14bb325SJeff Bonwick if ((zio->io_type == ZIO_TYPE_READ || 4246e14bb325SJeff Bonwick zio->io_type == ZIO_TYPE_FREE) && 424744ecc532SGeorge Wilson !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 4248e14bb325SJeff Bonwick zio->io_error == ENXIO && 4249b16da2e2SGeorge Wilson spa_load_state(spa) == SPA_LOAD_NONE && 4250e14bb325SJeff Bonwick spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 4251e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 4252e14bb325SJeff Bonwick 4253e14bb325SJeff Bonwick if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 4254e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 425522fe2c88SJonathan Adams 425622fe2c88SJonathan Adams /* 425722fe2c88SJonathan Adams * Here is a possibly good place to attempt to do 425822fe2c88SJonathan Adams * either combinatorial reconstruction or error correction 425922fe2c88SJonathan Adams * based on checksums. It also might be a good place 426022fe2c88SJonathan Adams * to send out preliminary ereports before we suspend 426122fe2c88SJonathan Adams * processing. 426222fe2c88SJonathan Adams */ 4263d63d470bSgw } 4264d63d470bSgw 426567bd71c6Sperrin /* 4266e14bb325SJeff Bonwick * If there were logical child errors, they apply to us now. 4267e14bb325SJeff Bonwick * We defer this until now to avoid conflating logical child 4268e14bb325SJeff Bonwick * errors with errors that happened to the zio itself when 4269e14bb325SJeff Bonwick * updating vdev stats and reporting FMA events above. 427067bd71c6Sperrin */ 4271e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 42728654d025Sperrin 4273b24ab676SJeff Bonwick if ((zio->io_error || zio->io_reexecute) && 4274b24ab676SJeff Bonwick IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 427580901aeaSGeorge Wilson !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 4276f5383399SBill Moore zio_dva_unallocate(zio, zio->io_gang_tree, bp); 4277f5383399SBill Moore 4278f5383399SBill Moore zio_gang_tree_free(&zio->io_gang_tree); 4279f5383399SBill Moore 428033a372edSGeorge Wilson /* 428133a372edSGeorge Wilson * Godfather I/Os should never suspend. 428233a372edSGeorge Wilson */ 428333a372edSGeorge Wilson if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 428433a372edSGeorge Wilson (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 428533a372edSGeorge Wilson zio->io_reexecute = 0; 428633a372edSGeorge Wilson 428733a372edSGeorge Wilson if (zio->io_reexecute) { 4288e14bb325SJeff Bonwick /* 4289e14bb325SJeff Bonwick * This is a logical I/O that wants to reexecute. 4290e14bb325SJeff Bonwick * 4291e14bb325SJeff Bonwick * Reexecute is top-down. When an i/o fails, if it's not 4292e14bb325SJeff Bonwick * the root, it simply notifies its parent and sticks around. 4293e14bb325SJeff Bonwick * The parent, seeing that it still has children in zio_done(), 4294e14bb325SJeff Bonwick * does the same. This percolates all the way up to the root. 
4295e14bb325SJeff Bonwick * The root i/o will reexecute or suspend the entire tree. 4296e14bb325SJeff Bonwick * 4297e14bb325SJeff Bonwick * This approach ensures that zio_reexecute() honors 4298e14bb325SJeff Bonwick * all the original i/o dependency relationships, e.g. 4299e14bb325SJeff Bonwick * parents not executing until children are ready. 4300e14bb325SJeff Bonwick */ 4301e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 4302fa9e4066Sahrens 4303f5383399SBill Moore zio->io_gang_leader = NULL; 4304e14bb325SJeff Bonwick 4305a3f829aeSBill Moore mutex_enter(&zio->io_lock); 4306a3f829aeSBill Moore zio->io_state[ZIO_WAIT_DONE] = 1; 4307a3f829aeSBill Moore mutex_exit(&zio->io_lock); 4308a3f829aeSBill Moore 430954d692b7SGeorge Wilson /* 431054d692b7SGeorge Wilson * "The Godfather" I/O monitors its children but is 431154d692b7SGeorge Wilson * not a true parent to them. It will track them through 431254d692b7SGeorge Wilson * the pipeline but severs its ties whenever they get into 431354d692b7SGeorge Wilson * trouble (e.g. suspended). This allows "The Godfather" 431454d692b7SGeorge Wilson * I/O to return status without blocking. 431554d692b7SGeorge Wilson */ 43160f7643c7SGeorge Wilson zl = NULL; 43170f7643c7SGeorge Wilson for (pio = zio_walk_parents(zio, &zl); pio != NULL; 43180f7643c7SGeorge Wilson pio = pio_next) { 43190f7643c7SGeorge Wilson zio_link_t *remove_zl = zl; 43200f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 432154d692b7SGeorge Wilson 432254d692b7SGeorge Wilson if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 432354d692b7SGeorge Wilson (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 43240f7643c7SGeorge Wilson zio_remove_child(pio, zio, remove_zl); 432554d692b7SGeorge Wilson zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 432654d692b7SGeorge Wilson } 432754d692b7SGeorge Wilson } 432854d692b7SGeorge Wilson 4329a3f829aeSBill Moore if ((pio = zio_unique_parent(zio)) != NULL) { 4330e14bb325SJeff Bonwick /* 4331e14bb325SJeff Bonwick * We're not a root i/o, so there's nothing to do 4332e14bb325SJeff Bonwick * but notify our parent. Don't propagate errors 4333e14bb325SJeff Bonwick * upward since we haven't permanently failed yet. 4334e14bb325SJeff Bonwick */ 433533a372edSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 4336e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 4337e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 4338e14bb325SJeff Bonwick } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 4339e14bb325SJeff Bonwick /* 4340e14bb325SJeff Bonwick * We'd fail again if we reexecuted now, so suspend 4341e14bb325SJeff Bonwick * until conditions improve (e.g. device comes online). 4342e14bb325SJeff Bonwick */ 4343e0f1c0afSOlaf Faaland zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR); 4344e14bb325SJeff Bonwick } else { 4345e14bb325SJeff Bonwick /* 4346e14bb325SJeff Bonwick * Reexecution is potentially a huge amount of work. 4347e14bb325SJeff Bonwick * Hand it off to the otherwise-unused claim taskq. 
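 * (Claim zios are normally issued only while a pool is being opened, * so in steady state those worker threads have nothing else to do.)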
4348e14bb325SJeff Bonwick */ 43495aeb9474SGarrett D'Amore ASSERT(zio->io_tqent.tqent_next == NULL); 4350ec94d322SAdam Leventhal spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM, 4351ec94d322SAdam Leventhal ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio, 4352ec94d322SAdam Leventhal 0, &zio->io_tqent); 4353e14bb325SJeff Bonwick } 4354e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 4355fa9e4066Sahrens } 4356fa9e4066Sahrens 4357b24ab676SJeff Bonwick ASSERT(zio->io_child_count == 0); 435833a372edSGeorge Wilson ASSERT(zio->io_reexecute == 0); 4359e14bb325SJeff Bonwick ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 4360fa9e4066Sahrens 4361b24ab676SJeff Bonwick /* 4362b24ab676SJeff Bonwick * Report any checksum errors, since the I/O is complete. 4363b24ab676SJeff Bonwick */ 436422fe2c88SJonathan Adams while (zio->io_cksum_report != NULL) { 4365b24ab676SJeff Bonwick zio_cksum_report_t *zcr = zio->io_cksum_report; 4366b24ab676SJeff Bonwick zio->io_cksum_report = zcr->zcr_next; 4367b24ab676SJeff Bonwick zcr->zcr_next = NULL; 4368b24ab676SJeff Bonwick zcr->zcr_finish(zcr, NULL); 4369b24ab676SJeff Bonwick zfs_ereport_free_checksum(zcr); 437022fe2c88SJonathan Adams } 437122fe2c88SJonathan Adams 4372a3f829aeSBill Moore /* 4373a3f829aeSBill Moore * It is the responsibility of the done callback to ensure that this 4374a3f829aeSBill Moore * particular zio is no longer discoverable for adoption, and as 4375a3f829aeSBill Moore * such, cannot acquire any new parents. 4376a3f829aeSBill Moore */ 4377e14bb325SJeff Bonwick if (zio->io_done) 4378e14bb325SJeff Bonwick zio->io_done(zio); 4379fa9e4066Sahrens 4380a3f829aeSBill Moore mutex_enter(&zio->io_lock); 4381a3f829aeSBill Moore zio->io_state[ZIO_WAIT_DONE] = 1; 4382a3f829aeSBill Moore mutex_exit(&zio->io_lock); 4383fa9e4066Sahrens 43840f7643c7SGeorge Wilson zl = NULL; 43850f7643c7SGeorge Wilson for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 43860f7643c7SGeorge Wilson zio_link_t *remove_zl = zl; 43870f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 43880f7643c7SGeorge Wilson zio_remove_child(pio, zio, remove_zl); 4389e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 4390e14bb325SJeff Bonwick } 4391fa9e4066Sahrens 4392e14bb325SJeff Bonwick if (zio->io_waiter != NULL) { 4393e14bb325SJeff Bonwick mutex_enter(&zio->io_lock); 4394e14bb325SJeff Bonwick zio->io_executor = NULL; 4395e14bb325SJeff Bonwick cv_broadcast(&zio->io_cv); 4396e14bb325SJeff Bonwick mutex_exit(&zio->io_lock); 4397e14bb325SJeff Bonwick } else { 4398e14bb325SJeff Bonwick zio_destroy(zio); 4399e14bb325SJeff Bonwick } 4400fa9e4066Sahrens 4401e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 4402fa9e4066Sahrens } 440346341222Sperrin 440446341222Sperrin /* 4405e14bb325SJeff Bonwick * ========================================================================== 4406e14bb325SJeff Bonwick * I/O pipeline definition 4407e14bb325SJeff Bonwick * ========================================================================== 440846341222Sperrin */ 4409b24ab676SJeff Bonwick static zio_pipe_stage_t *zio_pipeline[] = { 4410e14bb325SJeff Bonwick NULL, 4411e14bb325SJeff Bonwick zio_read_bp_init, 44120f7643c7SGeorge Wilson zio_write_bp_init, 4413b24ab676SJeff Bonwick zio_free_bp_init, 4414b24ab676SJeff Bonwick zio_issue_async, 44150f7643c7SGeorge Wilson zio_write_compress, 4416*eb633035STom Caputi zio_encrypt, 4417e14bb325SJeff Bonwick zio_checksum_generate, 441880901aeaSGeorge Wilson zio_nop_write, 4419b24ab676SJeff Bonwick zio_ddt_read_start, 
4420b24ab676SJeff Bonwick zio_ddt_read_done, 4421b24ab676SJeff Bonwick zio_ddt_write, 4422b24ab676SJeff Bonwick zio_ddt_free, 4423e14bb325SJeff Bonwick zio_gang_assemble, 4424e14bb325SJeff Bonwick zio_gang_issue, 44250f7643c7SGeorge Wilson zio_dva_throttle, 4426e14bb325SJeff Bonwick zio_dva_allocate, 4427e14bb325SJeff Bonwick zio_dva_free, 4428e14bb325SJeff Bonwick zio_dva_claim, 4429e14bb325SJeff Bonwick zio_ready, 4430e14bb325SJeff Bonwick zio_vdev_io_start, 4431e14bb325SJeff Bonwick zio_vdev_io_done, 4432e14bb325SJeff Bonwick zio_vdev_io_assess, 4433e14bb325SJeff Bonwick zio_checksum_verify, 4434e14bb325SJeff Bonwick zio_done 4435e14bb325SJeff Bonwick }; 4436ad135b5dSChristopher Siden 4437ad135b5dSChristopher Siden 4438ad135b5dSChristopher Siden 4439ad135b5dSChristopher Siden 4440a2cdcdd2SPaul Dagnelie /* 4441a2cdcdd2SPaul Dagnelie * Compare two zbookmark_phys_t's to see which we would reach first in a 4442a2cdcdd2SPaul Dagnelie * pre-order traversal of the object tree. 4443a2cdcdd2SPaul Dagnelie * 4444a2cdcdd2SPaul Dagnelie * This is simple in every case aside from the meta-dnode object. For all other 4445a2cdcdd2SPaul Dagnelie * objects, we traverse them in order (object 1 before object 2, and so on). 4446a2cdcdd2SPaul Dagnelie * However, all of these objects are traversed while traversing object 0, since 4447a2cdcdd2SPaul Dagnelie * the data it points to is the list of objects. Thus, we need to convert to a 4448a2cdcdd2SPaul Dagnelie * canonical representation so we can compare meta-dnode bookmarks to 4449a2cdcdd2SPaul Dagnelie * non-meta-dnode bookmarks. 4450a2cdcdd2SPaul Dagnelie * 4451a2cdcdd2SPaul Dagnelie * We do this by calculating "equivalents" for each field of the zbookmark. 4452a2cdcdd2SPaul Dagnelie * zbookmarks outside of the meta-dnode use their own object and level, and 4453a2cdcdd2SPaul Dagnelie * calculate the level 0 equivalent (the first L0 blkid that is contained in the 4454a2cdcdd2SPaul Dagnelie * blocks this bookmark refers to) by multiplying their blkid by their span 4455a2cdcdd2SPaul Dagnelie * (the number of L0 blocks contained within one block at their level). 4456a2cdcdd2SPaul Dagnelie * zbookmarks inside the meta-dnode calculate their object equivalent 4457a2cdcdd2SPaul Dagnelie * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use 4458a2cdcdd2SPaul Dagnelie * level + 1<<31 (any value larger than a level could ever be) for their level. 4459a2cdcdd2SPaul Dagnelie * This causes them to always compare before a bookmark in their object 4460a2cdcdd2SPaul Dagnelie * equivalent, compare appropriately to bookmarks in other objects, and to 4461a2cdcdd2SPaul Dagnelie * compare appropriately to other bookmarks in the meta-dnode. 4462a2cdcdd2SPaul Dagnelie */ 4463a2cdcdd2SPaul Dagnelie int 4464a2cdcdd2SPaul Dagnelie zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2, 4465a2cdcdd2SPaul Dagnelie const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2) 4466a2cdcdd2SPaul Dagnelie { 4467a2cdcdd2SPaul Dagnelie /* 4468a2cdcdd2SPaul Dagnelie * These variables represent the "equivalent" values for the zbookmark, 4469a2cdcdd2SPaul Dagnelie * after converting zbookmarks inside the meta dnode to their 4470a2cdcdd2SPaul Dagnelie * normal-object equivalents. 
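 * For example, with 16K dnode-file blocks (dbss = 32, i.e. 32 dnodes per * block), a meta-dnode bookmark at level 0, blkid 10 becomes object * equivalent 10 * 32 = 320, L0 equivalent 0, and level 1<<31, so it sorts * after every bookmark in object 319 and before every bookmark in 320.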

/*
 * This function answers the following question: given that last_block is
 * the place our traversal stopped last time, is it guaranteed that we have
 * visited every node under subtree_root?  The raw output of
 * zbookmark_compare is not enough to decide this.  Instead we pass in a
 * modified version of subtree_root: by incrementing its block id and then
 * checking whether last_block is at or past that point, we can tell
 * whether having visited last_block implies that all of subtree_root's
 * children have been visited.
 */
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
    const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
	zbookmark_phys_t mod_zb = *subtree_root;
	mod_zb.zb_blkid++;
	ASSERT(last_block->zb_level == 0);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	/*
	 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
	 * data block size in sectors, because that variable is only used if
	 * the bookmark refers to a block in the meta-dnode.  Since we don't
	 * know without examining it what object it refers to, and there's no
	 * harm in passing in this value in other cases, we always pass it in.
	 *
	 * We pass in 0 for the indirect block size shift because zb2 must be
	 * level 0.  The indirect block size is only used to calculate the
	 * span of the bookmark, but since the bookmark must be level 0, the
	 * span is always 1, so the math works out.
	 *
	 * If you change how the zbookmark_compare code works, be sure to
	 * verify that this code still works afterwards.
	 */
	return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
	    1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
	    last_block) <= 0);
}
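
/*
 * Editor's note: a hypothetical resume check, not part of the original
 * file, of the kind a resumable traversal (e.g. a scrub) might perform.
 * Assume the object's dn_indblkshift is 17 (128K indirects), so level-1
 * blkid 4 covers L0 blkids [4096, 5119].  Incrementing the blkid yields
 * the first L0 block after the subtree (5120); because the previous pass
 * already reached L0 blkid 6000 >= 5120, the subtree may be pruned.  All
 * names and values here are made up for illustration.
 */
#if 0	/* illustrative sketch only */
static boolean_t
traversal_can_prune(const dnode_phys_t *dnp)
{
	zbookmark_phys_t subtree = { 0, 7, 1, 4 };	/* candidate subtree */
	zbookmark_phys_t last = { 0, 7, 0, 6000 };	/* where we stopped */

	/* B_TRUE: every block under 'subtree' was visited last time. */
	return (zbookmark_subtree_completed(dnp, &subtree, &last));
}
#endif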