1fa9e4066Sahrens /* 2fa9e4066Sahrens * CDDL HEADER START 3fa9e4066Sahrens * 4fa9e4066Sahrens * The contents of this file are subject to the terms of the 5ea8dc4b6Seschrock * Common Development and Distribution License (the "License"). 6ea8dc4b6Seschrock * You may not use this file except in compliance with the License. 7fa9e4066Sahrens * 8fa9e4066Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9fa9e4066Sahrens * or http://www.opensolaris.org/os/licensing. 10fa9e4066Sahrens * See the License for the specific language governing permissions 11fa9e4066Sahrens * and limitations under the License. 12fa9e4066Sahrens * 13fa9e4066Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14fa9e4066Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15fa9e4066Sahrens * If applicable, add the following below this CDDL HEADER, with the 16fa9e4066Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17fa9e4066Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18fa9e4066Sahrens * 19fa9e4066Sahrens * CDDL HEADER END 20fa9e4066Sahrens */ 21fa9e4066Sahrens /* 223f9d6ad7SLin Ling * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 2394c2d0ebSMatthew Ahrens * Copyright (c) 2011, 2017 by Delphix. All rights reserved. 245aeb9474SGarrett D'Amore * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved. 25c3d26abcSMatthew Ahrens * Copyright (c) 2014 Integros [integros.com] 26fa9e4066Sahrens */ 27fa9e4066Sahrens 28de710d24SJosef 'Jeff' Sipek #include <sys/sysmacros.h> 29fa9e4066Sahrens #include <sys/zfs_context.h> 30ea8dc4b6Seschrock #include <sys/fm/fs/zfs.h> 31fa9e4066Sahrens #include <sys/spa.h> 32fa9e4066Sahrens #include <sys/txg.h> 33fa9e4066Sahrens #include <sys/spa_impl.h> 34fa9e4066Sahrens #include <sys/vdev_impl.h> 35fa9e4066Sahrens #include <sys/zio_impl.h> 36fa9e4066Sahrens #include <sys/zio_compress.h> 37fa9e4066Sahrens #include <sys/zio_checksum.h> 38b24ab676SJeff Bonwick #include <sys/dmu_objset.h> 39b24ab676SJeff Bonwick #include <sys/arc.h> 40b24ab676SJeff Bonwick #include <sys/ddt.h> 415d7b4d43SMatthew Ahrens #include <sys/blkptr.h> 4243466aaeSMax Grossman #include <sys/zfeature.h> 430f7643c7SGeorge Wilson #include <sys/metaslab_impl.h> 44770499e1SDan Kimmel #include <sys/abd.h> 45fa9e4066Sahrens 46fa9e4066Sahrens /* 47fa9e4066Sahrens * ========================================================================== 48fa9e4066Sahrens * I/O type descriptions 49fa9e4066Sahrens * ========================================================================== 50fa9e4066Sahrens */ 5169962b56SMatthew Ahrens const char *zio_type_name[ZIO_TYPES] = { 5280eb36f2SGeorge Wilson "zio_null", "zio_read", "zio_write", "zio_free", "zio_claim", 5380eb36f2SGeorge Wilson "zio_ioctl" 5480eb36f2SGeorge Wilson }; 55fa9e4066Sahrens 560f7643c7SGeorge Wilson boolean_t zio_dva_throttle_enabled = B_TRUE; 570f7643c7SGeorge Wilson 58fa9e4066Sahrens /* 59fa9e4066Sahrens * ========================================================================== 60fa9e4066Sahrens * I/O kmem caches 61fa9e4066Sahrens * ========================================================================== 62fa9e4066Sahrens */ 63ccae0b50Seschrock kmem_cache_t *zio_cache; 64a3f829aeSBill Moore kmem_cache_t *zio_link_cache; 65fa9e4066Sahrens kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 66ad23a2dbSjohansen kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; 67ad23a2dbSjohansen 68ad23a2dbSjohansen #ifdef _KERNEL 69ad23a2dbSjohansen extern vmem_t *zio_alloc_arena; 70ad23a2dbSjohansen #endif 71fa9e4066Sahrens 72738f37bcSGeorge Wilson #define ZIO_PIPELINE_CONTINUE 0x100 73738f37bcSGeorge Wilson #define ZIO_PIPELINE_STOP 0x101 74738f37bcSGeorge Wilson 75a2cdcdd2SPaul Dagnelie #define BP_SPANB(indblkshift, level) \ 76a2cdcdd2SPaul Dagnelie (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT))) 77a2cdcdd2SPaul Dagnelie #define COMPARE_META_LEVEL 0x80000000ul 7801f55e48SGeorge Wilson /* 7901f55e48SGeorge Wilson * The following actions directly effect the spa's sync-to-convergence logic. 8001f55e48SGeorge Wilson * The values below define the sync pass when we start performing the action. 8101f55e48SGeorge Wilson * Care should be taken when changing these values as they directly impact 8201f55e48SGeorge Wilson * spa_sync() performance. Tuning these values may introduce subtle performance 8301f55e48SGeorge Wilson * pathologies and should only be done in the context of performance analysis. 8401f55e48SGeorge Wilson * These tunables will eventually be removed and replaced with #defines once 8501f55e48SGeorge Wilson * enough analysis has been done to determine optimal values. 8601f55e48SGeorge Wilson * 8701f55e48SGeorge Wilson * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that 8801f55e48SGeorge Wilson * regular blocks are not deferred. 8901f55e48SGeorge Wilson */ 9001f55e48SGeorge Wilson int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */ 9101f55e48SGeorge Wilson int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */ 9201f55e48SGeorge Wilson int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */ 9301f55e48SGeorge Wilson 940a4e9518Sgw /* 95e14bb325SJeff Bonwick * An allocating zio is one that either currently has the DVA allocate 96e14bb325SJeff Bonwick * stage set or will have it later in its lifetime. 970a4e9518Sgw */ 98b24ab676SJeff Bonwick #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE) 99b24ab676SJeff Bonwick 10035a5a358SJonathan Adams boolean_t zio_requeue_io_start_cut_in_line = B_TRUE; 10135a5a358SJonathan Adams 102b24ab676SJeff Bonwick #ifdef ZFS_DEBUG 103b24ab676SJeff Bonwick int zio_buf_debug_limit = 16384; 104b24ab676SJeff Bonwick #else 105b24ab676SJeff Bonwick int zio_buf_debug_limit = 0; 106b24ab676SJeff Bonwick #endif 1070a4e9518Sgw 1080f7643c7SGeorge Wilson static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t); 1090f7643c7SGeorge Wilson 110fa9e4066Sahrens void 111fa9e4066Sahrens zio_init(void) 112fa9e4066Sahrens { 113fa9e4066Sahrens size_t c; 114ad23a2dbSjohansen vmem_t *data_alloc_arena = NULL; 115ad23a2dbSjohansen 116ad23a2dbSjohansen #ifdef _KERNEL 117ad23a2dbSjohansen data_alloc_arena = zio_alloc_arena; 118ad23a2dbSjohansen #endif 119a3f829aeSBill Moore zio_cache = kmem_cache_create("zio_cache", 120a3f829aeSBill Moore sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 121a3f829aeSBill Moore zio_link_cache = kmem_cache_create("zio_link_cache", 122a3f829aeSBill Moore sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 123ccae0b50Seschrock 124fa9e4066Sahrens /* 125fa9e4066Sahrens * For small buffers, we want a cache for each multiple of 126b5152584SMatthew Ahrens * SPA_MINBLOCKSIZE. For larger buffers, we want a cache 127b5152584SMatthew Ahrens * for each quarter-power of 2. 128fa9e4066Sahrens */ 129fa9e4066Sahrens for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 130fa9e4066Sahrens size_t size = (c + 1) << SPA_MINBLOCKSHIFT; 131fa9e4066Sahrens size_t p2 = size; 132fa9e4066Sahrens size_t align = 0; 133e291592aSJonathan Adams size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0; 134fa9e4066Sahrens 135de710d24SJosef 'Jeff' Sipek while (!ISP2(p2)) 136fa9e4066Sahrens p2 &= p2 - 1; 137fa9e4066Sahrens 138cd1c8b85SMatthew Ahrens #ifndef _KERNEL 139cd1c8b85SMatthew Ahrens /* 140cd1c8b85SMatthew Ahrens * If we are using watchpoints, put each buffer on its own page, 141cd1c8b85SMatthew Ahrens * to eliminate the performance overhead of trapping to the 142cd1c8b85SMatthew Ahrens * kernel when modifying a non-watched buffer that shares the 143cd1c8b85SMatthew Ahrens * page with a watched buffer. 144cd1c8b85SMatthew Ahrens */ 145cd1c8b85SMatthew Ahrens if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE)) 146cd1c8b85SMatthew Ahrens continue; 147cd1c8b85SMatthew Ahrens #endif 148fa9e4066Sahrens if (size <= 4 * SPA_MINBLOCKSIZE) { 149fa9e4066Sahrens align = SPA_MINBLOCKSIZE; 150cd1c8b85SMatthew Ahrens } else if (IS_P2ALIGNED(size, p2 >> 2)) { 151b5152584SMatthew Ahrens align = MIN(p2 >> 2, PAGESIZE); 152fa9e4066Sahrens } 153fa9e4066Sahrens 154fa9e4066Sahrens if (align != 0) { 155ad23a2dbSjohansen char name[36]; 1565ad82045Snd (void) sprintf(name, "zio_buf_%lu", (ulong_t)size); 157fa9e4066Sahrens zio_buf_cache[c] = kmem_cache_create(name, size, 158e291592aSJonathan Adams align, NULL, NULL, NULL, NULL, NULL, cflags); 159ad23a2dbSjohansen 160e291592aSJonathan Adams /* 161e291592aSJonathan Adams * Since zio_data bufs do not appear in crash dumps, we 162e291592aSJonathan Adams * pass KMC_NOTOUCH so that no allocator metadata is 163e291592aSJonathan Adams * stored with the buffers. 164e291592aSJonathan Adams */ 165ad23a2dbSjohansen (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size); 166ad23a2dbSjohansen zio_data_buf_cache[c] = kmem_cache_create(name, size, 167ad23a2dbSjohansen align, NULL, NULL, NULL, NULL, data_alloc_arena, 168e291592aSJonathan Adams cflags | KMC_NOTOUCH); 169fa9e4066Sahrens } 170fa9e4066Sahrens } 171fa9e4066Sahrens 172fa9e4066Sahrens while (--c != 0) { 173fa9e4066Sahrens ASSERT(zio_buf_cache[c] != NULL); 174fa9e4066Sahrens if (zio_buf_cache[c - 1] == NULL) 175fa9e4066Sahrens zio_buf_cache[c - 1] = zio_buf_cache[c]; 176ad23a2dbSjohansen 177ad23a2dbSjohansen ASSERT(zio_data_buf_cache[c] != NULL); 178ad23a2dbSjohansen if (zio_data_buf_cache[c - 1] == NULL) 179ad23a2dbSjohansen zio_data_buf_cache[c - 1] = zio_data_buf_cache[c]; 180fa9e4066Sahrens } 181ea8dc4b6Seschrock 182ea8dc4b6Seschrock zio_inject_init(); 183fa9e4066Sahrens } 184fa9e4066Sahrens 185fa9e4066Sahrens void 186fa9e4066Sahrens zio_fini(void) 187fa9e4066Sahrens { 188fa9e4066Sahrens size_t c; 189fa9e4066Sahrens kmem_cache_t *last_cache = NULL; 190ad23a2dbSjohansen kmem_cache_t *last_data_cache = NULL; 191fa9e4066Sahrens 192fa9e4066Sahrens for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { 193fa9e4066Sahrens if (zio_buf_cache[c] != last_cache) { 194fa9e4066Sahrens last_cache = zio_buf_cache[c]; 195fa9e4066Sahrens kmem_cache_destroy(zio_buf_cache[c]); 196fa9e4066Sahrens } 197fa9e4066Sahrens zio_buf_cache[c] = NULL; 198ad23a2dbSjohansen 199ad23a2dbSjohansen if (zio_data_buf_cache[c] != last_data_cache) { 200ad23a2dbSjohansen last_data_cache = zio_data_buf_cache[c]; 201ad23a2dbSjohansen kmem_cache_destroy(zio_data_buf_cache[c]); 202ad23a2dbSjohansen } 203ad23a2dbSjohansen zio_data_buf_cache[c] = NULL; 204fa9e4066Sahrens } 205ea8dc4b6Seschrock 206a3f829aeSBill Moore kmem_cache_destroy(zio_link_cache); 207ccae0b50Seschrock kmem_cache_destroy(zio_cache); 208ccae0b50Seschrock 209ea8dc4b6Seschrock zio_inject_fini(); 210fa9e4066Sahrens } 211fa9e4066Sahrens 212fa9e4066Sahrens /* 213fa9e4066Sahrens * ========================================================================== 214fa9e4066Sahrens * Allocate and free I/O buffers 215fa9e4066Sahrens * ========================================================================== 216fa9e4066Sahrens */ 217ad23a2dbSjohansen 218ad23a2dbSjohansen /* 219ad23a2dbSjohansen * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a 220ad23a2dbSjohansen * crashdump if the kernel panics, so use it judiciously. Obviously, it's 221ad23a2dbSjohansen * useful to inspect ZFS metadata, but if possible, we should avoid keeping 222ad23a2dbSjohansen * excess / transient data in-core during a crashdump. 223ad23a2dbSjohansen */ 224fa9e4066Sahrens void * 225fa9e4066Sahrens zio_buf_alloc(size_t size) 226fa9e4066Sahrens { 227fa9e4066Sahrens size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 228fa9e4066Sahrens 229f63ab3d5SMatthew Ahrens VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 230fa9e4066Sahrens 2311ab7f2deSmaybee return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE)); 232fa9e4066Sahrens } 233fa9e4066Sahrens 234ad23a2dbSjohansen /* 235ad23a2dbSjohansen * Use zio_data_buf_alloc to allocate data. The data will not appear in a 236ad23a2dbSjohansen * crashdump if the kernel panics. This exists so that we will limit the amount 237ad23a2dbSjohansen * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount 238ad23a2dbSjohansen * of kernel heap dumped to disk when the kernel panics) 239ad23a2dbSjohansen */ 240ad23a2dbSjohansen void * 241ad23a2dbSjohansen zio_data_buf_alloc(size_t size) 242ad23a2dbSjohansen { 243ad23a2dbSjohansen size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 244ad23a2dbSjohansen 245f63ab3d5SMatthew Ahrens VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 246ad23a2dbSjohansen 2471ab7f2deSmaybee return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE)); 248ad23a2dbSjohansen } 249ad23a2dbSjohansen 250fa9e4066Sahrens void 251fa9e4066Sahrens zio_buf_free(void *buf, size_t size) 252fa9e4066Sahrens { 253fa9e4066Sahrens size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 254fa9e4066Sahrens 255f63ab3d5SMatthew Ahrens VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 256fa9e4066Sahrens 257fa9e4066Sahrens kmem_cache_free(zio_buf_cache[c], buf); 258fa9e4066Sahrens } 259fa9e4066Sahrens 260ad23a2dbSjohansen void 261ad23a2dbSjohansen zio_data_buf_free(void *buf, size_t size) 262ad23a2dbSjohansen { 263ad23a2dbSjohansen size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; 264ad23a2dbSjohansen 265f63ab3d5SMatthew Ahrens VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); 266ad23a2dbSjohansen 267ad23a2dbSjohansen kmem_cache_free(zio_data_buf_cache[c], buf); 268ad23a2dbSjohansen } 269b3995adbSahrens 270fa9e4066Sahrens /* 271fa9e4066Sahrens * ========================================================================== 272fa9e4066Sahrens * Push and pop I/O transform buffers 273fa9e4066Sahrens * ========================================================================== 274fa9e4066Sahrens */ 275dcbf3bd6SGeorge Wilson void 276770499e1SDan Kimmel zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize, 2779a686fbcSPaul Dagnelie zio_transform_func_t *transform) 278fa9e4066Sahrens { 279fa9e4066Sahrens zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP); 280fa9e4066Sahrens 281770499e1SDan Kimmel /* 282770499e1SDan Kimmel * Ensure that anyone expecting this zio to contain a linear ABD isn't 283770499e1SDan Kimmel * going to get a nasty surprise when they try to access the data. 284770499e1SDan Kimmel */ 285770499e1SDan Kimmel IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data)); 286770499e1SDan Kimmel 287770499e1SDan Kimmel zt->zt_orig_abd = zio->io_abd; 288e14bb325SJeff Bonwick zt->zt_orig_size = zio->io_size; 289fa9e4066Sahrens zt->zt_bufsize = bufsize; 290e14bb325SJeff Bonwick zt->zt_transform = transform; 291fa9e4066Sahrens 292fa9e4066Sahrens zt->zt_next = zio->io_transform_stack; 293fa9e4066Sahrens zio->io_transform_stack = zt; 294fa9e4066Sahrens 295770499e1SDan Kimmel zio->io_abd = data; 296fa9e4066Sahrens zio->io_size = size; 297fa9e4066Sahrens } 298fa9e4066Sahrens 299dcbf3bd6SGeorge Wilson void 300e14bb325SJeff Bonwick zio_pop_transforms(zio_t *zio) 301fa9e4066Sahrens { 302e14bb325SJeff Bonwick zio_transform_t *zt; 303e14bb325SJeff Bonwick 304e14bb325SJeff Bonwick while ((zt = zio->io_transform_stack) != NULL) { 305e14bb325SJeff Bonwick if (zt->zt_transform != NULL) 306e14bb325SJeff Bonwick zt->zt_transform(zio, 307770499e1SDan Kimmel zt->zt_orig_abd, zt->zt_orig_size); 308fa9e4066Sahrens 309b24ab676SJeff Bonwick if (zt->zt_bufsize != 0) 310770499e1SDan Kimmel abd_free(zio->io_abd); 311fa9e4066Sahrens 312770499e1SDan Kimmel zio->io_abd = zt->zt_orig_abd; 313e14bb325SJeff Bonwick zio->io_size = zt->zt_orig_size; 314e14bb325SJeff Bonwick zio->io_transform_stack = zt->zt_next; 315fa9e4066Sahrens 316e14bb325SJeff Bonwick kmem_free(zt, sizeof (zio_transform_t)); 317fa9e4066Sahrens } 318fa9e4066Sahrens } 319fa9e4066Sahrens 320e14bb325SJeff Bonwick /* 321e14bb325SJeff Bonwick * ========================================================================== 322e14bb325SJeff Bonwick * I/O transform callbacks for subblocks and decompression 323e14bb325SJeff Bonwick * ========================================================================== 324e14bb325SJeff Bonwick */ 325e14bb325SJeff Bonwick static void 326770499e1SDan Kimmel zio_subblock(zio_t *zio, abd_t *data, uint64_t size) 327e14bb325SJeff Bonwick { 328e14bb325SJeff Bonwick ASSERT(zio->io_size > size); 329e14bb325SJeff Bonwick 330e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_READ) 331770499e1SDan Kimmel abd_copy(data, zio->io_abd, size); 332e14bb325SJeff Bonwick } 333e14bb325SJeff Bonwick 334e14bb325SJeff Bonwick static void 335770499e1SDan Kimmel zio_decompress(zio_t *zio, abd_t *data, uint64_t size) 336e14bb325SJeff Bonwick { 337770499e1SDan Kimmel if (zio->io_error == 0) { 338770499e1SDan Kimmel void *tmp = abd_borrow_buf(data, size); 339770499e1SDan Kimmel int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp), 340770499e1SDan Kimmel zio->io_abd, tmp, zio->io_size, size); 341770499e1SDan Kimmel abd_return_buf_copy(data, tmp, size); 342770499e1SDan Kimmel 343770499e1SDan Kimmel if (ret != 0) 344770499e1SDan Kimmel zio->io_error = SET_ERROR(EIO); 345770499e1SDan Kimmel } 346e14bb325SJeff Bonwick } 347e14bb325SJeff Bonwick 348e14bb325SJeff Bonwick /* 349e14bb325SJeff Bonwick * ========================================================================== 350e14bb325SJeff Bonwick * I/O parent/child relationships and pipeline interlocks 351e14bb325SJeff Bonwick * ========================================================================== 352e14bb325SJeff Bonwick */ 353a3f829aeSBill Moore zio_t * 3540f7643c7SGeorge Wilson zio_walk_parents(zio_t *cio, zio_link_t **zl) 355a3f829aeSBill Moore { 356a3f829aeSBill Moore list_t *pl = &cio->io_parent_list; 357e14bb325SJeff Bonwick 3580f7643c7SGeorge Wilson *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl); 3590f7643c7SGeorge Wilson if (*zl == NULL) 360a3f829aeSBill Moore return (NULL); 361a3f829aeSBill Moore 3620f7643c7SGeorge Wilson ASSERT((*zl)->zl_child == cio); 3630f7643c7SGeorge Wilson return ((*zl)->zl_parent); 364a3f829aeSBill Moore } 365a3f829aeSBill Moore 366a3f829aeSBill Moore zio_t * 3670f7643c7SGeorge Wilson zio_walk_children(zio_t *pio, zio_link_t **zl) 368a3f829aeSBill Moore { 369a3f829aeSBill Moore list_t *cl = &pio->io_child_list; 370a3f829aeSBill Moore 3710f7643c7SGeorge Wilson *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl); 3720f7643c7SGeorge Wilson if (*zl == NULL) 373a3f829aeSBill Moore return (NULL); 374a3f829aeSBill Moore 3750f7643c7SGeorge Wilson ASSERT((*zl)->zl_parent == pio); 3760f7643c7SGeorge Wilson return ((*zl)->zl_child); 377a3f829aeSBill Moore } 378a3f829aeSBill Moore 379a3f829aeSBill Moore zio_t * 380a3f829aeSBill Moore zio_unique_parent(zio_t *cio) 381a3f829aeSBill Moore { 3820f7643c7SGeorge Wilson zio_link_t *zl = NULL; 3830f7643c7SGeorge Wilson zio_t *pio = zio_walk_parents(cio, &zl); 384a3f829aeSBill Moore 3850f7643c7SGeorge Wilson VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL); 386a3f829aeSBill Moore return (pio); 387a3f829aeSBill Moore } 388a3f829aeSBill Moore 389a3f829aeSBill Moore void 390a3f829aeSBill Moore zio_add_child(zio_t *pio, zio_t *cio) 391e14bb325SJeff Bonwick { 392a3f829aeSBill Moore zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP); 393a3f829aeSBill Moore 394a3f829aeSBill Moore /* 395a3f829aeSBill Moore * Logical I/Os can have logical, gang, or vdev children. 396a3f829aeSBill Moore * Gang I/Os can have gang or vdev children. 397a3f829aeSBill Moore * Vdev I/Os can only have vdev children. 398a3f829aeSBill Moore * The following ASSERT captures all of these constraints. 399a3f829aeSBill Moore */ 400*1271e4b1SPrakash Surya ASSERT3S(cio->io_child_type, <=, pio->io_child_type); 401a3f829aeSBill Moore 402a3f829aeSBill Moore zl->zl_parent = pio; 403a3f829aeSBill Moore zl->zl_child = cio; 404a3f829aeSBill Moore 405a3f829aeSBill Moore mutex_enter(&cio->io_lock); 406e14bb325SJeff Bonwick mutex_enter(&pio->io_lock); 407a3f829aeSBill Moore 408a3f829aeSBill Moore ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0); 409a3f829aeSBill Moore 410a3f829aeSBill Moore for (int w = 0; w < ZIO_WAIT_TYPES; w++) 411a3f829aeSBill Moore pio->io_children[cio->io_child_type][w] += !cio->io_state[w]; 412a3f829aeSBill Moore 413a3f829aeSBill Moore list_insert_head(&pio->io_child_list, zl); 414a3f829aeSBill Moore list_insert_head(&cio->io_parent_list, zl); 415a3f829aeSBill Moore 416b24ab676SJeff Bonwick pio->io_child_count++; 417b24ab676SJeff Bonwick cio->io_parent_count++; 418b24ab676SJeff Bonwick 419e14bb325SJeff Bonwick mutex_exit(&pio->io_lock); 420a3f829aeSBill Moore mutex_exit(&cio->io_lock); 421e14bb325SJeff Bonwick } 422e14bb325SJeff Bonwick 423fa9e4066Sahrens static void 424a3f829aeSBill Moore zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl) 425e14bb325SJeff Bonwick { 426a3f829aeSBill Moore ASSERT(zl->zl_parent == pio); 427a3f829aeSBill Moore ASSERT(zl->zl_child == cio); 428e14bb325SJeff Bonwick 429a3f829aeSBill Moore mutex_enter(&cio->io_lock); 430e14bb325SJeff Bonwick mutex_enter(&pio->io_lock); 431a3f829aeSBill Moore 432a3f829aeSBill Moore list_remove(&pio->io_child_list, zl); 433a3f829aeSBill Moore list_remove(&cio->io_parent_list, zl); 434a3f829aeSBill Moore 435b24ab676SJeff Bonwick pio->io_child_count--; 436b24ab676SJeff Bonwick cio->io_parent_count--; 437b24ab676SJeff Bonwick 438e14bb325SJeff Bonwick mutex_exit(&pio->io_lock); 439a3f829aeSBill Moore mutex_exit(&cio->io_lock); 440a3f829aeSBill Moore 441a3f829aeSBill Moore kmem_cache_free(zio_link_cache, zl); 442e14bb325SJeff Bonwick } 443e14bb325SJeff Bonwick 444e14bb325SJeff Bonwick static boolean_t 445e14bb325SJeff Bonwick zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait) 446fa9e4066Sahrens { 447e14bb325SJeff Bonwick uint64_t *countp = &zio->io_children[child][wait]; 448e14bb325SJeff Bonwick boolean_t waiting = B_FALSE; 449e14bb325SJeff Bonwick 450e14bb325SJeff Bonwick mutex_enter(&zio->io_lock); 451e14bb325SJeff Bonwick ASSERT(zio->io_stall == NULL); 452e14bb325SJeff Bonwick if (*countp != 0) { 453b24ab676SJeff Bonwick zio->io_stage >>= 1; 4540f7643c7SGeorge Wilson ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN); 455e14bb325SJeff Bonwick zio->io_stall = countp; 456e14bb325SJeff Bonwick waiting = B_TRUE; 457e14bb325SJeff Bonwick } 458e14bb325SJeff Bonwick mutex_exit(&zio->io_lock); 459e14bb325SJeff Bonwick 460e14bb325SJeff Bonwick return (waiting); 461e14bb325SJeff Bonwick } 462fa9e4066Sahrens 463e14bb325SJeff Bonwick static void 464e14bb325SJeff Bonwick zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait) 465e14bb325SJeff Bonwick { 466e14bb325SJeff Bonwick uint64_t *countp = &pio->io_children[zio->io_child_type][wait]; 467e14bb325SJeff Bonwick int *errorp = &pio->io_child_error[zio->io_child_type]; 468fa9e4066Sahrens 469e14bb325SJeff Bonwick mutex_enter(&pio->io_lock); 470e14bb325SJeff Bonwick if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) 471e14bb325SJeff Bonwick *errorp = zio_worst_error(*errorp, zio->io_error); 472e14bb325SJeff Bonwick pio->io_reexecute |= zio->io_reexecute; 473e14bb325SJeff Bonwick ASSERT3U(*countp, >, 0); 47469962b56SMatthew Ahrens 47569962b56SMatthew Ahrens (*countp)--; 47669962b56SMatthew Ahrens 47769962b56SMatthew Ahrens if (*countp == 0 && pio->io_stall == countp) { 4780f7643c7SGeorge Wilson zio_taskq_type_t type = 4790f7643c7SGeorge Wilson pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE : 4800f7643c7SGeorge Wilson ZIO_TASKQ_INTERRUPT; 481e14bb325SJeff Bonwick pio->io_stall = NULL; 482e14bb325SJeff Bonwick mutex_exit(&pio->io_lock); 4830f7643c7SGeorge Wilson /* 4840f7643c7SGeorge Wilson * Dispatch the parent zio in its own taskq so that 4850f7643c7SGeorge Wilson * the child can continue to make progress. This also 4860f7643c7SGeorge Wilson * prevents overflowing the stack when we have deeply nested 4870f7643c7SGeorge Wilson * parent-child relationships. 4880f7643c7SGeorge Wilson */ 4890f7643c7SGeorge Wilson zio_taskq_dispatch(pio, type, B_FALSE); 490e14bb325SJeff Bonwick } else { 491e14bb325SJeff Bonwick mutex_exit(&pio->io_lock); 492fa9e4066Sahrens } 493fa9e4066Sahrens } 494fa9e4066Sahrens 495e14bb325SJeff Bonwick static void 496e14bb325SJeff Bonwick zio_inherit_child_errors(zio_t *zio, enum zio_child c) 497e14bb325SJeff Bonwick { 498e14bb325SJeff Bonwick if (zio->io_child_error[c] != 0 && zio->io_error == 0) 499e14bb325SJeff Bonwick zio->io_error = zio->io_child_error[c]; 500e14bb325SJeff Bonwick } 501e14bb325SJeff Bonwick 5020f7643c7SGeorge Wilson int 50394c2d0ebSMatthew Ahrens zio_bookmark_compare(const void *x1, const void *x2) 5040f7643c7SGeorge Wilson { 5050f7643c7SGeorge Wilson const zio_t *z1 = x1; 5060f7643c7SGeorge Wilson const zio_t *z2 = x2; 5070f7643c7SGeorge Wilson 50894c2d0ebSMatthew Ahrens if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset) 5090f7643c7SGeorge Wilson return (-1); 51094c2d0ebSMatthew Ahrens if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset) 5110f7643c7SGeorge Wilson return (1); 5120f7643c7SGeorge Wilson 51394c2d0ebSMatthew Ahrens if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object) 5140f7643c7SGeorge Wilson return (-1); 51594c2d0ebSMatthew Ahrens if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object) 51694c2d0ebSMatthew Ahrens return (1); 51794c2d0ebSMatthew Ahrens 51894c2d0ebSMatthew Ahrens if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level) 51994c2d0ebSMatthew Ahrens return (-1); 52094c2d0ebSMatthew Ahrens if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level) 52194c2d0ebSMatthew Ahrens return (1); 52294c2d0ebSMatthew Ahrens 52394c2d0ebSMatthew Ahrens if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid) 52494c2d0ebSMatthew Ahrens return (-1); 52594c2d0ebSMatthew Ahrens if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid) 5260f7643c7SGeorge Wilson return (1); 5270f7643c7SGeorge Wilson 5280f7643c7SGeorge Wilson if (z1 < z2) 5290f7643c7SGeorge Wilson return (-1); 5300f7643c7SGeorge Wilson if (z1 > z2) 5310f7643c7SGeorge Wilson return (1); 5320f7643c7SGeorge Wilson 5330f7643c7SGeorge Wilson return (0); 5340f7643c7SGeorge Wilson } 5350f7643c7SGeorge Wilson 536fa9e4066Sahrens /* 537fa9e4066Sahrens * ========================================================================== 538e14bb325SJeff Bonwick * Create the various types of I/O (read, write, free, etc) 539fa9e4066Sahrens * ========================================================================== 540fa9e4066Sahrens */ 541fa9e4066Sahrens static zio_t * 542b24ab676SJeff Bonwick zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 543770499e1SDan Kimmel abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done, 5445602294fSDan Kimmel void *private, zio_type_t type, zio_priority_t priority, 5455602294fSDan Kimmel enum zio_flag flags, vdev_t *vd, uint64_t offset, 5465602294fSDan Kimmel const zbookmark_phys_t *zb, enum zio_stage stage, enum zio_stage pipeline) 547fa9e4066Sahrens { 548fa9e4066Sahrens zio_t *zio; 549fa9e4066Sahrens 5505602294fSDan Kimmel ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE); 5515602294fSDan Kimmel ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0); 552e14bb325SJeff Bonwick ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0); 553fa9e4066Sahrens 554e14bb325SJeff Bonwick ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER)); 555e14bb325SJeff Bonwick ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER)); 556e14bb325SJeff Bonwick ASSERT(vd || stage == ZIO_STAGE_OPEN); 557088f3894Sahrens 5585602294fSDan Kimmel IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW) != 0); 5595602294fSDan Kimmel 560ccae0b50Seschrock zio = kmem_cache_alloc(zio_cache, KM_SLEEP); 561ccae0b50Seschrock bzero(zio, sizeof (zio_t)); 562e14bb325SJeff Bonwick 563e14bb325SJeff Bonwick mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL); 564e14bb325SJeff Bonwick cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL); 565e14bb325SJeff Bonwick 566a3f829aeSBill Moore list_create(&zio->io_parent_list, sizeof (zio_link_t), 567a3f829aeSBill Moore offsetof(zio_link_t, zl_parent_node)); 568a3f829aeSBill Moore list_create(&zio->io_child_list, sizeof (zio_link_t), 569a3f829aeSBill Moore offsetof(zio_link_t, zl_child_node)); 5708363e80aSGeorge Wilson metaslab_trace_init(&zio->io_alloc_list); 571a3f829aeSBill Moore 572e14bb325SJeff Bonwick if (vd != NULL) 573e14bb325SJeff Bonwick zio->io_child_type = ZIO_CHILD_VDEV; 574e14bb325SJeff Bonwick else if (flags & ZIO_FLAG_GANG_CHILD) 575e14bb325SJeff Bonwick zio->io_child_type = ZIO_CHILD_GANG; 576b24ab676SJeff Bonwick else if (flags & ZIO_FLAG_DDT_CHILD) 577b24ab676SJeff Bonwick zio->io_child_type = ZIO_CHILD_DDT; 578e14bb325SJeff Bonwick else 579e14bb325SJeff Bonwick zio->io_child_type = ZIO_CHILD_LOGICAL; 580e14bb325SJeff Bonwick 581fa9e4066Sahrens if (bp != NULL) { 582b24ab676SJeff Bonwick zio->io_bp = (blkptr_t *)bp; 583fa9e4066Sahrens zio->io_bp_copy = *bp; 584fa9e4066Sahrens zio->io_bp_orig = *bp; 585b24ab676SJeff Bonwick if (type != ZIO_TYPE_WRITE || 586b24ab676SJeff Bonwick zio->io_child_type == ZIO_CHILD_DDT) 587e14bb325SJeff Bonwick zio->io_bp = &zio->io_bp_copy; /* so caller can free */ 588f5383399SBill Moore if (zio->io_child_type == ZIO_CHILD_LOGICAL) 589e14bb325SJeff Bonwick zio->io_logical = zio; 590f5383399SBill Moore if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp)) 591f5383399SBill Moore pipeline |= ZIO_GANG_STAGES; 592fa9e4066Sahrens } 593e14bb325SJeff Bonwick 594e14bb325SJeff Bonwick zio->io_spa = spa; 595e14bb325SJeff Bonwick zio->io_txg = txg; 596fa9e4066Sahrens zio->io_done = done; 597fa9e4066Sahrens zio->io_private = private; 598fa9e4066Sahrens zio->io_type = type; 599fa9e4066Sahrens zio->io_priority = priority; 600e14bb325SJeff Bonwick zio->io_vd = vd; 601e14bb325SJeff Bonwick zio->io_offset = offset; 602770499e1SDan Kimmel zio->io_orig_abd = zio->io_abd = data; 6035602294fSDan Kimmel zio->io_orig_size = zio->io_size = psize; 6045602294fSDan Kimmel zio->io_lsize = lsize; 605e14bb325SJeff Bonwick zio->io_orig_flags = zio->io_flags = flags; 606e14bb325SJeff Bonwick zio->io_orig_stage = zio->io_stage = stage; 607e14bb325SJeff Bonwick zio->io_orig_pipeline = zio->io_pipeline = pipeline; 6080f7643c7SGeorge Wilson zio->io_pipeline_trace = ZIO_STAGE_OPEN; 609fa9e4066Sahrens 610a3f829aeSBill Moore zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY); 611a3f829aeSBill Moore zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE); 612a3f829aeSBill Moore 613e14bb325SJeff Bonwick if (zb != NULL) 614e14bb325SJeff Bonwick zio->io_bookmark = *zb; 615e14bb325SJeff Bonwick 616e14bb325SJeff Bonwick if (pio != NULL) { 617e14bb325SJeff Bonwick if (zio->io_logical == NULL) 618ea8dc4b6Seschrock zio->io_logical = pio->io_logical; 619f5383399SBill Moore if (zio->io_child_type == ZIO_CHILD_GANG) 620f5383399SBill Moore zio->io_gang_leader = pio->io_gang_leader; 621e14bb325SJeff Bonwick zio_add_child(pio, zio); 622fa9e4066Sahrens } 623fa9e4066Sahrens 624fa9e4066Sahrens return (zio); 625fa9e4066Sahrens } 626fa9e4066Sahrens 6270a4e9518Sgw static void 628e14bb325SJeff Bonwick zio_destroy(zio_t *zio) 6290a4e9518Sgw { 6308363e80aSGeorge Wilson metaslab_trace_fini(&zio->io_alloc_list); 631a3f829aeSBill Moore list_destroy(&zio->io_parent_list); 632a3f829aeSBill Moore list_destroy(&zio->io_child_list); 633e14bb325SJeff Bonwick mutex_destroy(&zio->io_lock); 634e14bb325SJeff Bonwick cv_destroy(&zio->io_cv); 635e14bb325SJeff Bonwick kmem_cache_free(zio_cache, zio); 6360a4e9518Sgw } 6370a4e9518Sgw 638fa9e4066Sahrens zio_t * 639a3f829aeSBill Moore zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done, 640b24ab676SJeff Bonwick void *private, enum zio_flag flags) 641fa9e4066Sahrens { 642fa9e4066Sahrens zio_t *zio; 643fa9e4066Sahrens 6445602294fSDan Kimmel zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private, 645a3f829aeSBill Moore ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, 646e14bb325SJeff Bonwick ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE); 647fa9e4066Sahrens 648fa9e4066Sahrens return (zio); 649fa9e4066Sahrens } 650fa9e4066Sahrens 651fa9e4066Sahrens zio_t * 652b24ab676SJeff Bonwick zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags) 653fa9e4066Sahrens { 654a3f829aeSBill Moore return (zio_null(NULL, spa, NULL, done, private, flags)); 655fa9e4066Sahrens } 656fa9e4066Sahrens 657f63ab3d5SMatthew Ahrens void 658f63ab3d5SMatthew Ahrens zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp) 659f63ab3d5SMatthew Ahrens { 660f63ab3d5SMatthew Ahrens if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) { 661f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p has invalid TYPE %llu", 662f63ab3d5SMatthew Ahrens bp, (longlong_t)BP_GET_TYPE(bp)); 663f63ab3d5SMatthew Ahrens } 664f63ab3d5SMatthew Ahrens if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS || 665f63ab3d5SMatthew Ahrens BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) { 666f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu", 667f63ab3d5SMatthew Ahrens bp, (longlong_t)BP_GET_CHECKSUM(bp)); 668f63ab3d5SMatthew Ahrens } 669f63ab3d5SMatthew Ahrens if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS || 670f63ab3d5SMatthew Ahrens BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) { 671f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu", 672f63ab3d5SMatthew Ahrens bp, (longlong_t)BP_GET_COMPRESS(bp)); 673f63ab3d5SMatthew Ahrens } 674f63ab3d5SMatthew Ahrens if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) { 675f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p has invalid LSIZE %llu", 676f63ab3d5SMatthew Ahrens bp, (longlong_t)BP_GET_LSIZE(bp)); 677f63ab3d5SMatthew Ahrens } 678f63ab3d5SMatthew Ahrens if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) { 679f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p has invalid PSIZE %llu", 680f63ab3d5SMatthew Ahrens bp, (longlong_t)BP_GET_PSIZE(bp)); 681f63ab3d5SMatthew Ahrens } 682f63ab3d5SMatthew Ahrens 683f63ab3d5SMatthew Ahrens if (BP_IS_EMBEDDED(bp)) { 684f63ab3d5SMatthew Ahrens if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) { 685f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p has invalid ETYPE %llu", 686f63ab3d5SMatthew Ahrens bp, (longlong_t)BPE_GET_ETYPE(bp)); 687f63ab3d5SMatthew Ahrens } 688f63ab3d5SMatthew Ahrens } 689f63ab3d5SMatthew Ahrens 690f63ab3d5SMatthew Ahrens /* 691f63ab3d5SMatthew Ahrens * Pool-specific checks. 692f63ab3d5SMatthew Ahrens * 693f63ab3d5SMatthew Ahrens * Note: it would be nice to verify that the blk_birth and 694f63ab3d5SMatthew Ahrens * BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze() 695f63ab3d5SMatthew Ahrens * allows the birth time of log blocks (and dmu_sync()-ed blocks 696f63ab3d5SMatthew Ahrens * that are in the log) to be arbitrarily large. 697f63ab3d5SMatthew Ahrens */ 698f63ab3d5SMatthew Ahrens for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 699f63ab3d5SMatthew Ahrens uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]); 700f63ab3d5SMatthew Ahrens if (vdevid >= spa->spa_root_vdev->vdev_children) { 701f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p DVA %u has invalid " 702f63ab3d5SMatthew Ahrens "VDEV %llu", 703f63ab3d5SMatthew Ahrens bp, i, (longlong_t)vdevid); 7045897eb49SJustin Gibbs continue; 705f63ab3d5SMatthew Ahrens } 706f63ab3d5SMatthew Ahrens vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid]; 707f63ab3d5SMatthew Ahrens if (vd == NULL) { 708f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p DVA %u has invalid " 709f63ab3d5SMatthew Ahrens "VDEV %llu", 710f63ab3d5SMatthew Ahrens bp, i, (longlong_t)vdevid); 7115897eb49SJustin Gibbs continue; 712f63ab3d5SMatthew Ahrens } 713f63ab3d5SMatthew Ahrens if (vd->vdev_ops == &vdev_hole_ops) { 714f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p DVA %u has hole " 715f63ab3d5SMatthew Ahrens "VDEV %llu", 716f63ab3d5SMatthew Ahrens bp, i, (longlong_t)vdevid); 7175897eb49SJustin Gibbs continue; 718f63ab3d5SMatthew Ahrens } 719f63ab3d5SMatthew Ahrens if (vd->vdev_ops == &vdev_missing_ops) { 720f63ab3d5SMatthew Ahrens /* 721f63ab3d5SMatthew Ahrens * "missing" vdevs are valid during import, but we 722f63ab3d5SMatthew Ahrens * don't have their detailed info (e.g. asize), so 723f63ab3d5SMatthew Ahrens * we can't perform any more checks on them. 724f63ab3d5SMatthew Ahrens */ 725f63ab3d5SMatthew Ahrens continue; 726f63ab3d5SMatthew Ahrens } 727f63ab3d5SMatthew Ahrens uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 728f63ab3d5SMatthew Ahrens uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]); 729f63ab3d5SMatthew Ahrens if (BP_IS_GANG(bp)) 730f63ab3d5SMatthew Ahrens asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 731f63ab3d5SMatthew Ahrens if (offset + asize > vd->vdev_asize) { 732f63ab3d5SMatthew Ahrens zfs_panic_recover("blkptr at %p DVA %u has invalid " 733f63ab3d5SMatthew Ahrens "OFFSET %llu", 734f63ab3d5SMatthew Ahrens bp, i, (longlong_t)offset); 735f63ab3d5SMatthew Ahrens } 736f63ab3d5SMatthew Ahrens } 737f63ab3d5SMatthew Ahrens } 738f63ab3d5SMatthew Ahrens 739fa9e4066Sahrens zio_t * 740e14bb325SJeff Bonwick zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, 741770499e1SDan Kimmel abd_t *data, uint64_t size, zio_done_func_t *done, void *private, 7427802d7bfSMatthew Ahrens zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb) 743fa9e4066Sahrens { 744fa9e4066Sahrens zio_t *zio; 745fa9e4066Sahrens 746f63ab3d5SMatthew Ahrens zfs_blkptr_verify(spa, bp); 747f63ab3d5SMatthew Ahrens 748b24ab676SJeff Bonwick zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp, 7495602294fSDan Kimmel data, size, size, done, private, 750e14bb325SJeff Bonwick ZIO_TYPE_READ, priority, flags, NULL, 0, zb, 751b24ab676SJeff Bonwick ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? 752b24ab676SJeff Bonwick ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE); 753fa9e4066Sahrens 754fa9e4066Sahrens return (zio); 755fa9e4066Sahrens } 756fa9e4066Sahrens 757fa9e4066Sahrens zio_t * 758e14bb325SJeff Bonwick zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 759770499e1SDan Kimmel abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp, 7608df0bcf0SPaul Dagnelie zio_done_func_t *ready, zio_done_func_t *children_ready, 7618df0bcf0SPaul Dagnelie zio_done_func_t *physdone, zio_done_func_t *done, 7628df0bcf0SPaul Dagnelie void *private, zio_priority_t priority, enum zio_flag flags, 7638df0bcf0SPaul Dagnelie const zbookmark_phys_t *zb) 764fa9e4066Sahrens { 765fa9e4066Sahrens zio_t *zio; 766fa9e4066Sahrens 767e14bb325SJeff Bonwick ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF && 768e14bb325SJeff Bonwick zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS && 769e14bb325SJeff Bonwick zp->zp_compress >= ZIO_COMPRESS_OFF && 770e14bb325SJeff Bonwick zp->zp_compress < ZIO_COMPRESS_FUNCTIONS && 771ad135b5dSChristopher Siden DMU_OT_IS_VALID(zp->zp_type) && 772e14bb325SJeff Bonwick zp->zp_level < 32 && 773b24ab676SJeff Bonwick zp->zp_copies > 0 && 77480901aeaSGeorge Wilson zp->zp_copies <= spa_max_replication(spa)); 7750a4e9518Sgw 7765602294fSDan Kimmel zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private, 777e14bb325SJeff Bonwick ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, 778b24ab676SJeff Bonwick ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? 779b24ab676SJeff Bonwick ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE); 780fa9e4066Sahrens 781c717a561Smaybee zio->io_ready = ready; 7828df0bcf0SPaul Dagnelie zio->io_children_ready = children_ready; 78369962b56SMatthew Ahrens zio->io_physdone = physdone; 784e14bb325SJeff Bonwick zio->io_prop = *zp; 785fa9e4066Sahrens 7865d7b4d43SMatthew Ahrens /* 7875d7b4d43SMatthew Ahrens * Data can be NULL if we are going to call zio_write_override() to 7885d7b4d43SMatthew Ahrens * provide the already-allocated BP. But we may need the data to 7895d7b4d43SMatthew Ahrens * verify a dedup hit (if requested). In this case, don't try to 7905d7b4d43SMatthew Ahrens * dedup (just take the already-allocated BP verbatim). 7915d7b4d43SMatthew Ahrens */ 7925d7b4d43SMatthew Ahrens if (data == NULL && zio->io_prop.zp_dedup_verify) { 7935d7b4d43SMatthew Ahrens zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE; 7945d7b4d43SMatthew Ahrens } 7955d7b4d43SMatthew Ahrens 796fa9e4066Sahrens return (zio); 797fa9e4066Sahrens } 798fa9e4066Sahrens 799fa9e4066Sahrens zio_t * 800770499e1SDan Kimmel zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data, 80169962b56SMatthew Ahrens uint64_t size, zio_done_func_t *done, void *private, 8027802d7bfSMatthew Ahrens zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb) 803fa9e4066Sahrens { 804fa9e4066Sahrens zio_t *zio; 805fa9e4066Sahrens 8065602294fSDan Kimmel zio = zio_create(pio, spa, txg, bp, data, size, size, done, private, 8070f7643c7SGeorge Wilson ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb, 808e14bb325SJeff Bonwick ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE); 809fa9e4066Sahrens 810fa9e4066Sahrens return (zio); 811fa9e4066Sahrens } 812fa9e4066Sahrens 813b24ab676SJeff Bonwick void 81480901aeaSGeorge Wilson zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite) 815b24ab676SJeff Bonwick { 816b24ab676SJeff Bonwick ASSERT(zio->io_type == ZIO_TYPE_WRITE); 817b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 818b24ab676SJeff Bonwick ASSERT(zio->io_stage == ZIO_STAGE_OPEN); 819b24ab676SJeff Bonwick ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa)); 820b24ab676SJeff Bonwick 82180901aeaSGeorge Wilson /* 82280901aeaSGeorge Wilson * We must reset the io_prop to match the values that existed 82380901aeaSGeorge Wilson * when the bp was first written by dmu_sync() keeping in mind 82480901aeaSGeorge Wilson * that nopwrite and dedup are mutually exclusive. 82580901aeaSGeorge Wilson */ 82680901aeaSGeorge Wilson zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup; 82780901aeaSGeorge Wilson zio->io_prop.zp_nopwrite = nopwrite; 828b24ab676SJeff Bonwick zio->io_prop.zp_copies = copies; 829b24ab676SJeff Bonwick zio->io_bp_override = bp; 830b24ab676SJeff Bonwick } 831b24ab676SJeff Bonwick 832b24ab676SJeff Bonwick void 833b24ab676SJeff Bonwick zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp) 834b24ab676SJeff Bonwick { 8355d7b4d43SMatthew Ahrens 8365d7b4d43SMatthew Ahrens /* 8375d7b4d43SMatthew Ahrens * The check for EMBEDDED is a performance optimization. We 8385d7b4d43SMatthew Ahrens * process the free here (by ignoring it) rather than 8395d7b4d43SMatthew Ahrens * putting it on the list and then processing it in zio_free_sync(). 8405d7b4d43SMatthew Ahrens */ 8415d7b4d43SMatthew Ahrens if (BP_IS_EMBEDDED(bp)) 8425d7b4d43SMatthew Ahrens return; 8433b2aab18SMatthew Ahrens metaslab_check_free(spa, bp); 8449cb154a3SMatthew Ahrens 8459cb154a3SMatthew Ahrens /* 8469cb154a3SMatthew Ahrens * Frees that are for the currently-syncing txg, are not going to be 8479cb154a3SMatthew Ahrens * deferred, and which will not need to do a read (i.e. not GANG or 8489cb154a3SMatthew Ahrens * DEDUP), can be processed immediately. Otherwise, put them on the 8499cb154a3SMatthew Ahrens * in-memory list for later processing. 8509cb154a3SMatthew Ahrens */ 8519cb154a3SMatthew Ahrens if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) || 8529cb154a3SMatthew Ahrens txg != spa->spa_syncing_txg || 8539cb154a3SMatthew Ahrens spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) { 8549cb154a3SMatthew Ahrens bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp); 8559cb154a3SMatthew Ahrens } else { 8569cb154a3SMatthew Ahrens VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0))); 8579cb154a3SMatthew Ahrens } 858b24ab676SJeff Bonwick } 859b24ab676SJeff Bonwick 860fa9e4066Sahrens zio_t * 861b24ab676SJeff Bonwick zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 862b24ab676SJeff Bonwick enum zio_flag flags) 863fa9e4066Sahrens { 864fa9e4066Sahrens zio_t *zio; 8659cb154a3SMatthew Ahrens enum zio_stage stage = ZIO_FREE_PIPELINE; 866fa9e4066Sahrens 867fa9e4066Sahrens ASSERT(!BP_IS_HOLE(bp)); 868b24ab676SJeff Bonwick ASSERT(spa_syncing_txg(spa) == txg); 86901f55e48SGeorge Wilson ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free); 870fa9e4066Sahrens 8715d7b4d43SMatthew Ahrens if (BP_IS_EMBEDDED(bp)) 8725d7b4d43SMatthew Ahrens return (zio_null(pio, spa, NULL, NULL, NULL, 0)); 8735d7b4d43SMatthew Ahrens 8743b2aab18SMatthew Ahrens metaslab_check_free(spa, bp); 8756e6d5868SMatthew Ahrens arc_freed(spa, bp); 8763b2aab18SMatthew Ahrens 8779cb154a3SMatthew Ahrens /* 8789cb154a3SMatthew Ahrens * GANG and DEDUP blocks can induce a read (for the gang block header, 8799cb154a3SMatthew Ahrens * or the DDT), so issue them asynchronously so that this thread is 8809cb154a3SMatthew Ahrens * not tied up. 8819cb154a3SMatthew Ahrens */ 8829cb154a3SMatthew Ahrens if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp)) 8839cb154a3SMatthew Ahrens stage |= ZIO_STAGE_ISSUE_ASYNC; 8849cb154a3SMatthew Ahrens 885e14bb325SJeff Bonwick zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), 8865602294fSDan Kimmel BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, 8875602294fSDan Kimmel flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage); 8889cb154a3SMatthew Ahrens 889fa9e4066Sahrens return (zio); 890fa9e4066Sahrens } 891fa9e4066Sahrens 892fa9e4066Sahrens zio_t * 893b24ab676SJeff Bonwick zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, 894b24ab676SJeff Bonwick zio_done_func_t *done, void *private, enum zio_flag flags) 895fa9e4066Sahrens { 896fa9e4066Sahrens zio_t *zio; 897fa9e4066Sahrens 8985d7b4d43SMatthew Ahrens dprintf_bp(bp, "claiming in txg %llu", txg); 8995d7b4d43SMatthew Ahrens 9005d7b4d43SMatthew Ahrens if (BP_IS_EMBEDDED(bp)) 9015d7b4d43SMatthew Ahrens return (zio_null(pio, spa, NULL, NULL, NULL, 0)); 9025d7b4d43SMatthew Ahrens 903fa9e4066Sahrens /* 904fa9e4066Sahrens * A claim is an allocation of a specific block. Claims are needed 905fa9e4066Sahrens * to support immediate writes in the intent log. The issue is that 906fa9e4066Sahrens * immediate writes contain committed data, but in a txg that was 907fa9e4066Sahrens * *not* committed. Upon opening the pool after an unclean shutdown, 908fa9e4066Sahrens * the intent log claims all blocks that contain immediate write data 909fa9e4066Sahrens * so that the SPA knows they're in use. 910fa9e4066Sahrens * 911fa9e4066Sahrens * All claims *must* be resolved in the first txg -- before the SPA 912fa9e4066Sahrens * starts allocating blocks -- so that nothing is allocated twice. 913b24ab676SJeff Bonwick * If txg == 0 we just verify that the block is claimable. 914fa9e4066Sahrens */ 915fa9e4066Sahrens ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa)); 916b24ab676SJeff Bonwick ASSERT(txg == spa_first_txg(spa) || txg == 0); 917b24ab676SJeff Bonwick ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(1M) */ 918fa9e4066Sahrens 919e14bb325SJeff Bonwick zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), 9205602294fSDan Kimmel BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, 9215602294fSDan Kimmel flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE); 9220f7643c7SGeorge Wilson ASSERT0(zio->io_queued_timestamp); 923fa9e4066Sahrens 924fa9e4066Sahrens return (zio); 925fa9e4066Sahrens } 926fa9e4066Sahrens 927fa9e4066Sahrens zio_t * 928fa9e4066Sahrens zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, 92969962b56SMatthew Ahrens zio_done_func_t *done, void *private, enum zio_flag flags) 930fa9e4066Sahrens { 931fa9e4066Sahrens zio_t *zio; 932fa9e4066Sahrens int c; 933fa9e4066Sahrens 934fa9e4066Sahrens if (vd->vdev_children == 0) { 9355602294fSDan Kimmel zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private, 93669962b56SMatthew Ahrens ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, 937fa9e4066Sahrens ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE); 938fa9e4066Sahrens 939fa9e4066Sahrens zio->io_cmd = cmd; 940fa9e4066Sahrens } else { 941a3f829aeSBill Moore zio = zio_null(pio, spa, NULL, NULL, NULL, flags); 942fa9e4066Sahrens 943fa9e4066Sahrens for (c = 0; c < vd->vdev_children; c++) 944fa9e4066Sahrens zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd, 94569962b56SMatthew Ahrens done, private, flags)); 946fa9e4066Sahrens } 947fa9e4066Sahrens 948fa9e4066Sahrens return (zio); 949fa9e4066Sahrens } 950fa9e4066Sahrens 951fa9e4066Sahrens zio_t * 952fa9e4066Sahrens zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 953770499e1SDan Kimmel abd_t *data, int checksum, zio_done_func_t *done, void *private, 95469962b56SMatthew Ahrens zio_priority_t priority, enum zio_flag flags, boolean_t labels) 955fa9e4066Sahrens { 956fa9e4066Sahrens zio_t *zio; 9570a4e9518Sgw 958e14bb325SJeff Bonwick ASSERT(vd->vdev_children == 0); 959e14bb325SJeff Bonwick ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 960e14bb325SJeff Bonwick offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 961e14bb325SJeff Bonwick ASSERT3U(offset + size, <=, vd->vdev_psize); 962fa9e4066Sahrens 9635602294fSDan Kimmel zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done, 9645602294fSDan Kimmel private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, 9655602294fSDan Kimmel offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE); 966fa9e4066Sahrens 967e14bb325SJeff Bonwick zio->io_prop.zp_checksum = checksum; 968fa9e4066Sahrens 969fa9e4066Sahrens return (zio); 970fa9e4066Sahrens } 971fa9e4066Sahrens 972fa9e4066Sahrens zio_t * 973fa9e4066Sahrens zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, 974770499e1SDan Kimmel abd_t *data, int checksum, zio_done_func_t *done, void *private, 97569962b56SMatthew Ahrens zio_priority_t priority, enum zio_flag flags, boolean_t labels) 976fa9e4066Sahrens { 977fa9e4066Sahrens zio_t *zio; 9780a4e9518Sgw 979e14bb325SJeff Bonwick ASSERT(vd->vdev_children == 0); 980e14bb325SJeff Bonwick ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || 981e14bb325SJeff Bonwick offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); 982e14bb325SJeff Bonwick ASSERT3U(offset + size, <=, vd->vdev_psize); 983fa9e4066Sahrens 9845602294fSDan Kimmel zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done, 9855602294fSDan Kimmel private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, 9865602294fSDan Kimmel offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE); 987fa9e4066Sahrens 988e14bb325SJeff Bonwick zio->io_prop.zp_checksum = checksum; 989fa9e4066Sahrens 99045818ee1SMatthew Ahrens if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) { 991fa9e4066Sahrens /* 9926e1f5caaSNeil Perrin * zec checksums are necessarily destructive -- they modify 993e14bb325SJeff Bonwick * the end of the write buffer to hold the verifier/checksum. 994fa9e4066Sahrens * Therefore, we must make a local copy in case the data is 995e14bb325SJeff Bonwick * being written to multiple places in parallel. 996fa9e4066Sahrens */ 997770499e1SDan Kimmel abd_t *wbuf = abd_alloc_sametype(data, size); 998770499e1SDan Kimmel abd_copy(wbuf, data, size); 999770499e1SDan Kimmel 1000e14bb325SJeff Bonwick zio_push_transform(zio, wbuf, size, size, NULL); 1001fa9e4066Sahrens } 1002fa9e4066Sahrens 1003fa9e4066Sahrens return (zio); 1004fa9e4066Sahrens } 1005fa9e4066Sahrens 1006fa9e4066Sahrens /* 1007e14bb325SJeff Bonwick * Create a child I/O to do some work for us. 1008fa9e4066Sahrens */ 1009fa9e4066Sahrens zio_t * 1010e14bb325SJeff Bonwick zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset, 1011770499e1SDan Kimmel abd_t *data, uint64_t size, int type, zio_priority_t priority, 1012dcbf3bd6SGeorge Wilson enum zio_flag flags, zio_done_func_t *done, void *private) 1013fa9e4066Sahrens { 1014b24ab676SJeff Bonwick enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE; 1015e14bb325SJeff Bonwick zio_t *zio; 1016e14bb325SJeff Bonwick 1017e14bb325SJeff Bonwick ASSERT(vd->vdev_parent == 1018e14bb325SJeff Bonwick (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev)); 1019fa9e4066Sahrens 1020fa9e4066Sahrens if (type == ZIO_TYPE_READ && bp != NULL) { 1021fa9e4066Sahrens /* 1022fa9e4066Sahrens * If we have the bp, then the child should perform the 1023fa9e4066Sahrens * checksum and the parent need not. This pushes error 1024fa9e4066Sahrens * detection as close to the leaves as possible and 1025fa9e4066Sahrens * eliminates redundant checksums in the interior nodes. 1026fa9e4066Sahrens */ 1027b24ab676SJeff Bonwick pipeline |= ZIO_STAGE_CHECKSUM_VERIFY; 1028b24ab676SJeff Bonwick pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 1029fa9e4066Sahrens } 1030fa9e4066Sahrens 1031e14bb325SJeff Bonwick if (vd->vdev_children == 0) 1032e14bb325SJeff Bonwick offset += VDEV_LABEL_START_SIZE; 1033e14bb325SJeff Bonwick 1034b24ab676SJeff Bonwick flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE; 1035b24ab676SJeff Bonwick 1036b24ab676SJeff Bonwick /* 1037b24ab676SJeff Bonwick * If we've decided to do a repair, the write is not speculative -- 1038b24ab676SJeff Bonwick * even if the original read was. 1039b24ab676SJeff Bonwick */ 1040b24ab676SJeff Bonwick if (flags & ZIO_FLAG_IO_REPAIR) 1041b24ab676SJeff Bonwick flags &= ~ZIO_FLAG_SPECULATIVE; 1042b24ab676SJeff Bonwick 10430f7643c7SGeorge Wilson /* 10440f7643c7SGeorge Wilson * If we're creating a child I/O that is not associated with a 10450f7643c7SGeorge Wilson * top-level vdev, then the child zio is not an allocating I/O. 10460f7643c7SGeorge Wilson * If this is a retried I/O then we ignore it since we will 10470f7643c7SGeorge Wilson * have already processed the original allocating I/O. 10480f7643c7SGeorge Wilson */ 10490f7643c7SGeorge Wilson if (flags & ZIO_FLAG_IO_ALLOCATING && 10500f7643c7SGeorge Wilson (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) { 10510f7643c7SGeorge Wilson metaslab_class_t *mc = spa_normal_class(pio->io_spa); 10520f7643c7SGeorge Wilson 10530f7643c7SGeorge Wilson ASSERT(mc->mc_alloc_throttle_enabled); 10540f7643c7SGeorge Wilson ASSERT(type == ZIO_TYPE_WRITE); 10550f7643c7SGeorge Wilson ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE); 10560f7643c7SGeorge Wilson ASSERT(!(flags & ZIO_FLAG_IO_REPAIR)); 10570f7643c7SGeorge Wilson ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) || 10580f7643c7SGeorge Wilson pio->io_child_type == ZIO_CHILD_GANG); 10590f7643c7SGeorge Wilson 10600f7643c7SGeorge Wilson flags &= ~ZIO_FLAG_IO_ALLOCATING; 10610f7643c7SGeorge Wilson } 10620f7643c7SGeorge Wilson 10635602294fSDan Kimmel zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size, 1064b24ab676SJeff Bonwick done, private, type, priority, flags, vd, offset, &pio->io_bookmark, 1065b24ab676SJeff Bonwick ZIO_STAGE_VDEV_IO_START >> 1, pipeline); 10660f7643c7SGeorge Wilson ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 1067fa9e4066Sahrens 106869962b56SMatthew Ahrens zio->io_physdone = pio->io_physdone; 106969962b56SMatthew Ahrens if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL) 107069962b56SMatthew Ahrens zio->io_logical->io_phys_children++; 107169962b56SMatthew Ahrens 1072e14bb325SJeff Bonwick return (zio); 107332b87932Sek } 107432b87932Sek 1075e14bb325SJeff Bonwick zio_t * 1076770499e1SDan Kimmel zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size, 10779a686fbcSPaul Dagnelie int type, zio_priority_t priority, enum zio_flag flags, 10789a686fbcSPaul Dagnelie zio_done_func_t *done, void *private) 1079fa9e4066Sahrens { 1080e14bb325SJeff Bonwick zio_t *zio; 1081fa9e4066Sahrens 1082e14bb325SJeff Bonwick ASSERT(vd->vdev_ops->vdev_op_leaf); 1083fa9e4066Sahrens 1084e14bb325SJeff Bonwick zio = zio_create(NULL, vd->vdev_spa, 0, NULL, 10855602294fSDan Kimmel data, size, size, done, private, type, priority, 108669962b56SMatthew Ahrens flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED, 1087e14bb325SJeff Bonwick vd, offset, NULL, 1088b24ab676SJeff Bonwick ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE); 1089fa9e4066Sahrens 1090e14bb325SJeff Bonwick return (zio); 1091e05725b1Sbonwick } 1092e05725b1Sbonwick 1093e05725b1Sbonwick void 1094e14bb325SJeff Bonwick zio_flush(zio_t *zio, vdev_t *vd) 1095e05725b1Sbonwick { 1096e14bb325SJeff Bonwick zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, 109769962b56SMatthew Ahrens NULL, NULL, 1098e14bb325SJeff Bonwick ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY)); 1099fa9e4066Sahrens } 1100fa9e4066Sahrens 11016e1f5caaSNeil Perrin void 11026e1f5caaSNeil Perrin zio_shrink(zio_t *zio, uint64_t size) 11036e1f5caaSNeil Perrin { 1104*1271e4b1SPrakash Surya ASSERT3P(zio->io_executor, ==, NULL); 1105*1271e4b1SPrakash Surya ASSERT3P(zio->io_orig_size, ==, zio->io_size); 1106*1271e4b1SPrakash Surya ASSERT3U(size, <=, zio->io_size); 11076e1f5caaSNeil Perrin 11086e1f5caaSNeil Perrin /* 11096e1f5caaSNeil Perrin * We don't shrink for raidz because of problems with the 11106e1f5caaSNeil Perrin * reconstruction when reading back less than the block size. 11116e1f5caaSNeil Perrin * Note, BP_IS_RAIDZ() assumes no compression. 11126e1f5caaSNeil Perrin */ 11136e1f5caaSNeil Perrin ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); 11145602294fSDan Kimmel if (!BP_IS_RAIDZ(zio->io_bp)) { 11155602294fSDan Kimmel /* we are not doing a raw write */ 11165602294fSDan Kimmel ASSERT3U(zio->io_size, ==, zio->io_lsize); 11175602294fSDan Kimmel zio->io_orig_size = zio->io_size = zio->io_lsize = size; 11185602294fSDan Kimmel } 11196e1f5caaSNeil Perrin } 11206e1f5caaSNeil Perrin 1121fa9e4066Sahrens /* 1122fa9e4066Sahrens * ========================================================================== 1123e14bb325SJeff Bonwick * Prepare to read and write logical blocks 1124fa9e4066Sahrens * ========================================================================== 1125fa9e4066Sahrens */ 1126e14bb325SJeff Bonwick 1127e05725b1Sbonwick static int 1128e14bb325SJeff Bonwick zio_read_bp_init(zio_t *zio) 1129fa9e4066Sahrens { 1130e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 1131e05725b1Sbonwick 113203361682SJeff Bonwick if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && 1133f5383399SBill Moore zio->io_child_type == ZIO_CHILD_LOGICAL && 1134f5383399SBill Moore !(zio->io_flags & ZIO_FLAG_RAW)) { 11355d7b4d43SMatthew Ahrens uint64_t psize = 11365d7b4d43SMatthew Ahrens BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp); 1137770499e1SDan Kimmel zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), 1138770499e1SDan Kimmel psize, psize, zio_decompress); 1139e14bb325SJeff Bonwick } 1140fa9e4066Sahrens 11415d7b4d43SMatthew Ahrens if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) { 11425d7b4d43SMatthew Ahrens zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1143770499e1SDan Kimmel 1144770499e1SDan Kimmel int psize = BPE_GET_PSIZE(bp); 1145770499e1SDan Kimmel void *data = abd_borrow_buf(zio->io_abd, psize); 1146770499e1SDan Kimmel decode_embedded_bp_compressed(bp, data); 1147770499e1SDan Kimmel abd_return_buf_copy(zio->io_abd, data, psize); 11485d7b4d43SMatthew Ahrens } else { 11495d7b4d43SMatthew Ahrens ASSERT(!BP_IS_EMBEDDED(bp)); 11505d7b4d43SMatthew Ahrens } 11515d7b4d43SMatthew Ahrens 1152ad135b5dSChristopher Siden if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0) 1153e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_CACHE; 1154fa9e4066Sahrens 1155bbfd46c4SJeff Bonwick if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP) 1156bbfd46c4SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_CACHE; 1157bbfd46c4SJeff Bonwick 1158b24ab676SJeff Bonwick if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) 1159b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_READ_PIPELINE; 1160b24ab676SJeff Bonwick 1161e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1162fa9e4066Sahrens } 1163fa9e4066Sahrens 1164e05725b1Sbonwick static int 1165e14bb325SJeff Bonwick zio_write_bp_init(zio_t *zio) 11660a4e9518Sgw { 1167e14bb325SJeff Bonwick if (!IO_IS_ALLOCATING(zio)) 1168e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 11690a4e9518Sgw 1170b24ab676SJeff Bonwick ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 1171b24ab676SJeff Bonwick 1172b24ab676SJeff Bonwick if (zio->io_bp_override) { 11730f7643c7SGeorge Wilson blkptr_t *bp = zio->io_bp; 11740f7643c7SGeorge Wilson zio_prop_t *zp = &zio->io_prop; 11750f7643c7SGeorge Wilson 1176b24ab676SJeff Bonwick ASSERT(bp->blk_birth != zio->io_txg); 1177b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0); 1178b24ab676SJeff Bonwick 1179b24ab676SJeff Bonwick *bp = *zio->io_bp_override; 1180b24ab676SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1181b24ab676SJeff Bonwick 11825d7b4d43SMatthew Ahrens if (BP_IS_EMBEDDED(bp)) 11835d7b4d43SMatthew Ahrens return (ZIO_PIPELINE_CONTINUE); 11845d7b4d43SMatthew Ahrens 118580901aeaSGeorge Wilson /* 118680901aeaSGeorge Wilson * If we've been overridden and nopwrite is set then 118780901aeaSGeorge Wilson * set the flag accordingly to indicate that a nopwrite 118880901aeaSGeorge Wilson * has already occurred. 118980901aeaSGeorge Wilson */ 119080901aeaSGeorge Wilson if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) { 119180901aeaSGeorge Wilson ASSERT(!zp->zp_dedup); 11920f7643c7SGeorge Wilson ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum); 119380901aeaSGeorge Wilson zio->io_flags |= ZIO_FLAG_NOPWRITE; 119480901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 119580901aeaSGeorge Wilson } 119680901aeaSGeorge Wilson 119780901aeaSGeorge Wilson ASSERT(!zp->zp_nopwrite); 119880901aeaSGeorge Wilson 1199b24ab676SJeff Bonwick if (BP_IS_HOLE(bp) || !zp->zp_dedup) 1200b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1201b24ab676SJeff Bonwick 120245818ee1SMatthew Ahrens ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags & 120345818ee1SMatthew Ahrens ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify); 1204b24ab676SJeff Bonwick 1205b24ab676SJeff Bonwick if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) { 1206b24ab676SJeff Bonwick BP_SET_DEDUP(bp, 1); 1207b24ab676SJeff Bonwick zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; 1208b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1209b24ab676SJeff Bonwick } 12100f7643c7SGeorge Wilson 12110f7643c7SGeorge Wilson /* 12120f7643c7SGeorge Wilson * We were unable to handle this as an override bp, treat 12130f7643c7SGeorge Wilson * it as a regular write I/O. 12140f7643c7SGeorge Wilson */ 1215b39b744bSMatthew Ahrens zio->io_bp_override = NULL; 12160f7643c7SGeorge Wilson *bp = zio->io_bp_orig; 12170f7643c7SGeorge Wilson zio->io_pipeline = zio->io_orig_pipeline; 1218b24ab676SJeff Bonwick } 12190a4e9518Sgw 12200f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 12210f7643c7SGeorge Wilson } 12220f7643c7SGeorge Wilson 12230f7643c7SGeorge Wilson static int 12240f7643c7SGeorge Wilson zio_write_compress(zio_t *zio) 12250f7643c7SGeorge Wilson { 12260f7643c7SGeorge Wilson spa_t *spa = zio->io_spa; 12270f7643c7SGeorge Wilson zio_prop_t *zp = &zio->io_prop; 12280f7643c7SGeorge Wilson enum zio_compress compress = zp->zp_compress; 12290f7643c7SGeorge Wilson blkptr_t *bp = zio->io_bp; 12305602294fSDan Kimmel uint64_t lsize = zio->io_lsize; 12315602294fSDan Kimmel uint64_t psize = zio->io_size; 12320f7643c7SGeorge Wilson int pass = 1; 12330f7643c7SGeorge Wilson 12345602294fSDan Kimmel EQUIV(lsize != psize, (zio->io_flags & ZIO_FLAG_RAW) != 0); 12355602294fSDan Kimmel 12360f7643c7SGeorge Wilson /* 12370f7643c7SGeorge Wilson * If our children haven't all reached the ready stage, 12380f7643c7SGeorge Wilson * wait for them and then repeat this pipeline stage. 12390f7643c7SGeorge Wilson */ 12400f7643c7SGeorge Wilson if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 12410f7643c7SGeorge Wilson zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY)) 12420f7643c7SGeorge Wilson return (ZIO_PIPELINE_STOP); 12430f7643c7SGeorge Wilson 12440f7643c7SGeorge Wilson if (!IO_IS_ALLOCATING(zio)) 12450f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 12460f7643c7SGeorge Wilson 12470f7643c7SGeorge Wilson if (zio->io_children_ready != NULL) { 12480f7643c7SGeorge Wilson /* 12490f7643c7SGeorge Wilson * Now that all our children are ready, run the callback 12500f7643c7SGeorge Wilson * associated with this zio in case it wants to modify the 12510f7643c7SGeorge Wilson * data to be written. 12520f7643c7SGeorge Wilson */ 12530f7643c7SGeorge Wilson ASSERT3U(zp->zp_level, >, 0); 12540f7643c7SGeorge Wilson zio->io_children_ready(zio); 12550f7643c7SGeorge Wilson } 12560f7643c7SGeorge Wilson 12570f7643c7SGeorge Wilson ASSERT(zio->io_child_type != ZIO_CHILD_DDT); 12580f7643c7SGeorge Wilson ASSERT(zio->io_bp_override == NULL); 12590f7643c7SGeorge Wilson 126043466aaeSMax Grossman if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) { 1261e14bb325SJeff Bonwick /* 1262e14bb325SJeff Bonwick * We're rewriting an existing block, which means we're 1263e14bb325SJeff Bonwick * working on behalf of spa_sync(). For spa_sync() to 1264e14bb325SJeff Bonwick * converge, it must eventually be the case that we don't 1265e14bb325SJeff Bonwick * have to allocate new blocks. But compression changes 1266e14bb325SJeff Bonwick * the blocksize, which forces a reallocate, and makes 1267e14bb325SJeff Bonwick * convergence take longer. Therefore, after the first 1268e14bb325SJeff Bonwick * few passes, stop compressing to ensure convergence. 1269e14bb325SJeff Bonwick */ 1270b24ab676SJeff Bonwick pass = spa_sync_pass(spa); 1271b24ab676SJeff Bonwick 1272b24ab676SJeff Bonwick ASSERT(zio->io_txg == spa_syncing_txg(spa)); 1273b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1274b24ab676SJeff Bonwick ASSERT(!BP_GET_DEDUP(bp)); 1275e05725b1Sbonwick 127601f55e48SGeorge Wilson if (pass >= zfs_sync_pass_dont_compress) 1277e14bb325SJeff Bonwick compress = ZIO_COMPRESS_OFF; 1278e05725b1Sbonwick 1279e14bb325SJeff Bonwick /* Make sure someone doesn't change their mind on overwrites */ 12805d7b4d43SMatthew Ahrens ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp), 1281b24ab676SJeff Bonwick spa_max_replication(spa)) == BP_GET_NDVAS(bp)); 1282e14bb325SJeff Bonwick } 1283fa9e4066Sahrens 12845602294fSDan Kimmel /* If it's a compressed write that is not raw, compress the buffer. */ 12855602294fSDan Kimmel if (compress != ZIO_COMPRESS_OFF && psize == lsize) { 1286b24ab676SJeff Bonwick void *cbuf = zio_buf_alloc(lsize); 1287770499e1SDan Kimmel psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize); 1288b24ab676SJeff Bonwick if (psize == 0 || psize == lsize) { 1289e14bb325SJeff Bonwick compress = ZIO_COMPRESS_OFF; 1290b24ab676SJeff Bonwick zio_buf_free(cbuf, lsize); 12915d7b4d43SMatthew Ahrens } else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE && 12925d7b4d43SMatthew Ahrens zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) && 12935d7b4d43SMatthew Ahrens spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) { 12945d7b4d43SMatthew Ahrens encode_embedded_bp_compressed(bp, 12955d7b4d43SMatthew Ahrens cbuf, compress, lsize, psize); 12965d7b4d43SMatthew Ahrens BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA); 12975d7b4d43SMatthew Ahrens BP_SET_TYPE(bp, zio->io_prop.zp_type); 12985d7b4d43SMatthew Ahrens BP_SET_LEVEL(bp, zio->io_prop.zp_level); 12995d7b4d43SMatthew Ahrens zio_buf_free(cbuf, lsize); 13005d7b4d43SMatthew Ahrens bp->blk_birth = zio->io_txg; 13015d7b4d43SMatthew Ahrens zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 13025d7b4d43SMatthew Ahrens ASSERT(spa_feature_is_active(spa, 13035d7b4d43SMatthew Ahrens SPA_FEATURE_EMBEDDED_DATA)); 13045d7b4d43SMatthew Ahrens return (ZIO_PIPELINE_CONTINUE); 1305b24ab676SJeff Bonwick } else { 13065d7b4d43SMatthew Ahrens /* 130781cd5c55SMatthew Ahrens * Round up compressed size up to the ashift 130881cd5c55SMatthew Ahrens * of the smallest-ashift device, and zero the tail. 130981cd5c55SMatthew Ahrens * This ensures that the compressed size of the BP 131081cd5c55SMatthew Ahrens * (and thus compressratio property) are correct, 131181cd5c55SMatthew Ahrens * in that we charge for the padding used to fill out 131281cd5c55SMatthew Ahrens * the last sector. 13135d7b4d43SMatthew Ahrens */ 131481cd5c55SMatthew Ahrens ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); 131581cd5c55SMatthew Ahrens size_t rounded = (size_t)P2ROUNDUP(psize, 131681cd5c55SMatthew Ahrens 1ULL << spa->spa_min_ashift); 131781cd5c55SMatthew Ahrens if (rounded >= lsize) { 13185d7b4d43SMatthew Ahrens compress = ZIO_COMPRESS_OFF; 13195d7b4d43SMatthew Ahrens zio_buf_free(cbuf, lsize); 132081cd5c55SMatthew Ahrens psize = lsize; 13215d7b4d43SMatthew Ahrens } else { 1322770499e1SDan Kimmel abd_t *cdata = abd_get_from_buf(cbuf, lsize); 1323770499e1SDan Kimmel abd_take_ownership_of_buf(cdata, B_TRUE); 1324770499e1SDan Kimmel abd_zero_off(cdata, psize, rounded - psize); 132581cd5c55SMatthew Ahrens psize = rounded; 1326770499e1SDan Kimmel zio_push_transform(zio, cdata, 13275d7b4d43SMatthew Ahrens psize, lsize, NULL); 13285d7b4d43SMatthew Ahrens } 1329e14bb325SJeff Bonwick } 13300f7643c7SGeorge Wilson 13310f7643c7SGeorge Wilson /* 13320f7643c7SGeorge Wilson * We were unable to handle this as an override bp, treat 13330f7643c7SGeorge Wilson * it as a regular write I/O. 13340f7643c7SGeorge Wilson */ 13350f7643c7SGeorge Wilson zio->io_bp_override = NULL; 13360f7643c7SGeorge Wilson *bp = zio->io_bp_orig; 13370f7643c7SGeorge Wilson zio->io_pipeline = zio->io_orig_pipeline; 13385602294fSDan Kimmel } else { 13395602294fSDan Kimmel ASSERT3U(psize, !=, 0); 1340e14bb325SJeff Bonwick } 1341c717a561Smaybee 1342e14bb325SJeff Bonwick /* 1343e14bb325SJeff Bonwick * The final pass of spa_sync() must be all rewrites, but the first 1344e14bb325SJeff Bonwick * few passes offer a trade-off: allocating blocks defers convergence, 1345e14bb325SJeff Bonwick * but newly allocated blocks are sequential, so they can be written 1346e14bb325SJeff Bonwick * to disk faster. Therefore, we allow the first few passes of 1347e14bb325SJeff Bonwick * spa_sync() to allocate new blocks, but force rewrites after that. 1348e14bb325SJeff Bonwick * There should only be a handful of blocks after pass 1 in any case. 1349e14bb325SJeff Bonwick */ 135043466aaeSMax Grossman if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg && 135143466aaeSMax Grossman BP_GET_PSIZE(bp) == psize && 135201f55e48SGeorge Wilson pass >= zfs_sync_pass_rewrite) { 1353b24ab676SJeff Bonwick ASSERT(psize != 0); 1354b24ab676SJeff Bonwick enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; 1355e14bb325SJeff Bonwick zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; 1356e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_IO_REWRITE; 1357e14bb325SJeff Bonwick } else { 1358e14bb325SJeff Bonwick BP_ZERO(bp); 1359e14bb325SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 1360e14bb325SJeff Bonwick } 1361fa9e4066Sahrens 1362b24ab676SJeff Bonwick if (psize == 0) { 136343466aaeSMax Grossman if (zio->io_bp_orig.blk_birth != 0 && 136443466aaeSMax Grossman spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { 136543466aaeSMax Grossman BP_SET_LSIZE(bp, lsize); 136643466aaeSMax Grossman BP_SET_TYPE(bp, zp->zp_type); 136743466aaeSMax Grossman BP_SET_LEVEL(bp, zp->zp_level); 136843466aaeSMax Grossman BP_SET_BIRTH(bp, zio->io_txg, 0); 136943466aaeSMax Grossman } 1370e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1371e14bb325SJeff Bonwick } else { 1372e14bb325SJeff Bonwick ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); 1373e14bb325SJeff Bonwick BP_SET_LSIZE(bp, lsize); 137443466aaeSMax Grossman BP_SET_TYPE(bp, zp->zp_type); 137543466aaeSMax Grossman BP_SET_LEVEL(bp, zp->zp_level); 1376b24ab676SJeff Bonwick BP_SET_PSIZE(bp, psize); 1377e14bb325SJeff Bonwick BP_SET_COMPRESS(bp, compress); 1378e14bb325SJeff Bonwick BP_SET_CHECKSUM(bp, zp->zp_checksum); 1379b24ab676SJeff Bonwick BP_SET_DEDUP(bp, zp->zp_dedup); 1380e14bb325SJeff Bonwick BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 1381b24ab676SJeff Bonwick if (zp->zp_dedup) { 1382b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1383b24ab676SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 1384b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; 1385b24ab676SJeff Bonwick } 138680901aeaSGeorge Wilson if (zp->zp_nopwrite) { 138780901aeaSGeorge Wilson ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 138880901aeaSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 138980901aeaSGeorge Wilson zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; 139080901aeaSGeorge Wilson } 1391b24ab676SJeff Bonwick } 1392b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 1393b24ab676SJeff Bonwick } 1394b24ab676SJeff Bonwick 1395b24ab676SJeff Bonwick static int 1396b24ab676SJeff Bonwick zio_free_bp_init(zio_t *zio) 1397b24ab676SJeff Bonwick { 1398b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 1399b24ab676SJeff Bonwick 1400b24ab676SJeff Bonwick if (zio->io_child_type == ZIO_CHILD_LOGICAL) { 1401b24ab676SJeff Bonwick if (BP_GET_DEDUP(bp)) 1402b24ab676SJeff Bonwick zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; 1403e14bb325SJeff Bonwick } 1404fa9e4066Sahrens 1405e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 1406fa9e4066Sahrens } 1407fa9e4066Sahrens 1408e14bb325SJeff Bonwick /* 1409e14bb325SJeff Bonwick * ========================================================================== 1410e14bb325SJeff Bonwick * Execute the I/O pipeline 1411e14bb325SJeff Bonwick * ========================================================================== 1412e14bb325SJeff Bonwick */ 1413e14bb325SJeff Bonwick 1414e14bb325SJeff Bonwick static void 1415ec94d322SAdam Leventhal zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) 1416fa9e4066Sahrens { 141780eb36f2SGeorge Wilson spa_t *spa = zio->io_spa; 1418e14bb325SJeff Bonwick zio_type_t t = zio->io_type; 14195aeb9474SGarrett D'Amore int flags = (cutinline ? TQ_FRONT : 0); 14200a4e9518Sgw 14210a4e9518Sgw /* 1422bbe36defSGeorge Wilson * If we're a config writer or a probe, the normal issue and 1423bbe36defSGeorge Wilson * interrupt threads may all be blocked waiting for the config lock. 1424bbe36defSGeorge Wilson * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. 14250a4e9518Sgw */ 1426bbe36defSGeorge Wilson if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) 1427e14bb325SJeff Bonwick t = ZIO_TYPE_NULL; 14280a4e9518Sgw 14290a4e9518Sgw /* 1430e14bb325SJeff Bonwick * A similar issue exists for the L2ARC write thread until L2ARC 2.0. 14310a4e9518Sgw */ 1432e14bb325SJeff Bonwick if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) 1433e14bb325SJeff Bonwick t = ZIO_TYPE_NULL; 14340a4e9518Sgw 143580eb36f2SGeorge Wilson /* 1436ec94d322SAdam Leventhal * If this is a high priority I/O, then use the high priority taskq if 1437ec94d322SAdam Leventhal * available. 143880eb36f2SGeorge Wilson */ 143980eb36f2SGeorge Wilson if (zio->io_priority == ZIO_PRIORITY_NOW && 1440ec94d322SAdam Leventhal spa->spa_zio_taskq[t][q + 1].stqs_count != 0) 144180eb36f2SGeorge Wilson q++; 144280eb36f2SGeorge Wilson 144380eb36f2SGeorge Wilson ASSERT3U(q, <, ZIO_TASKQ_TYPES); 14445aeb9474SGarrett D'Amore 14455aeb9474SGarrett D'Amore /* 14465aeb9474SGarrett D'Amore * NB: We are assuming that the zio can only be dispatched 14475aeb9474SGarrett D'Amore * to a single taskq at a time. It would be a grievous error 14485aeb9474SGarrett D'Amore * to dispatch the zio to another taskq at the same time. 14495aeb9474SGarrett D'Amore */ 14505aeb9474SGarrett D'Amore ASSERT(zio->io_tqent.tqent_next == NULL); 1451ec94d322SAdam Leventhal spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio, 1452ec94d322SAdam Leventhal flags, &zio->io_tqent); 1453e14bb325SJeff Bonwick } 14540a4e9518Sgw 1455e14bb325SJeff Bonwick static boolean_t 1456ec94d322SAdam Leventhal zio_taskq_member(zio_t *zio, zio_taskq_type_t q) 1457e14bb325SJeff Bonwick { 1458e14bb325SJeff Bonwick kthread_t *executor = zio->io_executor; 1459e14bb325SJeff Bonwick spa_t *spa = zio->io_spa; 14600a4e9518Sgw 1461ec94d322SAdam Leventhal for (zio_type_t t = 0; t < ZIO_TYPES; t++) { 1462ec94d322SAdam Leventhal spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; 1463ec94d322SAdam Leventhal uint_t i; 1464ec94d322SAdam Leventhal for (i = 0; i < tqs->stqs_count; i++) { 1465ec94d322SAdam Leventhal if (taskq_member(tqs->stqs_taskq[i], executor)) 1466ec94d322SAdam Leventhal return (B_TRUE); 1467ec94d322SAdam Leventhal } 1468ec94d322SAdam Leventhal } 14690a4e9518Sgw 1470e14bb325SJeff Bonwick return (B_FALSE); 1471e14bb325SJeff Bonwick } 1472e05725b1Sbonwick 1473e14bb325SJeff Bonwick static int 1474e14bb325SJeff Bonwick zio_issue_async(zio_t *zio) 1475e14bb325SJeff Bonwick { 147635a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 1477e14bb325SJeff Bonwick 1478e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 14790a4e9518Sgw } 14800a4e9518Sgw 1481e14bb325SJeff Bonwick void 1482e14bb325SJeff Bonwick zio_interrupt(zio_t *zio) 14830a4e9518Sgw { 148435a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); 1485e14bb325SJeff Bonwick } 14860a4e9518Sgw 148797e81309SPrakash Surya void 148897e81309SPrakash Surya zio_delay_interrupt(zio_t *zio) 148997e81309SPrakash Surya { 149097e81309SPrakash Surya /* 149197e81309SPrakash Surya * The timeout_generic() function isn't defined in userspace, so 149297e81309SPrakash Surya * rather than trying to implement the function, the zio delay 149397e81309SPrakash Surya * functionality has been disabled for userspace builds. 149497e81309SPrakash Surya */ 149597e81309SPrakash Surya 149697e81309SPrakash Surya #ifdef _KERNEL 149797e81309SPrakash Surya /* 149897e81309SPrakash Surya * If io_target_timestamp is zero, then no delay has been registered 149997e81309SPrakash Surya * for this IO, thus jump to the end of this function and "skip" the 150097e81309SPrakash Surya * delay; issuing it directly to the zio layer. 150197e81309SPrakash Surya */ 150297e81309SPrakash Surya if (zio->io_target_timestamp != 0) { 150397e81309SPrakash Surya hrtime_t now = gethrtime(); 150497e81309SPrakash Surya 150597e81309SPrakash Surya if (now >= zio->io_target_timestamp) { 150697e81309SPrakash Surya /* 150797e81309SPrakash Surya * This IO has already taken longer than the target 150897e81309SPrakash Surya * delay to complete, so we don't want to delay it 150997e81309SPrakash Surya * any longer; we "miss" the delay and issue it 151097e81309SPrakash Surya * directly to the zio layer. This is likely due to 151197e81309SPrakash Surya * the target latency being set to a value less than 151297e81309SPrakash Surya * the underlying hardware can satisfy (e.g. delay 151397e81309SPrakash Surya * set to 1ms, but the disks take 10ms to complete an 151497e81309SPrakash Surya * IO request). 151597e81309SPrakash Surya */ 151697e81309SPrakash Surya 151797e81309SPrakash Surya DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, 151897e81309SPrakash Surya hrtime_t, now); 151997e81309SPrakash Surya 152097e81309SPrakash Surya zio_interrupt(zio); 152197e81309SPrakash Surya } else { 152297e81309SPrakash Surya hrtime_t diff = zio->io_target_timestamp - now; 152397e81309SPrakash Surya 152497e81309SPrakash Surya DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, 152597e81309SPrakash Surya hrtime_t, now, hrtime_t, diff); 152697e81309SPrakash Surya 152797e81309SPrakash Surya (void) timeout_generic(CALLOUT_NORMAL, 152897e81309SPrakash Surya (void (*)(void *))zio_interrupt, zio, diff, 1, 0); 152997e81309SPrakash Surya } 153097e81309SPrakash Surya 153197e81309SPrakash Surya return; 153297e81309SPrakash Surya } 153397e81309SPrakash Surya #endif 153497e81309SPrakash Surya 153597e81309SPrakash Surya DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); 153697e81309SPrakash Surya zio_interrupt(zio); 153797e81309SPrakash Surya } 153897e81309SPrakash Surya 1539e14bb325SJeff Bonwick /* 1540e14bb325SJeff Bonwick * Execute the I/O pipeline until one of the following occurs: 1541f7170741SWill Andrews * 1542f7170741SWill Andrews * (1) the I/O completes 1543f7170741SWill Andrews * (2) the pipeline stalls waiting for dependent child I/Os 1544f7170741SWill Andrews * (3) the I/O issues, so we're waiting for an I/O completion interrupt 1545f7170741SWill Andrews * (4) the I/O is delegated by vdev-level caching or aggregation 1546f7170741SWill Andrews * (5) the I/O is deferred due to vdev-level queueing 1547f7170741SWill Andrews * (6) the I/O is handed off to another thread. 1548f7170741SWill Andrews * 1549f7170741SWill Andrews * In all cases, the pipeline stops whenever there's no CPU work; it never 1550f7170741SWill Andrews * burns a thread in cv_wait(). 1551e14bb325SJeff Bonwick * 1552e14bb325SJeff Bonwick * There's no locking on io_stage because there's no legitimate way 1553e14bb325SJeff Bonwick * for multiple threads to be attempting to process the same I/O. 1554e14bb325SJeff Bonwick */ 1555b24ab676SJeff Bonwick static zio_pipe_stage_t *zio_pipeline[]; 15560a4e9518Sgw 1557e14bb325SJeff Bonwick void 1558e14bb325SJeff Bonwick zio_execute(zio_t *zio) 1559e14bb325SJeff Bonwick { 1560e14bb325SJeff Bonwick zio->io_executor = curthread; 15610a4e9518Sgw 15620f7643c7SGeorge Wilson ASSERT3U(zio->io_queued_timestamp, >, 0); 15630f7643c7SGeorge Wilson 1564e14bb325SJeff Bonwick while (zio->io_stage < ZIO_STAGE_DONE) { 1565b24ab676SJeff Bonwick enum zio_stage pipeline = zio->io_pipeline; 1566b24ab676SJeff Bonwick enum zio_stage stage = zio->io_stage; 1567e14bb325SJeff Bonwick int rv; 15680a4e9518Sgw 1569e14bb325SJeff Bonwick ASSERT(!MUTEX_HELD(&zio->io_lock)); 1570b24ab676SJeff Bonwick ASSERT(ISP2(stage)); 1571b24ab676SJeff Bonwick ASSERT(zio->io_stall == NULL); 15720a4e9518Sgw 1573b24ab676SJeff Bonwick do { 1574b24ab676SJeff Bonwick stage <<= 1; 1575b24ab676SJeff Bonwick } while ((stage & pipeline) == 0); 1576e14bb325SJeff Bonwick 1577e14bb325SJeff Bonwick ASSERT(stage <= ZIO_STAGE_DONE); 15780a4e9518Sgw 15790a4e9518Sgw /* 1580e14bb325SJeff Bonwick * If we are in interrupt context and this pipeline stage 1581e14bb325SJeff Bonwick * will grab a config lock that is held across I/O, 1582b24ab676SJeff Bonwick * or may wait for an I/O that needs an interrupt thread 1583b24ab676SJeff Bonwick * to complete, issue async to avoid deadlock. 158435a5a358SJonathan Adams * 158535a5a358SJonathan Adams * For VDEV_IO_START, we cut in line so that the io will 158635a5a358SJonathan Adams * be sent to disk promptly. 15870a4e9518Sgw */ 1588b24ab676SJeff Bonwick if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && 1589e14bb325SJeff Bonwick zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { 159035a5a358SJonathan Adams boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? 159135a5a358SJonathan Adams zio_requeue_io_start_cut_in_line : B_FALSE; 159235a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); 1593e14bb325SJeff Bonwick return; 15940a4e9518Sgw } 15950a4e9518Sgw 1596e14bb325SJeff Bonwick zio->io_stage = stage; 15970f7643c7SGeorge Wilson zio->io_pipeline_trace |= zio->io_stage; 1598bf16b11eSMatthew Ahrens rv = zio_pipeline[highbit64(stage) - 1](zio); 15990a4e9518Sgw 1600e14bb325SJeff Bonwick if (rv == ZIO_PIPELINE_STOP) 1601e14bb325SJeff Bonwick return; 16020a4e9518Sgw 1603e14bb325SJeff Bonwick ASSERT(rv == ZIO_PIPELINE_CONTINUE); 1604e14bb325SJeff Bonwick } 16050a4e9518Sgw } 16060a4e9518Sgw 1607e14bb325SJeff Bonwick /* 1608e14bb325SJeff Bonwick * ========================================================================== 1609e14bb325SJeff Bonwick * Initiate I/O, either sync or async 1610e14bb325SJeff Bonwick * ========================================================================== 1611e14bb325SJeff Bonwick */ 1612e14bb325SJeff Bonwick int 1613e14bb325SJeff Bonwick zio_wait(zio_t *zio) 16140a4e9518Sgw { 1615e14bb325SJeff Bonwick int error; 16160a4e9518Sgw 1617*1271e4b1SPrakash Surya ASSERT3P(zio->io_stage, ==, ZIO_STAGE_OPEN); 1618*1271e4b1SPrakash Surya ASSERT3P(zio->io_executor, ==, NULL); 16190a4e9518Sgw 1620e14bb325SJeff Bonwick zio->io_waiter = curthread; 16210f7643c7SGeorge Wilson ASSERT0(zio->io_queued_timestamp); 16220f7643c7SGeorge Wilson zio->io_queued_timestamp = gethrtime(); 1623e05725b1Sbonwick 1624e14bb325SJeff Bonwick zio_execute(zio); 16250a4e9518Sgw 1626e14bb325SJeff Bonwick mutex_enter(&zio->io_lock); 1627e14bb325SJeff Bonwick while (zio->io_executor != NULL) 1628e14bb325SJeff Bonwick cv_wait(&zio->io_cv, &zio->io_lock); 1629e14bb325SJeff Bonwick mutex_exit(&zio->io_lock); 163032b87932Sek 1631e14bb325SJeff Bonwick error = zio->io_error; 1632e14bb325SJeff Bonwick zio_destroy(zio); 163332b87932Sek 1634e14bb325SJeff Bonwick return (error); 163532b87932Sek } 163632b87932Sek 1637e14bb325SJeff Bonwick void 1638e14bb325SJeff Bonwick zio_nowait(zio_t *zio) 16390a4e9518Sgw { 1640*1271e4b1SPrakash Surya ASSERT3P(zio->io_executor, ==, NULL); 1641fa9e4066Sahrens 1642a3f829aeSBill Moore if (zio->io_child_type == ZIO_CHILD_LOGICAL && 1643a3f829aeSBill Moore zio_unique_parent(zio) == NULL) { 1644ea8dc4b6Seschrock /* 1645e14bb325SJeff Bonwick * This is a logical async I/O with no parent to wait for it. 164654d692b7SGeorge Wilson * We add it to the spa_async_root_zio "Godfather" I/O which 164754d692b7SGeorge Wilson * will ensure they complete prior to unloading the pool. 1648ea8dc4b6Seschrock */ 1649e14bb325SJeff Bonwick spa_t *spa = zio->io_spa; 165054d692b7SGeorge Wilson 16516f834bc1SMatthew Ahrens zio_add_child(spa->spa_async_zio_root[CPU_SEQID], zio); 1652e14bb325SJeff Bonwick } 1653ea8dc4b6Seschrock 16540f7643c7SGeorge Wilson ASSERT0(zio->io_queued_timestamp); 16550f7643c7SGeorge Wilson zio->io_queued_timestamp = gethrtime(); 1656e14bb325SJeff Bonwick zio_execute(zio); 1657e14bb325SJeff Bonwick } 1658ea8dc4b6Seschrock 1659e14bb325SJeff Bonwick /* 1660e14bb325SJeff Bonwick * ========================================================================== 1661*1271e4b1SPrakash Surya * Reexecute, cancel, or suspend/resume failed I/O 1662e14bb325SJeff Bonwick * ========================================================================== 1663e14bb325SJeff Bonwick */ 1664fa9e4066Sahrens 1665e14bb325SJeff Bonwick static void 1666e14bb325SJeff Bonwick zio_reexecute(zio_t *pio) 1667e14bb325SJeff Bonwick { 1668a3f829aeSBill Moore zio_t *cio, *cio_next; 1669a3f829aeSBill Moore 1670a3f829aeSBill Moore ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); 1671a3f829aeSBill Moore ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); 1672f5383399SBill Moore ASSERT(pio->io_gang_leader == NULL); 1673f5383399SBill Moore ASSERT(pio->io_gang_tree == NULL); 1674e05725b1Sbonwick 1675e14bb325SJeff Bonwick pio->io_flags = pio->io_orig_flags; 1676e14bb325SJeff Bonwick pio->io_stage = pio->io_orig_stage; 1677e14bb325SJeff Bonwick pio->io_pipeline = pio->io_orig_pipeline; 1678e14bb325SJeff Bonwick pio->io_reexecute = 0; 167980901aeaSGeorge Wilson pio->io_flags |= ZIO_FLAG_REEXECUTED; 16800f7643c7SGeorge Wilson pio->io_pipeline_trace = 0; 1681e14bb325SJeff Bonwick pio->io_error = 0; 1682a3f829aeSBill Moore for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1683a3f829aeSBill Moore pio->io_state[w] = 0; 1684e14bb325SJeff Bonwick for (int c = 0; c < ZIO_CHILD_TYPES; c++) 1685e14bb325SJeff Bonwick pio->io_child_error[c] = 0; 16860a4e9518Sgw 1687b24ab676SJeff Bonwick if (IO_IS_ALLOCATING(pio)) 1688b24ab676SJeff Bonwick BP_ZERO(pio->io_bp); 1689d58459f4Sek 1690e14bb325SJeff Bonwick /* 1691e14bb325SJeff Bonwick * As we reexecute pio's children, new children could be created. 1692a3f829aeSBill Moore * New children go to the head of pio's io_child_list, however, 1693e14bb325SJeff Bonwick * so we will (correctly) not reexecute them. The key is that 1694a3f829aeSBill Moore * the remainder of pio's io_child_list, from 'cio_next' onward, 1695a3f829aeSBill Moore * cannot be affected by any side effects of reexecuting 'cio'. 1696e14bb325SJeff Bonwick */ 16970f7643c7SGeorge Wilson zio_link_t *zl = NULL; 16980f7643c7SGeorge Wilson for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { 16990f7643c7SGeorge Wilson cio_next = zio_walk_children(pio, &zl); 1700e14bb325SJeff Bonwick mutex_enter(&pio->io_lock); 1701a3f829aeSBill Moore for (int w = 0; w < ZIO_WAIT_TYPES; w++) 1702a3f829aeSBill Moore pio->io_children[cio->io_child_type][w]++; 1703e14bb325SJeff Bonwick mutex_exit(&pio->io_lock); 1704a3f829aeSBill Moore zio_reexecute(cio); 1705fa9e4066Sahrens } 1706e05725b1Sbonwick 1707e14bb325SJeff Bonwick /* 1708e14bb325SJeff Bonwick * Now that all children have been reexecuted, execute the parent. 170954d692b7SGeorge Wilson * We don't reexecute "The Godfather" I/O here as it's the 171048bbca81SDaniel Hoffman * responsibility of the caller to wait on it. 1711e14bb325SJeff Bonwick */ 17120f7643c7SGeorge Wilson if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { 17130f7643c7SGeorge Wilson pio->io_queued_timestamp = gethrtime(); 171454d692b7SGeorge Wilson zio_execute(pio); 17150f7643c7SGeorge Wilson } 17160a4e9518Sgw } 17170a4e9518Sgw 1718*1271e4b1SPrakash Surya void 1719*1271e4b1SPrakash Surya zio_cancel(zio_t *zio) 1720*1271e4b1SPrakash Surya { 1721*1271e4b1SPrakash Surya /* 1722*1271e4b1SPrakash Surya * Disallow cancellation of a zio that's already been issued. 1723*1271e4b1SPrakash Surya */ 1724*1271e4b1SPrakash Surya VERIFY3P(zio->io_executor, ==, NULL); 1725*1271e4b1SPrakash Surya 1726*1271e4b1SPrakash Surya zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 1727*1271e4b1SPrakash Surya zio->io_done = NULL; 1728*1271e4b1SPrakash Surya 1729*1271e4b1SPrakash Surya zio_nowait(zio); 1730*1271e4b1SPrakash Surya } 1731*1271e4b1SPrakash Surya 1732e14bb325SJeff Bonwick void 1733e14bb325SJeff Bonwick zio_suspend(spa_t *spa, zio_t *zio) 17340a4e9518Sgw { 1735e14bb325SJeff Bonwick if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) 1736e14bb325SJeff Bonwick fm_panic("Pool '%s' has encountered an uncorrectable I/O " 1737e14bb325SJeff Bonwick "failure and the failure mode property for this pool " 1738e14bb325SJeff Bonwick "is set to panic.", spa_name(spa)); 17390a4e9518Sgw 1740e14bb325SJeff Bonwick zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0); 17410a4e9518Sgw 1742e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 1743fa9e4066Sahrens 1744e14bb325SJeff Bonwick if (spa->spa_suspend_zio_root == NULL) 174554d692b7SGeorge Wilson spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, 174654d692b7SGeorge Wilson ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | 174754d692b7SGeorge Wilson ZIO_FLAG_GODFATHER); 1748fa9e4066Sahrens 1749e14bb325SJeff Bonwick spa->spa_suspended = B_TRUE; 1750fa9e4066Sahrens 1751e14bb325SJeff Bonwick if (zio != NULL) { 175254d692b7SGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 1753e14bb325SJeff Bonwick ASSERT(zio != spa->spa_suspend_zio_root); 1754e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 1755a3f829aeSBill Moore ASSERT(zio_unique_parent(zio) == NULL); 1756e14bb325SJeff Bonwick ASSERT(zio->io_stage == ZIO_STAGE_DONE); 1757e14bb325SJeff Bonwick zio_add_child(spa->spa_suspend_zio_root, zio); 1758e14bb325SJeff Bonwick } 1759fa9e4066Sahrens 1760e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 1761e14bb325SJeff Bonwick } 1762fa9e4066Sahrens 176354d692b7SGeorge Wilson int 1764e14bb325SJeff Bonwick zio_resume(spa_t *spa) 1765e14bb325SJeff Bonwick { 176654d692b7SGeorge Wilson zio_t *pio; 1767fa9e4066Sahrens 1768b3995adbSahrens /* 1769e14bb325SJeff Bonwick * Reexecute all previously suspended i/o. 1770b3995adbSahrens */ 1771e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 1772e14bb325SJeff Bonwick spa->spa_suspended = B_FALSE; 1773e14bb325SJeff Bonwick cv_broadcast(&spa->spa_suspend_cv); 1774e14bb325SJeff Bonwick pio = spa->spa_suspend_zio_root; 1775e14bb325SJeff Bonwick spa->spa_suspend_zio_root = NULL; 1776e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 1777e14bb325SJeff Bonwick 1778e14bb325SJeff Bonwick if (pio == NULL) 177954d692b7SGeorge Wilson return (0); 1780e14bb325SJeff Bonwick 178154d692b7SGeorge Wilson zio_reexecute(pio); 178254d692b7SGeorge Wilson return (zio_wait(pio)); 1783e14bb325SJeff Bonwick } 1784e14bb325SJeff Bonwick 1785e14bb325SJeff Bonwick void 1786e14bb325SJeff Bonwick zio_resume_wait(spa_t *spa) 1787e14bb325SJeff Bonwick { 1788e14bb325SJeff Bonwick mutex_enter(&spa->spa_suspend_lock); 1789e14bb325SJeff Bonwick while (spa_suspended(spa)) 1790e14bb325SJeff Bonwick cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); 1791e14bb325SJeff Bonwick mutex_exit(&spa->spa_suspend_lock); 1792fa9e4066Sahrens } 1793fa9e4066Sahrens 1794fa9e4066Sahrens /* 1795fa9e4066Sahrens * ========================================================================== 1796e14bb325SJeff Bonwick * Gang blocks. 1797e14bb325SJeff Bonwick * 1798e14bb325SJeff Bonwick * A gang block is a collection of small blocks that looks to the DMU 1799e14bb325SJeff Bonwick * like one large block. When zio_dva_allocate() cannot find a block 1800e14bb325SJeff Bonwick * of the requested size, due to either severe fragmentation or the pool 1801e14bb325SJeff Bonwick * being nearly full, it calls zio_write_gang_block() to construct the 1802e14bb325SJeff Bonwick * block from smaller fragments. 1803e14bb325SJeff Bonwick * 1804e14bb325SJeff Bonwick * A gang block consists of a gang header (zio_gbh_phys_t) and up to 1805e14bb325SJeff Bonwick * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like 1806e14bb325SJeff Bonwick * an indirect block: it's an array of block pointers. It consumes 1807e14bb325SJeff Bonwick * only one sector and hence is allocatable regardless of fragmentation. 1808e14bb325SJeff Bonwick * The gang header's bps point to its gang members, which hold the data. 1809e14bb325SJeff Bonwick * 1810e14bb325SJeff Bonwick * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> 1811e14bb325SJeff Bonwick * as the verifier to ensure uniqueness of the SHA256 checksum. 1812e14bb325SJeff Bonwick * Critically, the gang block bp's blk_cksum is the checksum of the data, 1813e14bb325SJeff Bonwick * not the gang header. This ensures that data block signatures (needed for 1814e14bb325SJeff Bonwick * deduplication) are independent of how the block is physically stored. 1815e14bb325SJeff Bonwick * 1816e14bb325SJeff Bonwick * Gang blocks can be nested: a gang member may itself be a gang block. 1817e14bb325SJeff Bonwick * Thus every gang block is a tree in which root and all interior nodes are 1818e14bb325SJeff Bonwick * gang headers, and the leaves are normal blocks that contain user data. 1819e14bb325SJeff Bonwick * The root of the gang tree is called the gang leader. 1820e14bb325SJeff Bonwick * 1821e14bb325SJeff Bonwick * To perform any operation (read, rewrite, free, claim) on a gang block, 1822e14bb325SJeff Bonwick * zio_gang_assemble() first assembles the gang tree (minus data leaves) 1823e14bb325SJeff Bonwick * in the io_gang_tree field of the original logical i/o by recursively 1824e14bb325SJeff Bonwick * reading the gang leader and all gang headers below it. This yields 1825e14bb325SJeff Bonwick * an in-core tree containing the contents of every gang header and the 1826e14bb325SJeff Bonwick * bps for every constituent of the gang block. 1827e14bb325SJeff Bonwick * 1828e14bb325SJeff Bonwick * With the gang tree now assembled, zio_gang_issue() just walks the gang tree 1829e14bb325SJeff Bonwick * and invokes a callback on each bp. To free a gang block, zio_gang_issue() 1830e14bb325SJeff Bonwick * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. 1831e14bb325SJeff Bonwick * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). 1832e14bb325SJeff Bonwick * zio_read_gang() is a wrapper around zio_read() that omits reading gang 1833e14bb325SJeff Bonwick * headers, since we already have those in io_gang_tree. zio_rewrite_gang() 1834e14bb325SJeff Bonwick * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() 1835e14bb325SJeff Bonwick * of the gang header plus zio_checksum_compute() of the data to update the 1836e14bb325SJeff Bonwick * gang header's blk_cksum as described above. 1837e14bb325SJeff Bonwick * 1838e14bb325SJeff Bonwick * The two-phase assemble/issue model solves the problem of partial failure -- 1839e14bb325SJeff Bonwick * what if you'd freed part of a gang block but then couldn't read the 1840e14bb325SJeff Bonwick * gang header for another part? Assembling the entire gang tree first 1841e14bb325SJeff Bonwick * ensures that all the necessary gang header I/O has succeeded before 1842e14bb325SJeff Bonwick * starting the actual work of free, claim, or write. Once the gang tree 1843e14bb325SJeff Bonwick * is assembled, free and claim are in-memory operations that cannot fail. 1844e14bb325SJeff Bonwick * 1845e14bb325SJeff Bonwick * In the event that a gang write fails, zio_dva_unallocate() walks the 1846e14bb325SJeff Bonwick * gang tree to immediately free (i.e. insert back into the space map) 1847e14bb325SJeff Bonwick * everything we've allocated. This ensures that we don't get ENOSPC 1848e14bb325SJeff Bonwick * errors during repeated suspend/resume cycles due to a flaky device. 1849e14bb325SJeff Bonwick * 1850e14bb325SJeff Bonwick * Gang rewrites only happen during sync-to-convergence. If we can't assemble 1851e14bb325SJeff Bonwick * the gang tree, we won't modify the block, so we can safely defer the free 1852e14bb325SJeff Bonwick * (knowing that the block is still intact). If we *can* assemble the gang 1853e14bb325SJeff Bonwick * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free 1854e14bb325SJeff Bonwick * each constituent bp and we can allocate a new block on the next sync pass. 1855e14bb325SJeff Bonwick * 1856e14bb325SJeff Bonwick * In all cases, the gang tree allows complete recovery from partial failure. 1857fa9e4066Sahrens * ========================================================================== 1858fa9e4066Sahrens */ 1859e14bb325SJeff Bonwick 1860770499e1SDan Kimmel static void 1861770499e1SDan Kimmel zio_gang_issue_func_done(zio_t *zio) 1862770499e1SDan Kimmel { 1863770499e1SDan Kimmel abd_put(zio->io_abd); 1864770499e1SDan Kimmel } 1865770499e1SDan Kimmel 1866e14bb325SJeff Bonwick static zio_t * 1867770499e1SDan Kimmel zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1868770499e1SDan Kimmel uint64_t offset) 1869fa9e4066Sahrens { 1870e14bb325SJeff Bonwick if (gn != NULL) 1871e14bb325SJeff Bonwick return (pio); 1872fa9e4066Sahrens 1873770499e1SDan Kimmel return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), 1874770499e1SDan Kimmel BP_GET_PSIZE(bp), zio_gang_issue_func_done, 1875770499e1SDan Kimmel NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1876e14bb325SJeff Bonwick &pio->io_bookmark)); 1877e14bb325SJeff Bonwick } 1878e14bb325SJeff Bonwick 1879770499e1SDan Kimmel static zio_t * 1880770499e1SDan Kimmel zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1881770499e1SDan Kimmel uint64_t offset) 1882e14bb325SJeff Bonwick { 1883e14bb325SJeff Bonwick zio_t *zio; 1884e14bb325SJeff Bonwick 1885e14bb325SJeff Bonwick if (gn != NULL) { 1886770499e1SDan Kimmel abd_t *gbh_abd = 1887770499e1SDan Kimmel abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1888e14bb325SJeff Bonwick zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1889770499e1SDan Kimmel gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, 1890770499e1SDan Kimmel pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), 1891770499e1SDan Kimmel &pio->io_bookmark); 1892fa9e4066Sahrens /* 1893e14bb325SJeff Bonwick * As we rewrite each gang header, the pipeline will compute 1894e14bb325SJeff Bonwick * a new gang block header checksum for it; but no one will 1895e14bb325SJeff Bonwick * compute a new data checksum, so we do that here. The one 1896e14bb325SJeff Bonwick * exception is the gang leader: the pipeline already computed 1897e14bb325SJeff Bonwick * its data checksum because that stage precedes gang assembly. 1898e14bb325SJeff Bonwick * (Presently, nothing actually uses interior data checksums; 1899e14bb325SJeff Bonwick * this is just good hygiene.) 1900fa9e4066Sahrens */ 1901f5383399SBill Moore if (gn != pio->io_gang_leader->io_gang_tree) { 1902770499e1SDan Kimmel abd_t *buf = abd_get_offset(data, offset); 1903770499e1SDan Kimmel 1904e14bb325SJeff Bonwick zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), 1905770499e1SDan Kimmel buf, BP_GET_PSIZE(bp)); 1906770499e1SDan Kimmel 1907770499e1SDan Kimmel abd_put(buf); 1908e14bb325SJeff Bonwick } 1909b24ab676SJeff Bonwick /* 1910b24ab676SJeff Bonwick * If we are here to damage data for testing purposes, 1911b24ab676SJeff Bonwick * leave the GBH alone so that we can detect the damage. 1912b24ab676SJeff Bonwick */ 1913b24ab676SJeff Bonwick if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) 1914b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 1915fa9e4066Sahrens } else { 1916e14bb325SJeff Bonwick zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, 1917770499e1SDan Kimmel abd_get_offset(data, offset), BP_GET_PSIZE(bp), 1918770499e1SDan Kimmel zio_gang_issue_func_done, NULL, pio->io_priority, 1919e14bb325SJeff Bonwick ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 1920fa9e4066Sahrens } 1921fa9e4066Sahrens 1922e14bb325SJeff Bonwick return (zio); 1923e14bb325SJeff Bonwick } 1924fa9e4066Sahrens 1925e14bb325SJeff Bonwick /* ARGSUSED */ 1926770499e1SDan Kimmel static zio_t * 1927770499e1SDan Kimmel zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1928770499e1SDan Kimmel uint64_t offset) 1929e14bb325SJeff Bonwick { 1930b24ab676SJeff Bonwick return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, 1931b24ab676SJeff Bonwick ZIO_GANG_CHILD_FLAGS(pio))); 1932fa9e4066Sahrens } 1933fa9e4066Sahrens 1934e14bb325SJeff Bonwick /* ARGSUSED */ 1935770499e1SDan Kimmel static zio_t * 1936770499e1SDan Kimmel zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, 1937770499e1SDan Kimmel uint64_t offset) 1938fa9e4066Sahrens { 1939e14bb325SJeff Bonwick return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, 1940e14bb325SJeff Bonwick NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); 1941e14bb325SJeff Bonwick } 1942fa9e4066Sahrens 1943e14bb325SJeff Bonwick static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { 1944e14bb325SJeff Bonwick NULL, 1945e14bb325SJeff Bonwick zio_read_gang, 1946e14bb325SJeff Bonwick zio_rewrite_gang, 1947e14bb325SJeff Bonwick zio_free_gang, 1948e14bb325SJeff Bonwick zio_claim_gang, 1949e14bb325SJeff Bonwick NULL 1950e14bb325SJeff Bonwick }; 1951fa9e4066Sahrens 1952e14bb325SJeff Bonwick static void zio_gang_tree_assemble_done(zio_t *zio); 1953fa9e4066Sahrens 1954e14bb325SJeff Bonwick static zio_gang_node_t * 1955e14bb325SJeff Bonwick zio_gang_node_alloc(zio_gang_node_t **gnpp) 1956e14bb325SJeff Bonwick { 1957e14bb325SJeff Bonwick zio_gang_node_t *gn; 1958fa9e4066Sahrens 1959e14bb325SJeff Bonwick ASSERT(*gnpp == NULL); 1960fa9e4066Sahrens 1961e14bb325SJeff Bonwick gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); 1962e14bb325SJeff Bonwick gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); 1963e14bb325SJeff Bonwick *gnpp = gn; 1964e14bb325SJeff Bonwick 1965e14bb325SJeff Bonwick return (gn); 1966fa9e4066Sahrens } 1967fa9e4066Sahrens 1968fa9e4066Sahrens static void 1969e14bb325SJeff Bonwick zio_gang_node_free(zio_gang_node_t **gnpp) 1970fa9e4066Sahrens { 1971e14bb325SJeff Bonwick zio_gang_node_t *gn = *gnpp; 1972fa9e4066Sahrens 1973e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1974e14bb325SJeff Bonwick ASSERT(gn->gn_child[g] == NULL); 1975e14bb325SJeff Bonwick 1976e14bb325SJeff Bonwick zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); 1977e14bb325SJeff Bonwick kmem_free(gn, sizeof (*gn)); 1978e14bb325SJeff Bonwick *gnpp = NULL; 1979fa9e4066Sahrens } 1980fa9e4066Sahrens 1981e14bb325SJeff Bonwick static void 1982e14bb325SJeff Bonwick zio_gang_tree_free(zio_gang_node_t **gnpp) 1983fa9e4066Sahrens { 1984e14bb325SJeff Bonwick zio_gang_node_t *gn = *gnpp; 1985fa9e4066Sahrens 1986e14bb325SJeff Bonwick if (gn == NULL) 1987e14bb325SJeff Bonwick return; 1988fa9e4066Sahrens 1989e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) 1990e14bb325SJeff Bonwick zio_gang_tree_free(&gn->gn_child[g]); 1991fa9e4066Sahrens 1992e14bb325SJeff Bonwick zio_gang_node_free(gnpp); 1993fa9e4066Sahrens } 1994fa9e4066Sahrens 1995e14bb325SJeff Bonwick static void 1996f5383399SBill Moore zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) 1997fa9e4066Sahrens { 1998e14bb325SJeff Bonwick zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); 1999770499e1SDan Kimmel abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); 2000e14bb325SJeff Bonwick 2001f5383399SBill Moore ASSERT(gio->io_gang_leader == gio); 2002e14bb325SJeff Bonwick ASSERT(BP_IS_GANG(bp)); 2003fa9e4066Sahrens 2004770499e1SDan Kimmel zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2005770499e1SDan Kimmel zio_gang_tree_assemble_done, gn, gio->io_priority, 2006770499e1SDan Kimmel ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); 2007e14bb325SJeff Bonwick } 2008fa9e4066Sahrens 2009e14bb325SJeff Bonwick static void 2010e14bb325SJeff Bonwick zio_gang_tree_assemble_done(zio_t *zio) 2011e14bb325SJeff Bonwick { 2012f5383399SBill Moore zio_t *gio = zio->io_gang_leader; 2013e14bb325SJeff Bonwick zio_gang_node_t *gn = zio->io_private; 2014e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2015fa9e4066Sahrens 2016f5383399SBill Moore ASSERT(gio == zio_unique_parent(zio)); 2017b24ab676SJeff Bonwick ASSERT(zio->io_child_count == 0); 2018fa9e4066Sahrens 2019e14bb325SJeff Bonwick if (zio->io_error) 2020e14bb325SJeff Bonwick return; 2021fa9e4066Sahrens 2022770499e1SDan Kimmel /* this ABD was created from a linear buf in zio_gang_tree_assemble */ 2023e14bb325SJeff Bonwick if (BP_SHOULD_BYTESWAP(bp)) 2024770499e1SDan Kimmel byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); 2025fa9e4066Sahrens 2026770499e1SDan Kimmel ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); 2027e14bb325SJeff Bonwick ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); 20286e1f5caaSNeil Perrin ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2029e05725b1Sbonwick 2030770499e1SDan Kimmel abd_put(zio->io_abd); 2031770499e1SDan Kimmel 2032e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2033e14bb325SJeff Bonwick blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2034e14bb325SJeff Bonwick if (!BP_IS_GANG(gbp)) 2035e14bb325SJeff Bonwick continue; 2036f5383399SBill Moore zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); 2037e14bb325SJeff Bonwick } 2038fa9e4066Sahrens } 2039fa9e4066Sahrens 2040e14bb325SJeff Bonwick static void 2041770499e1SDan Kimmel zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, 2042770499e1SDan Kimmel uint64_t offset) 2043fa9e4066Sahrens { 2044f5383399SBill Moore zio_t *gio = pio->io_gang_leader; 2045e14bb325SJeff Bonwick zio_t *zio; 2046fa9e4066Sahrens 2047e14bb325SJeff Bonwick ASSERT(BP_IS_GANG(bp) == !!gn); 2048f5383399SBill Moore ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); 2049f5383399SBill Moore ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); 2050fa9e4066Sahrens 2051e14bb325SJeff Bonwick /* 2052e14bb325SJeff Bonwick * If you're a gang header, your data is in gn->gn_gbh. 2053e14bb325SJeff Bonwick * If you're a gang member, your data is in 'data' and gn == NULL. 2054e14bb325SJeff Bonwick */ 2055770499e1SDan Kimmel zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); 2056fa9e4066Sahrens 2057e14bb325SJeff Bonwick if (gn != NULL) { 20586e1f5caaSNeil Perrin ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); 2059fa9e4066Sahrens 2060e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2061e14bb325SJeff Bonwick blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; 2062e14bb325SJeff Bonwick if (BP_IS_HOLE(gbp)) 2063e14bb325SJeff Bonwick continue; 2064770499e1SDan Kimmel zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, 2065770499e1SDan Kimmel offset); 2066770499e1SDan Kimmel offset += BP_GET_PSIZE(gbp); 2067e14bb325SJeff Bonwick } 2068fa9e4066Sahrens } 2069fa9e4066Sahrens 2070f5383399SBill Moore if (gn == gio->io_gang_tree) 2071770499e1SDan Kimmel ASSERT3U(gio->io_size, ==, offset); 2072e05725b1Sbonwick 2073e14bb325SJeff Bonwick if (zio != pio) 2074e14bb325SJeff Bonwick zio_nowait(zio); 2075fa9e4066Sahrens } 2076fa9e4066Sahrens 2077e05725b1Sbonwick static int 2078e14bb325SJeff Bonwick zio_gang_assemble(zio_t *zio) 2079fa9e4066Sahrens { 2080e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2081fa9e4066Sahrens 2082f5383399SBill Moore ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); 2083f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2084f5383399SBill Moore 2085f5383399SBill Moore zio->io_gang_leader = zio; 2086fa9e4066Sahrens 2087e14bb325SJeff Bonwick zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); 2088e05725b1Sbonwick 2089e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2090fa9e4066Sahrens } 2091fa9e4066Sahrens 2092e05725b1Sbonwick static int 2093e14bb325SJeff Bonwick zio_gang_issue(zio_t *zio) 2094fa9e4066Sahrens { 2095e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 2096fa9e4066Sahrens 2097e14bb325SJeff Bonwick if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE)) 2098e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 2099fa9e4066Sahrens 2100f5383399SBill Moore ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); 2101f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2102fa9e4066Sahrens 2103e14bb325SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_GANG] == 0) 2104770499e1SDan Kimmel zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, 2105770499e1SDan Kimmel 0); 2106e14bb325SJeff Bonwick else 2107f5383399SBill Moore zio_gang_tree_free(&zio->io_gang_tree); 2108fa9e4066Sahrens 2109e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2110e05725b1Sbonwick 2111e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2112fa9e4066Sahrens } 2113fa9e4066Sahrens 2114fa9e4066Sahrens static void 2115e14bb325SJeff Bonwick zio_write_gang_member_ready(zio_t *zio) 2116fa9e4066Sahrens { 2117a3f829aeSBill Moore zio_t *pio = zio_unique_parent(zio); 2118f5383399SBill Moore zio_t *gio = zio->io_gang_leader; 211944cd46caSbillm dva_t *cdva = zio->io_bp->blk_dva; 212044cd46caSbillm dva_t *pdva = pio->io_bp->blk_dva; 2121fa9e4066Sahrens uint64_t asize; 2122fa9e4066Sahrens 2123e14bb325SJeff Bonwick if (BP_IS_HOLE(zio->io_bp)) 2124e14bb325SJeff Bonwick return; 2125e14bb325SJeff Bonwick 2126e14bb325SJeff Bonwick ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); 2127e14bb325SJeff Bonwick 2128e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_GANG); 2129b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); 2130b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); 2131b24ab676SJeff Bonwick ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); 213244cd46caSbillm ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); 2133fa9e4066Sahrens 2134fa9e4066Sahrens mutex_enter(&pio->io_lock); 2135e14bb325SJeff Bonwick for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { 213644cd46caSbillm ASSERT(DVA_GET_GANG(&pdva[d])); 213744cd46caSbillm asize = DVA_GET_ASIZE(&pdva[d]); 213844cd46caSbillm asize += DVA_GET_ASIZE(&cdva[d]); 213944cd46caSbillm DVA_SET_ASIZE(&pdva[d], asize); 214044cd46caSbillm } 2141fa9e4066Sahrens mutex_exit(&pio->io_lock); 2142fa9e4066Sahrens } 2143fa9e4066Sahrens 2144770499e1SDan Kimmel static void 2145770499e1SDan Kimmel zio_write_gang_done(zio_t *zio) 2146770499e1SDan Kimmel { 2147770499e1SDan Kimmel abd_put(zio->io_abd); 2148770499e1SDan Kimmel } 2149770499e1SDan Kimmel 21500a4e9518Sgw static int 2151e14bb325SJeff Bonwick zio_write_gang_block(zio_t *pio) 2152fa9e4066Sahrens { 2153e14bb325SJeff Bonwick spa_t *spa = pio->io_spa; 21540f7643c7SGeorge Wilson metaslab_class_t *mc = spa_normal_class(spa); 2155e14bb325SJeff Bonwick blkptr_t *bp = pio->io_bp; 2156f5383399SBill Moore zio_t *gio = pio->io_gang_leader; 2157e14bb325SJeff Bonwick zio_t *zio; 2158e14bb325SJeff Bonwick zio_gang_node_t *gn, **gnpp; 2159fa9e4066Sahrens zio_gbh_phys_t *gbh; 2160770499e1SDan Kimmel abd_t *gbh_abd; 2161e14bb325SJeff Bonwick uint64_t txg = pio->io_txg; 2162e14bb325SJeff Bonwick uint64_t resid = pio->io_size; 2163e14bb325SJeff Bonwick uint64_t lsize; 2164b24ab676SJeff Bonwick int copies = gio->io_prop.zp_copies; 2165b24ab676SJeff Bonwick int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); 2166e14bb325SJeff Bonwick zio_prop_t zp; 2167fa9e4066Sahrens int error; 2168fa9e4066Sahrens 21690f7643c7SGeorge Wilson int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; 21700f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 21710f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 21720f7643c7SGeorge Wilson ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 21730f7643c7SGeorge Wilson 21740f7643c7SGeorge Wilson flags |= METASLAB_ASYNC_ALLOC; 21750f7643c7SGeorge Wilson VERIFY(refcount_held(&mc->mc_alloc_slots, pio)); 21760f7643c7SGeorge Wilson 21770f7643c7SGeorge Wilson /* 21780f7643c7SGeorge Wilson * The logical zio has already placed a reservation for 21790f7643c7SGeorge Wilson * 'copies' allocation slots but gang blocks may require 21800f7643c7SGeorge Wilson * additional copies. These additional copies 21810f7643c7SGeorge Wilson * (i.e. gbh_copies - copies) are guaranteed to succeed 21820f7643c7SGeorge Wilson * since metaslab_class_throttle_reserve() always allows 21830f7643c7SGeorge Wilson * additional reservations for gang blocks. 21840f7643c7SGeorge Wilson */ 21850f7643c7SGeorge Wilson VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, 21860f7643c7SGeorge Wilson pio, flags)); 21870f7643c7SGeorge Wilson } 21880f7643c7SGeorge Wilson 21890f7643c7SGeorge Wilson error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, 21908363e80aSGeorge Wilson bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, 21918363e80aSGeorge Wilson &pio->io_alloc_list, pio); 2192e05725b1Sbonwick if (error) { 21930f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 21940f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 21950f7643c7SGeorge Wilson ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 21960f7643c7SGeorge Wilson 21970f7643c7SGeorge Wilson /* 21980f7643c7SGeorge Wilson * If we failed to allocate the gang block header then 21990f7643c7SGeorge Wilson * we remove any additional allocation reservations that 22000f7643c7SGeorge Wilson * we placed here. The original reservation will 22010f7643c7SGeorge Wilson * be removed when the logical I/O goes to the ready 22020f7643c7SGeorge Wilson * stage. 22030f7643c7SGeorge Wilson */ 22040f7643c7SGeorge Wilson metaslab_class_throttle_unreserve(mc, 22050f7643c7SGeorge Wilson gbh_copies - copies, pio); 22060f7643c7SGeorge Wilson } 2207e14bb325SJeff Bonwick pio->io_error = error; 2208e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2209e05725b1Sbonwick } 2210fa9e4066Sahrens 2211f5383399SBill Moore if (pio == gio) { 2212f5383399SBill Moore gnpp = &gio->io_gang_tree; 2213e14bb325SJeff Bonwick } else { 2214e14bb325SJeff Bonwick gnpp = pio->io_private; 2215e14bb325SJeff Bonwick ASSERT(pio->io_ready == zio_write_gang_member_ready); 2216fa9e4066Sahrens } 2217fa9e4066Sahrens 2218e14bb325SJeff Bonwick gn = zio_gang_node_alloc(gnpp); 2219e14bb325SJeff Bonwick gbh = gn->gn_gbh; 2220e14bb325SJeff Bonwick bzero(gbh, SPA_GANGBLOCKSIZE); 2221770499e1SDan Kimmel gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); 2222fa9e4066Sahrens 2223e14bb325SJeff Bonwick /* 2224e14bb325SJeff Bonwick * Create the gang header. 2225e14bb325SJeff Bonwick */ 2226770499e1SDan Kimmel zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, 2227770499e1SDan Kimmel zio_write_gang_done, NULL, pio->io_priority, 2228770499e1SDan Kimmel ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 2229fa9e4066Sahrens 2230e14bb325SJeff Bonwick /* 2231e14bb325SJeff Bonwick * Create and nowait the gang children. 2232e14bb325SJeff Bonwick */ 2233e14bb325SJeff Bonwick for (int g = 0; resid != 0; resid -= lsize, g++) { 2234e14bb325SJeff Bonwick lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), 2235e14bb325SJeff Bonwick SPA_MINBLOCKSIZE); 2236e14bb325SJeff Bonwick ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); 2237e14bb325SJeff Bonwick 2238f5383399SBill Moore zp.zp_checksum = gio->io_prop.zp_checksum; 2239e14bb325SJeff Bonwick zp.zp_compress = ZIO_COMPRESS_OFF; 2240e14bb325SJeff Bonwick zp.zp_type = DMU_OT_NONE; 2241e14bb325SJeff Bonwick zp.zp_level = 0; 2242b24ab676SJeff Bonwick zp.zp_copies = gio->io_prop.zp_copies; 224380901aeaSGeorge Wilson zp.zp_dedup = B_FALSE; 224480901aeaSGeorge Wilson zp.zp_dedup_verify = B_FALSE; 224580901aeaSGeorge Wilson zp.zp_nopwrite = B_FALSE; 2246e14bb325SJeff Bonwick 22470f7643c7SGeorge Wilson zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], 2248770499e1SDan Kimmel abd_get_offset(pio->io_abd, pio->io_size - resid), lsize, 2249770499e1SDan Kimmel lsize, &zp, zio_write_gang_member_ready, NULL, NULL, 2250770499e1SDan Kimmel zio_write_gang_done, &gn->gn_child[g], pio->io_priority, 22510f7643c7SGeorge Wilson ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); 22520f7643c7SGeorge Wilson 22530f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 22540f7643c7SGeorge Wilson ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 22550f7643c7SGeorge Wilson ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); 22560f7643c7SGeorge Wilson 22570f7643c7SGeorge Wilson /* 22580f7643c7SGeorge Wilson * Gang children won't throttle but we should 22590f7643c7SGeorge Wilson * account for their work, so reserve an allocation 22600f7643c7SGeorge Wilson * slot for them here. 22610f7643c7SGeorge Wilson */ 22620f7643c7SGeorge Wilson VERIFY(metaslab_class_throttle_reserve(mc, 22630f7643c7SGeorge Wilson zp.zp_copies, cio, flags)); 22640f7643c7SGeorge Wilson } 22650f7643c7SGeorge Wilson zio_nowait(cio); 2266e14bb325SJeff Bonwick } 2267e05725b1Sbonwick 226844cd46caSbillm /* 2269e14bb325SJeff Bonwick * Set pio's pipeline to just wait for zio to finish. 227044cd46caSbillm */ 2271e14bb325SJeff Bonwick pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 2272e14bb325SJeff Bonwick 2273e14bb325SJeff Bonwick zio_nowait(zio); 2274e14bb325SJeff Bonwick 2275e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2276fa9e4066Sahrens } 2277fa9e4066Sahrens 227880901aeaSGeorge Wilson /* 227945818ee1SMatthew Ahrens * The zio_nop_write stage in the pipeline determines if allocating a 228045818ee1SMatthew Ahrens * new bp is necessary. The nopwrite feature can handle writes in 228145818ee1SMatthew Ahrens * either syncing or open context (i.e. zil writes) and as a result is 228245818ee1SMatthew Ahrens * mutually exclusive with dedup. 228345818ee1SMatthew Ahrens * 228445818ee1SMatthew Ahrens * By leveraging a cryptographically secure checksum, such as SHA256, we 228545818ee1SMatthew Ahrens * can compare the checksums of the new data and the old to determine if 228645818ee1SMatthew Ahrens * allocating a new block is required. Note that our requirements for 228745818ee1SMatthew Ahrens * cryptographic strength are fairly weak: there can't be any accidental 228845818ee1SMatthew Ahrens * hash collisions, but we don't need to be secure against intentional 228945818ee1SMatthew Ahrens * (malicious) collisions. To trigger a nopwrite, you have to be able 229045818ee1SMatthew Ahrens * to write the file to begin with, and triggering an incorrect (hash 229145818ee1SMatthew Ahrens * collision) nopwrite is no worse than simply writing to the file. 229245818ee1SMatthew Ahrens * That said, there are no known attacks against the checksum algorithms 229345818ee1SMatthew Ahrens * used for nopwrite, assuming that the salt and the checksums 229445818ee1SMatthew Ahrens * themselves remain secret. 229580901aeaSGeorge Wilson */ 229680901aeaSGeorge Wilson static int 229780901aeaSGeorge Wilson zio_nop_write(zio_t *zio) 229880901aeaSGeorge Wilson { 229980901aeaSGeorge Wilson blkptr_t *bp = zio->io_bp; 230080901aeaSGeorge Wilson blkptr_t *bp_orig = &zio->io_bp_orig; 230180901aeaSGeorge Wilson zio_prop_t *zp = &zio->io_prop; 230280901aeaSGeorge Wilson 230380901aeaSGeorge Wilson ASSERT(BP_GET_LEVEL(bp) == 0); 230480901aeaSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); 230580901aeaSGeorge Wilson ASSERT(zp->zp_nopwrite); 230680901aeaSGeorge Wilson ASSERT(!zp->zp_dedup); 230780901aeaSGeorge Wilson ASSERT(zio->io_bp_override == NULL); 230880901aeaSGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 230980901aeaSGeorge Wilson 231080901aeaSGeorge Wilson /* 231180901aeaSGeorge Wilson * Check to see if the original bp and the new bp have matching 231280901aeaSGeorge Wilson * characteristics (i.e. same checksum, compression algorithms, etc). 231380901aeaSGeorge Wilson * If they don't then just continue with the pipeline which will 231480901aeaSGeorge Wilson * allocate a new bp. 231580901aeaSGeorge Wilson */ 231680901aeaSGeorge Wilson if (BP_IS_HOLE(bp_orig) || 231745818ee1SMatthew Ahrens !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & 231845818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE) || 231980901aeaSGeorge Wilson BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || 232080901aeaSGeorge Wilson BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || 232180901aeaSGeorge Wilson BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || 232280901aeaSGeorge Wilson zp->zp_copies != BP_GET_NDVAS(bp_orig)) 232380901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 232480901aeaSGeorge Wilson 232580901aeaSGeorge Wilson /* 232680901aeaSGeorge Wilson * If the checksums match then reset the pipeline so that we 232780901aeaSGeorge Wilson * avoid allocating a new bp and issuing any I/O. 232880901aeaSGeorge Wilson */ 232980901aeaSGeorge Wilson if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { 233045818ee1SMatthew Ahrens ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & 233145818ee1SMatthew Ahrens ZCHECKSUM_FLAG_NOPWRITE); 233280901aeaSGeorge Wilson ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); 233380901aeaSGeorge Wilson ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); 233480901aeaSGeorge Wilson ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); 233580901aeaSGeorge Wilson ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, 233680901aeaSGeorge Wilson sizeof (uint64_t)) == 0); 233780901aeaSGeorge Wilson 233880901aeaSGeorge Wilson *bp = *bp_orig; 233980901aeaSGeorge Wilson zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 234080901aeaSGeorge Wilson zio->io_flags |= ZIO_FLAG_NOPWRITE; 234180901aeaSGeorge Wilson } 234280901aeaSGeorge Wilson 234380901aeaSGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 234480901aeaSGeorge Wilson } 234580901aeaSGeorge Wilson 2346fa9e4066Sahrens /* 2347fa9e4066Sahrens * ========================================================================== 2348b24ab676SJeff Bonwick * Dedup 2349fa9e4066Sahrens * ========================================================================== 2350fa9e4066Sahrens */ 2351b24ab676SJeff Bonwick static void 2352b24ab676SJeff Bonwick zio_ddt_child_read_done(zio_t *zio) 2353b24ab676SJeff Bonwick { 2354b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2355b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2356b24ab676SJeff Bonwick ddt_phys_t *ddp; 2357b24ab676SJeff Bonwick zio_t *pio = zio_unique_parent(zio); 2358b24ab676SJeff Bonwick 2359b24ab676SJeff Bonwick mutex_enter(&pio->io_lock); 2360b24ab676SJeff Bonwick ddp = ddt_phys_select(dde, bp); 2361b24ab676SJeff Bonwick if (zio->io_error == 0) 2362b24ab676SJeff Bonwick ddt_phys_clear(ddp); /* this ddp doesn't need repair */ 2363770499e1SDan Kimmel 2364770499e1SDan Kimmel if (zio->io_error == 0 && dde->dde_repair_abd == NULL) 2365770499e1SDan Kimmel dde->dde_repair_abd = zio->io_abd; 2366b24ab676SJeff Bonwick else 2367770499e1SDan Kimmel abd_free(zio->io_abd); 2368b24ab676SJeff Bonwick mutex_exit(&pio->io_lock); 2369b24ab676SJeff Bonwick } 2370b24ab676SJeff Bonwick 2371b24ab676SJeff Bonwick static int 2372b24ab676SJeff Bonwick zio_ddt_read_start(zio_t *zio) 2373b24ab676SJeff Bonwick { 2374b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2375b24ab676SJeff Bonwick 2376b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2377b24ab676SJeff Bonwick ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2378b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2379b24ab676SJeff Bonwick 2380b24ab676SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_DDT]) { 2381b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2382b24ab676SJeff Bonwick ddt_entry_t *dde = ddt_repair_start(ddt, bp); 2383b24ab676SJeff Bonwick ddt_phys_t *ddp = dde->dde_phys; 2384b24ab676SJeff Bonwick ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); 2385b24ab676SJeff Bonwick blkptr_t blk; 2386b24ab676SJeff Bonwick 2387b24ab676SJeff Bonwick ASSERT(zio->io_vsd == NULL); 2388b24ab676SJeff Bonwick zio->io_vsd = dde; 2389b24ab676SJeff Bonwick 2390b24ab676SJeff Bonwick if (ddp_self == NULL) 2391b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2392b24ab676SJeff Bonwick 2393b24ab676SJeff Bonwick for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2394b24ab676SJeff Bonwick if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) 2395b24ab676SJeff Bonwick continue; 2396bbfd46c4SJeff Bonwick ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, 2397bbfd46c4SJeff Bonwick &blk); 2398b24ab676SJeff Bonwick zio_nowait(zio_read(zio, zio->io_spa, &blk, 2399770499e1SDan Kimmel abd_alloc_for_io(zio->io_size, B_TRUE), 2400770499e1SDan Kimmel zio->io_size, zio_ddt_child_read_done, dde, 2401770499e1SDan Kimmel zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | 2402770499e1SDan Kimmel ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); 2403b24ab676SJeff Bonwick } 2404b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2405b24ab676SJeff Bonwick } 2406b24ab676SJeff Bonwick 2407b24ab676SJeff Bonwick zio_nowait(zio_read(zio, zio->io_spa, bp, 2408770499e1SDan Kimmel zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, 2409b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); 2410b24ab676SJeff Bonwick 2411b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2412b24ab676SJeff Bonwick } 2413e14bb325SJeff Bonwick 2414b24ab676SJeff Bonwick static int 2415b24ab676SJeff Bonwick zio_ddt_read_done(zio_t *zio) 2416b24ab676SJeff Bonwick { 2417b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2418b24ab676SJeff Bonwick 2419b24ab676SJeff Bonwick if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE)) 2420b24ab676SJeff Bonwick return (ZIO_PIPELINE_STOP); 2421b24ab676SJeff Bonwick 2422b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2423b24ab676SJeff Bonwick ASSERT(BP_GET_PSIZE(bp) == zio->io_size); 2424b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2425b24ab676SJeff Bonwick 2426b24ab676SJeff Bonwick if (zio->io_child_error[ZIO_CHILD_DDT]) { 2427b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2428b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_vsd; 2429b24ab676SJeff Bonwick if (ddt == NULL) { 2430b16da2e2SGeorge Wilson ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); 2431b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2432b24ab676SJeff Bonwick } 2433b24ab676SJeff Bonwick if (dde == NULL) { 2434b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; 243535a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); 2436b24ab676SJeff Bonwick return (ZIO_PIPELINE_STOP); 2437b24ab676SJeff Bonwick } 2438770499e1SDan Kimmel if (dde->dde_repair_abd != NULL) { 2439770499e1SDan Kimmel abd_copy(zio->io_abd, dde->dde_repair_abd, 2440770499e1SDan Kimmel zio->io_size); 2441b24ab676SJeff Bonwick zio->io_child_error[ZIO_CHILD_DDT] = 0; 2442b24ab676SJeff Bonwick } 2443b24ab676SJeff Bonwick ddt_repair_done(ddt, dde); 2444b24ab676SJeff Bonwick zio->io_vsd = NULL; 2445b24ab676SJeff Bonwick } 2446b24ab676SJeff Bonwick 2447b24ab676SJeff Bonwick ASSERT(zio->io_vsd == NULL); 2448b24ab676SJeff Bonwick 2449b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2450b24ab676SJeff Bonwick } 2451b24ab676SJeff Bonwick 2452b24ab676SJeff Bonwick static boolean_t 2453b24ab676SJeff Bonwick zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) 2454b24ab676SJeff Bonwick { 2455b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 24565602294fSDan Kimmel boolean_t do_raw = (zio->io_flags & ZIO_FLAG_RAW); 24575602294fSDan Kimmel 24585602294fSDan Kimmel /* We should never get a raw, override zio */ 24595602294fSDan Kimmel ASSERT(!(zio->io_bp_override && do_raw)); 2460b24ab676SJeff Bonwick 2461b24ab676SJeff Bonwick /* 2462b24ab676SJeff Bonwick * Note: we compare the original data, not the transformed data, 2463b24ab676SJeff Bonwick * because when zio->io_bp is an override bp, we will not have 2464b24ab676SJeff Bonwick * pushed the I/O transforms. That's an important optimization 2465b24ab676SJeff Bonwick * because otherwise we'd compress/encrypt all dmu_sync() data twice. 2466b24ab676SJeff Bonwick */ 2467b24ab676SJeff Bonwick for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2468b24ab676SJeff Bonwick zio_t *lio = dde->dde_lead_zio[p]; 2469b24ab676SJeff Bonwick 2470b24ab676SJeff Bonwick if (lio != NULL) { 2471b24ab676SJeff Bonwick return (lio->io_orig_size != zio->io_orig_size || 2472770499e1SDan Kimmel abd_cmp(zio->io_orig_abd, lio->io_orig_abd, 2473b24ab676SJeff Bonwick zio->io_orig_size) != 0); 2474b24ab676SJeff Bonwick } 2475b24ab676SJeff Bonwick } 2476b24ab676SJeff Bonwick 2477b24ab676SJeff Bonwick for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { 2478b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2479b24ab676SJeff Bonwick 2480b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0) { 2481b24ab676SJeff Bonwick arc_buf_t *abuf = NULL; 24827adb730bSGeorge Wilson arc_flags_t aflags = ARC_FLAG_WAIT; 24835602294fSDan Kimmel int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2484b24ab676SJeff Bonwick blkptr_t blk = *zio->io_bp; 2485b24ab676SJeff Bonwick int error; 2486b24ab676SJeff Bonwick 2487b24ab676SJeff Bonwick ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); 2488b24ab676SJeff Bonwick 2489b24ab676SJeff Bonwick ddt_exit(ddt); 2490b24ab676SJeff Bonwick 24915602294fSDan Kimmel /* 24925602294fSDan Kimmel * Intuitively, it would make more sense to compare 2493770499e1SDan Kimmel * io_abd than io_orig_abd in the raw case since you 24945602294fSDan Kimmel * don't want to look at any transformations that have 24955602294fSDan Kimmel * happened to the data. However, for raw I/Os the 2496770499e1SDan Kimmel * data will actually be the same in io_abd and 2497770499e1SDan Kimmel * io_orig_abd, so all we have to do is issue this as 24985602294fSDan Kimmel * a raw ARC read. 24995602294fSDan Kimmel */ 25005602294fSDan Kimmel if (do_raw) { 25015602294fSDan Kimmel zio_flags |= ZIO_FLAG_RAW; 25025602294fSDan Kimmel ASSERT3U(zio->io_size, ==, zio->io_orig_size); 2503770499e1SDan Kimmel ASSERT0(abd_cmp(zio->io_abd, zio->io_orig_abd, 25045602294fSDan Kimmel zio->io_size)); 25055602294fSDan Kimmel ASSERT3P(zio->io_transform_stack, ==, NULL); 25065602294fSDan Kimmel } 25075602294fSDan Kimmel 25081b912ec7SGeorge Wilson error = arc_read(NULL, spa, &blk, 2509b24ab676SJeff Bonwick arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, 25105602294fSDan Kimmel zio_flags, &aflags, &zio->io_bookmark); 2511b24ab676SJeff Bonwick 2512b24ab676SJeff Bonwick if (error == 0) { 2513b24ab676SJeff Bonwick if (arc_buf_size(abuf) != zio->io_orig_size || 2514770499e1SDan Kimmel abd_cmp_buf(zio->io_orig_abd, abuf->b_data, 2515b24ab676SJeff Bonwick zio->io_orig_size) != 0) 2516be6fd75aSMatthew Ahrens error = SET_ERROR(EEXIST); 2517dcbf3bd6SGeorge Wilson arc_buf_destroy(abuf, &abuf); 2518b24ab676SJeff Bonwick } 2519b24ab676SJeff Bonwick 2520b24ab676SJeff Bonwick ddt_enter(ddt); 2521b24ab676SJeff Bonwick return (error != 0); 2522b24ab676SJeff Bonwick } 2523b24ab676SJeff Bonwick } 2524b24ab676SJeff Bonwick 2525b24ab676SJeff Bonwick return (B_FALSE); 2526b24ab676SJeff Bonwick } 2527b24ab676SJeff Bonwick 2528b24ab676SJeff Bonwick static void 2529b24ab676SJeff Bonwick zio_ddt_child_write_ready(zio_t *zio) 2530b24ab676SJeff Bonwick { 2531b24ab676SJeff Bonwick int p = zio->io_prop.zp_copies; 2532b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2533b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2534b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2535b24ab676SJeff Bonwick zio_t *pio; 2536b24ab676SJeff Bonwick 2537b24ab676SJeff Bonwick if (zio->io_error) 2538b24ab676SJeff Bonwick return; 2539b24ab676SJeff Bonwick 2540b24ab676SJeff Bonwick ddt_enter(ddt); 2541b24ab676SJeff Bonwick 2542b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2543b24ab676SJeff Bonwick 2544b24ab676SJeff Bonwick ddt_phys_fill(ddp, zio->io_bp); 2545b24ab676SJeff Bonwick 25460f7643c7SGeorge Wilson zio_link_t *zl = NULL; 25470f7643c7SGeorge Wilson while ((pio = zio_walk_parents(zio, &zl)) != NULL) 2548b24ab676SJeff Bonwick ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); 2549b24ab676SJeff Bonwick 2550b24ab676SJeff Bonwick ddt_exit(ddt); 2551b24ab676SJeff Bonwick } 2552b24ab676SJeff Bonwick 2553b24ab676SJeff Bonwick static void 2554b24ab676SJeff Bonwick zio_ddt_child_write_done(zio_t *zio) 2555b24ab676SJeff Bonwick { 2556b24ab676SJeff Bonwick int p = zio->io_prop.zp_copies; 2557b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); 2558b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2559b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2560b24ab676SJeff Bonwick 2561b24ab676SJeff Bonwick ddt_enter(ddt); 2562b24ab676SJeff Bonwick 2563b24ab676SJeff Bonwick ASSERT(ddp->ddp_refcnt == 0); 2564b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2565b24ab676SJeff Bonwick dde->dde_lead_zio[p] = NULL; 2566b24ab676SJeff Bonwick 2567b24ab676SJeff Bonwick if (zio->io_error == 0) { 25680f7643c7SGeorge Wilson zio_link_t *zl = NULL; 25690f7643c7SGeorge Wilson while (zio_walk_parents(zio, &zl) != NULL) 2570b24ab676SJeff Bonwick ddt_phys_addref(ddp); 2571b24ab676SJeff Bonwick } else { 2572b24ab676SJeff Bonwick ddt_phys_clear(ddp); 2573b24ab676SJeff Bonwick } 2574b24ab676SJeff Bonwick 2575b24ab676SJeff Bonwick ddt_exit(ddt); 2576b24ab676SJeff Bonwick } 2577b24ab676SJeff Bonwick 2578b24ab676SJeff Bonwick static void 2579b24ab676SJeff Bonwick zio_ddt_ditto_write_done(zio_t *zio) 2580b24ab676SJeff Bonwick { 2581b24ab676SJeff Bonwick int p = DDT_PHYS_DITTO; 2582b24ab676SJeff Bonwick zio_prop_t *zp = &zio->io_prop; 2583b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2584b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(zio->io_spa, bp); 2585b24ab676SJeff Bonwick ddt_entry_t *dde = zio->io_private; 2586b24ab676SJeff Bonwick ddt_phys_t *ddp = &dde->dde_phys[p]; 2587b24ab676SJeff Bonwick ddt_key_t *ddk = &dde->dde_key; 2588b24ab676SJeff Bonwick 2589b24ab676SJeff Bonwick ddt_enter(ddt); 2590b24ab676SJeff Bonwick 2591b24ab676SJeff Bonwick ASSERT(ddp->ddp_refcnt == 0); 2592b24ab676SJeff Bonwick ASSERT(dde->dde_lead_zio[p] == zio); 2593b24ab676SJeff Bonwick dde->dde_lead_zio[p] = NULL; 2594b24ab676SJeff Bonwick 2595b24ab676SJeff Bonwick if (zio->io_error == 0) { 2596b24ab676SJeff Bonwick ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); 2597b24ab676SJeff Bonwick ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); 2598b24ab676SJeff Bonwick ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); 2599b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0) 2600b24ab676SJeff Bonwick ddt_phys_free(ddt, ddk, ddp, zio->io_txg); 2601b24ab676SJeff Bonwick ddt_phys_fill(ddp, bp); 2602b24ab676SJeff Bonwick } 2603b24ab676SJeff Bonwick 2604b24ab676SJeff Bonwick ddt_exit(ddt); 2605b24ab676SJeff Bonwick } 2606b24ab676SJeff Bonwick 2607b24ab676SJeff Bonwick static int 2608b24ab676SJeff Bonwick zio_ddt_write(zio_t *zio) 2609b24ab676SJeff Bonwick { 2610b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 2611b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2612b24ab676SJeff Bonwick uint64_t txg = zio->io_txg; 2613b24ab676SJeff Bonwick zio_prop_t *zp = &zio->io_prop; 2614b24ab676SJeff Bonwick int p = zp->zp_copies; 2615b24ab676SJeff Bonwick int ditto_copies; 2616b24ab676SJeff Bonwick zio_t *cio = NULL; 2617b24ab676SJeff Bonwick zio_t *dio = NULL; 2618b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(spa, bp); 2619b24ab676SJeff Bonwick ddt_entry_t *dde; 2620b24ab676SJeff Bonwick ddt_phys_t *ddp; 2621b24ab676SJeff Bonwick 2622b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2623b24ab676SJeff Bonwick ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); 2624b24ab676SJeff Bonwick ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); 26255602294fSDan Kimmel ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); 2626b24ab676SJeff Bonwick 2627b24ab676SJeff Bonwick ddt_enter(ddt); 2628b24ab676SJeff Bonwick dde = ddt_lookup(ddt, bp, B_TRUE); 2629b24ab676SJeff Bonwick ddp = &dde->dde_phys[p]; 2630b24ab676SJeff Bonwick 2631b24ab676SJeff Bonwick if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { 2632b24ab676SJeff Bonwick /* 2633b24ab676SJeff Bonwick * If we're using a weak checksum, upgrade to a strong checksum 2634b24ab676SJeff Bonwick * and try again. If we're already using a strong checksum, 2635b24ab676SJeff Bonwick * we can't resolve it, so just convert to an ordinary write. 2636b24ab676SJeff Bonwick * (And automatically e-mail a paper to Nature?) 2637b24ab676SJeff Bonwick */ 263845818ee1SMatthew Ahrens if (!(zio_checksum_table[zp->zp_checksum].ci_flags & 263945818ee1SMatthew Ahrens ZCHECKSUM_FLAG_DEDUP)) { 2640b24ab676SJeff Bonwick zp->zp_checksum = spa_dedup_checksum(spa); 2641b24ab676SJeff Bonwick zio_pop_transforms(zio); 2642b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_OPEN; 2643b24ab676SJeff Bonwick BP_ZERO(bp); 2644b24ab676SJeff Bonwick } else { 264580901aeaSGeorge Wilson zp->zp_dedup = B_FALSE; 26465602294fSDan Kimmel BP_SET_DEDUP(bp, B_FALSE); 2647b24ab676SJeff Bonwick } 26485602294fSDan Kimmel ASSERT(!BP_GET_DEDUP(bp)); 2649b24ab676SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 2650b24ab676SJeff Bonwick ddt_exit(ddt); 2651b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2652b24ab676SJeff Bonwick } 2653b24ab676SJeff Bonwick 2654b24ab676SJeff Bonwick ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); 2655b24ab676SJeff Bonwick ASSERT(ditto_copies < SPA_DVAS_PER_BP); 2656b24ab676SJeff Bonwick 2657b24ab676SJeff Bonwick if (ditto_copies > ddt_ditto_copies_present(dde) && 2658b24ab676SJeff Bonwick dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { 2659b24ab676SJeff Bonwick zio_prop_t czp = *zp; 2660b24ab676SJeff Bonwick 2661b24ab676SJeff Bonwick czp.zp_copies = ditto_copies; 2662b24ab676SJeff Bonwick 2663b24ab676SJeff Bonwick /* 2664b24ab676SJeff Bonwick * If we arrived here with an override bp, we won't have run 2665b24ab676SJeff Bonwick * the transform stack, so we won't have the data we need to 2666b24ab676SJeff Bonwick * generate a child i/o. So, toss the override bp and restart. 2667b24ab676SJeff Bonwick * This is safe, because using the override bp is just an 2668b24ab676SJeff Bonwick * optimization; and it's rare, so the cost doesn't matter. 2669b24ab676SJeff Bonwick */ 2670b24ab676SJeff Bonwick if (zio->io_bp_override) { 2671b24ab676SJeff Bonwick zio_pop_transforms(zio); 2672b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_OPEN; 2673b24ab676SJeff Bonwick zio->io_pipeline = ZIO_WRITE_PIPELINE; 2674b24ab676SJeff Bonwick zio->io_bp_override = NULL; 2675b24ab676SJeff Bonwick BP_ZERO(bp); 2676b24ab676SJeff Bonwick ddt_exit(ddt); 2677b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2678b24ab676SJeff Bonwick } 2679b24ab676SJeff Bonwick 2680770499e1SDan Kimmel dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 26815602294fSDan Kimmel zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL, 26828df0bcf0SPaul Dagnelie NULL, zio_ddt_ditto_write_done, dde, zio->io_priority, 2683b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2684b24ab676SJeff Bonwick 2685770499e1SDan Kimmel zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL); 2686b24ab676SJeff Bonwick dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; 2687b24ab676SJeff Bonwick } 2688b24ab676SJeff Bonwick 2689b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { 2690b24ab676SJeff Bonwick if (ddp->ddp_phys_birth != 0) 2691b24ab676SJeff Bonwick ddt_bp_fill(ddp, bp, txg); 2692b24ab676SJeff Bonwick if (dde->dde_lead_zio[p] != NULL) 2693b24ab676SJeff Bonwick zio_add_child(zio, dde->dde_lead_zio[p]); 2694b24ab676SJeff Bonwick else 2695b24ab676SJeff Bonwick ddt_phys_addref(ddp); 2696b24ab676SJeff Bonwick } else if (zio->io_bp_override) { 2697b24ab676SJeff Bonwick ASSERT(bp->blk_birth == txg); 2698b24ab676SJeff Bonwick ASSERT(BP_EQUAL(bp, zio->io_bp_override)); 2699b24ab676SJeff Bonwick ddt_phys_fill(ddp, bp); 2700b24ab676SJeff Bonwick ddt_phys_addref(ddp); 2701b24ab676SJeff Bonwick } else { 2702770499e1SDan Kimmel cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, 27035602294fSDan Kimmel zio->io_orig_size, zio->io_orig_size, zp, 27048df0bcf0SPaul Dagnelie zio_ddt_child_write_ready, NULL, NULL, 2705b24ab676SJeff Bonwick zio_ddt_child_write_done, dde, zio->io_priority, 2706b24ab676SJeff Bonwick ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); 2707b24ab676SJeff Bonwick 2708770499e1SDan Kimmel zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); 2709b24ab676SJeff Bonwick dde->dde_lead_zio[p] = cio; 2710b24ab676SJeff Bonwick } 2711b24ab676SJeff Bonwick 2712b24ab676SJeff Bonwick ddt_exit(ddt); 2713b24ab676SJeff Bonwick 2714b24ab676SJeff Bonwick if (cio) 2715b24ab676SJeff Bonwick zio_nowait(cio); 2716b24ab676SJeff Bonwick if (dio) 2717b24ab676SJeff Bonwick zio_nowait(dio); 2718b24ab676SJeff Bonwick 2719b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2720b24ab676SJeff Bonwick } 2721b24ab676SJeff Bonwick 27223f9d6ad7SLin Ling ddt_entry_t *freedde; /* for debugging */ 27233f9d6ad7SLin Ling 2724b24ab676SJeff Bonwick static int 2725b24ab676SJeff Bonwick zio_ddt_free(zio_t *zio) 2726b24ab676SJeff Bonwick { 2727b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 2728b24ab676SJeff Bonwick blkptr_t *bp = zio->io_bp; 2729b24ab676SJeff Bonwick ddt_t *ddt = ddt_select(spa, bp); 2730b24ab676SJeff Bonwick ddt_entry_t *dde; 2731b24ab676SJeff Bonwick ddt_phys_t *ddp; 2732b24ab676SJeff Bonwick 2733b24ab676SJeff Bonwick ASSERT(BP_GET_DEDUP(bp)); 2734b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 2735b24ab676SJeff Bonwick 2736b24ab676SJeff Bonwick ddt_enter(ddt); 27373f9d6ad7SLin Ling freedde = dde = ddt_lookup(ddt, bp, B_TRUE); 2738b24ab676SJeff Bonwick ddp = ddt_phys_select(dde, bp); 2739b24ab676SJeff Bonwick ddt_phys_decref(ddp); 2740b24ab676SJeff Bonwick ddt_exit(ddt); 2741b24ab676SJeff Bonwick 2742b24ab676SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 2743b24ab676SJeff Bonwick } 2744b24ab676SJeff Bonwick 2745b24ab676SJeff Bonwick /* 2746b24ab676SJeff Bonwick * ========================================================================== 2747b24ab676SJeff Bonwick * Allocate and free blocks 2748b24ab676SJeff Bonwick * ========================================================================== 2749b24ab676SJeff Bonwick */ 27500f7643c7SGeorge Wilson 27510f7643c7SGeorge Wilson static zio_t * 27520f7643c7SGeorge Wilson zio_io_to_allocate(spa_t *spa) 27530f7643c7SGeorge Wilson { 27540f7643c7SGeorge Wilson zio_t *zio; 27550f7643c7SGeorge Wilson 27560f7643c7SGeorge Wilson ASSERT(MUTEX_HELD(&spa->spa_alloc_lock)); 27570f7643c7SGeorge Wilson 27580f7643c7SGeorge Wilson zio = avl_first(&spa->spa_alloc_tree); 27590f7643c7SGeorge Wilson if (zio == NULL) 27600f7643c7SGeorge Wilson return (NULL); 27610f7643c7SGeorge Wilson 27620f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 27630f7643c7SGeorge Wilson 27640f7643c7SGeorge Wilson /* 27650f7643c7SGeorge Wilson * Try to place a reservation for this zio. If we're unable to 27660f7643c7SGeorge Wilson * reserve then we throttle. 27670f7643c7SGeorge Wilson */ 27680f7643c7SGeorge Wilson if (!metaslab_class_throttle_reserve(spa_normal_class(spa), 27690f7643c7SGeorge Wilson zio->io_prop.zp_copies, zio, 0)) { 27700f7643c7SGeorge Wilson return (NULL); 27710f7643c7SGeorge Wilson } 27720f7643c7SGeorge Wilson 27730f7643c7SGeorge Wilson avl_remove(&spa->spa_alloc_tree, zio); 27740f7643c7SGeorge Wilson ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); 27750f7643c7SGeorge Wilson 27760f7643c7SGeorge Wilson return (zio); 27770f7643c7SGeorge Wilson } 27780f7643c7SGeorge Wilson 27790f7643c7SGeorge Wilson static int 27800f7643c7SGeorge Wilson zio_dva_throttle(zio_t *zio) 27810f7643c7SGeorge Wilson { 27820f7643c7SGeorge Wilson spa_t *spa = zio->io_spa; 27830f7643c7SGeorge Wilson zio_t *nio; 27840f7643c7SGeorge Wilson 27850f7643c7SGeorge Wilson if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || 27860f7643c7SGeorge Wilson !spa_normal_class(zio->io_spa)->mc_alloc_throttle_enabled || 27870f7643c7SGeorge Wilson zio->io_child_type == ZIO_CHILD_GANG || 27880f7643c7SGeorge Wilson zio->io_flags & ZIO_FLAG_NODATA) { 27890f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 27900f7643c7SGeorge Wilson } 27910f7643c7SGeorge Wilson 27920f7643c7SGeorge Wilson ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 27930f7643c7SGeorge Wilson 27940f7643c7SGeorge Wilson ASSERT3U(zio->io_queued_timestamp, >, 0); 27950f7643c7SGeorge Wilson ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); 27960f7643c7SGeorge Wilson 27970f7643c7SGeorge Wilson mutex_enter(&spa->spa_alloc_lock); 27980f7643c7SGeorge Wilson 27990f7643c7SGeorge Wilson ASSERT(zio->io_type == ZIO_TYPE_WRITE); 28000f7643c7SGeorge Wilson avl_add(&spa->spa_alloc_tree, zio); 28010f7643c7SGeorge Wilson 28020f7643c7SGeorge Wilson nio = zio_io_to_allocate(zio->io_spa); 28030f7643c7SGeorge Wilson mutex_exit(&spa->spa_alloc_lock); 28040f7643c7SGeorge Wilson 28050f7643c7SGeorge Wilson if (nio == zio) 28060f7643c7SGeorge Wilson return (ZIO_PIPELINE_CONTINUE); 28070f7643c7SGeorge Wilson 28080f7643c7SGeorge Wilson if (nio != NULL) { 28090f7643c7SGeorge Wilson ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE); 28100f7643c7SGeorge Wilson /* 28110f7643c7SGeorge Wilson * We are passing control to a new zio so make sure that 28120f7643c7SGeorge Wilson * it is processed by a different thread. We do this to 28130f7643c7SGeorge Wilson * avoid stack overflows that can occur when parents are 28140f7643c7SGeorge Wilson * throttled and children are making progress. We allow 28150f7643c7SGeorge Wilson * it to go to the head of the taskq since it's already 28160f7643c7SGeorge Wilson * been waiting. 28170f7643c7SGeorge Wilson */ 28180f7643c7SGeorge Wilson zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE); 28190f7643c7SGeorge Wilson } 28200f7643c7SGeorge Wilson return (ZIO_PIPELINE_STOP); 28210f7643c7SGeorge Wilson } 28220f7643c7SGeorge Wilson 28230f7643c7SGeorge Wilson void 28240f7643c7SGeorge Wilson zio_allocate_dispatch(spa_t *spa) 28250f7643c7SGeorge Wilson { 28260f7643c7SGeorge Wilson zio_t *zio; 28270f7643c7SGeorge Wilson 28280f7643c7SGeorge Wilson mutex_enter(&spa->spa_alloc_lock); 28290f7643c7SGeorge Wilson zio = zio_io_to_allocate(spa); 28300f7643c7SGeorge Wilson mutex_exit(&spa->spa_alloc_lock); 28310f7643c7SGeorge Wilson if (zio == NULL) 28320f7643c7SGeorge Wilson return; 28330f7643c7SGeorge Wilson 28340f7643c7SGeorge Wilson ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); 28350f7643c7SGeorge Wilson ASSERT0(zio->io_error); 28360f7643c7SGeorge Wilson zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); 28370f7643c7SGeorge Wilson } 28380f7643c7SGeorge Wilson 2839e05725b1Sbonwick static int 2840fa9e4066Sahrens zio_dva_allocate(zio_t *zio) 2841fa9e4066Sahrens { 28428654d025Sperrin spa_t *spa = zio->io_spa; 2843b24ab676SJeff Bonwick metaslab_class_t *mc = spa_normal_class(spa); 2844fa9e4066Sahrens blkptr_t *bp = zio->io_bp; 2845fa9e4066Sahrens int error; 284609c9d376SGeorge Wilson int flags = 0; 2847fa9e4066Sahrens 2848f5383399SBill Moore if (zio->io_gang_leader == NULL) { 2849f5383399SBill Moore ASSERT(zio->io_child_type > ZIO_CHILD_GANG); 2850f5383399SBill Moore zio->io_gang_leader = zio; 2851f5383399SBill Moore } 2852f5383399SBill Moore 2853fa9e4066Sahrens ASSERT(BP_IS_HOLE(bp)); 2854fb09f5aaSMadhav Suresh ASSERT0(BP_GET_NDVAS(bp)); 2855b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, >, 0); 2856b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); 2857fa9e4066Sahrens ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 2858fa9e4066Sahrens 28590f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_NODATA) { 28600f7643c7SGeorge Wilson flags |= METASLAB_DONT_THROTTLE; 28610f7643c7SGeorge Wilson } 28620f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_GANG_CHILD) { 28630f7643c7SGeorge Wilson flags |= METASLAB_GANG_CHILD; 28640f7643c7SGeorge Wilson } 28650f7643c7SGeorge Wilson if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) { 28660f7643c7SGeorge Wilson flags |= METASLAB_ASYNC_ALLOC; 28670f7643c7SGeorge Wilson } 28680f7643c7SGeorge Wilson 2869e14bb325SJeff Bonwick error = metaslab_alloc(spa, mc, zio->io_size, bp, 28708363e80aSGeorge Wilson zio->io_prop.zp_copies, zio->io_txg, NULL, flags, 28718363e80aSGeorge Wilson &zio->io_alloc_list, zio); 2872fa9e4066Sahrens 28730f7643c7SGeorge Wilson if (error != 0) { 287409c9d376SGeorge Wilson spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " 287509c9d376SGeorge Wilson "size %llu, error %d", spa_name(spa), zio, zio->io_size, 287609c9d376SGeorge Wilson error); 2877e14bb325SJeff Bonwick if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) 2878e14bb325SJeff Bonwick return (zio_write_gang_block(zio)); 2879fa9e4066Sahrens zio->io_error = error; 2880fa9e4066Sahrens } 2881e05725b1Sbonwick 2882e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2883fa9e4066Sahrens } 2884fa9e4066Sahrens 2885e05725b1Sbonwick static int 2886fa9e4066Sahrens zio_dva_free(zio_t *zio) 2887fa9e4066Sahrens { 2888e14bb325SJeff Bonwick metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); 2889fa9e4066Sahrens 2890e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2891fa9e4066Sahrens } 2892fa9e4066Sahrens 2893e05725b1Sbonwick static int 2894fa9e4066Sahrens zio_dva_claim(zio_t *zio) 2895fa9e4066Sahrens { 2896e14bb325SJeff Bonwick int error; 2897e14bb325SJeff Bonwick 2898e14bb325SJeff Bonwick error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); 2899e14bb325SJeff Bonwick if (error) 2900e14bb325SJeff Bonwick zio->io_error = error; 2901fa9e4066Sahrens 2902e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 2903fa9e4066Sahrens } 2904fa9e4066Sahrens 2905e14bb325SJeff Bonwick /* 2906e14bb325SJeff Bonwick * Undo an allocation. This is used by zio_done() when an I/O fails 2907e14bb325SJeff Bonwick * and we want to give back the block we just allocated. 2908e14bb325SJeff Bonwick * This handles both normal blocks and gang blocks. 2909e14bb325SJeff Bonwick */ 2910e14bb325SJeff Bonwick static void 2911e14bb325SJeff Bonwick zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) 2912e14bb325SJeff Bonwick { 2913e14bb325SJeff Bonwick ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); 2914b24ab676SJeff Bonwick ASSERT(zio->io_bp_override == NULL); 2915e14bb325SJeff Bonwick 2916e14bb325SJeff Bonwick if (!BP_IS_HOLE(bp)) 2917b24ab676SJeff Bonwick metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); 2918e14bb325SJeff Bonwick 2919e14bb325SJeff Bonwick if (gn != NULL) { 2920e14bb325SJeff Bonwick for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { 2921e14bb325SJeff Bonwick zio_dva_unallocate(zio, gn->gn_child[g], 2922e14bb325SJeff Bonwick &gn->gn_gbh->zg_blkptr[g]); 2923e14bb325SJeff Bonwick } 2924e14bb325SJeff Bonwick } 2925e14bb325SJeff Bonwick } 2926e14bb325SJeff Bonwick 2927e14bb325SJeff Bonwick /* 2928e14bb325SJeff Bonwick * Try to allocate an intent log block. Return 0 on success, errno on failure. 2929e14bb325SJeff Bonwick */ 2930e14bb325SJeff Bonwick int 2931b24ab676SJeff Bonwick zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp, 2932c5ee4681SAlexander Motin uint64_t size, boolean_t *slog) 2933e14bb325SJeff Bonwick { 2934e09fa4daSNeil Perrin int error = 1; 29358363e80aSGeorge Wilson zio_alloc_list_t io_alloc_list; 2936e14bb325SJeff Bonwick 2937b24ab676SJeff Bonwick ASSERT(txg > spa_syncing_txg(spa)); 2938b24ab676SJeff Bonwick 29398363e80aSGeorge Wilson metaslab_trace_init(&io_alloc_list); 2940c5ee4681SAlexander Motin error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, 2941c5ee4681SAlexander Motin txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL); 2942c5ee4681SAlexander Motin if (error == 0) { 2943c5ee4681SAlexander Motin *slog = TRUE; 2944c5ee4681SAlexander Motin } else { 2945b24ab676SJeff Bonwick error = metaslab_alloc(spa, spa_normal_class(spa), size, 29468363e80aSGeorge Wilson new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, 29478363e80aSGeorge Wilson &io_alloc_list, NULL); 2948c5ee4681SAlexander Motin if (error == 0) 2949c5ee4681SAlexander Motin *slog = FALSE; 2950840345f6SGeorge Wilson } 29518363e80aSGeorge Wilson metaslab_trace_fini(&io_alloc_list); 2952e14bb325SJeff Bonwick 2953e14bb325SJeff Bonwick if (error == 0) { 2954e14bb325SJeff Bonwick BP_SET_LSIZE(new_bp, size); 2955e14bb325SJeff Bonwick BP_SET_PSIZE(new_bp, size); 2956e14bb325SJeff Bonwick BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 29576e1f5caaSNeil Perrin BP_SET_CHECKSUM(new_bp, 29586e1f5caaSNeil Perrin spa_version(spa) >= SPA_VERSION_SLIM_ZIL 29596e1f5caaSNeil Perrin ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); 2960e14bb325SJeff Bonwick BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 2961e14bb325SJeff Bonwick BP_SET_LEVEL(new_bp, 0); 2962b24ab676SJeff Bonwick BP_SET_DEDUP(new_bp, 0); 2963e14bb325SJeff Bonwick BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 2964*1271e4b1SPrakash Surya } else { 2965*1271e4b1SPrakash Surya zfs_dbgmsg("%s: zil block allocation failure: " 2966*1271e4b1SPrakash Surya "size %llu, error %d", spa_name(spa), size, error); 2967e14bb325SJeff Bonwick } 2968e14bb325SJeff Bonwick 2969e14bb325SJeff Bonwick return (error); 2970e14bb325SJeff Bonwick } 2971e14bb325SJeff Bonwick 2972e14bb325SJeff Bonwick /* 2973b24ab676SJeff Bonwick * Free an intent log block. 2974e14bb325SJeff Bonwick */ 2975e14bb325SJeff Bonwick void 2976b24ab676SJeff Bonwick zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp) 2977e14bb325SJeff Bonwick { 2978b24ab676SJeff Bonwick ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG); 2979e14bb325SJeff Bonwick ASSERT(!BP_IS_GANG(bp)); 2980e14bb325SJeff Bonwick 2981b24ab676SJeff Bonwick zio_free(spa, txg, bp); 2982e14bb325SJeff Bonwick } 2983e14bb325SJeff Bonwick 2984fa9e4066Sahrens /* 2985fa9e4066Sahrens * ========================================================================== 2986fa9e4066Sahrens * Read and write to physical devices 2987fa9e4066Sahrens * ========================================================================== 2988fa9e4066Sahrens */ 2989738f37bcSGeorge Wilson 2990738f37bcSGeorge Wilson 2991738f37bcSGeorge Wilson /* 2992738f37bcSGeorge Wilson * Issue an I/O to the underlying vdev. Typically the issue pipeline 2993738f37bcSGeorge Wilson * stops after this stage and will resume upon I/O completion. 2994738f37bcSGeorge Wilson * However, there are instances where the vdev layer may need to 2995738f37bcSGeorge Wilson * continue the pipeline when an I/O was not issued. Since the I/O 2996738f37bcSGeorge Wilson * that was sent to the vdev layer might be different than the one 2997738f37bcSGeorge Wilson * currently active in the pipeline (see vdev_queue_io()), we explicitly 2998738f37bcSGeorge Wilson * force the underlying vdev layers to call either zio_execute() or 2999738f37bcSGeorge Wilson * zio_interrupt() to ensure that the pipeline continues with the correct I/O. 3000738f37bcSGeorge Wilson */ 3001e05725b1Sbonwick static int 300244cd46caSbillm zio_vdev_io_start(zio_t *zio) 3003fa9e4066Sahrens { 3004fa9e4066Sahrens vdev_t *vd = zio->io_vd; 300544cd46caSbillm uint64_t align; 30060a4e9518Sgw spa_t *spa = zio->io_spa; 30070a4e9518Sgw 3008e14bb325SJeff Bonwick ASSERT(zio->io_error == 0); 3009e14bb325SJeff Bonwick ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); 3010fa9e4066Sahrens 3011e14bb325SJeff Bonwick if (vd == NULL) { 3012e14bb325SJeff Bonwick if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3013e14bb325SJeff Bonwick spa_config_enter(spa, SCL_ZIO, zio, RW_READER); 3014fa9e4066Sahrens 3015e14bb325SJeff Bonwick /* 3016e14bb325SJeff Bonwick * The mirror_ops handle multiple DVAs in a single BP. 3017e14bb325SJeff Bonwick */ 3018738f37bcSGeorge Wilson vdev_mirror_ops.vdev_op_io_start(zio); 3019738f37bcSGeorge Wilson return (ZIO_PIPELINE_STOP); 3020fa9e4066Sahrens } 3021fa9e4066Sahrens 30220f7643c7SGeorge Wilson ASSERT3P(zio->io_logical, !=, zio); 30230f7643c7SGeorge Wilson 302444ecc532SGeorge Wilson /* 302544ecc532SGeorge Wilson * We keep track of time-sensitive I/Os so that the scan thread 302644ecc532SGeorge Wilson * can quickly react to certain workloads. In particular, we care 302744ecc532SGeorge Wilson * about non-scrubbing, top-level reads and writes with the following 302844ecc532SGeorge Wilson * characteristics: 3029738f37bcSGeorge Wilson * - synchronous writes of user data to non-slog devices 303044ecc532SGeorge Wilson * - any reads of user data 303144ecc532SGeorge Wilson * When these conditions are met, adjust the timestamp of spa_last_io 303244ecc532SGeorge Wilson * which allows the scan thread to adjust its workload accordingly. 303344ecc532SGeorge Wilson */ 303444ecc532SGeorge Wilson if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && 303544ecc532SGeorge Wilson vd == vd->vdev_top && !vd->vdev_islog && 303644ecc532SGeorge Wilson zio->io_bookmark.zb_objset != DMU_META_OBJSET && 303744ecc532SGeorge Wilson zio->io_txg != spa_syncing_txg(spa)) { 303844ecc532SGeorge Wilson uint64_t old = spa->spa_last_io; 303944ecc532SGeorge Wilson uint64_t new = ddi_get_lbolt64(); 304044ecc532SGeorge Wilson if (old != new) 304144ecc532SGeorge Wilson (void) atomic_cas_64(&spa->spa_last_io, old, new); 304244ecc532SGeorge Wilson } 304344ecc532SGeorge Wilson 3044e14bb325SJeff Bonwick align = 1ULL << vd->vdev_top->vdev_ashift; 3045e14bb325SJeff Bonwick 30462a104a52SAlex Reece if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && 30472a104a52SAlex Reece P2PHASE(zio->io_size, align) != 0) { 30482a104a52SAlex Reece /* Transform logical writes to be a full physical block size. */ 3049ecc2d604Sbonwick uint64_t asize = P2ROUNDUP(zio->io_size, align); 3050770499e1SDan Kimmel abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); 3051e14bb325SJeff Bonwick ASSERT(vd == vd->vdev_top); 3052ecc2d604Sbonwick if (zio->io_type == ZIO_TYPE_WRITE) { 3053770499e1SDan Kimmel abd_copy(abuf, zio->io_abd, zio->io_size); 3054770499e1SDan Kimmel abd_zero_off(abuf, zio->io_size, asize - zio->io_size); 3055ecc2d604Sbonwick } 3056e14bb325SJeff Bonwick zio_push_transform(zio, abuf, asize, asize, zio_subblock); 3057ecc2d604Sbonwick } 3058ecc2d604Sbonwick 30592a104a52SAlex Reece /* 30602a104a52SAlex Reece * If this is not a physical io, make sure that it is properly aligned 30612a104a52SAlex Reece * before proceeding. 30622a104a52SAlex Reece */ 30632a104a52SAlex Reece if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { 30642a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_offset, align)); 30652a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_size, align)); 30662a104a52SAlex Reece } else { 30672a104a52SAlex Reece /* 30682a104a52SAlex Reece * For physical writes, we allow 512b aligned writes and assume 30692a104a52SAlex Reece * the device will perform a read-modify-write as necessary. 30702a104a52SAlex Reece */ 30712a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); 30722a104a52SAlex Reece ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); 30732a104a52SAlex Reece } 30742a104a52SAlex Reece 3075f9af39baSGeorge Wilson VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); 30768ad4d6ddSJeff Bonwick 30778ad4d6ddSJeff Bonwick /* 30788ad4d6ddSJeff Bonwick * If this is a repair I/O, and there's no self-healing involved -- 30798ad4d6ddSJeff Bonwick * that is, we're just resilvering what we expect to resilver -- 30808ad4d6ddSJeff Bonwick * then don't do the I/O unless zio's txg is actually in vd's DTL. 30818ad4d6ddSJeff Bonwick * This prevents spurious resilvering with nested replication. 30828ad4d6ddSJeff Bonwick * For example, given a mirror of mirrors, (A+B)+(C+D), if only 30838ad4d6ddSJeff Bonwick * A is out of date, we'll read from C+D, then use the data to 30848ad4d6ddSJeff Bonwick * resilver A+B -- but we don't actually want to resilver B, just A. 30858ad4d6ddSJeff Bonwick * The top-level mirror has no way to know this, so instead we just 30868ad4d6ddSJeff Bonwick * discard unnecessary repairs as we work our way down the vdev tree. 30878ad4d6ddSJeff Bonwick * The same logic applies to any form of nested replication: 30888ad4d6ddSJeff Bonwick * ditto + mirror, RAID-Z + replacing, etc. This covers them all. 30898ad4d6ddSJeff Bonwick */ 30908ad4d6ddSJeff Bonwick if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && 30918ad4d6ddSJeff Bonwick !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && 30928ad4d6ddSJeff Bonwick zio->io_txg != 0 && /* not a delegated i/o */ 30938ad4d6ddSJeff Bonwick !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { 30948ad4d6ddSJeff Bonwick ASSERT(zio->io_type == ZIO_TYPE_WRITE); 30958ad4d6ddSJeff Bonwick zio_vdev_io_bypass(zio); 30968ad4d6ddSJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 30978ad4d6ddSJeff Bonwick } 3098fa9e4066Sahrens 3099e14bb325SJeff Bonwick if (vd->vdev_ops->vdev_op_leaf && 3100e14bb325SJeff Bonwick (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { 3101e14bb325SJeff Bonwick 310243466aaeSMax Grossman if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) 3103a3f829aeSBill Moore return (ZIO_PIPELINE_CONTINUE); 3104e14bb325SJeff Bonwick 3105e14bb325SJeff Bonwick if ((zio = vdev_queue_io(zio)) == NULL) 3106e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3107e14bb325SJeff Bonwick 3108e14bb325SJeff Bonwick if (!vdev_accessible(vd, zio)) { 3109be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3110e14bb325SJeff Bonwick zio_interrupt(zio); 3111e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3112e14bb325SJeff Bonwick } 3113e14bb325SJeff Bonwick } 3114e14bb325SJeff Bonwick 3115738f37bcSGeorge Wilson vd->vdev_ops->vdev_op_io_start(zio); 3116738f37bcSGeorge Wilson return (ZIO_PIPELINE_STOP); 3117fa9e4066Sahrens } 3118fa9e4066Sahrens 3119e05725b1Sbonwick static int 3120fa9e4066Sahrens zio_vdev_io_done(zio_t *zio) 3121fa9e4066Sahrens { 3122e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd; 3123e14bb325SJeff Bonwick vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; 3124e14bb325SJeff Bonwick boolean_t unexpected_error = B_FALSE; 3125e05725b1Sbonwick 3126e14bb325SJeff Bonwick if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 3127e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3128fa9e4066Sahrens 3129e14bb325SJeff Bonwick ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); 3130e14bb325SJeff Bonwick 3131e14bb325SJeff Bonwick if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { 3132e14bb325SJeff Bonwick 3133e14bb325SJeff Bonwick vdev_queue_io_done(zio); 3134fa9e4066Sahrens 3135e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_WRITE) 3136e14bb325SJeff Bonwick vdev_cache_write(zio); 3137e14bb325SJeff Bonwick 3138e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 31398956713aSEric Schrock zio->io_error = zio_handle_device_injection(vd, 31408956713aSEric Schrock zio, EIO); 3141e14bb325SJeff Bonwick 3142e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 3143e14bb325SJeff Bonwick zio->io_error = zio_handle_label_injection(zio, EIO); 3144e14bb325SJeff Bonwick 3145e14bb325SJeff Bonwick if (zio->io_error) { 3146e14bb325SJeff Bonwick if (!vdev_accessible(vd, zio)) { 3147be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3148e14bb325SJeff Bonwick } else { 3149e14bb325SJeff Bonwick unexpected_error = B_TRUE; 3150e14bb325SJeff Bonwick } 3151e14bb325SJeff Bonwick } 315251ece835Seschrock } 3153fa9e4066Sahrens 3154e14bb325SJeff Bonwick ops->vdev_op_io_done(zio); 3155e14bb325SJeff Bonwick 3156e14bb325SJeff Bonwick if (unexpected_error) 3157a3f829aeSBill Moore VERIFY(vdev_probe(vd, zio) == NULL); 3158e14bb325SJeff Bonwick 3159e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3160fa9e4066Sahrens } 3161fa9e4066Sahrens 316222fe2c88SJonathan Adams /* 316322fe2c88SJonathan Adams * For non-raidz ZIOs, we can just copy aside the bad data read from the 316422fe2c88SJonathan Adams * disk, and use that to finish the checksum ereport later. 316522fe2c88SJonathan Adams */ 316622fe2c88SJonathan Adams static void 316722fe2c88SJonathan Adams zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, 316822fe2c88SJonathan Adams const void *good_buf) 316922fe2c88SJonathan Adams { 317022fe2c88SJonathan Adams /* no processing needed */ 317122fe2c88SJonathan Adams zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); 317222fe2c88SJonathan Adams } 317322fe2c88SJonathan Adams 317422fe2c88SJonathan Adams /*ARGSUSED*/ 317522fe2c88SJonathan Adams void 317622fe2c88SJonathan Adams zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) 317722fe2c88SJonathan Adams { 317822fe2c88SJonathan Adams void *buf = zio_buf_alloc(zio->io_size); 317922fe2c88SJonathan Adams 3180770499e1SDan Kimmel abd_copy_to_buf(buf, zio->io_abd, zio->io_size); 318122fe2c88SJonathan Adams 318222fe2c88SJonathan Adams zcr->zcr_cbinfo = zio->io_size; 318322fe2c88SJonathan Adams zcr->zcr_cbdata = buf; 318422fe2c88SJonathan Adams zcr->zcr_finish = zio_vsd_default_cksum_finish; 318522fe2c88SJonathan Adams zcr->zcr_free = zio_buf_free; 318622fe2c88SJonathan Adams } 318722fe2c88SJonathan Adams 3188e05725b1Sbonwick static int 3189fa9e4066Sahrens zio_vdev_io_assess(zio_t *zio) 3190fa9e4066Sahrens { 3191fa9e4066Sahrens vdev_t *vd = zio->io_vd; 3192e14bb325SJeff Bonwick 3193e14bb325SJeff Bonwick if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) 3194e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3195e14bb325SJeff Bonwick 3196e14bb325SJeff Bonwick if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) 3197e14bb325SJeff Bonwick spa_config_exit(zio->io_spa, SCL_ZIO, zio); 3198e14bb325SJeff Bonwick 3199e14bb325SJeff Bonwick if (zio->io_vsd != NULL) { 320022fe2c88SJonathan Adams zio->io_vsd_ops->vsd_free(zio); 3201e14bb325SJeff Bonwick zio->io_vsd = NULL; 3202ecc2d604Sbonwick } 3203ecc2d604Sbonwick 3204e14bb325SJeff Bonwick if (zio_injection_enabled && zio->io_error == 0) 3205ea8dc4b6Seschrock zio->io_error = zio_handle_fault_injection(zio, EIO); 3206ea8dc4b6Seschrock 3207fa9e4066Sahrens /* 3208fa9e4066Sahrens * If the I/O failed, determine whether we should attempt to retry it. 320935a5a358SJonathan Adams * 321035a5a358SJonathan Adams * On retry, we cut in line in the issue queue, since we don't want 321135a5a358SJonathan Adams * compression/checksumming/etc. work to prevent our (cheap) IO reissue. 3212fa9e4066Sahrens */ 3213e14bb325SJeff Bonwick if (zio->io_error && vd == NULL && 3214e14bb325SJeff Bonwick !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { 3215e14bb325SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ 3216e14bb325SJeff Bonwick ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ 3217fa9e4066Sahrens zio->io_error = 0; 3218e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_IO_RETRY | 3219e14bb325SJeff Bonwick ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; 3220b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; 322135a5a358SJonathan Adams zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, 322235a5a358SJonathan Adams zio_requeue_io_start_cut_in_line); 3223e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3224ea8dc4b6Seschrock } 3225fa9e4066Sahrens 3226e14bb325SJeff Bonwick /* 3227e14bb325SJeff Bonwick * If we got an error on a leaf device, convert it to ENXIO 3228e14bb325SJeff Bonwick * if the device is not accessible at all. 3229e14bb325SJeff Bonwick */ 3230e14bb325SJeff Bonwick if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && 3231e14bb325SJeff Bonwick !vdev_accessible(vd, zio)) 3232be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 3233e14bb325SJeff Bonwick 3234e14bb325SJeff Bonwick /* 3235e14bb325SJeff Bonwick * If we can't write to an interior vdev (mirror or RAID-Z), 3236e14bb325SJeff Bonwick * set vdev_cant_write so that we stop trying to allocate from it. 3237e14bb325SJeff Bonwick */ 3238e14bb325SJeff Bonwick if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && 32393b2aab18SMatthew Ahrens vd != NULL && !vd->vdev_ops->vdev_op_leaf) { 3240e14bb325SJeff Bonwick vd->vdev_cant_write = B_TRUE; 32413b2aab18SMatthew Ahrens } 3242e14bb325SJeff Bonwick 3243295438baSHans Rosenfeld /* 3244295438baSHans Rosenfeld * If a cache flush returns ENOTSUP or ENOTTY, we know that no future 3245295438baSHans Rosenfeld * attempts will ever succeed. In this case we set a persistent bit so 3246295438baSHans Rosenfeld * that we don't bother with it in the future. 3247295438baSHans Rosenfeld */ 3248295438baSHans Rosenfeld if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && 3249295438baSHans Rosenfeld zio->io_type == ZIO_TYPE_IOCTL && 3250295438baSHans Rosenfeld zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) 3251295438baSHans Rosenfeld vd->vdev_nowritecache = B_TRUE; 3252295438baSHans Rosenfeld 3253e14bb325SJeff Bonwick if (zio->io_error) 3254e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3255e14bb325SJeff Bonwick 325669962b56SMatthew Ahrens if (vd != NULL && vd->vdev_ops->vdev_op_leaf && 325769962b56SMatthew Ahrens zio->io_physdone != NULL) { 325869962b56SMatthew Ahrens ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); 325969962b56SMatthew Ahrens ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); 326069962b56SMatthew Ahrens zio->io_physdone(zio->io_logical); 326169962b56SMatthew Ahrens } 326269962b56SMatthew Ahrens 3263e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3264fa9e4066Sahrens } 3265fa9e4066Sahrens 3266fa9e4066Sahrens void 3267fa9e4066Sahrens zio_vdev_io_reissue(zio_t *zio) 3268fa9e4066Sahrens { 3269fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3270fa9e4066Sahrens ASSERT(zio->io_error == 0); 3271fa9e4066Sahrens 3272b24ab676SJeff Bonwick zio->io_stage >>= 1; 3273fa9e4066Sahrens } 3274fa9e4066Sahrens 3275fa9e4066Sahrens void 3276fa9e4066Sahrens zio_vdev_io_redone(zio_t *zio) 3277fa9e4066Sahrens { 3278fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 3279fa9e4066Sahrens 3280b24ab676SJeff Bonwick zio->io_stage >>= 1; 3281fa9e4066Sahrens } 3282fa9e4066Sahrens 3283fa9e4066Sahrens void 3284fa9e4066Sahrens zio_vdev_io_bypass(zio_t *zio) 3285fa9e4066Sahrens { 3286fa9e4066Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 3287fa9e4066Sahrens ASSERT(zio->io_error == 0); 3288fa9e4066Sahrens 3289fa9e4066Sahrens zio->io_flags |= ZIO_FLAG_IO_BYPASS; 3290b24ab676SJeff Bonwick zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; 3291fa9e4066Sahrens } 3292fa9e4066Sahrens 3293fa9e4066Sahrens /* 3294fa9e4066Sahrens * ========================================================================== 3295fa9e4066Sahrens * Generate and verify checksums 3296fa9e4066Sahrens * ========================================================================== 3297fa9e4066Sahrens */ 3298e05725b1Sbonwick static int 3299fa9e4066Sahrens zio_checksum_generate(zio_t *zio) 3300fa9e4066Sahrens { 3301fa9e4066Sahrens blkptr_t *bp = zio->io_bp; 3302e14bb325SJeff Bonwick enum zio_checksum checksum; 3303fa9e4066Sahrens 3304e14bb325SJeff Bonwick if (bp == NULL) { 3305e14bb325SJeff Bonwick /* 3306e14bb325SJeff Bonwick * This is zio_write_phys(). 3307e14bb325SJeff Bonwick * We're either generating a label checksum, or none at all. 3308e14bb325SJeff Bonwick */ 3309e14bb325SJeff Bonwick checksum = zio->io_prop.zp_checksum; 3310e14bb325SJeff Bonwick 3311e14bb325SJeff Bonwick if (checksum == ZIO_CHECKSUM_OFF) 3312e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3313fa9e4066Sahrens 3314e14bb325SJeff Bonwick ASSERT(checksum == ZIO_CHECKSUM_LABEL); 3315e14bb325SJeff Bonwick } else { 3316e14bb325SJeff Bonwick if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { 3317e14bb325SJeff Bonwick ASSERT(!IO_IS_ALLOCATING(zio)); 3318e14bb325SJeff Bonwick checksum = ZIO_CHECKSUM_GANG_HEADER; 3319e14bb325SJeff Bonwick } else { 3320e14bb325SJeff Bonwick checksum = BP_GET_CHECKSUM(bp); 3321e14bb325SJeff Bonwick } 3322e14bb325SJeff Bonwick } 3323fa9e4066Sahrens 3324770499e1SDan Kimmel zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size); 3325fa9e4066Sahrens 3326e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3327fa9e4066Sahrens } 3328fa9e4066Sahrens 3329e05725b1Sbonwick static int 3330e14bb325SJeff Bonwick zio_checksum_verify(zio_t *zio) 3331fa9e4066Sahrens { 333222fe2c88SJonathan Adams zio_bad_cksum_t info; 3333e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3334e14bb325SJeff Bonwick int error; 3335fa9e4066Sahrens 3336b24ab676SJeff Bonwick ASSERT(zio->io_vd != NULL); 3337b24ab676SJeff Bonwick 3338e14bb325SJeff Bonwick if (bp == NULL) { 3339e14bb325SJeff Bonwick /* 3340e14bb325SJeff Bonwick * This is zio_read_phys(). 3341e14bb325SJeff Bonwick * We're either verifying a label checksum, or nothing at all. 3342e14bb325SJeff Bonwick */ 3343e14bb325SJeff Bonwick if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) 3344e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3345fa9e4066Sahrens 3346e14bb325SJeff Bonwick ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); 3347e14bb325SJeff Bonwick } 3348fa9e4066Sahrens 334922fe2c88SJonathan Adams if ((error = zio_checksum_error(zio, &info)) != 0) { 3350e14bb325SJeff Bonwick zio->io_error = error; 3351373dc1cfSMatthew Ahrens if (error == ECKSUM && 3352373dc1cfSMatthew Ahrens !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { 335322fe2c88SJonathan Adams zfs_ereport_start_checksum(zio->io_spa, 335422fe2c88SJonathan Adams zio->io_vd, zio, zio->io_offset, 335522fe2c88SJonathan Adams zio->io_size, NULL, &info); 3356e14bb325SJeff Bonwick } 3357fa9e4066Sahrens } 3358fa9e4066Sahrens 3359e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 3360fa9e4066Sahrens } 3361fa9e4066Sahrens 3362fa9e4066Sahrens /* 3363fa9e4066Sahrens * Called by RAID-Z to ensure we don't compute the checksum twice. 3364fa9e4066Sahrens */ 3365fa9e4066Sahrens void 3366fa9e4066Sahrens zio_checksum_verified(zio_t *zio) 3367fa9e4066Sahrens { 3368b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; 3369fa9e4066Sahrens } 3370fa9e4066Sahrens 3371fa9e4066Sahrens /* 3372e14bb325SJeff Bonwick * ========================================================================== 3373e14bb325SJeff Bonwick * Error rank. Error are ranked in the order 0, ENXIO, ECKSUM, EIO, other. 33745d7b4d43SMatthew Ahrens * An error of 0 indicates success. ENXIO indicates whole-device failure, 3375e14bb325SJeff Bonwick * which may be transient (e.g. unplugged) or permament. ECKSUM and EIO 3376e14bb325SJeff Bonwick * indicate errors that are specific to one I/O, and most likely permanent. 3377e14bb325SJeff Bonwick * Any other error is presumed to be worse because we weren't expecting it. 3378e14bb325SJeff Bonwick * ========================================================================== 3379fa9e4066Sahrens */ 3380e14bb325SJeff Bonwick int 3381e14bb325SJeff Bonwick zio_worst_error(int e1, int e2) 3382fa9e4066Sahrens { 3383e14bb325SJeff Bonwick static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; 3384e14bb325SJeff Bonwick int r1, r2; 3385e14bb325SJeff Bonwick 3386e14bb325SJeff Bonwick for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) 3387e14bb325SJeff Bonwick if (e1 == zio_error_rank[r1]) 3388e14bb325SJeff Bonwick break; 3389e14bb325SJeff Bonwick 3390e14bb325SJeff Bonwick for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) 3391e14bb325SJeff Bonwick if (e2 == zio_error_rank[r2]) 3392e14bb325SJeff Bonwick break; 339344cd46caSbillm 3394e14bb325SJeff Bonwick return (r1 > r2 ? e1 : e2); 3395fa9e4066Sahrens } 3396fa9e4066Sahrens 3397fa9e4066Sahrens /* 3398fa9e4066Sahrens * ========================================================================== 3399e14bb325SJeff Bonwick * I/O completion 3400fa9e4066Sahrens * ========================================================================== 3401fa9e4066Sahrens */ 3402e14bb325SJeff Bonwick static int 3403e14bb325SJeff Bonwick zio_ready(zio_t *zio) 3404fa9e4066Sahrens { 3405e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3406a3f829aeSBill Moore zio_t *pio, *pio_next; 34070f7643c7SGeorge Wilson zio_link_t *zl = NULL; 3408fa9e4066Sahrens 3409b24ab676SJeff Bonwick if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || 3410b24ab676SJeff Bonwick zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY)) 3411f5383399SBill Moore return (ZIO_PIPELINE_STOP); 3412fa9e4066Sahrens 3413f5383399SBill Moore if (zio->io_ready) { 3414e14bb325SJeff Bonwick ASSERT(IO_IS_ALLOCATING(zio)); 341580901aeaSGeorge Wilson ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || 341680901aeaSGeorge Wilson (zio->io_flags & ZIO_FLAG_NOPWRITE)); 3417e14bb325SJeff Bonwick ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); 3418fa9e4066Sahrens 3419e14bb325SJeff Bonwick zio->io_ready(zio); 3420e14bb325SJeff Bonwick } 3421fa9e4066Sahrens 3422e14bb325SJeff Bonwick if (bp != NULL && bp != &zio->io_bp_copy) 3423e14bb325SJeff Bonwick zio->io_bp_copy = *bp; 3424fa9e4066Sahrens 34250f7643c7SGeorge Wilson if (zio->io_error != 0) { 3426e14bb325SJeff Bonwick zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; 3427fa9e4066Sahrens 34280f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 34290f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(zio)); 34300f7643c7SGeorge Wilson ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 34310f7643c7SGeorge Wilson /* 34320f7643c7SGeorge Wilson * We were unable to allocate anything, unreserve and 34330f7643c7SGeorge Wilson * issue the next I/O to allocate. 34340f7643c7SGeorge Wilson */ 34350f7643c7SGeorge Wilson metaslab_class_throttle_unreserve( 34360f7643c7SGeorge Wilson spa_normal_class(zio->io_spa), 34370f7643c7SGeorge Wilson zio->io_prop.zp_copies, zio); 34380f7643c7SGeorge Wilson zio_allocate_dispatch(zio->io_spa); 34390f7643c7SGeorge Wilson } 34400f7643c7SGeorge Wilson } 34410f7643c7SGeorge Wilson 3442a3f829aeSBill Moore mutex_enter(&zio->io_lock); 3443a3f829aeSBill Moore zio->io_state[ZIO_WAIT_READY] = 1; 34440f7643c7SGeorge Wilson pio = zio_walk_parents(zio, &zl); 3445a3f829aeSBill Moore mutex_exit(&zio->io_lock); 3446a3f829aeSBill Moore 3447a3f829aeSBill Moore /* 3448a3f829aeSBill Moore * As we notify zio's parents, new parents could be added. 3449a3f829aeSBill Moore * New parents go to the head of zio's io_parent_list, however, 3450a3f829aeSBill Moore * so we will (correctly) not notify them. The remainder of zio's 3451a3f829aeSBill Moore * io_parent_list, from 'pio_next' onward, cannot change because 3452a3f829aeSBill Moore * all parents must wait for us to be done before they can be done. 3453a3f829aeSBill Moore */ 3454a3f829aeSBill Moore for (; pio != NULL; pio = pio_next) { 34550f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 3456e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_READY); 3457a3f829aeSBill Moore } 3458fa9e4066Sahrens 3459b24ab676SJeff Bonwick if (zio->io_flags & ZIO_FLAG_NODATA) { 3460b24ab676SJeff Bonwick if (BP_IS_GANG(bp)) { 3461b24ab676SJeff Bonwick zio->io_flags &= ~ZIO_FLAG_NODATA; 3462b24ab676SJeff Bonwick } else { 3463770499e1SDan Kimmel ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); 3464b24ab676SJeff Bonwick zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; 3465b24ab676SJeff Bonwick } 3466b24ab676SJeff Bonwick } 3467b24ab676SJeff Bonwick 3468a33cae98STim Haley if (zio_injection_enabled && 3469a33cae98STim Haley zio->io_spa->spa_syncing_txg == zio->io_txg) 3470a33cae98STim Haley zio_handle_ignored_writes(zio); 3471a33cae98STim Haley 3472e14bb325SJeff Bonwick return (ZIO_PIPELINE_CONTINUE); 3473fa9e4066Sahrens } 3474fa9e4066Sahrens 34750f7643c7SGeorge Wilson /* 34760f7643c7SGeorge Wilson * Update the allocation throttle accounting. 34770f7643c7SGeorge Wilson */ 34780f7643c7SGeorge Wilson static void 34790f7643c7SGeorge Wilson zio_dva_throttle_done(zio_t *zio) 34800f7643c7SGeorge Wilson { 34810f7643c7SGeorge Wilson zio_t *lio = zio->io_logical; 34820f7643c7SGeorge Wilson zio_t *pio = zio_unique_parent(zio); 34830f7643c7SGeorge Wilson vdev_t *vd = zio->io_vd; 34840f7643c7SGeorge Wilson int flags = METASLAB_ASYNC_ALLOC; 34850f7643c7SGeorge Wilson 34860f7643c7SGeorge Wilson ASSERT3P(zio->io_bp, !=, NULL); 34870f7643c7SGeorge Wilson ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); 34880f7643c7SGeorge Wilson ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); 34890f7643c7SGeorge Wilson ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); 34900f7643c7SGeorge Wilson ASSERT(vd != NULL); 34910f7643c7SGeorge Wilson ASSERT3P(vd, ==, vd->vdev_top); 34920f7643c7SGeorge Wilson ASSERT(!(zio->io_flags & (ZIO_FLAG_IO_REPAIR | ZIO_FLAG_IO_RETRY))); 34930f7643c7SGeorge Wilson ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); 34940f7643c7SGeorge Wilson ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); 34950f7643c7SGeorge Wilson ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); 34960f7643c7SGeorge Wilson 34970f7643c7SGeorge Wilson /* 34980f7643c7SGeorge Wilson * Parents of gang children can have two flavors -- ones that 34990f7643c7SGeorge Wilson * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) 35000f7643c7SGeorge Wilson * and ones that allocated the constituent blocks. The allocation 35010f7643c7SGeorge Wilson * throttle needs to know the allocating parent zio so we must find 35020f7643c7SGeorge Wilson * it here. 35030f7643c7SGeorge Wilson */ 35040f7643c7SGeorge Wilson if (pio->io_child_type == ZIO_CHILD_GANG) { 35050f7643c7SGeorge Wilson /* 35060f7643c7SGeorge Wilson * If our parent is a rewrite gang child then our grandparent 35070f7643c7SGeorge Wilson * would have been the one that performed the allocation. 35080f7643c7SGeorge Wilson */ 35090f7643c7SGeorge Wilson if (pio->io_flags & ZIO_FLAG_IO_REWRITE) 35100f7643c7SGeorge Wilson pio = zio_unique_parent(pio); 35110f7643c7SGeorge Wilson flags |= METASLAB_GANG_CHILD; 35120f7643c7SGeorge Wilson } 35130f7643c7SGeorge Wilson 35140f7643c7SGeorge Wilson ASSERT(IO_IS_ALLOCATING(pio)); 35150f7643c7SGeorge Wilson ASSERT3P(zio, !=, zio->io_logical); 35160f7643c7SGeorge Wilson ASSERT(zio->io_logical != NULL); 35170f7643c7SGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); 35180f7643c7SGeorge Wilson ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); 35190f7643c7SGeorge Wilson 35200f7643c7SGeorge Wilson mutex_enter(&pio->io_lock); 35210f7643c7SGeorge Wilson metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags); 35220f7643c7SGeorge Wilson mutex_exit(&pio->io_lock); 35230f7643c7SGeorge Wilson 35240f7643c7SGeorge Wilson metaslab_class_throttle_unreserve(spa_normal_class(zio->io_spa), 35250f7643c7SGeorge Wilson 1, pio); 35260f7643c7SGeorge Wilson 35270f7643c7SGeorge Wilson /* 35280f7643c7SGeorge Wilson * Call into the pipeline to see if there is more work that 35290f7643c7SGeorge Wilson * needs to be done. If there is work to be done it will be 35300f7643c7SGeorge Wilson * dispatched to another taskq thread. 35310f7643c7SGeorge Wilson */ 35320f7643c7SGeorge Wilson zio_allocate_dispatch(zio->io_spa); 35330f7643c7SGeorge Wilson } 35340f7643c7SGeorge Wilson 3535e14bb325SJeff Bonwick static int 3536e14bb325SJeff Bonwick zio_done(zio_t *zio) 3537d63d470bSgw { 3538e14bb325SJeff Bonwick spa_t *spa = zio->io_spa; 3539e14bb325SJeff Bonwick zio_t *lio = zio->io_logical; 3540e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3541e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd; 3542e14bb325SJeff Bonwick uint64_t psize = zio->io_size; 3543a3f829aeSBill Moore zio_t *pio, *pio_next; 35440f7643c7SGeorge Wilson metaslab_class_t *mc = spa_normal_class(spa); 35450f7643c7SGeorge Wilson zio_link_t *zl = NULL; 3546d63d470bSgw 3547e14bb325SJeff Bonwick /* 3548f5383399SBill Moore * If our children haven't all completed, 3549e14bb325SJeff Bonwick * wait for them and then repeat this pipeline stage. 3550e14bb325SJeff Bonwick */ 3551e14bb325SJeff Bonwick if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || 3552e14bb325SJeff Bonwick zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || 3553b24ab676SJeff Bonwick zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) || 3554e14bb325SJeff Bonwick zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) 3555e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3556d63d470bSgw 35570f7643c7SGeorge Wilson /* 35580f7643c7SGeorge Wilson * If the allocation throttle is enabled, then update the accounting. 35590f7643c7SGeorge Wilson * We only track child I/Os that are part of an allocating async 35600f7643c7SGeorge Wilson * write. We must do this since the allocation is performed 35610f7643c7SGeorge Wilson * by the logical I/O but the actual write is done by child I/Os. 35620f7643c7SGeorge Wilson */ 35630f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && 35640f7643c7SGeorge Wilson zio->io_child_type == ZIO_CHILD_VDEV) { 35650f7643c7SGeorge Wilson ASSERT(mc->mc_alloc_throttle_enabled); 35660f7643c7SGeorge Wilson zio_dva_throttle_done(zio); 35670f7643c7SGeorge Wilson } 35680f7643c7SGeorge Wilson 35690f7643c7SGeorge Wilson /* 35700f7643c7SGeorge Wilson * If the allocation throttle is enabled, verify that 35710f7643c7SGeorge Wilson * we have decremented the refcounts for every I/O that was throttled. 35720f7643c7SGeorge Wilson */ 35730f7643c7SGeorge Wilson if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { 35740f7643c7SGeorge Wilson ASSERT(zio->io_type == ZIO_TYPE_WRITE); 35750f7643c7SGeorge Wilson ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); 35760f7643c7SGeorge Wilson ASSERT(bp != NULL); 35770f7643c7SGeorge Wilson metaslab_group_alloc_verify(spa, zio->io_bp, zio); 35780f7643c7SGeorge Wilson VERIFY(refcount_not_held(&mc->mc_alloc_slots, zio)); 35790f7643c7SGeorge Wilson } 35800f7643c7SGeorge Wilson 3581e14bb325SJeff Bonwick for (int c = 0; c < ZIO_CHILD_TYPES; c++) 3582e14bb325SJeff Bonwick for (int w = 0; w < ZIO_WAIT_TYPES; w++) 3583e14bb325SJeff Bonwick ASSERT(zio->io_children[c][w] == 0); 3584e14bb325SJeff Bonwick 35855d7b4d43SMatthew Ahrens if (bp != NULL && !BP_IS_EMBEDDED(bp)) { 3586e14bb325SJeff Bonwick ASSERT(bp->blk_pad[0] == 0); 3587e14bb325SJeff Bonwick ASSERT(bp->blk_pad[1] == 0); 3588e14bb325SJeff Bonwick ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || 3589a3f829aeSBill Moore (bp == zio_unique_parent(zio)->io_bp)); 3590e14bb325SJeff Bonwick if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) && 3591b24ab676SJeff Bonwick zio->io_bp_override == NULL && 3592e14bb325SJeff Bonwick !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { 3593e14bb325SJeff Bonwick ASSERT(!BP_SHOULD_BYTESWAP(bp)); 3594b24ab676SJeff Bonwick ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(bp)); 3595e14bb325SJeff Bonwick ASSERT(BP_COUNT_GANG(bp) == 0 || 3596e14bb325SJeff Bonwick (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp))); 3597e14bb325SJeff Bonwick } 359880901aeaSGeorge Wilson if (zio->io_flags & ZIO_FLAG_NOPWRITE) 359980901aeaSGeorge Wilson VERIFY(BP_EQUAL(bp, &zio->io_bp_orig)); 3600e14bb325SJeff Bonwick } 3601fa9e4066Sahrens 3602e14bb325SJeff Bonwick /* 3603b24ab676SJeff Bonwick * If there were child vdev/gang/ddt errors, they apply to us now. 3604e14bb325SJeff Bonwick */ 3605e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); 3606e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_GANG); 3607b24ab676SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_DDT); 3608b24ab676SJeff Bonwick 3609b24ab676SJeff Bonwick /* 3610b24ab676SJeff Bonwick * If the I/O on the transformed data was successful, generate any 3611b24ab676SJeff Bonwick * checksum reports now while we still have the transformed data. 3612b24ab676SJeff Bonwick */ 3613b24ab676SJeff Bonwick if (zio->io_error == 0) { 3614b24ab676SJeff Bonwick while (zio->io_cksum_report != NULL) { 3615b24ab676SJeff Bonwick zio_cksum_report_t *zcr = zio->io_cksum_report; 3616b24ab676SJeff Bonwick uint64_t align = zcr->zcr_align; 3617b24ab676SJeff Bonwick uint64_t asize = P2ROUNDUP(psize, align); 3618770499e1SDan Kimmel char *abuf = NULL; 3619770499e1SDan Kimmel abd_t *adata = zio->io_abd; 3620b24ab676SJeff Bonwick 3621b24ab676SJeff Bonwick if (asize != psize) { 3622770499e1SDan Kimmel adata = abd_alloc_linear(asize, B_TRUE); 3623770499e1SDan Kimmel abd_copy(adata, zio->io_abd, psize); 3624770499e1SDan Kimmel abd_zero_off(adata, psize, asize - psize); 3625b24ab676SJeff Bonwick } 3626b24ab676SJeff Bonwick 3627770499e1SDan Kimmel if (adata != NULL) 3628770499e1SDan Kimmel abuf = abd_borrow_buf_copy(adata, asize); 3629770499e1SDan Kimmel 3630b24ab676SJeff Bonwick zio->io_cksum_report = zcr->zcr_next; 3631b24ab676SJeff Bonwick zcr->zcr_next = NULL; 3632b24ab676SJeff Bonwick zcr->zcr_finish(zcr, abuf); 3633b24ab676SJeff Bonwick zfs_ereport_free_checksum(zcr); 3634b24ab676SJeff Bonwick 3635770499e1SDan Kimmel if (adata != NULL) 3636770499e1SDan Kimmel abd_return_buf(adata, abuf, asize); 3637770499e1SDan Kimmel 3638b24ab676SJeff Bonwick if (asize != psize) 3639770499e1SDan Kimmel abd_free(adata); 3640b24ab676SJeff Bonwick } 3641b24ab676SJeff Bonwick } 3642e14bb325SJeff Bonwick 3643e14bb325SJeff Bonwick zio_pop_transforms(zio); /* note: may set zio->io_error */ 3644e14bb325SJeff Bonwick 3645e14bb325SJeff Bonwick vdev_stat_update(zio, psize); 3646e14bb325SJeff Bonwick 3647e14bb325SJeff Bonwick if (zio->io_error) { 3648e14bb325SJeff Bonwick /* 3649e14bb325SJeff Bonwick * If this I/O is attached to a particular vdev, 3650e14bb325SJeff Bonwick * generate an error message describing the I/O failure 3651e14bb325SJeff Bonwick * at the block level. We ignore these errors if the 3652e14bb325SJeff Bonwick * device is currently unavailable. 3653e14bb325SJeff Bonwick */ 3654e14bb325SJeff Bonwick if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd)) 3655e14bb325SJeff Bonwick zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0); 3656e14bb325SJeff Bonwick 36578f18d1faSGeorge Wilson if ((zio->io_error == EIO || !(zio->io_flags & 36588f18d1faSGeorge Wilson (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && 36598f18d1faSGeorge Wilson zio == lio) { 3660e14bb325SJeff Bonwick /* 3661e14bb325SJeff Bonwick * For logical I/O requests, tell the SPA to log the 3662e14bb325SJeff Bonwick * error and generate a logical data ereport. 3663e14bb325SJeff Bonwick */ 3664e14bb325SJeff Bonwick spa_log_error(spa, zio); 3665e14bb325SJeff Bonwick zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio, 3666e14bb325SJeff Bonwick 0, 0); 3667e14bb325SJeff Bonwick } 3668e14bb325SJeff Bonwick } 3669fa9e4066Sahrens 3670e14bb325SJeff Bonwick if (zio->io_error && zio == lio) { 3671e14bb325SJeff Bonwick /* 3672e14bb325SJeff Bonwick * Determine whether zio should be reexecuted. This will 3673e14bb325SJeff Bonwick * propagate all the way to the root via zio_notify_parent(). 3674e14bb325SJeff Bonwick */ 3675e14bb325SJeff Bonwick ASSERT(vd == NULL && bp != NULL); 3676b24ab676SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3677e14bb325SJeff Bonwick 3678b24ab676SJeff Bonwick if (IO_IS_ALLOCATING(zio) && 3679b24ab676SJeff Bonwick !(zio->io_flags & ZIO_FLAG_CANFAIL)) { 3680e14bb325SJeff Bonwick if (zio->io_error != ENOSPC) 3681e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_NOW; 3682e14bb325SJeff Bonwick else 3683e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3684b24ab676SJeff Bonwick } 3685e14bb325SJeff Bonwick 3686e14bb325SJeff Bonwick if ((zio->io_type == ZIO_TYPE_READ || 3687e14bb325SJeff Bonwick zio->io_type == ZIO_TYPE_FREE) && 368844ecc532SGeorge Wilson !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && 3689e14bb325SJeff Bonwick zio->io_error == ENXIO && 3690b16da2e2SGeorge Wilson spa_load_state(spa) == SPA_LOAD_NONE && 3691e14bb325SJeff Bonwick spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE) 3692e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 3693e14bb325SJeff Bonwick 3694e14bb325SJeff Bonwick if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) 3695e14bb325SJeff Bonwick zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; 369622fe2c88SJonathan Adams 369722fe2c88SJonathan Adams /* 369822fe2c88SJonathan Adams * Here is a possibly good place to attempt to do 369922fe2c88SJonathan Adams * either combinatorial reconstruction or error correction 370022fe2c88SJonathan Adams * based on checksums. It also might be a good place 370122fe2c88SJonathan Adams * to send out preliminary ereports before we suspend 370222fe2c88SJonathan Adams * processing. 370322fe2c88SJonathan Adams */ 3704d63d470bSgw } 3705d63d470bSgw 370667bd71c6Sperrin /* 3707e14bb325SJeff Bonwick * If there were logical child errors, they apply to us now. 3708e14bb325SJeff Bonwick * We defer this until now to avoid conflating logical child 3709e14bb325SJeff Bonwick * errors with errors that happened to the zio itself when 3710e14bb325SJeff Bonwick * updating vdev stats and reporting FMA events above. 371167bd71c6Sperrin */ 3712e14bb325SJeff Bonwick zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); 37138654d025Sperrin 3714b24ab676SJeff Bonwick if ((zio->io_error || zio->io_reexecute) && 3715b24ab676SJeff Bonwick IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && 371680901aeaSGeorge Wilson !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) 3717f5383399SBill Moore zio_dva_unallocate(zio, zio->io_gang_tree, bp); 3718f5383399SBill Moore 3719f5383399SBill Moore zio_gang_tree_free(&zio->io_gang_tree); 3720f5383399SBill Moore 372133a372edSGeorge Wilson /* 372233a372edSGeorge Wilson * Godfather I/Os should never suspend. 372333a372edSGeorge Wilson */ 372433a372edSGeorge Wilson if ((zio->io_flags & ZIO_FLAG_GODFATHER) && 372533a372edSGeorge Wilson (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) 372633a372edSGeorge Wilson zio->io_reexecute = 0; 372733a372edSGeorge Wilson 372833a372edSGeorge Wilson if (zio->io_reexecute) { 3729e14bb325SJeff Bonwick /* 3730e14bb325SJeff Bonwick * This is a logical I/O that wants to reexecute. 3731e14bb325SJeff Bonwick * 3732e14bb325SJeff Bonwick * Reexecute is top-down. When an i/o fails, if it's not 3733e14bb325SJeff Bonwick * the root, it simply notifies its parent and sticks around. 3734e14bb325SJeff Bonwick * The parent, seeing that it still has children in zio_done(), 3735e14bb325SJeff Bonwick * does the same. This percolates all the way up to the root. 3736e14bb325SJeff Bonwick * The root i/o will reexecute or suspend the entire tree. 3737e14bb325SJeff Bonwick * 3738e14bb325SJeff Bonwick * This approach ensures that zio_reexecute() honors 3739e14bb325SJeff Bonwick * all the original i/o dependency relationships, e.g. 3740e14bb325SJeff Bonwick * parents not executing until children are ready. 3741e14bb325SJeff Bonwick */ 3742e14bb325SJeff Bonwick ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); 3743fa9e4066Sahrens 3744f5383399SBill Moore zio->io_gang_leader = NULL; 3745e14bb325SJeff Bonwick 3746a3f829aeSBill Moore mutex_enter(&zio->io_lock); 3747a3f829aeSBill Moore zio->io_state[ZIO_WAIT_DONE] = 1; 3748a3f829aeSBill Moore mutex_exit(&zio->io_lock); 3749a3f829aeSBill Moore 375054d692b7SGeorge Wilson /* 375154d692b7SGeorge Wilson * "The Godfather" I/O monitors its children but is 375254d692b7SGeorge Wilson * not a true parent to them. It will track them through 375354d692b7SGeorge Wilson * the pipeline but severs its ties whenever they get into 375454d692b7SGeorge Wilson * trouble (e.g. suspended). This allows "The Godfather" 375554d692b7SGeorge Wilson * I/O to return status without blocking. 375654d692b7SGeorge Wilson */ 37570f7643c7SGeorge Wilson zl = NULL; 37580f7643c7SGeorge Wilson for (pio = zio_walk_parents(zio, &zl); pio != NULL; 37590f7643c7SGeorge Wilson pio = pio_next) { 37600f7643c7SGeorge Wilson zio_link_t *remove_zl = zl; 37610f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 376254d692b7SGeorge Wilson 376354d692b7SGeorge Wilson if ((pio->io_flags & ZIO_FLAG_GODFATHER) && 376454d692b7SGeorge Wilson (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { 37650f7643c7SGeorge Wilson zio_remove_child(pio, zio, remove_zl); 376654d692b7SGeorge Wilson zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 376754d692b7SGeorge Wilson } 376854d692b7SGeorge Wilson } 376954d692b7SGeorge Wilson 3770a3f829aeSBill Moore if ((pio = zio_unique_parent(zio)) != NULL) { 3771e14bb325SJeff Bonwick /* 3772e14bb325SJeff Bonwick * We're not a root i/o, so there's nothing to do 3773e14bb325SJeff Bonwick * but notify our parent. Don't propagate errors 3774e14bb325SJeff Bonwick * upward since we haven't permanently failed yet. 3775e14bb325SJeff Bonwick */ 377633a372edSGeorge Wilson ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); 3777e14bb325SJeff Bonwick zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; 3778e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3779e14bb325SJeff Bonwick } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { 3780e14bb325SJeff Bonwick /* 3781e14bb325SJeff Bonwick * We'd fail again if we reexecuted now, so suspend 3782e14bb325SJeff Bonwick * until conditions improve (e.g. device comes online). 3783e14bb325SJeff Bonwick */ 3784e14bb325SJeff Bonwick zio_suspend(spa, zio); 3785e14bb325SJeff Bonwick } else { 3786e14bb325SJeff Bonwick /* 3787e14bb325SJeff Bonwick * Reexecution is potentially a huge amount of work. 3788e14bb325SJeff Bonwick * Hand it off to the otherwise-unused claim taskq. 3789e14bb325SJeff Bonwick */ 37905aeb9474SGarrett D'Amore ASSERT(zio->io_tqent.tqent_next == NULL); 3791ec94d322SAdam Leventhal spa_taskq_dispatch_ent(spa, ZIO_TYPE_CLAIM, 3792ec94d322SAdam Leventhal ZIO_TASKQ_ISSUE, (task_func_t *)zio_reexecute, zio, 3793ec94d322SAdam Leventhal 0, &zio->io_tqent); 3794e14bb325SJeff Bonwick } 3795e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3796fa9e4066Sahrens } 3797fa9e4066Sahrens 3798b24ab676SJeff Bonwick ASSERT(zio->io_child_count == 0); 379933a372edSGeorge Wilson ASSERT(zio->io_reexecute == 0); 3800e14bb325SJeff Bonwick ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); 3801fa9e4066Sahrens 3802b24ab676SJeff Bonwick /* 3803b24ab676SJeff Bonwick * Report any checksum errors, since the I/O is complete. 3804b24ab676SJeff Bonwick */ 380522fe2c88SJonathan Adams while (zio->io_cksum_report != NULL) { 3806b24ab676SJeff Bonwick zio_cksum_report_t *zcr = zio->io_cksum_report; 3807b24ab676SJeff Bonwick zio->io_cksum_report = zcr->zcr_next; 3808b24ab676SJeff Bonwick zcr->zcr_next = NULL; 3809b24ab676SJeff Bonwick zcr->zcr_finish(zcr, NULL); 3810b24ab676SJeff Bonwick zfs_ereport_free_checksum(zcr); 381122fe2c88SJonathan Adams } 381222fe2c88SJonathan Adams 3813a3f829aeSBill Moore /* 3814a3f829aeSBill Moore * It is the responsibility of the done callback to ensure that this 3815a3f829aeSBill Moore * particular zio is no longer discoverable for adoption, and as 3816a3f829aeSBill Moore * such, cannot acquire any new parents. 3817a3f829aeSBill Moore */ 3818e14bb325SJeff Bonwick if (zio->io_done) 3819e14bb325SJeff Bonwick zio->io_done(zio); 3820fa9e4066Sahrens 3821a3f829aeSBill Moore mutex_enter(&zio->io_lock); 3822a3f829aeSBill Moore zio->io_state[ZIO_WAIT_DONE] = 1; 3823a3f829aeSBill Moore mutex_exit(&zio->io_lock); 3824fa9e4066Sahrens 38250f7643c7SGeorge Wilson zl = NULL; 38260f7643c7SGeorge Wilson for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { 38270f7643c7SGeorge Wilson zio_link_t *remove_zl = zl; 38280f7643c7SGeorge Wilson pio_next = zio_walk_parents(zio, &zl); 38290f7643c7SGeorge Wilson zio_remove_child(pio, zio, remove_zl); 3830e14bb325SJeff Bonwick zio_notify_parent(pio, zio, ZIO_WAIT_DONE); 3831e14bb325SJeff Bonwick } 3832fa9e4066Sahrens 3833e14bb325SJeff Bonwick if (zio->io_waiter != NULL) { 3834e14bb325SJeff Bonwick mutex_enter(&zio->io_lock); 3835e14bb325SJeff Bonwick zio->io_executor = NULL; 3836e14bb325SJeff Bonwick cv_broadcast(&zio->io_cv); 3837e14bb325SJeff Bonwick mutex_exit(&zio->io_lock); 3838e14bb325SJeff Bonwick } else { 3839e14bb325SJeff Bonwick zio_destroy(zio); 3840e14bb325SJeff Bonwick } 3841fa9e4066Sahrens 3842e14bb325SJeff Bonwick return (ZIO_PIPELINE_STOP); 3843fa9e4066Sahrens } 384446341222Sperrin 384546341222Sperrin /* 3846e14bb325SJeff Bonwick * ========================================================================== 3847e14bb325SJeff Bonwick * I/O pipeline definition 3848e14bb325SJeff Bonwick * ========================================================================== 384946341222Sperrin */ 3850b24ab676SJeff Bonwick static zio_pipe_stage_t *zio_pipeline[] = { 3851e14bb325SJeff Bonwick NULL, 3852e14bb325SJeff Bonwick zio_read_bp_init, 38530f7643c7SGeorge Wilson zio_write_bp_init, 3854b24ab676SJeff Bonwick zio_free_bp_init, 3855b24ab676SJeff Bonwick zio_issue_async, 38560f7643c7SGeorge Wilson zio_write_compress, 3857e14bb325SJeff Bonwick zio_checksum_generate, 385880901aeaSGeorge Wilson zio_nop_write, 3859b24ab676SJeff Bonwick zio_ddt_read_start, 3860b24ab676SJeff Bonwick zio_ddt_read_done, 3861b24ab676SJeff Bonwick zio_ddt_write, 3862b24ab676SJeff Bonwick zio_ddt_free, 3863e14bb325SJeff Bonwick zio_gang_assemble, 3864e14bb325SJeff Bonwick zio_gang_issue, 38650f7643c7SGeorge Wilson zio_dva_throttle, 3866e14bb325SJeff Bonwick zio_dva_allocate, 3867e14bb325SJeff Bonwick zio_dva_free, 3868e14bb325SJeff Bonwick zio_dva_claim, 3869e14bb325SJeff Bonwick zio_ready, 3870e14bb325SJeff Bonwick zio_vdev_io_start, 3871e14bb325SJeff Bonwick zio_vdev_io_done, 3872e14bb325SJeff Bonwick zio_vdev_io_assess, 3873e14bb325SJeff Bonwick zio_checksum_verify, 3874e14bb325SJeff Bonwick zio_done 3875e14bb325SJeff Bonwick }; 3876ad135b5dSChristopher Siden 3877ad135b5dSChristopher Siden 3878ad135b5dSChristopher Siden 3879ad135b5dSChristopher Siden 3880a2cdcdd2SPaul Dagnelie /* 3881a2cdcdd2SPaul Dagnelie * Compare two zbookmark_phys_t's to see which we would reach first in a 3882a2cdcdd2SPaul Dagnelie * pre-order traversal of the object tree. 3883a2cdcdd2SPaul Dagnelie * 3884a2cdcdd2SPaul Dagnelie * This is simple in every case aside from the meta-dnode object. For all other 3885a2cdcdd2SPaul Dagnelie * objects, we traverse them in order (object 1 before object 2, and so on). 3886a2cdcdd2SPaul Dagnelie * However, all of these objects are traversed while traversing object 0, since 3887a2cdcdd2SPaul Dagnelie * the data it points to is the list of objects. Thus, we need to convert to a 3888a2cdcdd2SPaul Dagnelie * canonical representation so we can compare meta-dnode bookmarks to 3889a2cdcdd2SPaul Dagnelie * non-meta-dnode bookmarks. 3890a2cdcdd2SPaul Dagnelie * 3891a2cdcdd2SPaul Dagnelie * We do this by calculating "equivalents" for each field of the zbookmark. 3892a2cdcdd2SPaul Dagnelie * zbookmarks outside of the meta-dnode use their own object and level, and 3893a2cdcdd2SPaul Dagnelie * calculate the level 0 equivalent (the first L0 blkid that is contained in the 3894a2cdcdd2SPaul Dagnelie * blocks this bookmark refers to) by multiplying their blkid by their span 3895a2cdcdd2SPaul Dagnelie * (the number of L0 blocks contained within one block at their level). 3896a2cdcdd2SPaul Dagnelie * zbookmarks inside the meta-dnode calculate their object equivalent 3897a2cdcdd2SPaul Dagnelie * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use 3898a2cdcdd2SPaul Dagnelie * level + 1<<31 (any value larger than a level could ever be) for their level. 3899a2cdcdd2SPaul Dagnelie * This causes them to always compare before a bookmark in their object 3900a2cdcdd2SPaul Dagnelie * equivalent, compare appropriately to bookmarks in other objects, and to 3901a2cdcdd2SPaul Dagnelie * compare appropriately to other bookmarks in the meta-dnode. 3902a2cdcdd2SPaul Dagnelie */ 3903a2cdcdd2SPaul Dagnelie int 3904a2cdcdd2SPaul Dagnelie zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2, 3905a2cdcdd2SPaul Dagnelie const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2) 3906a2cdcdd2SPaul Dagnelie { 3907a2cdcdd2SPaul Dagnelie /* 3908a2cdcdd2SPaul Dagnelie * These variables represent the "equivalent" values for the zbookmark, 3909a2cdcdd2SPaul Dagnelie * after converting zbookmarks inside the meta dnode to their 3910a2cdcdd2SPaul Dagnelie * normal-object equivalents. 3911a2cdcdd2SPaul Dagnelie */ 3912a2cdcdd2SPaul Dagnelie uint64_t zb1obj, zb2obj; 3913a2cdcdd2SPaul Dagnelie uint64_t zb1L0, zb2L0; 3914a2cdcdd2SPaul Dagnelie uint64_t zb1level, zb2level; 3915ad135b5dSChristopher Siden 3916a2cdcdd2SPaul Dagnelie if (zb1->zb_object == zb2->zb_object && 3917a2cdcdd2SPaul Dagnelie zb1->zb_level == zb2->zb_level && 3918a2cdcdd2SPaul Dagnelie zb1->zb_blkid == zb2->zb_blkid) 3919a2cdcdd2SPaul Dagnelie return (0); 3920a2cdcdd2SPaul Dagnelie 3921a2cdcdd2SPaul Dagnelie /* 3922a2cdcdd2SPaul Dagnelie * BP_SPANB calculates the span in blocks. 3923a2cdcdd2SPaul Dagnelie */ 3924a2cdcdd2SPaul Dagnelie zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level); 3925a2cdcdd2SPaul Dagnelie zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level); 3926ad135b5dSChristopher Siden 3927ad135b5dSChristopher Siden if (zb1->zb_object == DMU_META_DNODE_OBJECT) { 3928a2cdcdd2SPaul Dagnelie zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 3929a2cdcdd2SPaul Dagnelie zb1L0 = 0; 3930a2cdcdd2SPaul Dagnelie zb1level = zb1->zb_level + COMPARE_META_LEVEL; 3931a2cdcdd2SPaul Dagnelie } else { 3932a2cdcdd2SPaul Dagnelie zb1obj = zb1->zb_object; 3933a2cdcdd2SPaul Dagnelie zb1level = zb1->zb_level; 3934ad135b5dSChristopher Siden } 3935ad135b5dSChristopher Siden 3936a2cdcdd2SPaul Dagnelie if (zb2->zb_object == DMU_META_DNODE_OBJECT) { 3937a2cdcdd2SPaul Dagnelie zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); 3938a2cdcdd2SPaul Dagnelie zb2L0 = 0; 3939a2cdcdd2SPaul Dagnelie zb2level = zb2->zb_level + COMPARE_META_LEVEL; 3940a2cdcdd2SPaul Dagnelie } else { 3941a2cdcdd2SPaul Dagnelie zb2obj = zb2->zb_object; 3942a2cdcdd2SPaul Dagnelie zb2level = zb2->zb_level; 3943a2cdcdd2SPaul Dagnelie } 3944a2cdcdd2SPaul Dagnelie 3945a2cdcdd2SPaul Dagnelie /* Now that we have a canonical representation, do the comparison. */ 3946a2cdcdd2SPaul Dagnelie if (zb1obj != zb2obj) 3947a2cdcdd2SPaul Dagnelie return (zb1obj < zb2obj ? -1 : 1); 3948a2cdcdd2SPaul Dagnelie else if (zb1L0 != zb2L0) 3949a2cdcdd2SPaul Dagnelie return (zb1L0 < zb2L0 ? -1 : 1); 3950a2cdcdd2SPaul Dagnelie else if (zb1level != zb2level) 3951a2cdcdd2SPaul Dagnelie return (zb1level > zb2level ? -1 : 1); 3952a2cdcdd2SPaul Dagnelie /* 3953a2cdcdd2SPaul Dagnelie * This can (theoretically) happen if the bookmarks have the same object 3954a2cdcdd2SPaul Dagnelie * and level, but different blkids, if the block sizes are not the same. 3955a2cdcdd2SPaul Dagnelie * There is presently no way to change the indirect block sizes 3956a2cdcdd2SPaul Dagnelie */ 3957a2cdcdd2SPaul Dagnelie return (0); 3958a2cdcdd2SPaul Dagnelie } 3959a2cdcdd2SPaul Dagnelie 3960a2cdcdd2SPaul Dagnelie /* 3961a2cdcdd2SPaul Dagnelie * This function checks the following: given that last_block is the place that 3962a2cdcdd2SPaul Dagnelie * our traversal stopped last time, does that guarantee that we've visited 3963a2cdcdd2SPaul Dagnelie * every node under subtree_root? Therefore, we can't just use the raw output 3964a2cdcdd2SPaul Dagnelie * of zbookmark_compare. We have to pass in a modified version of 3965a2cdcdd2SPaul Dagnelie * subtree_root; by incrementing the block id, and then checking whether 3966a2cdcdd2SPaul Dagnelie * last_block is before or equal to that, we can tell whether or not having 3967a2cdcdd2SPaul Dagnelie * visited last_block implies that all of subtree_root's children have been 3968a2cdcdd2SPaul Dagnelie * visited. 3969a2cdcdd2SPaul Dagnelie */ 3970a2cdcdd2SPaul Dagnelie boolean_t 3971a2cdcdd2SPaul Dagnelie zbookmark_subtree_completed(const dnode_phys_t *dnp, 3972a2cdcdd2SPaul Dagnelie const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) 3973a2cdcdd2SPaul Dagnelie { 3974a2cdcdd2SPaul Dagnelie zbookmark_phys_t mod_zb = *subtree_root; 3975a2cdcdd2SPaul Dagnelie mod_zb.zb_blkid++; 3976a2cdcdd2SPaul Dagnelie ASSERT(last_block->zb_level == 0); 3977a2cdcdd2SPaul Dagnelie 3978a2cdcdd2SPaul Dagnelie /* The objset_phys_t isn't before anything. */ 3979a2cdcdd2SPaul Dagnelie if (dnp == NULL) 3980ad135b5dSChristopher Siden return (B_FALSE); 3981a2cdcdd2SPaul Dagnelie 3982a2cdcdd2SPaul Dagnelie /* 3983a2cdcdd2SPaul Dagnelie * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the 3984a2cdcdd2SPaul Dagnelie * data block size in sectors, because that variable is only used if 3985a2cdcdd2SPaul Dagnelie * the bookmark refers to a block in the meta-dnode. Since we don't 3986a2cdcdd2SPaul Dagnelie * know without examining it what object it refers to, and there's no 3987a2cdcdd2SPaul Dagnelie * harm in passing in this value in other cases, we always pass it in. 3988a2cdcdd2SPaul Dagnelie * 3989a2cdcdd2SPaul Dagnelie * We pass in 0 for the indirect block size shift because zb2 must be 3990a2cdcdd2SPaul Dagnelie * level 0. The indirect block size is only used to calculate the span 3991a2cdcdd2SPaul Dagnelie * of the bookmark, but since the bookmark must be level 0, the span is 3992a2cdcdd2SPaul Dagnelie * always 1, so the math works out. 3993a2cdcdd2SPaul Dagnelie * 3994a2cdcdd2SPaul Dagnelie * If you make changes to how the zbookmark_compare code works, be sure 3995a2cdcdd2SPaul Dagnelie * to make sure that this code still works afterwards. 3996a2cdcdd2SPaul Dagnelie */ 3997a2cdcdd2SPaul Dagnelie return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, 3998a2cdcdd2SPaul Dagnelie 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb, 3999a2cdcdd2SPaul Dagnelie last_block) <= 0); 4000ad135b5dSChristopher Siden } 4001