/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>

/*
 * ZFS I/O Scheduler
 * -----------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios. The
 * I/O scheduler determines when and in what order those operations are
 * issued. The I/O scheduler divides operations into five I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, and scrub/resilver. Each queue defines the minimum and
 * maximum number of concurrent operations that may be issued to the device.
 * In addition, the device has an aggregate maximum. Note that the sum of the
 * per-queue minimums must not exceed the aggregate maximum, and if the
 * aggregate maximum is equal to or greater than the sum of the per-queue
 * maximums, the per-queue minimum has no effect.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers. Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied. Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied. Iteration through the I/O classes is
 * done in the order specified above. No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class. Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c). Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero. Rather than servicing them as quickly as possible,
 * the I/O scheduler changes the maximum number of active async write i/os
 * according to the amount of dirty data in the pool (see dsl_pool.c). Since
 * both throughput and latency typically increase with the number of
 * concurrent operations issued to physical devices, reducing the burstiness
 * in the number of concurrent operations also stabilizes the response time of
 * operations from other -- and in particular synchronous -- queues. In broad
 * strokes, the I/O scheduler will issue more concurrent operations from the
 * async write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
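 * As an illustrative reading of the graph (using the default tunables set
 * below: min_active = 1, max_active = 10, and dirty percents of 30 and 60):
 * a pool holding dirty data equal to 45% of zfs_dirty_data_max sits halfway
 * up the slope, so the scheduler allows 1 + (10 - 1) * (45 - 30) / (60 - 30)
 * = 5 concurrent async writes once the integer arithmetic truncates.
 *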
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum. As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle. In this case, we
 * must further throttle incoming writes (see dmu_tx_delay() for details).
 */

/*
 * The maximum number of i/os active to each device. Ideally, this will be >=
 * the sum of each queue's max_active. It must be at least the sum of each
 * queue's min_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device. If the
 * sum of the queue's max_active is < zfs_vdev_max_active, then the
 * min_active comes into play. We will send min_active from each queue,
 * and then select from queues in the order defined by zio_priority_t.
 *
 * In general, smaller max_active's will lead to lower latency of synchronous
 * operations. Larger max_active's may lead to higher overall throughput,
 * depending on underlying storage.
 *
 * The ratio of the queues' max_actives determines the balance of performance
 * between reads, writes, and scrubs. E.g., increasing
 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 * more quickly, but reads and writes to have higher latency and lower
 * throughput.
 */
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 1;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active. When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active. The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_offset < z2->io_offset)
                return (-1);
        if (z1->io_offset > z2->io_offset)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_timestamp < z2->io_timestamp)
                return (-1);
        if (z1->io_timestamp > z2->io_timestamp)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

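/*
 * A note on the comparators above: AVL trees need a total order with no
 * ties, so after comparing the sort key (offset or timestamp) each
 * comparator falls back to comparing the zio pointers themselves. Two i/os
 * with equal keys therefore still order deterministically, with their
 * addresses acting as an arbitrary but stable tie-breaker.
 */
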
void
vdev_queue_init(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
        vq->vq_vdev = vd;

        avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_queue_node));

        for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                /*
                 * The synchronous i/o queues are FIFO rather than LBA
                 * ordered. This provides more consistent latency for
                 * these i/os, and they tend to not be tightly clustered
                 * anyway so there is little to no throughput loss.
                 */
                boolean_t fifo = (p == ZIO_PRIORITY_SYNC_READ ||
                    p == ZIO_PRIORITY_SYNC_WRITE);
                avl_create(&vq->vq_class[p].vqc_queued_tree,
                    fifo ? vdev_queue_timestamp_compare :
                    vdev_queue_offset_compare,
                    sizeof (zio_t), offsetof(struct zio, io_queue_node));
        }
}

void
vdev_queue_fini(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
                avl_destroy(&vq->vq_class[p].vqc_queued_tree);
        avl_destroy(&vq->vq_active_tree);

        mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_add(&vq->vq_class[zio->io_priority].vqc_queued_tree, zio);

        mutex_enter(&spa->spa_iokstat_lock);
        spa->spa_queue_stats[zio->io_priority].spa_queued++;
        if (spa->spa_iokstat != NULL)
                kstat_waitq_enter(spa->spa_iokstat->ks_data);
        mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_remove(&vq->vq_class[zio->io_priority].vqc_queued_tree, zio);

        mutex_enter(&spa->spa_iokstat_lock);
        ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
        spa->spa_queue_stats[zio->io_priority].spa_queued--;
        if (spa->spa_iokstat != NULL)
                kstat_waitq_exit(spa->spa_iokstat->ks_data);
        mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active++;
        avl_add(&vq->vq_active_tree, zio);

        mutex_enter(&spa->spa_iokstat_lock);
        spa->spa_queue_stats[zio->io_priority].spa_active++;
        if (spa->spa_iokstat != NULL)
                kstat_runq_enter(spa->spa_iokstat->ks_data);
        mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active--;
        avl_remove(&vq->vq_active_tree, zio);

        mutex_enter(&spa->spa_iokstat_lock);
        ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
        spa->spa_queue_stats[zio->io_priority].spa_active--;
        if (spa->spa_iokstat != NULL) {
                kstat_io_t *ksio = spa->spa_iokstat->ks_data;

                kstat_runq_exit(spa->spa_iokstat->ks_data);
                if (zio->io_type == ZIO_TYPE_READ) {
                        ksio->reads++;
                        ksio->nread += zio->io_size;
                } else if (zio->io_type == ZIO_TYPE_WRITE) {
                        ksio->writes++;
                        ksio->nwritten += zio->io_size;
                }
        }
        mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
        if (aio->io_type == ZIO_TYPE_READ) {
                zio_t *pio;
                while ((pio = zio_walk_parents(aio)) != NULL) {
                        bcopy((char *)aio->io_data + (pio->io_offset -
                            aio->io_offset), pio->io_data, pio->io_size);
                }
        }

        zio_buf_free(aio->io_data, aio->io_size);
}

static int
vdev_queue_class_min_active(zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SYNC_READ:
                return (zfs_vdev_sync_read_min_active);
        case ZIO_PRIORITY_SYNC_WRITE:
                return (zfs_vdev_sync_write_min_active);
        case ZIO_PRIORITY_ASYNC_READ:
                return (zfs_vdev_async_read_min_active);
        case ZIO_PRIORITY_ASYNC_WRITE:
                return (zfs_vdev_async_write_min_active);
        case ZIO_PRIORITY_SCRUB:
                return (zfs_vdev_scrub_min_active);
        default:
                panic("invalid priority %u", p);
                return (0);
        }
}

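/*
 * A worked example of the interpolation below, under assumed numbers (the
 * default tunables, with a hypothetical zfs_dirty_data_max of 4GB): if 2GB
 * (50%) is dirty, then min_bytes = 1.2GB, max_bytes = 2.4GB, and
 * writes = (2GB - 1.2GB) * (10 - 1) / (2.4GB - 1.2GB) + 1 = 7.
 */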
static int
vdev_queue_max_async_writes(uint64_t dirty)
{
        int writes;
        uint64_t min_bytes = zfs_dirty_data_max *
            zfs_vdev_async_write_active_min_dirty_percent / 100;
        uint64_t max_bytes = zfs_dirty_data_max *
            zfs_vdev_async_write_active_max_dirty_percent / 100;

        if (dirty < min_bytes)
                return (zfs_vdev_async_write_min_active);
        if (dirty > max_bytes)
                return (zfs_vdev_async_write_max_active);

        /*
         * linear interpolation:
         * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
         * move right by min_bytes
         * move up by min_writes
         */
        writes = (dirty - min_bytes) *
            (zfs_vdev_async_write_max_active -
            zfs_vdev_async_write_min_active) /
            (max_bytes - min_bytes) +
            zfs_vdev_async_write_min_active;
        ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
        ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
        return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SYNC_READ:
                return (zfs_vdev_sync_read_max_active);
        case ZIO_PRIORITY_SYNC_WRITE:
                return (zfs_vdev_sync_write_max_active);
        case ZIO_PRIORITY_ASYNC_READ:
                return (zfs_vdev_async_read_max_active);
        case ZIO_PRIORITY_ASYNC_WRITE:
                return (vdev_queue_max_async_writes(
                    spa->spa_dsl_pool->dp_dirty_total));
        case ZIO_PRIORITY_SCRUB:
                return (zfs_vdev_scrub_max_active);
        default:
                panic("invalid priority %u", p);
                return (0);
        }
}

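/*
 * Note that, unlike the other classes, the async write class has no fixed
 * cap: vdev_queue_class_max_active() recomputes it from dp_dirty_total on
 * every call, so the limit tracks the pool's dirty data in real time.
 */
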
/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
        spa_t *spa = vq->vq_vdev->vdev_spa;
        zio_priority_t p;

        if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
                return (ZIO_PRIORITY_NUM_QUEUEABLE);

        /* find a queue that has not reached its minimum # outstanding i/os */
        for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                if (avl_numnodes(&vq->vq_class[p].vqc_queued_tree) > 0 &&
                    vq->vq_class[p].vqc_active <
                    vdev_queue_class_min_active(p))
                        return (p);
        }

        /*
         * If we haven't found a queue, look for one that hasn't reached its
         * maximum # outstanding i/os.
         */
        for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                if (avl_numnodes(&vq->vq_class[p].vqc_queued_tree) > 0 &&
                    vq->vq_class[p].vqc_active <
                    vdev_queue_class_max_active(spa, p))
                        return (p);
        }

        /* No eligible queued i/os */
        return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))

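/*
 * For example, if fio is an 8K i/o at offset 0 and lio is a 4K i/o at
 * offset 12K, then IO_SPAN(fio, lio) = 12K + 4K - 0 = 16K, and
 * IO_GAP(fio, lio) = -(0 + 8K - 12K) = 4K, the hole between them.
 */
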
static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
        zio_t *first, *last, *aio, *dio, *mandatory, *nio;
        uint64_t maxgap = 0;
        uint64_t size;
        boolean_t stretch = B_FALSE;
        vdev_queue_class_t *vqc = &vq->vq_class[zio->io_priority];
        avl_tree_t *t = &vqc->vqc_queued_tree;
        enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;

        if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
                return (NULL);

        /*
         * The synchronous i/o queues are not sorted by LBA, so we can't
         * find adjacent i/os. These i/os tend to not be tightly clustered,
         * or are too large to aggregate, so this has little impact on
         * performance.
         */
        if (zio->io_priority == ZIO_PRIORITY_SYNC_READ ||
            zio->io_priority == ZIO_PRIORITY_SYNC_WRITE)
                return (NULL);

        first = last = zio;

        if (zio->io_type == ZIO_TYPE_READ)
                maxgap = zfs_vdev_read_gap_limit;

        /*
         * We can aggregate I/Os that are sufficiently adjacent and of
         * the same flavor, as expressed by the AGG_INHERIT flags.
         * The latter requirement is necessary so that certain
         * attributes of the I/O, such as whether it's a normal I/O
         * or a scrub/resilver, can be preserved in the aggregate.
         * We can include optional I/Os, but don't allow them
         * to begin a range as they add no benefit in that situation.
         */

        /*
         * We keep track of the last non-optional I/O.
         */
        mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

        /*
         * Walk backwards through sufficiently contiguous I/Os
         * recording the last non-optional I/O.
         */
        while ((dio = AVL_PREV(t, first)) != NULL &&
            (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
            IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
            IO_GAP(dio, first) <= maxgap) {
                first = dio;
                if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
                        mandatory = first;
        }

        /*
         * Skip any initial optional I/Os.
         */
        while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
                first = AVL_NEXT(t, first);
                ASSERT(first != NULL);
        }

        /*
         * Walk forward through sufficiently contiguous I/Os.
         */
        while ((dio = AVL_NEXT(t, last)) != NULL &&
            (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
            IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit &&
            IO_GAP(last, dio) <= maxgap) {
                last = dio;
                if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
                        mandatory = last;
        }

        /*
         * Now that we've established the range of the I/O aggregation
         * we must decide what to do with trailing optional I/Os.
         * For reads, there's nothing to do. While we are unable to
         * aggregate further, it's possible that a trailing optional
         * I/O would allow the underlying device to aggregate with
         * subsequent I/Os. We must therefore determine if the next
         * non-optional I/O is close enough to make aggregation
         * worthwhile.
         */
        if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
                zio_t *nio = last;
                while ((dio = AVL_NEXT(t, nio)) != NULL &&
                    IO_GAP(nio, dio) == 0 &&
                    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
                        nio = dio;
                        if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
                                stretch = B_TRUE;
                                break;
                        }
                }
        }

        if (stretch) {
                /* This may be a no-op. */
                dio = AVL_NEXT(t, last);
                dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
        } else {
                while (last != mandatory && last != first) {
                        ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
                        last = AVL_PREV(t, last);
                        ASSERT(last != NULL);
                }
        }

        if (first == last)
                return (NULL);

        size = IO_SPAN(first, last);
        ASSERT3U(size, <=, zfs_vdev_aggregation_limit);

        aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
            zio_buf_alloc(size), size, first->io_type, zio->io_priority,
            flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
            vdev_queue_agg_io_done, NULL);
        aio->io_timestamp = first->io_timestamp;

        nio = first;
        do {
                dio = nio;
                nio = AVL_NEXT(t, dio);
                ASSERT3U(dio->io_type, ==, aio->io_type);

                if (dio->io_flags & ZIO_FLAG_NODATA) {
                        ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
                        bzero((char *)aio->io_data + (dio->io_offset -
                            aio->io_offset), dio->io_size);
                } else if (dio->io_type == ZIO_TYPE_WRITE) {
                        bcopy(dio->io_data, (char *)aio->io_data +
                            (dio->io_offset - aio->io_offset),
                            dio->io_size);
                }

                zio_add_child(dio, aio);
                vdev_queue_io_remove(vq, dio);
                zio_vdev_io_bypass(dio);
                zio_execute(dio);
        } while (dio != last);

        return (aio);
}
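
/*
 * To illustrate the common case above: three 8K async writes queued at
 * offsets 0K, 8K, and 16K are mutually adjacent (IO_GAP == 0), so they are
 * delegated to a single 24K aggregate i/o. Reads may additionally bridge
 * gaps of up to zfs_vdev_read_gap_limit between neighbors.
 */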

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
        zio_t *zio, *aio;
        zio_priority_t p;
        avl_index_t idx;
        vdev_queue_class_t *vqc;
        zio_t search;

again:
        ASSERT(MUTEX_HELD(&vq->vq_lock));

        p = vdev_queue_class_to_issue(vq);

        if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
                /* No eligible queued i/os */
                return (NULL);
        }

        /*
         * For LBA-ordered queues (async / scrub), issue the i/o which follows
         * the most recently issued i/o in LBA (offset) order.
         *
         * For FIFO queues (sync), issue the i/o with the lowest timestamp.
         */
        vqc = &vq->vq_class[p];
        search.io_timestamp = 0;
        search.io_offset = vq->vq_last_offset + 1;
        VERIFY3P(avl_find(&vqc->vqc_queued_tree, &search, &idx), ==, NULL);
        zio = avl_nearest(&vqc->vqc_queued_tree, idx, AVL_AFTER);
        if (zio == NULL)
                zio = avl_first(&vqc->vqc_queued_tree);
        ASSERT3U(zio->io_priority, ==, p);

        aio = vdev_queue_aggregate(vq, zio);
        if (aio != NULL)
                zio = aio;
        else
                vdev_queue_io_remove(vq, zio);

        /*
         * If the I/O is or was optional and therefore has no data, we need to
         * simply discard it. We need to drop the vdev queue's lock to avoid a
         * deadlock that we could encounter since this I/O will complete
         * immediately.
         */
        if (zio->io_flags & ZIO_FLAG_NODATA) {
                mutex_exit(&vq->vq_lock);
                zio_vdev_io_bypass(zio);
                zio_execute(zio);
                mutex_enter(&vq->vq_lock);
                goto again;
        }

        vdev_queue_pending_add(vq, zio);
        vq->vq_last_offset = zio->io_offset;

        return (zio);
}

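/*
 * An illustrative walk of the elevator above: if vq_last_offset is 100 and
 * an LBA-sorted class has i/os queued at offsets 40, 120, and 200, they are
 * issued in the order 120, 200, then 40 (wrapping around via avl_first()).
 * FIFO classes, keyed by timestamp, simply issue in arrival order.
 */
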
zio_t *
vdev_queue_io(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
                return (zio);

        /*
         * Children i/os inherit their parent's priority, which might
         * not match the child's i/o type. Fix it up here.
         */
        if (zio->io_type == ZIO_TYPE_READ) {
                if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_SCRUB)
                        zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE);
                if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE)
                        zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
        }

        zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

        mutex_enter(&vq->vq_lock);
        zio->io_timestamp = gethrtime();
        vdev_queue_io_add(vq, zio);
        nio = vdev_queue_io_to_issue(vq);
        mutex_exit(&vq->vq_lock);

        if (nio == NULL)
                return (NULL);

        if (nio->io_done == vdev_queue_agg_io_done) {
                zio_nowait(nio);
                return (NULL);
        }

        return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        if (zio_injection_enabled)
                delay(SEC_TO_TICK(zio_handle_io_delay(zio)));

        mutex_enter(&vq->vq_lock);

        vdev_queue_pending_remove(vq, zio);

        vq->vq_io_complete_ts = gethrtime();

        while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
                mutex_exit(&vq->vq_lock);
                if (nio->io_done == vdev_queue_agg_io_done) {
                        zio_nowait(nio);
                } else {
                        zio_vdev_io_reissue(nio);
                        zio_execute(nio);
                }
                mutex_enter(&vq->vq_lock);
        }

        mutex_exit(&vq->vq_lock);
}