/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>

/*
 * ZFS I/O Scheduler
 * ---------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
 * I/O scheduler determines when and in what order those operations are
 * issued.  The I/O scheduler divides operations into five I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, and scrub/resilver.  Each queue defines the minimum and
 * maximum number of concurrent operations that may be issued to the device.
 * In addition, the device has an aggregate maximum.  Note that the sum of the
 * per-queue minimums must not exceed the aggregate maximum, and if the
 * aggregate maximum is equal to or greater than the sum of the per-queue
 * maximums, the per-queue minimum has no effect.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers.  Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied.  Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied.  Iteration through the I/O classes is
 * done in the order specified above.  No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class.  Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c).  Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero.  Rather than servicing them as quickly as possible,
 * the I/O scheduler changes the maximum number of active async write i/os
 * according to the amount of dirty data in the pool (see dsl_pool.c).  Since
 * both throughput and latency typically increase with the number of
 * concurrent operations issued to physical devices, reducing the burstiness
 * in the number of concurrent operations also stabilizes the response time of
 * operations from other -- and in particular synchronous -- queues.  In broad
 * strokes, the I/O scheduler will issue more concurrent operations from the
 * async write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum.  As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent.  If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle.  In this case, we
 * must further throttle incoming writes (see dmu_tx_delay() for details).
 */
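
/*
 * As a worked example of the function above (illustrative arithmetic only,
 * assuming zfs_dirty_data_max is set to 4GB and the default tunables defined
 * below: min_active = 1, max_active = 10, dirty thresholds 30% and 60%):
 * with 1.8GB (45%) of dirty data we are halfway up the slope, so
 * vdev_queue_max_async_writes() below computes
 * (1.8G - 1.2G) * (10 - 1) / (2.4G - 1.2G) + 1 = 5 (integer division)
 * concurrent async writes per device.
 */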

/*
 * The maximum number of i/os active to each device.  Ideally, this will be >=
 * the sum of each queue's max_active.  It must be at least the sum of each
 * queue's min_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device.  If the
 * sum of the queue's max_active is < zfs_vdev_max_active, then the
 * min_active comes into play.  We will send min_active from each queue,
 * and then select from queues in the order defined by zio_priority_t.
 *
 * In general, smaller max_active's will lead to lower latency of synchronous
 * operations.  Larger max_active's may lead to higher overall throughput,
 * depending on underlying storage.
 *
 * The ratio of the queues' max_actives determines the balance of performance
 * between reads, writes, and scrubs.  E.g., increasing
 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 * more quickly, but reads and writes to have higher latency and lower
 * throughput.
 */
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 1;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active.  The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_OLD_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Define the queue depth percentage for each top-level.  This percentage is
 * used in conjunction with zfs_vdev_async_write_max_active to determine how
 * many allocations a specific top-level vdev should handle.  Once the queue
 * depth reaches zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active /
 * 100, the allocator will stop allocating blocks on that top-level device.
 * The default kernel setting is 1000% which will yield 100 allocations per
 * device.  For userland testing, the default setting is 300% which equates
 * to 30 allocations per device.
 */
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif
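
/*
 * For example (illustrative arithmetic based on the defaults above): in the
 * kernel, zfs_vdev_queue_depth_pct (1000) * zfs_vdev_async_write_max_active
 * (10) / 100 = 100 queued allocations per top-level vdev before the
 * allocator moves on; in userland, 300 * 10 / 100 = 30.
 */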

int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	/*
	 * Tie-break equal offsets by zio address so that distinct zios
	 * always compare as distinct AVL nodes.
	 */
	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
	return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
	ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
	if (t == ZIO_TYPE_READ)
		return (&vq->vq_read_offset_tree);
	else
		return (&vq->vq_write_offset_tree);
}

int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_timestamp < z2->io_timestamp)
		return (-1);
	if (z1->io_timestamp > z2->io_timestamp)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
	vq->vq_vdev = vd;

	avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		int (*compfn) (const void *, const void *);

		/*
		 * The synchronous i/o queues are dispatched in FIFO rather
		 * than LBA order.  This provides more consistent latency for
		 * these i/os.
		 */
		if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
			compfn = vdev_queue_timestamp_compare;
		else
			compfn = vdev_queue_offset_compare;

		avl_create(vdev_queue_class_tree(vq, p), compfn,
		    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	}
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
		avl_destroy(vdev_queue_class_tree(vq, p));
	avl_destroy(&vq->vq_active_tree);
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));

	mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_queued--;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_active++;
	if (spa->spa_iokstat != NULL)
		kstat_runq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_active--;
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		kstat_runq_exit(spa->spa_iokstat->ks_data);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
	}
	mutex_exit(&spa->spa_iokstat_lock);
}
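
/*
 * Completion callback for aggregate i/os: vdev_queue_aggregate() below makes
 * each original zio a parent of the aggregate via zio_add_child(), so for
 * aggregate reads we walk those parents here and copy each one's slice of
 * the aggregate buffer back out before freeing it.
 */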
static void
vdev_queue_agg_io_done(zio_t *aio)
{
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
			bcopy((char *)aio->io_data + (pio->io_offset -
			    aio->io_offset), pio->io_data, pio->io_size);
		}
	}

	zio_buf_free(aio->io_data, aio->io_size);
}

static int
vdev_queue_class_min_active(zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_min_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_min_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_min_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (zfs_vdev_async_write_min_active);
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_min_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}
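
/*
 * Note on the defaults (illustrative arithmetic, not enforced here): the
 * scheduler comment above requires that the sum of the per-queue minimums
 * not exceed the aggregate maximum.  With the defaults the minimums sum to
 * 10 + 10 + 1 + 1 + 1 = 23, well under zfs_vdev_max_active (1000).
 */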

static int
vdev_queue_max_async_writes(spa_t *spa)
{
	int writes;
	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
	uint64_t min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint64_t max_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_max_dirty_percent / 100;

	/*
	 * Sync tasks correspond to interactive user actions.  To reduce the
	 * execution time of those actions we push data out as fast as
	 * possible.
	 */
	if (spa_has_pending_synctask(spa)) {
		return (zfs_vdev_async_write_max_active);
	}

	if (dirty < min_bytes)
		return (zfs_vdev_async_write_min_active);
	if (dirty > max_bytes)
		return (zfs_vdev_async_write_max_active);

	/*
	 * linear interpolation:
	 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
	 * move right by min_bytes
	 * move up by min_writes
	 */
	writes = (dirty - min_bytes) *
	    (zfs_vdev_async_write_max_active -
	    zfs_vdev_async_write_min_active) /
	    (max_bytes - min_bytes) +
	    zfs_vdev_async_write_min_active;
	ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
	ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
	return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_max_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_max_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_max_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (vdev_queue_max_async_writes(spa));
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_max_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
	spa_t *spa = vq->vq_vdev->vdev_spa;
	zio_priority_t p;

	if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
		return (ZIO_PRIORITY_NUM_QUEUEABLE);

	/* find a queue that has not reached its minimum # outstanding i/os */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_min_active(p))
			return (p);
	}

	/*
	 * If we haven't found a queue, look for one that hasn't reached its
	 * maximum # outstanding i/os.
	 */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_max_active(spa, p))
			return (p);
	}

	/* No eligible queued i/os */
	return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
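
/*
 * Example (illustrative values): for fio = {offset 0, size 8K} and
 * lio = {offset 24K, size 8K}, IO_SPAN(fio, lio) = 24K + 8K - 0 = 32K and
 * IO_GAP(fio, lio) = 24K - (0 + 8K) = 16K.  A 16K read gap is within the
 * default zfs_vdev_read_gap_limit (32K), so these two reads may be
 * aggregated into a single 32K read.
 */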

static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
	zio_t *first, *last, *aio, *dio, *mandatory, *nio;
	uint64_t maxgap = 0;
	uint64_t size;
	boolean_t stretch = B_FALSE;
	avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
	enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;

	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
		return (NULL);

	first = last = zio;

	if (zio->io_type == ZIO_TYPE_READ)
		maxgap = zfs_vdev_read_gap_limit;

	/*
	 * We can aggregate I/Os that are sufficiently adjacent and of
	 * the same flavor, as expressed by the AGG_INHERIT flags.
	 * The latter requirement is necessary so that certain
	 * attributes of the I/O, such as whether it's a normal I/O
	 * or a scrub/resilver, can be preserved in the aggregate.
	 * We can include optional I/Os, but don't allow them
	 * to begin a range as they add no benefit in that situation.
	 */

	/*
	 * We keep track of the last non-optional I/O.
	 */
	mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

	/*
	 * Walk backwards through sufficiently contiguous I/Os
	 * recording the last non-optional I/O.
	 */
	while ((dio = AVL_PREV(t, first)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(dio, first) <= maxgap) {
		first = dio;
		if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = first;
	}

	/*
	 * Skip any initial optional I/Os.
	 */
	while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
		first = AVL_NEXT(t, first);
		ASSERT(first != NULL);
	}

	/*
	 * Walk forward through sufficiently contiguous I/Os.
	 */
	while ((dio = AVL_NEXT(t, last)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(last, dio) <= maxgap) {
		last = dio;
		if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = last;
	}

	/*
	 * Now that we've established the range of the I/O aggregation
	 * we must decide what to do with trailing optional I/Os.
	 * For reads, there's nothing to do.  While we are unable to
	 * aggregate further, it's possible that a trailing optional
	 * I/O would allow the underlying device to aggregate with
	 * subsequent I/Os.  We must therefore determine if the next
	 * non-optional I/O is close enough to make aggregation
	 * worthwhile.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
		zio_t *nio = last;
		while ((dio = AVL_NEXT(t, nio)) != NULL &&
		    IO_GAP(nio, dio) == 0 &&
		    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
			nio = dio;
			if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
				stretch = B_TRUE;
				break;
			}
		}
	}

	if (stretch) {
		/* This may be a no-op. */
		dio = AVL_NEXT(t, last);
		dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
	} else {
		while (last != mandatory && last != first) {
			ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
			last = AVL_PREV(t, last);
			ASSERT(last != NULL);
		}
	}

	if (first == last)
		return (NULL);

	size = IO_SPAN(first, last);
	ASSERT3U(size, <=, zfs_vdev_aggregation_limit);

	aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
	    zio_buf_alloc(size), size, first->io_type, zio->io_priority,
	    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
	    vdev_queue_agg_io_done, NULL);
	aio->io_timestamp = first->io_timestamp;

	nio = first;
	do {
		dio = nio;
		nio = AVL_NEXT(t, dio);
		ASSERT3U(dio->io_type, ==, aio->io_type);

		if (dio->io_flags & ZIO_FLAG_NODATA) {
			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
			bzero((char *)aio->io_data + (dio->io_offset -
			    aio->io_offset), dio->io_size);
		} else if (dio->io_type == ZIO_TYPE_WRITE) {
			bcopy(dio->io_data, (char *)aio->io_data +
			    (dio->io_offset - aio->io_offset),
			    dio->io_size);
		}

		zio_add_child(dio, aio);
		vdev_queue_io_remove(vq, dio);
		zio_vdev_io_bypass(dio);
		zio_execute(dio);
	} while (dio != last);

	return (aio);
}
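
/*
 * Illustrative walk-through of vdev_queue_aggregate() (hypothetical queue
 * contents): suppose three mandatory 4K writes are queued at offsets 0, 4K,
 * and 8K.  Starting from the middle one, the backward walk extends the range
 * to offset 0 and the forward walk to offset 8K; one 12K aggregate zio is
 * allocated, each child's data is copied into it, and each child is taken
 * off the queue and re-executed via zio_vdev_io_bypass()/zio_execute(), so
 * only the aggregate is issued to the device.
 */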

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
	zio_t *zio, *aio;
	zio_priority_t p;
	avl_index_t idx;
	avl_tree_t *tree;
	zio_t search;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	p = vdev_queue_class_to_issue(vq);

	if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
		/* No eligible queued i/os */
		return (NULL);
	}

	/*
	 * For LBA-ordered queues (async / scrub), issue the i/o which follows
	 * the most recently issued i/o in LBA (offset) order.
	 *
	 * For FIFO queues (sync), issue the i/o with the lowest timestamp.
	 */
	tree = vdev_queue_class_tree(vq, p);
	search.io_timestamp = 0;
	search.io_offset = vq->vq_last_offset + 1;
	VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
	zio = avl_nearest(tree, idx, AVL_AFTER);
	if (zio == NULL)
		zio = avl_first(tree);
	ASSERT3U(zio->io_priority, ==, p);

	aio = vdev_queue_aggregate(vq, zio);
	if (aio != NULL)
		zio = aio;
	else
		vdev_queue_io_remove(vq, zio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it.  We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (zio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(zio);
		zio_execute(zio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	vdev_queue_pending_add(vq, zio);
	vq->vq_last_offset = zio->io_offset;

	return (zio);
}
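
/*
 * vdev_queue_io() and vdev_queue_io_done() below are the scheduler's entry
 * points from the zio pipeline (see the vdev i/o stages in zio.c): the
 * former queues a zio and possibly returns a different one to issue; the
 * latter accounts for a completed zio and issues whatever i/os the
 * completion made eligible.
 */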

zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	/*
	 * Children i/os inherit their parent's priority, which might
	 * not match the child's i/o type.  Fix it up here.
	 */
	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_SCRUB)
			zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE)
			zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	mutex_enter(&vq->vq_lock);
	zio->io_timestamp = gethrtime();
	vdev_queue_io_add(vq, zio);
	nio = vdev_queue_io_to_issue(vq);
	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	mutex_enter(&vq->vq_lock);

	vdev_queue_pending_remove(vq, zio);

	vq->vq_io_complete_ts = gethrtime();

	while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}