/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>

/*
 * ZFS I/O Scheduler
 * ---------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
 * I/O scheduler determines when and in what order those operations are
 * issued.  The I/O scheduler divides operations into five I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, and scrub/resilver.  Each queue defines the minimum and
 * maximum number of concurrent operations that may be issued to the device.
 * In addition, the device has an aggregate maximum.  Note that the sum of the
 * per-queue minimums must not exceed the aggregate maximum, and if the
 * aggregate maximum is equal to or greater than the sum of the per-queue
 * maximums, the per-queue minimum has no effect.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers.  Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied.  Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied.  Iteration through the I/O classes is
 * done in the order specified above.  No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class.  Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c).  Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero.  Rather than servicing them as quickly as possible,
 * the I/O scheduler changes the maximum number of active async write i/os
 * according to the amount of dirty data in the pool (see dsl_pool.c).  Since
 * both throughput and latency typically increase with the number of
 * concurrent operations issued to physical devices, reducing the burstiness
 * in the number of concurrent operations also stabilizes the response time of
 * operations from other -- and in particular synchronous -- queues.  In broad
 * strokes, the I/O scheduler will issue more concurrent operations from the
 * async write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum.  As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent.  If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle.  In this case, we
 * must further throttle incoming writes (see dmu_tx_delay() for details).
 */

/*
 * The maximum number of i/os active to each device.  Ideally, this will be >=
 * the sum of each queue's max_active.  It must be at least the sum of each
 * queue's min_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device.  If the
 * sum of the queue's max_active is < zfs_vdev_max_active, then the
 * min_active comes into play.  We will send min_active from each queue,
 * and then select from queues in the order defined by zio_priority_t.
 *
 * In general, smaller max_active's will lead to lower latency of synchronous
 * operations.  Larger max_active's may lead to higher overall throughput,
 * depending on underlying storage.
 *
 * The ratio of the queues' max_actives determines the balance of performance
 * between reads, writes, and scrubs.  E.g., increasing
 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 * more quickly, but reads and writes to have higher latency and lower
 * throughput.
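 *
 * As a rough illustration (not a guarantee, since the other classes and
 * zfs_vdev_max_active also matter): with the defaults below, a saturated
 * vdev can have up to 10 sync reads and up to 2 scrub i/os outstanding at
 * once, so scrub work is held to roughly one sixth of the device's read
 * concurrency while foreground reads are pending; raising
 * zfs_vdev_scrub_max_active shifts that balance toward the scrub.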
 */
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 1;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;
uint32_t zfs_vdev_removal_min_active = 1;
uint32_t zfs_vdev_removal_max_active = 2;
uint32_t zfs_vdev_initializing_min_active = 1;
uint32_t zfs_vdev_initializing_max_active = 1;

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active.  The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = 1 << 20;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Define the queue depth percentage for each top-level vdev.  This percentage
 * is used in conjunction with zfs_vdev_async_write_max_active to determine
 * how many allocations a specific top-level vdev should handle.  Once the
 * queue depth reaches zfs_vdev_queue_depth_pct *
 * zfs_vdev_async_write_max_active / 100, the allocator will stop allocating
 * blocks on that top-level device.
 * The default kernel setting is 1000% which will yield 100 allocations per
 * device.  For userland testing, the default setting is 300% which equates
 * to 30 allocations per device.
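 *
 * As a worked example using the defaults in this file: with
 * zfs_vdev_async_write_max_active of 10, the kernel default of 1000% works
 * out to 1000 * 10 / 100 = 100 outstanding allocations per top-level vdev,
 * and the userland default of 300% works out to 300 * 10 / 100 = 30.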
 */
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif

/*
 * When performing allocations for a given metaslab, we want to make sure that
 * there are enough IOs to aggregate together to improve throughput.  We want
 * to ensure that there are at least 128k worth of IOs that can be aggregated,
 * and we assume that the average allocation size is 4k, so we need the queue
 * depth to be 32 per allocator to get good aggregation of sequential writes.
 */
int zfs_vdev_def_queue_depth = 32;


int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = (const zio_t *)x1;
	const zio_t *z2 = (const zio_t *)x2;

	int cmp = AVL_CMP(z1->io_offset, z2->io_offset);

	if (likely(cmp))
		return (cmp);

	return (AVL_PCMP(z1, z2));
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
	return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
	ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
	if (t == ZIO_TYPE_READ)
		return (&vq->vq_read_offset_tree);
	else
		return (&vq->vq_write_offset_tree);
}

int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = (const zio_t *)x1;
	const zio_t *z2 = (const zio_t *)x2;

	int cmp = AVL_CMP(z1->io_timestamp, z2->io_timestamp);

	if (likely(cmp))
		return (cmp);

	return (AVL_PCMP(z1, z2));
}

void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
	vq->vq_vdev = vd;

	avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		int (*compfn) (const void *, const void *);

		/*
		 * The synchronous i/o queues are dispatched in FIFO rather
		 * than LBA order.  This provides more consistent latency for
		 * these i/os.
		 */
		if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
			compfn = vdev_queue_timestamp_compare;
		else
			compfn = vdev_queue_offset_compare;

		avl_create(vdev_queue_class_tree(vq, p), compfn,
		    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	}
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
		avl_destroy(vdev_queue_class_tree(vq, p));
	avl_destroy(&vq->vq_active_tree);
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));

	mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_queued--;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_active++;
	if (spa->spa_iokstat != NULL)
		kstat_runq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_active--;
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		kstat_runq_exit(spa->spa_iokstat->ks_data);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
	}
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
			abd_copy_off(pio->io_abd, aio->io_abd,
			    0, pio->io_offset - aio->io_offset, pio->io_size);
		}
	}

	abd_free(aio->io_abd);
}

static int
vdev_queue_class_min_active(zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_min_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_min_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_min_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (zfs_vdev_async_write_min_active);
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_min_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_min_active);
	case ZIO_PRIORITY_INITIALIZING:
		return (zfs_vdev_initializing_min_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

static int
vdev_queue_max_async_writes(spa_t *spa)
{
	int writes;
	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
	uint64_t min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint64_t max_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_max_dirty_percent / 100;

	/*
	 * Sync tasks correspond to interactive user actions.  To reduce the
	 * execution time of those actions we push data out as fast as possible.
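	 *
	 * In the normal case handled below, the count is interpolated
	 * between the min and max.  As a rough example with the default
	 * tunables (min_active 1, max_active 10, thresholds at 30% and
	 * 60% of zfs_dirty_data_max), a pool that is 45% dirty would be
	 * allowed about (45 - 30) / (60 - 30) * (10 - 1) + 1 = 5 or 6
	 * concurrent async writes, depending on integer truncation.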
	 */
	if (spa_has_pending_synctask(spa)) {
		return (zfs_vdev_async_write_max_active);
	}

	if (dirty < min_bytes)
		return (zfs_vdev_async_write_min_active);
	if (dirty > max_bytes)
		return (zfs_vdev_async_write_max_active);

	/*
	 * linear interpolation:
	 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
	 * move right by min_bytes
	 * move up by min_writes
	 */
	writes = (dirty - min_bytes) *
	    (zfs_vdev_async_write_max_active -
	    zfs_vdev_async_write_min_active) /
	    (max_bytes - min_bytes) +
	    zfs_vdev_async_write_min_active;
	ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
	ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
	return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_max_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_max_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_max_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (vdev_queue_max_async_writes(spa));
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_max_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_max_active);
	case ZIO_PRIORITY_INITIALIZING:
		return (zfs_vdev_initializing_max_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
	spa_t *spa = vq->vq_vdev->vdev_spa;
	zio_priority_t p;

	if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
		return (ZIO_PRIORITY_NUM_QUEUEABLE);

	/* find a queue that has not reached its minimum # outstanding i/os */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_min_active(p))
			return (p);
	}

	/*
	 * If we haven't found a queue, look for one that hasn't reached its
	 * maximum # outstanding i/os.
	 */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_max_active(spa, p))
			return (p);
	}

	/* No eligible queued i/os */
	return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))

static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
	zio_t *first, *last, *aio, *dio, *mandatory, *nio;
	zio_link_t *zl = NULL;
	uint64_t maxgap = 0;
	uint64_t size;
	boolean_t stretch = B_FALSE;
	avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
	enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;

	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
		return (NULL);

	first = last = zio;

	if (zio->io_type == ZIO_TYPE_READ)
		maxgap = zfs_vdev_read_gap_limit;

	/*
	 * We can aggregate I/Os that are sufficiently adjacent and of
	 * the same flavor, as expressed by the AGG_INHERIT flags.
	 * The latter requirement is necessary so that certain
	 * attributes of the I/O, such as whether it's a normal I/O
	 * or a scrub/resilver, can be preserved in the aggregate.
	 * We can include optional I/Os, but don't allow them
	 * to begin a range as they add no benefit in that situation.
	 */

	/*
	 * We keep track of the last non-optional I/O.
	 */
	mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

	/*
	 * Walk backwards through sufficiently contiguous I/Os
	 * recording the last non-optional I/O.
	 */
	while ((dio = AVL_PREV(t, first)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(dio, first) <= maxgap &&
	    dio->io_type == zio->io_type) {
		first = dio;
		if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = first;
	}

	/*
	 * Skip any initial optional I/Os.
	 */
	while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
		first = AVL_NEXT(t, first);
		ASSERT(first != NULL);
	}

	/*
	 * Walk forward through sufficiently contiguous I/Os.
	 * The aggregation limit does not apply to optional i/os, so that
	 * we can issue contiguous writes even if they are larger than the
	 * aggregation limit.
	 */
	while ((dio = AVL_NEXT(t, last)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
	    (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
	    IO_GAP(last, dio) <= maxgap &&
	    dio->io_type == zio->io_type) {
		last = dio;
		if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = last;
	}

	/*
	 * Now that we've established the range of the I/O aggregation
	 * we must decide what to do with trailing optional I/Os.
	 * For reads, there's nothing to do.  While we are unable to
	 * aggregate further, it's possible that a trailing optional
	 * I/O would allow the underlying device to aggregate with
	 * subsequent I/Os.  We must therefore determine if the next
	 * non-optional I/O is close enough to make aggregation
	 * worthwhile.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
		zio_t *nio = last;
		while ((dio = AVL_NEXT(t, nio)) != NULL &&
		    IO_GAP(nio, dio) == 0 &&
		    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
			nio = dio;
			if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
				stretch = B_TRUE;
				break;
			}
		}
	}

	if (stretch) {
		/*
		 * We are going to include an optional io in our aggregated
		 * span, thus closing the write gap.  Only mandatory i/os can
		 * start aggregated spans, so make sure that the next i/o
		 * after our span is mandatory.
		 */
		dio = AVL_NEXT(t, last);
		dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
	} else {
		/* do not include the optional i/o */
		while (last != mandatory && last != first) {
			ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
			last = AVL_PREV(t, last);
			ASSERT(last != NULL);
		}
	}

	if (first == last)
		return (NULL);

	size = IO_SPAN(first, last);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);

	aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
	    abd_alloc_for_io(size, B_TRUE), size, first->io_type,
	    zio->io_priority, flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
	    vdev_queue_agg_io_done, NULL);
	aio->io_timestamp = first->io_timestamp;

	nio = first;
	do {
		dio = nio;
		nio = AVL_NEXT(t, dio);
		ASSERT3U(dio->io_type, ==, aio->io_type);

		if (dio->io_flags & ZIO_FLAG_NODATA) {
			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
			abd_zero_off(aio->io_abd,
			    dio->io_offset - aio->io_offset, dio->io_size);
		} else if (dio->io_type == ZIO_TYPE_WRITE) {
			abd_copy_off(aio->io_abd, dio->io_abd,
			    dio->io_offset - aio->io_offset, 0, dio->io_size);
		}

		zio_add_child(dio, aio);
		vdev_queue_io_remove(vq, dio);
	} while (dio != last);

	/*
	 * We need to drop the vdev queue's lock to avoid a deadlock that we
	 * could encounter since this I/O will complete immediately.
	 */
	mutex_exit(&vq->vq_lock);
	while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
		zio_vdev_io_bypass(dio);
		zio_execute(dio);
	}
	mutex_enter(&vq->vq_lock);

	return (aio);
}

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
	zio_t *zio, *aio;
	zio_priority_t p;
	avl_index_t idx;
	avl_tree_t *tree;
	zio_t search;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	p = vdev_queue_class_to_issue(vq);

	if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
		/* No eligible queued i/os */
		return (NULL);
	}

	/*
	 * For LBA-ordered queues (async / scrub / initializing), issue the
	 * i/o which follows the most recently issued i/o in LBA (offset) order.
	 *
	 * For FIFO queues (sync), issue the i/o with the lowest timestamp.
	 */
	tree = vdev_queue_class_tree(vq, p);
	search.io_timestamp = 0;
	search.io_offset = vq->vq_last_offset + 1;
	VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
	zio = avl_nearest(tree, idx, AVL_AFTER);
	if (zio == NULL)
		zio = avl_first(tree);
	ASSERT3U(zio->io_priority, ==, p);

	aio = vdev_queue_aggregate(vq, zio);
	if (aio != NULL)
		zio = aio;
	else
		vdev_queue_io_remove(vq, zio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it.  We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (zio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(zio);
		zio_execute(zio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	vdev_queue_pending_add(vq, zio);
	vq->vq_last_offset = zio->io_offset;

	return (zio);
}

zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	/*
	 * Children i/os inherit their parent's priority, which might
	 * not match the child's i/o type.  Fix it up here.
	 */
	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_SCRUB &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
		    zio->io_priority != ZIO_PRIORITY_INITIALIZING)
			zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
		    zio->io_priority != ZIO_PRIORITY_INITIALIZING)
			zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	mutex_enter(&vq->vq_lock);
	zio->io_timestamp = gethrtime();
	vdev_queue_io_add(vq, zio);
	nio = vdev_queue_io_to_issue(vq);
	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	mutex_enter(&vq->vq_lock);

	vdev_queue_pending_remove(vq, zio);

	vq->vq_io_complete_ts = gethrtime();

	while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}

void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	avl_tree_t *tree;

	/*
	 * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate
	 * zio code to issue IOs without adding them to the vdev queue.  In
	 * this case, the zio is already going to be issued as quickly as
	 * possible and so it doesn't need any reprioritization to help.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW)
		return;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (priority != ZIO_PRIORITY_SYNC_READ &&
		    priority != ZIO_PRIORITY_ASYNC_READ &&
		    priority != ZIO_PRIORITY_SCRUB)
			priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (priority != ZIO_PRIORITY_SYNC_WRITE &&
		    priority != ZIO_PRIORITY_ASYNC_WRITE)
			priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	mutex_enter(&vq->vq_lock);

	/*
	 * If the zio is in none of the queues we can simply change
	 * the priority.  If the zio is waiting to be submitted we must
	 * remove it from the queue and re-insert it with the new priority.
	 * Otherwise, the zio is currently active and we cannot change its
	 * priority.
	 */
	tree = vdev_queue_class_tree(vq, zio->io_priority);
	if (avl_find(tree, zio, NULL) == zio) {
		avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
		zio->io_priority = priority;
		avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	} else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
		zio->io_priority = priority;
	}

	mutex_exit(&vq->vq_lock);
}