/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>

/*
 * ZFS I/O Scheduler
 * ---------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios. The
 * I/O scheduler determines when and in what order those operations are
 * issued. The I/O scheduler divides operations into five I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, and scrub/resilver. Each queue defines the minimum and
 * maximum number of concurrent operations that may be issued to the device.
 * In addition, the device has an aggregate maximum. Note that the sum of the
 * per-queue minimums must not exceed the aggregate maximum, and if the
 * aggregate maximum is equal to or greater than the sum of the per-queue
 * maximums, the per-queue minimum has no effect.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers. Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied. Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied. Iteration through the I/O classes is
 * done in the order specified above. No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class. Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c). Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero. Rather than servicing them as quickly as possible,
 * the I/O scheduler changes the maximum number of active async write i/os
 * according to the amount of dirty data in the pool (see dsl_pool.c). Since
 * both throughput and latency typically increase with the number of
 * concurrent operations issued to physical devices, reducing the burstiness
 * in the number of concurrent operations also stabilizes the response time of
 * operations from other -- and in particular synchronous -- queues. In broad
 * strokes, the I/O scheduler will issue more concurrent operations from the
 * async write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *         |                   o---------| <-- zfs_vdev_async_write_max_active
 *    ^    |                  /^         |
 *    |    |                 / |         |
 *  active |                /  |         |
 *  I/O    |               /   |         |
 *  count  |              /    |         |
 *         |             /     |         |
 *         |------------o      |         | <-- zfs_vdev_async_write_min_active
 *        0|____________^______|_________|
 *         0%           |      |         100% of zfs_dirty_data_max
 *                      |      |
 *                      |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                      `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum. As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle. In this case, we
 * must further throttle incoming writes (see dmu_tx_delay() for details).
 */
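
/*
 * An illustrative worked example of the function above, using the default
 * tunables defined below (min_active = 1, max_active = 10, min dirty
 * percent = 30, max dirty percent = 60) and an assumed, purely illustrative
 * zfs_dirty_data_max of 4 GiB:
 *
 *	0.8 GiB dirty (20%, below the minimum): 1 concurrent async write
 *	1.8 GiB dirty (45%, mid-slope):
 *	    1 + (45 - 30) * (10 - 1) / (60 - 30) = 5 concurrent async
 *	    writes (the integer arithmetic truncates)
 *	2.8 GiB dirty (70%, above the maximum): 10 concurrent async writes,
 *	    and incoming writes are additionally throttled by dmu_tx_delay()
 */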

/*
 * The maximum number of i/os active to each device. Ideally, this will be >=
 * the sum of each queue's max_active. It must be at least the sum of each
 * queue's min_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device. If the
 * sum of the queue's max_active is < zfs_vdev_max_active, then the
 * min_active comes into play. We will send min_active from each queue,
 * and then select from queues in the order defined by zio_priority_t.
 *
 * In general, smaller max_active's will lead to lower latency of synchronous
 * operations. Larger max_active's may lead to higher overall throughput,
 * depending on underlying storage.
 *
 * The ratio of the queues' max_actives determines the balance of performance
 * between reads, writes, and scrubs. E.g., increasing
 * zfs_vdev_scrub_max_active will cause a scrub or resilver to complete
 * more quickly, but will cause reads and writes to have higher latency and
 * lower throughput.
 */
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 1;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;
uint32_t zfs_vdev_removal_min_active = 1;
uint32_t zfs_vdev_removal_max_active = 2;
uint32_t zfs_vdev_initializing_min_active = 1;
uint32_t zfs_vdev_initializing_max_active = 1;
uint32_t zfs_vdev_trim_min_active = 1;
uint32_t zfs_vdev_trim_max_active = 2;
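
/*
 * With the defaults above, the sum of the per-queue max_actives is
 * 10 + 10 + 3 + 10 + 2 + 2 + 1 + 2 = 40, far below zfs_vdev_max_active
 * (1000). Per the note at the top of this file, the per-queue min_actives
 * therefore have no effect unless these tunables are changed such that the
 * sum of the per-queue maximums exceeds the aggregate maximum.
 */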

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active. When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active. The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = 1 << 20;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;
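
/*
 * An illustrative example of the limits above: two queued 4K reads at
 * offsets 0 and 36K are separated by a 32K gap, which is within
 * zfs_vdev_read_gap_limit, so they may be aggregated into a single 40K
 * read; the gap data is read into the aggregate buffer and simply
 * discarded when the children are completed. Writes aggregate only when
 * strictly adjacent, but a run of optional writes within
 * zfs_vdev_write_gap_limit of the last mandatory write may be used to
 * bridge to a following mandatory write (see vdev_queue_aggregate()).
 */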

/*
 * Define the queue depth percentage for each top-level vdev. This percentage
 * is used in conjunction with zfs_vdev_async_write_max_active to determine
 * how many allocations a specific top-level vdev should handle. Once the
 * queue depth reaches
 * zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active / 100, the
 * allocator will stop allocating blocks on that top-level device.
 * The default kernel setting is 1000% which will yield 100 allocations per
 * device. For userland testing, the default setting is 300% which equates
 * to 30 allocations per device.
 */
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif
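
/*
 * I.e. the allocation cutoff is zfs_vdev_queue_depth_pct *
 * zfs_vdev_async_write_max_active / 100: with the kernel default that is
 * 1000 * 10 / 100 = 100 outstanding allocations per top-level vdev, and
 * with the userland default it is 300 * 10 / 100 = 30.
 */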

/*
 * When performing allocations for a given metaslab, we want to make sure that
 * there are enough IOs to aggregate together to improve throughput. We want to
 * ensure that there are at least 128k worth of IOs that can be aggregated, and
 * we assume that the average allocation size is 4k, so we need the queue depth
 * to be 32 per allocator to get good aggregation of sequential writes.
 */
int zfs_vdev_def_queue_depth = 32;

/*
 * Allow TRIM I/Os to be aggregated. This should normally not be needed since
 * TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
 * by the TRIM code in zfs_trim.c.
 */
int zfs_vdev_aggregate_trim = 0;

int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = (const zio_t *)x1;
	const zio_t *z2 = (const zio_t *)x2;

	int cmp = TREE_CMP(z1->io_offset, z2->io_offset);

	if (likely(cmp))
		return (cmp);

	return (TREE_PCMP(z1, z2));
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
	return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
	ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE || t == ZIO_TYPE_TRIM);
	if (t == ZIO_TYPE_READ)
		return (&vq->vq_read_offset_tree);
	else if (t == ZIO_TYPE_WRITE)
		return (&vq->vq_write_offset_tree);
	else
		return (&vq->vq_trim_offset_tree);
}

int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = (const zio_t *)x1;
	const zio_t *z2 = (const zio_t *)x2;

	int cmp = TREE_CMP(z1->io_timestamp, z2->io_timestamp);

	if (likely(cmp))
		return (cmp);

	return (TREE_PCMP(z1, z2));
}

void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
	vq->vq_vdev = vd;

	avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		int (*compfn) (const void *, const void *);

		/*
		 * The synchronous/trim i/o queues are dispatched in FIFO
		 * rather than LBA order. This provides more consistent
		 * latency for these i/os.
		 */
		if (p == ZIO_PRIORITY_SYNC_READ ||
		    p == ZIO_PRIORITY_SYNC_WRITE ||
		    p == ZIO_PRIORITY_TRIM) {
			compfn = vdev_queue_timestamp_compare;
		} else {
			compfn = vdev_queue_offset_compare;
		}

		avl_create(vdev_queue_class_tree(vq, p), compfn,
		    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	}

	vq->vq_last_offset = 0;
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
		avl_destroy(vdev_queue_class_tree(vq, p));
	avl_destroy(&vq->vq_active_tree);
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM));

	mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_queued--;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_active++;
	if (spa->spa_iokstat != NULL)
		kstat_runq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_active--;
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		kstat_runq_exit(spa->spa_iokstat->ks_data);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
	}
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
			abd_copy_off(pio->io_abd, aio->io_abd,
			    0, pio->io_offset - aio->io_offset, pio->io_size);
		}
	}

	abd_free(aio->io_abd);
}

static int
vdev_queue_class_min_active(zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_min_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_min_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_min_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (zfs_vdev_async_write_min_active);
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_min_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_min_active);
	case ZIO_PRIORITY_INITIALIZING:
		return (zfs_vdev_initializing_min_active);
	case ZIO_PRIORITY_TRIM:
		return (zfs_vdev_trim_min_active);
	default:
		panic("invalid priority %u", p);
	}
}

static int
vdev_queue_max_async_writes(spa_t *spa)
{
	int writes;
	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
	uint64_t min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint64_t max_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_max_dirty_percent / 100;

	/*
	 * Sync tasks correspond to interactive user actions. To reduce the
	 * execution time of those actions we push data out as fast as
	 * possible.
	 */
	if (spa_has_pending_synctask(spa)) {
		return (zfs_vdev_async_write_max_active);
	}

	if (dirty < min_bytes)
		return (zfs_vdev_async_write_min_active);
	if (dirty > max_bytes)
		return (zfs_vdev_async_write_max_active);

	/*
	 * linear interpolation:
	 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
	 * move right by min_bytes
	 * move up by min_writes
	 */
	writes = (dirty - min_bytes) *
	    (zfs_vdev_async_write_max_active -
	    zfs_vdev_async_write_min_active) /
	    (max_bytes - min_bytes) +
	    zfs_vdev_async_write_min_active;
	ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
	ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
	return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_max_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_max_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_max_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (vdev_queue_max_async_writes(spa));
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_max_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_max_active);
	case ZIO_PRIORITY_INITIALIZING:
		return (zfs_vdev_initializing_max_active);
	case ZIO_PRIORITY_TRIM:
		return (zfs_vdev_trim_max_active);
	default:
		panic("invalid priority %u", p);
	}
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
	spa_t *spa = vq->vq_vdev->vdev_spa;
	zio_priority_t p;

	if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
		return (ZIO_PRIORITY_NUM_QUEUEABLE);

	/* find a queue that has not reached its minimum # outstanding i/os */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_min_active(p))
			return (p);
	}

	/*
	 * If we haven't found a queue, look for one that hasn't reached its
	 * maximum # outstanding i/os.
	 */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_max_active(spa, p))
			return (p);
	}

	/* No eligible queued i/os */
	return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
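
/*
 * For example, given fio at offset 0 with size 4K and lio at offset 8K
 * with size 4K: IO_SPAN(fio, lio) = 8K + 4K - 0 = 12K, the full range
 * covered by both i/os, and IO_GAP(fio, lio) = -IO_SPAN(lio, fio) =
 * -(0 + 4K - 8K) = 4K, the hole between the end of fio and the start
 * of lio.
 */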

static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
	zio_t *first, *last, *aio, *dio, *mandatory, *nio;
	zio_link_t *zl = NULL;
	uint64_t maxgap = 0;
	uint64_t size;
	boolean_t stretch = B_FALSE;
	avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
	enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;

	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
		return (NULL);

	/*
	 * While TRIM commands could be aggregated based on offset this
	 * behavior is disabled until it's determined to be beneficial.
	 */
	if (zio->io_type == ZIO_TYPE_TRIM && !zfs_vdev_aggregate_trim)
		return (NULL);

	first = last = zio;

	if (zio->io_type == ZIO_TYPE_READ)
		maxgap = zfs_vdev_read_gap_limit;

	/*
	 * We can aggregate I/Os that are sufficiently adjacent and of
	 * the same flavor, as expressed by the AGG_INHERIT flags.
	 * The latter requirement is necessary so that certain
	 * attributes of the I/O, such as whether it's a normal I/O
	 * or a scrub/resilver, can be preserved in the aggregate.
	 * We can include optional I/Os, but don't allow them
	 * to begin a range as they add no benefit in that situation.
	 */

	/*
	 * We keep track of the last non-optional I/O.
	 */
	mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

	/*
	 * Walk backwards through sufficiently contiguous I/Os
	 * recording the last non-optional I/O.
	 */
	while ((dio = AVL_PREV(t, first)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(dio, first) <= maxgap &&
	    dio->io_type == zio->io_type) {
		first = dio;
		if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = first;
	}

	/*
	 * Skip any initial optional I/Os.
	 */
	while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
		first = AVL_NEXT(t, first);
		ASSERT(first != NULL);
	}

	/*
	 * Walk forward through sufficiently contiguous I/Os.
	 * The aggregation limit does not apply to optional i/os, so that
	 * we can issue contiguous writes even if they are larger than the
	 * aggregation limit.
	 */
	while ((dio = AVL_NEXT(t, last)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
	    (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
	    IO_GAP(last, dio) <= maxgap &&
	    dio->io_type == zio->io_type) {
		last = dio;
		if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = last;
	}

	/*
	 * Now that we've established the range of the I/O aggregation
	 * we must decide what to do with trailing optional I/Os.
	 * For reads, there's nothing to do. For writes, while we are unable
	 * to aggregate further, it's possible that a trailing optional
	 * I/O would allow the underlying device to aggregate with
	 * subsequent I/Os. We must therefore determine if the next
	 * non-optional I/O is close enough to make aggregation
	 * worthwhile.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
		zio_t *nio = last;
		while ((dio = AVL_NEXT(t, nio)) != NULL &&
		    IO_GAP(nio, dio) == 0 &&
		    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
			nio = dio;
			if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
				stretch = B_TRUE;
				break;
			}
		}
	}

	if (stretch) {
		/*
		 * We are going to include an optional io in our aggregated
		 * span, thus closing the write gap. Only mandatory i/os can
		 * start aggregated spans, so make sure that the next i/o
		 * after our span is mandatory.
		 */
		dio = AVL_NEXT(t, last);
		dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
	} else {
		/* do not include the optional i/o */
		while (last != mandatory && last != first) {
			ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
			last = AVL_PREV(t, last);
			ASSERT(last != NULL);
		}
	}

	if (first == last)
		return (NULL);

	size = IO_SPAN(first, last);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);

	aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
	    abd_alloc_for_io(size, B_TRUE), size, first->io_type,
	    zio->io_priority, flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
	    vdev_queue_agg_io_done, NULL);
	aio->io_timestamp = first->io_timestamp;

	nio = first;
	do {
		dio = nio;
		nio = AVL_NEXT(t, dio);
		ASSERT3U(dio->io_type, ==, aio->io_type);

		if (dio->io_flags & ZIO_FLAG_NODATA) {
			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
			abd_zero_off(aio->io_abd,
			    dio->io_offset - aio->io_offset, dio->io_size);
		} else if (dio->io_type == ZIO_TYPE_WRITE) {
			abd_copy_off(aio->io_abd, dio->io_abd,
			    dio->io_offset - aio->io_offset, 0, dio->io_size);
		}

		zio_add_child(dio, aio);
		vdev_queue_io_remove(vq, dio);
	} while (dio != last);

	/*
	 * We need to drop the vdev queue's lock to avoid a deadlock that we
	 * could encounter since this I/O will complete immediately.
	 */
	mutex_exit(&vq->vq_lock);
	while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
		zio_vdev_io_bypass(dio);
		zio_execute(dio);
	}
	mutex_enter(&vq->vq_lock);

	return (aio);
}

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
	zio_t *zio, *aio;
	zio_priority_t p;
	avl_index_t idx;
	avl_tree_t *tree;
	zio_t search;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	p = vdev_queue_class_to_issue(vq);

	if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
		/* No eligible queued i/os */
		return (NULL);
	}

	/*
	 * For LBA-ordered queues (async / scrub / initializing), issue the
	 * i/o which follows the most recently issued i/o in LBA (offset)
	 * order.
	 *
	 * For FIFO queues (sync/trim), issue the i/o with the lowest
	 * timestamp.
	 */
	tree = vdev_queue_class_tree(vq, p);
	search.io_timestamp = 0;
	search.io_offset = vq->vq_last_offset - 1;
	VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
	zio = avl_nearest(tree, idx, AVL_AFTER);
	if (zio == NULL)
		zio = avl_first(tree);
	ASSERT3U(zio->io_priority, ==, p);

	aio = vdev_queue_aggregate(vq, zio);
	if (aio != NULL)
		zio = aio;
	else
		vdev_queue_io_remove(vq, zio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it. We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (zio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(zio);
		zio_execute(zio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	vdev_queue_pending_add(vq, zio);
	vq->vq_last_offset = zio->io_offset + zio->io_size;

	return (zio);
}

zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	/*
	 * Child i/os inherit their parent's priority, which might
	 * not match the child's i/o type. Fix it up here.
	 */
	if (zio->io_type == ZIO_TYPE_READ) {
		ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);

		if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_SCRUB &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
		    zio->io_priority != ZIO_PRIORITY_INITIALIZING) {
			zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
		}
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);

		if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
		    zio->io_priority != ZIO_PRIORITY_INITIALIZING) {
			zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
		}
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_TRIM);
		ASSERT(zio->io_priority == ZIO_PRIORITY_TRIM);
	}

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	mutex_enter(&vq->vq_lock);
	zio->io_timestamp = gethrtime();
	vdev_queue_io_add(vq, zio);
	nio = vdev_queue_io_to_issue(vq);
	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	mutex_enter(&vq->vq_lock);

	vdev_queue_pending_remove(vq, zio);

	zio->io_delta = gethrtime() - zio->io_timestamp;
	vq->vq_io_complete_ts = gethrtime();

	while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}

void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	avl_tree_t *tree;

	/*
	 * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate
	 * zio code to issue IOs without adding them to the vdev queue. In
	 * this case, the zio is already going to be issued as quickly as
	 * possible and so it doesn't need any reprioritization to help.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW)
		return;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (priority != ZIO_PRIORITY_SYNC_READ &&
		    priority != ZIO_PRIORITY_ASYNC_READ &&
		    priority != ZIO_PRIORITY_SCRUB)
			priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (priority != ZIO_PRIORITY_SYNC_WRITE &&
		    priority != ZIO_PRIORITY_ASYNC_WRITE)
			priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	mutex_enter(&vq->vq_lock);

	/*
	 * If the zio is in none of the queues we can simply change
	 * the priority. If the zio is waiting to be submitted we must
	 * remove it from the queue and re-insert it with the new priority.
	 * Otherwise, the zio is currently active and we cannot change its
	 * priority.
	 */
	tree = vdev_queue_class_tree(vq, zio->io_priority);
	if (avl_find(tree, zio, NULL) == zio) {
		spa_t *spa = zio->io_spa;
		zio_priority_t oldpri = zio->io_priority;

		avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
		zio->io_priority = priority;
		avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);

		mutex_enter(&spa->spa_iokstat_lock);
		ASSERT3U(spa->spa_queue_stats[oldpri].spa_queued, >, 0);
		spa->spa_queue_stats[oldpri].spa_queued--;
		spa->spa_queue_stats[zio->io_priority].spa_queued++;
		mutex_exit(&spa->spa_iokstat_lock);
	} else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
		zio->io_priority = priority;
	}

	mutex_exit(&vq->vq_lock);
}

/*
 * As these two methods are only used for load calculations, we're not
 * concerned about getting an incorrect value on 32-bit platforms due to
 * the lack of vq_lock use here; instead we prefer to keep them lock-free
 * for performance.
 */
int
vdev_queue_length(vdev_t *vd)
{
	return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
}

uint64_t
vdev_queue_last_offset(vdev_t *vd)
{
	return (vd->vdev_queue.vq_last_offset);
}