/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>

/*
 * ZFS I/O Scheduler
 * -----------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
 * I/O scheduler determines when and in what order those operations are
 * issued.  The I/O scheduler divides operations into five I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, and scrub/resilver.  Each queue defines the minimum and
 * maximum number of concurrent operations that may be issued to the device.
 * In addition, the device has an aggregate maximum. Note that the sum of the
 * per-queue minimums must not exceed the aggregate maximum, and if the
 * aggregate maximum is equal to or greater than the sum of the per-queue
 * maximums, the per-queue minimum has no effect.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers. Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied. Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied. Iteration through the I/O classes is
 * done in the order specified above. No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class. Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c). Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero. Rather than servicing them as quickly as possible,
 * the I/O scheduler changes the maximum number of active async write i/os
 * according to the amount of dirty data in the pool (see dsl_pool.c). Since
 * both throughput and latency typically increase with the number of
 * concurrent operations issued to physical devices, reducing the burstiness
 * in the number of concurrent operations also stabilizes the response time of
 * operations from other -- and in particular synchronous -- queues. In broad
 * strokes, the I/O scheduler will issue more concurrent operations from the
 * async write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum. As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle. In this case, we
 * must further throttle incoming writes (see dmu_tx_delay() for details).
 */
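/*
 * Worked example of the interpolation above (illustrative arithmetic only;
 * the 4GB figure is hypothetical -- zfs_dirty_data_max is derived from
 * physical memory at boot).  With zfs_dirty_data_max = 4GB and the default
 * tunables below (min_active 1, max_active 10, thresholds 30% and 60%),
 * 1.8GB (45%) of dirty data sits halfway up the slope, and
 * vdev_queue_max_async_writes() computes
 * 1 + (0.6GB * 9) / 1.2GB = 1 + 4 (integer division) = 5 active i/os.
 */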

/*
 * The maximum number of i/os active to each device.  Ideally, this will be >=
 * the sum of each queue's max_active.  It must be at least the sum of each
 * queue's min_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device.  If the
 * sum of the queue's max_active is < zfs_vdev_max_active, then the
 * min_active comes into play.  We will send min_active from each queue,
 * and then select from queues in the order defined by zio_priority_t.
 *
 * In general, smaller max_active's will lead to lower latency of synchronous
 * operations.  Larger max_active's may lead to higher overall throughput,
 * depending on underlying storage.
 *
 * The ratio of the queues' max_actives determines the balance of performance
 * between reads, writes, and scrubs.  E.g., increasing
 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 * more quickly, but reads and writes to have higher latency and lower
 * throughput.
 */
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 1;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;
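/*
 * Sanity check on the defaults above (illustrative arithmetic, not code):
 * the per-queue min_actives sum to 10+10+1+1+1 = 23 and the max_actives to
 * at most 10+10+3+10+2 = 35, both far below zfs_vdev_max_active (1000).
 * Per the note at the top of this file, with the aggregate maximum well
 * above the sum of the per-queue maximums, the per-queue minimums have no
 * effect at stock tunings.
 */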

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active. The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_OLD_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;
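/*
 * For example (illustrative only): queued reads covering [0K, 16K) and
 * [40K, 56K) leave a 24K gap.  Since 24K <= zfs_vdev_read_gap_limit and the
 * 56K total span is under zfs_vdev_aggregation_limit, the two may be issued
 * as one 56K read; the gap bytes are read into the aggregate buffer and
 * simply never copied back to any parent zio.
 */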

/*
 * Compare zios by LBA (offset), breaking ties on the zio's address so that
 * the comparator defines a total order over distinct elements, as the AVL
 * tree requires.
 */
int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
	return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
	ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
	if (t == ZIO_TYPE_READ)
		return (&vq->vq_read_offset_tree);
	else
		return (&vq->vq_write_offset_tree);
}

/*
 * Compare zios by queue-insertion time (FIFO order), again breaking ties
 * on the zio's address.
 */
int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_timestamp < z2->io_timestamp)
		return (-1);
	if (z1->io_timestamp > z2->io_timestamp)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
	vq->vq_vdev = vd;

	avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		int (*compfn) (const void *, const void *);

		/*
		 * The synchronous i/o queues are dispatched in FIFO rather
		 * than LBA order.  This provides more consistent latency for
		 * these i/os.
		 */
		if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
			compfn = vdev_queue_timestamp_compare;
		else
			compfn = vdev_queue_offset_compare;

		avl_create(vdev_queue_class_tree(vq, p), compfn,
		    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	}
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
		avl_destroy(vdev_queue_class_tree(vq, p));
	avl_destroy(&vq->vq_active_tree);
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));

	mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_queued--;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_active++;
	if (spa->spa_iokstat != NULL)
		kstat_runq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_active--;
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		kstat_runq_exit(spa->spa_iokstat->ks_data);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
	}
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
	/*
	 * For an aggregated read, scatter the data back out to each parent
	 * zio's buffer before the shared aggregate buffer is freed.
	 */
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		while ((pio = zio_walk_parents(aio)) != NULL) {
			bcopy((char *)aio->io_data + (pio->io_offset -
			    aio->io_offset), pio->io_data, pio->io_size);
		}
	}

	zio_buf_free(aio->io_data, aio->io_size);
}

static int
vdev_queue_class_min_active(zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_min_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_min_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_min_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (zfs_vdev_async_write_min_active);
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_min_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

static int
vdev_queue_max_async_writes(spa_t *spa)
{
	int writes;
	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
	uint64_t min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint64_t max_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_max_dirty_percent / 100;

	/*
	 * Sync tasks correspond to interactive user actions. To reduce the
	 * execution time of those actions we push data out as fast as possible.
	 */
	if (spa_has_pending_synctask(spa)) {
		return (zfs_vdev_async_write_max_active);
	}

	if (dirty < min_bytes)
		return (zfs_vdev_async_write_min_active);
	if (dirty > max_bytes)
		return (zfs_vdev_async_write_max_active);

	/*
	 * linear interpolation:
	 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
	 * move right by min_bytes
	 * move up by min_writes
	 */
	writes = (dirty - min_bytes) *
	    (zfs_vdev_async_write_max_active -
	    zfs_vdev_async_write_min_active) /
	    (max_bytes - min_bytes) +
	    zfs_vdev_async_write_min_active;
	ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
	ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
	return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_max_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_max_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_max_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (vdev_queue_max_async_writes(spa));
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_max_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
	spa_t *spa = vq->vq_vdev->vdev_spa;
	zio_priority_t p;

	if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
		return (ZIO_PRIORITY_NUM_QUEUEABLE);

	/* find a queue that has not reached its minimum # outstanding i/os */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_min_active(p))
			return (p);
	}

	/*
	 * If we haven't found a queue, look for one that hasn't reached its
	 * maximum # outstanding i/os.
	 */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_max_active(spa, p))
			return (p);
	}

	/* No eligible queued i/os */
	return (ZIO_PRIORITY_NUM_QUEUEABLE);
}
/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
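/*
 * A quick worked example (illustrative only): for fio at offset 8K with
 * size 4K and lio at offset 16K with size 4K, IO_SPAN(fio, lio) =
 * 16K + 4K - 8K = 12K, and IO_GAP(fio, lio) = -(8K + 4K - 16K) = 4K,
 * i.e. the 4K hole between the end of fio and the start of lio.
 */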

static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
	zio_t *first, *last, *aio, *dio, *mandatory, *nio;
	uint64_t maxgap = 0;
	uint64_t size;
	boolean_t stretch = B_FALSE;
	avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
	enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;

	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
		return (NULL);

	first = last = zio;

	if (zio->io_type == ZIO_TYPE_READ)
		maxgap = zfs_vdev_read_gap_limit;

	/*
	 * We can aggregate I/Os that are sufficiently adjacent and of
	 * the same flavor, as expressed by the AGG_INHERIT flags.
	 * The latter requirement is necessary so that certain
	 * attributes of the I/O, such as whether it's a normal I/O
	 * or a scrub/resilver, can be preserved in the aggregate.
	 * We can include optional I/Os, but don't allow them
	 * to begin a range as they add no benefit in that situation.
	 */

	/*
	 * We keep track of the last non-optional I/O.
	 */
	mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

	/*
	 * Walk backwards through sufficiently contiguous I/Os
	 * recording the last non-optional I/O.
	 */
	while ((dio = AVL_PREV(t, first)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(dio, first) <= maxgap) {
		first = dio;
		if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = first;
	}

	/*
	 * Skip any initial optional I/Os.
	 */
	while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
		first = AVL_NEXT(t, first);
		ASSERT(first != NULL);
	}

	/*
	 * Walk forward through sufficiently contiguous I/Os.
	 */
	while ((dio = AVL_NEXT(t, last)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(last, dio) <= maxgap) {
		last = dio;
		if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = last;
	}

	/*
	 * Now that we've established the range of the I/O aggregation
	 * we must decide what to do with trailing optional I/Os.
	 * For reads, there's nothing to do. While we are unable to
	 * aggregate further, it's possible that a trailing optional
	 * I/O would allow the underlying device to aggregate with
	 * subsequent I/Os. We must therefore determine if the next
	 * non-optional I/O is close enough to make aggregation
	 * worthwhile.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
		zio_t *nio = last;
		while ((dio = AVL_NEXT(t, nio)) != NULL &&
		    IO_GAP(nio, dio) == 0 &&
		    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
			nio = dio;
			if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
				stretch = B_TRUE;
				break;
			}
		}
	}
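
	/*
	 * For example (illustrative only): if the tree holds contiguous
	 * writes M1, O1, O2, M2, where M's are mandatory and O's optional,
	 * and the forward walk above stopped at O2, then finding M2 within
	 * zfs_vdev_write_gap_limit of the last mandatory I/O lets us
	 * "stretch" through the trailing optional run below, rather than
	 * trimming the aggregation back to M1.
	 */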

	if (stretch) {
		/* This may be a no-op. */
		dio = AVL_NEXT(t, last);
		dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
	} else {
		while (last != mandatory && last != first) {
			ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
			last = AVL_PREV(t, last);
			ASSERT(last != NULL);
		}
	}

	if (first == last)
		return (NULL);

	size = IO_SPAN(first, last);
	ASSERT3U(size, <=, zfs_vdev_aggregation_limit);

	aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
	    zio_buf_alloc(size), size, first->io_type, zio->io_priority,
	    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
	    vdev_queue_agg_io_done, NULL);
	aio->io_timestamp = first->io_timestamp;

	nio = first;
	do {
		dio = nio;
		nio = AVL_NEXT(t, dio);
		ASSERT3U(dio->io_type, ==, aio->io_type);

		if (dio->io_flags & ZIO_FLAG_NODATA) {
			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
			bzero((char *)aio->io_data + (dio->io_offset -
			    aio->io_offset), dio->io_size);
		} else if (dio->io_type == ZIO_TYPE_WRITE) {
			bcopy(dio->io_data, (char *)aio->io_data +
			    (dio->io_offset - aio->io_offset),
			    dio->io_size);
		}

		zio_add_child(dio, aio);
		vdev_queue_io_remove(vq, dio);
		zio_vdev_io_bypass(dio);
		zio_execute(dio);
	} while (dio != last);

	return (aio);
}

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
	zio_t *zio, *aio;
	zio_priority_t p;
	avl_index_t idx;
	avl_tree_t *tree;
	zio_t search;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	p = vdev_queue_class_to_issue(vq);

	if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
		/* No eligible queued i/os */
		return (NULL);
	}

	/*
	 * For LBA-ordered queues (async / scrub), issue the i/o which follows
	 * the most recently issued i/o in LBA (offset) order.
	 *
	 * For FIFO queues (sync), issue the i/o with the lowest timestamp.
	 */
	tree = vdev_queue_class_tree(vq, p);
	search.io_timestamp = 0;
	search.io_offset = vq->vq_last_offset + 1;
	VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
	zio = avl_nearest(tree, idx, AVL_AFTER);
	if (zio == NULL)
		zio = avl_first(tree);
	ASSERT3U(zio->io_priority, ==, p);

	aio = vdev_queue_aggregate(vq, zio);
	if (aio != NULL)
		zio = aio;
	else
		vdev_queue_io_remove(vq, zio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it. We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (zio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(zio);
		zio_execute(zio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	vdev_queue_pending_add(vq, zio);
	vq->vq_last_offset = zio->io_offset;

	return (zio);
}

zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	/*
	 * Children i/os inherit their parent's priority, which might
	 * not match the child's i/o type.  Fix it up here.
	 */
	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_SCRUB)
			zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE)
			zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	mutex_enter(&vq->vq_lock);
	zio->io_timestamp = gethrtime();
	vdev_queue_io_add(vq, zio);
	nio = vdev_queue_io_to_issue(vq);
	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}
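
/*
 * Summary of the contract implemented above (editorial reading of this
 * file, not original commentary): a NULL return means the zio was queued
 * -- possibly absorbed into an aggregate -- and the scheduler will issue
 * it later; a non-NULL return is the next i/o for the caller to issue
 * itself.  Aggregate i/os are started here with zio_nowait() because no
 * caller waits on them directly.
 */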

void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	mutex_enter(&vq->vq_lock);

	vdev_queue_pending_remove(vq, zio);

	vq->vq_io_complete_ts = gethrtime();

	while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}