/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>

/*
 * These tunables are for performance analysis.
 */

/* The maximum number of I/Os concurrently pending to each device. */
int zfs_vdev_max_pending = 10;

/*
 * The initial number of I/Os pending to each device, before it starts ramping
 * up to zfs_vdev_max_pending.
 */
int zfs_vdev_min_pending = 4;

/*
 * The deadlines are grouped into buckets based on zfs_vdev_time_shift:
 * deadline = pri + (gethrtime() >> time_shift)
 */
int zfs_vdev_time_shift = 29;	/* each bucket is 0.537 seconds */
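
/*
 * For example, with the default time_shift of 29, gethrtime() >> 29 yields
 * the same value for every I/O queued within roughly the same 0.537-second
 * window, so within a bucket the issue order is decided by priority; across
 * buckets, older I/Os eventually sort ahead of higher-priority newcomers.
 * See vdev_queue_io() and vdev_queue_deadline_compare() below.
 */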

/* exponential I/O issue ramp-up rate */
int zfs_vdev_ramp_rate = 2;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Virtual device vector for disk I/O scheduling.
 */
int
vdev_queue_deadline_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_deadline < z2->io_deadline)
                return (-1);
        if (z1->io_deadline > z2->io_deadline)
                return (1);

        if (z1->io_offset < z2->io_offset)
                return (-1);
        if (z1->io_offset > z2->io_offset)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_offset < z2->io_offset)
                return (-1);
        if (z1->io_offset > z2->io_offset)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}
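
/*
 * Each vdev queue keeps its zios in four AVL trees: vq_deadline_tree holds
 * every queued I/O sorted by deadline (vdev_queue_deadline_compare() above),
 * and its first element is the next candidate for issue; vq_read_tree and
 * vq_write_tree hold the same I/Os (reads and writes respectively) sorted by
 * offset, which is how we find adjacent I/Os to aggregate; vq_pending_tree
 * holds I/Os that have been issued to the device but have not yet completed.
 */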

void
vdev_queue_init(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);

        avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
            sizeof (zio_t), offsetof(struct zio, io_deadline_node));

        avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));

        avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));

        avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));
}

void
vdev_queue_fini(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        avl_destroy(&vq->vq_deadline_tree);
        avl_destroy(&vq->vq_read_tree);
        avl_destroy(&vq->vq_write_tree);
        avl_destroy(&vq->vq_pending_tree);

        mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        avl_add(&vq->vq_deadline_tree, zio);
        avl_add(zio->io_vdev_tree, zio);

        if (spa->spa_iokstat != NULL) {
                mutex_enter(&spa->spa_iokstat_lock);
                kstat_waitq_enter(spa->spa_iokstat->ks_data);
                mutex_exit(&spa->spa_iokstat_lock);
        }
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        avl_remove(&vq->vq_deadline_tree, zio);
        avl_remove(zio->io_vdev_tree, zio);

        if (spa->spa_iokstat != NULL) {
                mutex_enter(&spa->spa_iokstat_lock);
                kstat_waitq_exit(spa->spa_iokstat->ks_data);
                mutex_exit(&spa->spa_iokstat_lock);
        }
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        avl_add(&vq->vq_pending_tree, zio);
        if (spa->spa_iokstat != NULL) {
                mutex_enter(&spa->spa_iokstat_lock);
                kstat_runq_enter(spa->spa_iokstat->ks_data);
                mutex_exit(&spa->spa_iokstat_lock);
        }
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        avl_remove(&vq->vq_pending_tree, zio);
        if (spa->spa_iokstat != NULL) {
                kstat_io_t *ksio = spa->spa_iokstat->ks_data;

                mutex_enter(&spa->spa_iokstat_lock);
                kstat_runq_exit(spa->spa_iokstat->ks_data);
                if (zio->io_type == ZIO_TYPE_READ) {
                        ksio->reads++;
                        ksio->nread += zio->io_size;
                } else if (zio->io_type == ZIO_TYPE_WRITE) {
                        ksio->writes++;
                        ksio->nwritten += zio->io_size;
                }
                mutex_exit(&spa->spa_iokstat_lock);
        }
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
        zio_t *pio;

        while ((pio = zio_walk_parents(aio)) != NULL)
                if (aio->io_type == ZIO_TYPE_READ)
                        bcopy((char *)aio->io_data + (pio->io_offset -
                            aio->io_offset), pio->io_data, pio->io_size);

        zio_buf_free(aio->io_data, aio->io_size);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
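
/*
 * For example, if fio is a 4K I/O at offset 0 and lio is a 4K I/O at offset
 * 8K, then IO_SPAN(fio, lio) is 12K (the full range an aggregate covering
 * both would occupy) and IO_GAP(fio, lio) is 4K (the hole between them).
 * Two back-to-back I/Os have IO_GAP == 0.
 */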

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
{
        zio_t *fio, *lio, *aio, *dio, *nio, *mio;
        avl_tree_t *t;
        int flags;
        uint64_t maxspan = zfs_vdev_aggregation_limit;
        uint64_t maxgap;
        int stretch;

again:
        ASSERT(MUTEX_HELD(&vq->vq_lock));

        if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
            avl_numnodes(&vq->vq_deadline_tree) == 0)
                return (NULL);

        fio = lio = avl_first(&vq->vq_deadline_tree);

        t = fio->io_vdev_tree;
        flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
        maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;

        if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
                /*
                 * We can aggregate I/Os that are sufficiently adjacent and of
                 * the same flavor, as expressed by the AGG_INHERIT flags.
                 * The latter requirement is necessary so that certain
                 * attributes of the I/O, such as whether it's a normal I/O
                 * or a scrub/resilver, can be preserved in the aggregate.
                 * We can include optional I/Os, but don't allow them
                 * to begin a range as they add no benefit in that situation.
                 */

                /*
                 * We keep track of the last non-optional I/O.
                 */
                mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;

                /*
                 * Walk backwards through sufficiently contiguous I/Os
                 * recording the last non-optional I/O.
                 */
                while ((dio = AVL_PREV(t, fio)) != NULL &&
                    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
                    IO_SPAN(dio, lio) <= maxspan &&
                    IO_GAP(dio, fio) <= maxgap) {
                        fio = dio;
                        if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
                                mio = fio;
                }

                /*
                 * Skip any initial optional I/Os.
                 */
                while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
                        fio = AVL_NEXT(t, fio);
                        ASSERT(fio != NULL);
                }
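
                /*
                 * For example, suppose three 4K writes are queued at offsets
                 * 12K, 16K, and 20K and the 16K write is at the head of the
                 * deadline tree.  The backward walk above extends the range
                 * down to 12K; the forward walk below extends it to 20K,
                 * yielding a single 12K aggregate (bounded by
                 * zfs_vdev_aggregation_limit).
                 */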

                /*
                 * Walk forward through sufficiently contiguous I/Os.
                 */
                while ((dio = AVL_NEXT(t, lio)) != NULL &&
                    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
                    IO_SPAN(fio, dio) <= maxspan &&
                    IO_GAP(lio, dio) <= maxgap) {
                        lio = dio;
                        if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
                                mio = lio;
                }

                /*
                 * Now that we've established the range of the I/O aggregation,
                 * we must decide what to do with trailing optional I/Os.
                 * For reads, there's nothing to do. While we are unable to
                 * aggregate further, it's possible that a trailing optional
                 * I/O would allow the underlying device to aggregate with
                 * subsequent I/Os. We must therefore determine if the next
                 * non-optional I/O is close enough to make aggregation
                 * worthwhile.
                 */
                stretch = B_FALSE;
                if (t != &vq->vq_read_tree && mio != NULL) {
                        nio = lio;
                        while ((dio = AVL_NEXT(t, nio)) != NULL &&
                            IO_GAP(nio, dio) == 0 &&
                            IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
                                nio = dio;
                                if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
                                        stretch = B_TRUE;
                                        break;
                                }
                        }
                }
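
                /*
                 * If we found one (stretch), keep the trailing optional I/Os
                 * in the range and clear ZIO_FLAG_OPTIONAL on the I/O that
                 * follows lio; otherwise walk lio back past the trailing
                 * optional I/Os so they are not included in the aggregation.
                 */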

                if (stretch) {
                        /* This may be a no-op. */
                        VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
                        dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
                } else {
                        while (lio != mio && lio != fio) {
                                ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
                                lio = AVL_PREV(t, lio);
                                ASSERT(lio != NULL);
                        }
                }
        }

        if (fio != lio) {
                uint64_t size = IO_SPAN(fio, lio);
                ASSERT(size <= zfs_vdev_aggregation_limit);

                aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
                    zio_buf_alloc(size), size, fio->io_type, ZIO_PRIORITY_AGG,
                    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
                    vdev_queue_agg_io_done, NULL);
                aio->io_timestamp = fio->io_timestamp;

                nio = fio;
                do {
                        dio = nio;
                        nio = AVL_NEXT(t, dio);
                        ASSERT(dio->io_type == aio->io_type);
                        ASSERT(dio->io_vdev_tree == t);

                        if (dio->io_flags & ZIO_FLAG_NODATA) {
                                ASSERT(dio->io_type == ZIO_TYPE_WRITE);
                                bzero((char *)aio->io_data + (dio->io_offset -
                                    aio->io_offset), dio->io_size);
                        } else if (dio->io_type == ZIO_TYPE_WRITE) {
                                bcopy(dio->io_data, (char *)aio->io_data +
                                    (dio->io_offset - aio->io_offset),
                                    dio->io_size);
                        }

                        zio_add_child(dio, aio);
                        vdev_queue_io_remove(vq, dio);
                        zio_vdev_io_bypass(dio);
                        zio_execute(dio);
                } while (dio != lio);

                vdev_queue_pending_add(vq, aio);

                return (aio);
        }
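
        /*
         * We could not aggregate anything with fio; issue (or, if it carries
         * no data, bypass) it on its own.
         */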

        ASSERT(fio->io_vdev_tree == t);
        vdev_queue_io_remove(vq, fio);

        /*
         * If the I/O is or was optional and therefore has no data, we need to
         * simply discard it. We need to drop the vdev queue's lock to avoid a
         * deadlock that we could encounter since this I/O will complete
         * immediately.
         */
        if (fio->io_flags & ZIO_FLAG_NODATA) {
                mutex_exit(&vq->vq_lock);
                zio_vdev_io_bypass(fio);
                zio_execute(fio);
                mutex_enter(&vq->vq_lock);
                goto again;
        }

        vdev_queue_pending_add(vq, fio);

        return (fio);
}

zio_t *
vdev_queue_io(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

        if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
                return (zio);

        zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

        if (zio->io_type == ZIO_TYPE_READ)
                zio->io_vdev_tree = &vq->vq_read_tree;
        else
                zio->io_vdev_tree = &vq->vq_write_tree;

        mutex_enter(&vq->vq_lock);

        zio->io_timestamp = gethrtime();
        zio->io_deadline = (zio->io_timestamp >> zfs_vdev_time_shift) +
            zio->io_priority;

        vdev_queue_io_add(vq, zio);

        nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);

        mutex_exit(&vq->vq_lock);

        if (nio == NULL)
                return (NULL);

        if (nio->io_done == vdev_queue_agg_io_done) {
                zio_nowait(nio);
                return (NULL);
        }

        return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;

        if (zio_injection_enabled)
                delay(SEC_TO_TICK(zio_handle_io_delay(zio)));

        mutex_enter(&vq->vq_lock);

        vdev_queue_pending_remove(vq, zio);

        vq->vq_io_complete_ts = gethrtime();

        for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
                zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
                if (nio == NULL)
                        break;
                mutex_exit(&vq->vq_lock);
                if (nio->io_done == vdev_queue_agg_io_done) {
                        zio_nowait(nio);
                } else {
                        zio_vdev_io_reissue(nio);
                        zio_execute(nio);
                }
                mutex_enter(&vq->vq_lock);
        }

        mutex_exit(&vq->vq_lock);
}