Lines matching refs:ring (viona virtqueue ring code). Each entry gives the source line number, the matching line, and the enclosing function.

103 vq_page_hold(viona_vring_t *ring, uint64_t gpa, bool writable)  in vq_page_hold()  argument
105 ASSERT3P(ring->vr_lease, !=, NULL); in vq_page_hold()
112 return (vmm_drv_page_hold(ring->vr_lease, gpa, prot)); in vq_page_hold()
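The body elided between lines 105 and 112 presumably just turns the writable flag into page-protection bits before taking the hold. A minimal sketch of the whole helper, assuming the standard PROT_READ/PROT_WRITE constants:

    static vmm_page_t *
    vq_page_hold(viona_vring_t *ring, uint64_t gpa, bool writable)
    {
        ASSERT3P(ring->vr_lease, !=, NULL);

        /* Read access is always required; add write access on request. */
        int prot = PROT_READ;
        if (writable)
            prot |= PROT_WRITE;

        return (vmm_drv_page_hold(ring->vr_lease, gpa, prot));
    }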
129 vq_region_hold(viona_vring_t *ring, uint64_t gpa, uint32_t len, in vq_region_hold() argument
148 vmp = vq_page_hold(ring, gpa & PAGEMASK, writable); in vq_region_hold()
170 vmp = vq_page_hold(ring, gpa, writable); in vq_region_hold()
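Two hold sites are visible above: line 148 holds the first page with gpa aligned down (gpa & PAGEMASK), and line 170 holds an already page-aligned follow-on page, which implies a first-page-plus-aligned-remainder walk. A sketch of the page-count arithmetic such a walk needs, assuming the illumos PAGESIZE/PAGEOFFSET macros and P2ROUNDUP from sys/sysmacros.h; the driver's actual region bookkeeping type is not reproduced here:

    /*
     * Count the guest pages touched by [gpa, gpa + len).  The first page
     * may be entered mid-page, so it is counted apart from the aligned
     * remainder.
     */
    static uint_t
    region_page_count(uint64_t gpa, uint32_t len)
    {
        const uint32_t front = MIN(len, PAGESIZE - (gpa & PAGEOFFSET));
        uint_t pages = 1;

        if (front < len) {
            pages += P2ROUNDUP((uint64_t)(len - front), PAGESIZE) /
                PAGESIZE;
        }
        return (pages);
    }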
192 viona_vring_t *ring = arg; in viona_ring_lease_expire_cb() local
194 mutex_enter(&ring->vr_lock); in viona_ring_lease_expire_cb()
195 cv_broadcast(&ring->vr_cv); in viona_ring_lease_expire_cb()
196 mutex_exit(&ring->vr_lock); in viona_ring_lease_expire_cb()
203 viona_ring_lease_drop(viona_vring_t *ring) in viona_ring_lease_drop() argument
205 ASSERT(MUTEX_HELD(&ring->vr_lock)); in viona_ring_lease_drop()
207 if (ring->vr_lease != NULL) { in viona_ring_lease_drop()
208 vmm_hold_t *hold = ring->vr_link->l_vm_hold; in viona_ring_lease_drop()
216 viona_ring_unmap(ring); in viona_ring_lease_drop()
218 vmm_drv_lease_break(hold, ring->vr_lease); in viona_ring_lease_drop()
219 ring->vr_lease = NULL; in viona_ring_lease_drop()
224 viona_ring_lease_renew(viona_vring_t *ring) in viona_ring_lease_renew() argument
226 vmm_hold_t *hold = ring->vr_link->l_vm_hold; in viona_ring_lease_renew()
229 ASSERT(MUTEX_HELD(&ring->vr_lock)); in viona_ring_lease_renew()
231 viona_ring_lease_drop(ring); in viona_ring_lease_renew()
237 ring->vr_lease = vmm_drv_lease_sign(hold, viona_ring_lease_expire_cb, in viona_ring_lease_renew()
238 ring); in viona_ring_lease_renew()
239 if (ring->vr_lease != NULL) { in viona_ring_lease_renew()
241 if (ring->vr_pa != 0 && ring->vr_size != 0) { in viona_ring_lease_renew()
246 if (!viona_ring_map(ring, ring->vr_state == VRS_INIT)) { in viona_ring_lease_renew()
247 viona_ring_lease_drop(ring); in viona_ring_lease_renew()
252 return (ring->vr_lease != NULL); in viona_ring_lease_renew()
256 viona_ring_alloc(viona_link_t *link, viona_vring_t *ring) in viona_ring_alloc() argument
258 ring->vr_link = link; in viona_ring_alloc()
259 mutex_init(&ring->vr_lock, NULL, MUTEX_DRIVER, NULL); in viona_ring_alloc()
260 cv_init(&ring->vr_cv, NULL, CV_DRIVER, NULL); in viona_ring_alloc()
261 mutex_init(&ring->vr_a_mutex, NULL, MUTEX_DRIVER, NULL); in viona_ring_alloc()
262 mutex_init(&ring->vr_u_mutex, NULL, MUTEX_DRIVER, NULL); in viona_ring_alloc()
266 viona_ring_misc_free(viona_vring_t *ring) in viona_ring_misc_free() argument
268 const uint_t qsz = ring->vr_size; in viona_ring_misc_free()
270 viona_tx_ring_free(ring, qsz); in viona_ring_misc_free()
274 viona_ring_free(viona_vring_t *ring) in viona_ring_free() argument
276 mutex_destroy(&ring->vr_lock); in viona_ring_free()
277 cv_destroy(&ring->vr_cv); in viona_ring_free()
278 mutex_destroy(&ring->vr_a_mutex); in viona_ring_free()
279 mutex_destroy(&ring->vr_u_mutex); in viona_ring_free()
280 ring->vr_link = NULL; in viona_ring_free()
287 viona_vring_t *ring; in viona_ring_init() local
304 ring = &link->l_vrings[idx]; in viona_ring_init()
305 mutex_enter(&ring->vr_lock); in viona_ring_init()
306 if (ring->vr_state != VRS_RESET) { in viona_ring_init()
307 mutex_exit(&ring->vr_lock); in viona_ring_init()
310 VERIFY(ring->vr_state_flags == 0); in viona_ring_init()
312 ring->vr_lease = NULL; in viona_ring_init()
313 if (!viona_ring_lease_renew(ring)) { in viona_ring_init()
318 ring->vr_size = qsz; in viona_ring_init()
319 ring->vr_mask = (ring->vr_size - 1); in viona_ring_init()
320 ring->vr_pa = pa; in viona_ring_init()
321 if (!viona_ring_map(ring, true)) { in viona_ring_init()
327 ring->vr_cur_aidx = params->vrp_avail_idx; in viona_ring_init()
328 ring->vr_cur_uidx = params->vrp_used_idx; in viona_ring_init()
331 viona_tx_ring_alloc(ring, qsz); in viona_ring_init()
335 ring->vr_msi_addr = 0; in viona_ring_init()
336 ring->vr_msi_msg = 0; in viona_ring_init()
339 bzero(&ring->vr_stats, sizeof (ring->vr_stats)); in viona_ring_init()
341 t = viona_create_worker(ring); in viona_ring_init()
346 ring->vr_worker_thread = t; in viona_ring_init()
347 ring->vr_state = VRS_SETUP; in viona_ring_init()
348 cv_broadcast(&ring->vr_cv); in viona_ring_init()
349 mutex_exit(&ring->vr_lock); in viona_ring_init()
353 viona_ring_lease_drop(ring); in viona_ring_init()
354 viona_ring_misc_free(ring); in viona_ring_init()
355 ring->vr_size = 0; in viona_ring_init()
356 ring->vr_mask = 0; in viona_ring_init()
357 ring->vr_pa = 0; in viona_ring_init()
358 ring->vr_cur_aidx = 0; in viona_ring_init()
359 ring->vr_cur_uidx = 0; in viona_ring_init()
360 mutex_exit(&ring->vr_lock); in viona_ring_init()
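One detail worth noting in the setup path: line 319 derives vr_mask as vr_size - 1, which only works as an index mask if the queue size is a power of two, so the elided parameter validation above line 318 presumably enforces that. A hedged sketch of such a check; the upper-bound name is hypothetical:

    /* Queue size must be a nonzero power of two within the supported bound. */
    static bool
    vring_size_valid(uint16_t qsz)
    {
        return (qsz != 0 && qsz <= VRING_MAX_SIZE_ASSUMED &&
            (qsz & (qsz - 1)) == 0);
    }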
368 viona_vring_t *ring; in viona_ring_get_state() local
374 ring = &link->l_vrings[idx]; in viona_ring_get_state()
375 mutex_enter(&ring->vr_lock); in viona_ring_get_state()
377 params->vrp_size = ring->vr_size; in viona_ring_get_state()
378 params->vrp_pa = ring->vr_pa; in viona_ring_get_state()
380 if (ring->vr_state == VRS_RUN) { in viona_ring_get_state()
382 mutex_enter(&ring->vr_a_mutex); in viona_ring_get_state()
383 params->vrp_avail_idx = ring->vr_cur_aidx; in viona_ring_get_state()
384 mutex_exit(&ring->vr_a_mutex); in viona_ring_get_state()
385 mutex_enter(&ring->vr_u_mutex); in viona_ring_get_state()
386 params->vrp_used_idx = ring->vr_cur_uidx; in viona_ring_get_state()
387 mutex_exit(&ring->vr_u_mutex); in viona_ring_get_state()
390 params->vrp_avail_idx = ring->vr_cur_aidx; in viona_ring_get_state()
391 params->vrp_used_idx = ring->vr_cur_uidx; in viona_ring_get_state()
394 mutex_exit(&ring->vr_lock); in viona_ring_get_state()
400 viona_ring_reset(viona_vring_t *ring, boolean_t heed_signals) in viona_ring_reset() argument
402 mutex_enter(&ring->vr_lock); in viona_ring_reset()
403 if (ring->vr_state == VRS_RESET) { in viona_ring_reset()
404 mutex_exit(&ring->vr_lock); in viona_ring_reset()
408 if ((ring->vr_state_flags & VRSF_REQ_STOP) == 0) { in viona_ring_reset()
409 ring->vr_state_flags |= VRSF_REQ_STOP; in viona_ring_reset()
410 cv_broadcast(&ring->vr_cv); in viona_ring_reset()
412 while (ring->vr_state != VRS_RESET) { in viona_ring_reset()
414 cv_wait(&ring->vr_cv, &ring->vr_lock); in viona_ring_reset()
418 rs = cv_wait_sig(&ring->vr_cv, &ring->vr_lock); in viona_ring_reset()
419 if (rs <= 0 && ring->vr_state != VRS_RESET) { in viona_ring_reset()
420 mutex_exit(&ring->vr_lock); in viona_ring_reset()
425 mutex_exit(&ring->vr_lock); in viona_ring_reset()
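Lines 414 and 418 are the two arms of the heed_signals choice: an uninterruptible cv_wait when the caller must see the reset through, and cv_wait_sig when a pending signal is allowed to abort the wait. A sketch of how the loop presumably fits together (the error returned on signal is assumed to be EINTR):

    while (ring->vr_state != VRS_RESET) {
        if (!heed_signals) {
            cv_wait(&ring->vr_cv, &ring->vr_lock);
        } else {
            int rs = cv_wait_sig(&ring->vr_cv, &ring->vr_lock);
            if (rs <= 0 && ring->vr_state != VRS_RESET) {
                mutex_exit(&ring->vr_lock);
                return (EINTR);
            }
        }
    }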
430 viona_ring_map(viona_vring_t *ring, bool defer_dirty) in viona_ring_map() argument
432 const uint16_t qsz = ring->vr_size; in viona_ring_map()
433 uintptr_t pa = ring->vr_pa; in viona_ring_map()
440 ASSERT(MUTEX_HELD(&ring->vr_lock)); in viona_ring_map()
441 ASSERT3P(ring->vr_map_pages, ==, NULL); in viona_ring_map()
444 ring->vr_map_pages = kmem_zalloc(npages * sizeof (void *), KM_SLEEP); in viona_ring_map()
470 vmp = vmm_drv_page_hold_ext(ring->vr_lease, pa, in viona_ring_map()
473 viona_ring_unmap(ring); in viona_ring_map()
482 ring->vr_map_hold = vmp; in viona_ring_map()
487 ring->vr_map_pages[i] = vmm_drv_page_writable(vmp); in viona_ring_map()
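The npages value used at line 444 follows from the legacy (virtio 0.9.x) virtqueue layout: a 16-byte-per-entry descriptor table, then the available ring, then the used ring aligned up to the next page boundary, all in one contiguous guest-physical region. A sketch of the size arithmetic with the event-index words included; the macro names are illustrative, not necessarily the driver's:

    #define LGC_DESC_SZ(qsz)   ((qsz) * 16)
    /* avail: flags + idx + ring[qsz] + used_event, all uint16_t */
    #define LGC_AVAIL_SZ(qsz)  ((2 + (qsz) + 1) * sizeof (uint16_t))
    /* used: flags + idx + 8-byte entries[qsz] + avail_event */
    #define LGC_USED_SZ(qsz)   (2 + 2 + (qsz) * 8 + 2)

    #define LGC_VQ_SIZE(qsz)                                          \
        (P2ROUNDUP(LGC_DESC_SZ(qsz) + LGC_AVAIL_SZ(qsz), PAGESIZE) +  \
        P2ROUNDUP((uint64_t)LGC_USED_SZ(qsz), PAGESIZE))
    #define LGC_VQ_PAGES(qsz)  (LGC_VQ_SIZE(qsz) / PAGESIZE)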
494 viona_ring_mark_dirty(viona_vring_t *ring) in viona_ring_mark_dirty() argument
496 ASSERT(MUTEX_HELD(&ring->vr_lock)); in viona_ring_mark_dirty()
497 ASSERT(ring->vr_map_hold != NULL); in viona_ring_mark_dirty()
499 for (vmm_page_t *vp = ring->vr_map_hold; vp != NULL; in viona_ring_mark_dirty()
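The loop body at line 499 is elided; it presumably walks the chain of held ring pages and marks each one dirty, so hypervisor-level dirty tracking does not miss ring state written from the host side. A sketch, assuming vmm_drv_page_next() and vmm_drv_page_mark_dirty() are the chain-walk and marking primitives of the vmm_drv interface:

    for (vmm_page_t *vp = ring->vr_map_hold; vp != NULL;
        vp = vmm_drv_page_next(vp)) {
        vmm_drv_page_mark_dirty(vp);
    }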
506 viona_ring_unmap(viona_vring_t *ring) in viona_ring_unmap() argument
508 ASSERT(MUTEX_HELD(&ring->vr_lock)); in viona_ring_unmap()
510 void **map = ring->vr_map_pages; in viona_ring_unmap()
512 const uint_t npages = LEGACY_VQ_PAGES(ring->vr_size); in viona_ring_unmap()
514 ring->vr_map_pages = NULL; in viona_ring_unmap()
516 vmm_drv_page_release_chain(ring->vr_map_hold); in viona_ring_unmap()
517 ring->vr_map_hold = NULL; in viona_ring_unmap()
519 ASSERT3P(ring->vr_map_hold, ==, NULL); in viona_ring_unmap()
524 viona_ring_addr(viona_vring_t *ring, uint_t off) in viona_ring_addr() argument
526 ASSERT3P(ring->vr_map_pages, !=, NULL); in viona_ring_addr()
527 ASSERT3U(LEGACY_VQ_SIZE(ring->vr_size), >, off); in viona_ring_addr()
531 return ((caddr_t)ring->vr_map_pages[page_num] + page_off); in viona_ring_addr()
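The two values elided before line 531 are straightforward page arithmetic over the mapped ring; a sketch assuming the usual PAGESIZE/PAGEOFFSET macros:

    const uint_t page_num = off / PAGESIZE;        /* which mapped page */
    const uint_t page_off = off & PAGEOFFSET;      /* offset within it */

    return ((caddr_t)ring->vr_map_pages[page_num] + page_off);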
535 viona_intr_ring(viona_vring_t *ring, boolean_t skip_flags_check) in viona_intr_ring() argument
538 volatile uint16_t *avail_flags = viona_ring_addr(ring, in viona_intr_ring()
539 LEGACY_AVAIL_FLAGS_OFF(ring->vr_size)); in viona_intr_ring()
546 mutex_enter(&ring->vr_lock); in viona_intr_ring()
547 uint64_t addr = ring->vr_msi_addr; in viona_intr_ring()
548 uint64_t msg = ring->vr_msi_msg; in viona_intr_ring()
549 mutex_exit(&ring->vr_lock); in viona_intr_ring()
552 (void) vmm_drv_msi(ring->vr_lease, addr, msg); in viona_intr_ring()
555 if (atomic_cas_uint(&ring->vr_intr_enabled, 0, 1) == 0) { in viona_intr_ring()
556 pollwakeup(&ring->vr_link->l_pollhead, POLLRDBAND); in viona_intr_ring()
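The check elided between lines 539 and 546 presumably honors the guest's interrupt-suppression hint unless skip_flags_check overrides it; VRING_AVAIL_F_NO_INTERRUPT is the standard legacy bit in the avail ring's flags word:

    if (!skip_flags_check &&
        (*avail_flags & VRING_AVAIL_F_NO_INTERRUPT) != 0) {
        /* Guest asked not to be notified about used-ring updates. */
        return;
    }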
562 vring_stop_req(const viona_vring_t *ring) in vring_stop_req() argument
564 return ((ring->vr_state_flags & VRSF_REQ_STOP) != 0); in vring_stop_req()
568 vring_pause_req(const viona_vring_t *ring) in vring_pause_req() argument
570 return ((ring->vr_state_flags & VRSF_REQ_PAUSE) != 0); in vring_pause_req()
574 vring_start_req(const viona_vring_t *ring) in vring_start_req() argument
576 return ((ring->vr_state_flags & VRSF_REQ_START) != 0); in vring_start_req()
588 vring_need_bail_ext(const viona_vring_t *ring, bool stop_only) in vring_need_bail_ext() argument
590 ASSERT(MUTEX_HELD(&ring->vr_lock)); in vring_need_bail_ext()
592 if (vring_stop_req(ring) || in vring_need_bail_ext()
593 (!stop_only && vring_pause_req(ring))) { in vring_need_bail_ext()
597 kthread_t *t = ring->vr_worker_thread; in vring_need_bail_ext()
610 vring_need_bail(const viona_vring_t *ring) in vring_need_bail() argument
612 return (vring_need_bail_ext(ring, false)); in vring_need_bail()
616 viona_ring_pause(viona_vring_t *ring) in viona_ring_pause() argument
618 mutex_enter(&ring->vr_lock); in viona_ring_pause()
619 switch (ring->vr_state) { in viona_ring_pause()
630 ring->vr_state_flags &= ~VRSF_REQ_START; in viona_ring_pause()
631 mutex_exit(&ring->vr_lock); in viona_ring_pause()
635 if ((ring->vr_state_flags & VRSF_REQ_STOP) != 0) { in viona_ring_pause()
637 mutex_exit(&ring->vr_lock); in viona_ring_pause()
642 ring->vr_state_flags |= VRSF_REQ_PAUSE; in viona_ring_pause()
643 cv_broadcast(&ring->vr_cv); in viona_ring_pause()
647 panic("invalid ring state %d", ring->vr_state); in viona_ring_pause()
652 int res = cv_wait_sig(&ring->vr_cv, &ring->vr_lock); in viona_ring_pause()
654 if (ring->vr_state == VRS_INIT || in viona_ring_pause()
655 (ring->vr_state_flags & VRSF_REQ_PAUSE) == 0) { in viona_ring_pause()
657 mutex_exit(&ring->vr_lock); in viona_ring_pause()
662 mutex_exit(&ring->vr_lock); in viona_ring_pause()
672 viona_vring_t *ring = (viona_vring_t *)arg; in viona_worker() local
673 viona_link_t *link = ring->vr_link; in viona_worker()
675 mutex_enter(&ring->vr_lock); in viona_worker()
676 VERIFY3U(ring->vr_state, ==, VRS_SETUP); in viona_worker()
679 if (vring_need_bail_ext(ring, true)) { in viona_worker()
685 ring->vr_state = VRS_INIT; in viona_worker()
686 cv_broadcast(&ring->vr_cv); in viona_worker()
688 while (!vring_start_req(ring)) { in viona_worker()
693 if (vmm_drv_lease_expired(ring->vr_lease)) { in viona_worker()
694 if (!viona_ring_lease_renew(ring)) { in viona_worker()
699 (void) cv_wait_sig(&ring->vr_cv, &ring->vr_lock); in viona_worker()
701 if (vring_pause_req(ring)) { in viona_worker()
703 ring->vr_state_flags &= ~VRSF_REQ_PAUSE; in viona_worker()
705 if (vring_need_bail_ext(ring, true)) { in viona_worker()
710 ASSERT((ring->vr_state_flags & VRSF_REQ_START) != 0); in viona_worker()
711 ring->vr_state = VRS_RUN; in viona_worker()
712 ring->vr_state_flags &= ~VRSF_REQ_START; in viona_worker()
713 viona_ring_mark_dirty(ring); in viona_worker()
716 if (vmm_drv_lease_expired(ring->vr_lease)) { in viona_worker()
717 if (!viona_ring_lease_renew(ring)) { in viona_worker()
723 if (ring == &link->l_vrings[VIONA_VQ_RX]) { in viona_worker()
724 viona_worker_rx(ring, link); in viona_worker()
725 } else if (ring == &link->l_vrings[VIONA_VQ_TX]) { in viona_worker()
726 viona_worker_tx(ring, link); in viona_worker()
728 panic("unexpected ring: %p", (void *)ring); in viona_worker()
731 VERIFY3U(ring->vr_state, ==, VRS_STOP); in viona_worker()
732 VERIFY3U(ring->vr_xfer_outstanding, ==, 0); in viona_worker()
735 if (vring_pause_req(ring)) { in viona_worker()
736 ring->vr_state_flags &= ~VRSF_REQ_PAUSE; in viona_worker()
738 if (vring_need_bail_ext(ring, true)) { in viona_worker()
748 viona_ring_unmap(ring); in viona_worker()
749 if (viona_ring_map(ring, true)) { in viona_worker()
760 viona_ring_misc_free(ring); in viona_worker()
762 viona_ring_lease_drop(ring); in viona_worker()
763 ring->vr_cur_aidx = 0; in viona_worker()
764 ring->vr_size = 0; in viona_worker()
765 ring->vr_mask = 0; in viona_worker()
766 ring->vr_pa = 0; in viona_worker()
767 ring->vr_state = VRS_RESET; in viona_worker()
768 ring->vr_state_flags = 0; in viona_worker()
769 ring->vr_worker_thread = NULL; in viona_worker()
770 cv_broadcast(&ring->vr_cv); in viona_worker()
771 mutex_exit(&ring->vr_lock); in viona_worker()
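Pulling the transitions visible above into one place, the worker appears to drive the ring through the following lifecycle (all VRS_* names are taken from the code; the arrows summarize lines 347, 685, 711, 731, and 767):

    /*
     * VRS_RESET --viona_ring_init()--> VRS_SETUP  sized, mapped, worker made
     * VRS_SETUP --worker startup-----> VRS_INIT   waiting for VRSF_REQ_START
     * VRS_INIT  --start request------> VRS_RUN    rx/tx processing
     * VRS_RUN   --stop/pause request-> VRS_STOP   vr_xfer_outstanding == 0
     * VRS_STOP  --cleanup------------> VRS_RESET  state cleared, worker exits
     */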
778 viona_create_worker(viona_vring_t *ring) in viona_create_worker() argument
785 ASSERT(MUTEX_HELD(&ring->vr_lock)); in viona_create_worker()
786 ASSERT(ring->vr_state == VRS_RESET); in viona_create_worker()
789 lwp = lwp_create(viona_worker, (void *)ring, 0, p, TS_STOPPED, in viona_create_worker()
805 vq_read_desc(viona_vring_t *ring, uint16_t idx, struct virtio_desc *descp) in vq_read_desc() argument
809 ASSERT3U(idx, <, ring->vr_size); in vq_read_desc()
811 bcopy(viona_ring_addr(ring, entry_off), descp, sizeof (*descp)); in vq_read_desc()
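The entry_off elided before line 811 follows from the descriptor table living at offset 0 of the legacy layout, with 16-byte entries; a sketch of the full accessor:

    static void
    vq_read_desc(viona_vring_t *ring, uint16_t idx, struct virtio_desc *descp)
    {
        /* Descriptor table starts the legacy layout, 16 bytes per entry. */
        const uint_t entry_off = idx * sizeof (struct virtio_desc);

        ASSERT3U(idx, <, ring->vr_size);

        bcopy(viona_ring_addr(ring, entry_off), descp, sizeof (*descp));
    }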
815 vq_read_avail(viona_vring_t *ring, uint16_t idx) in vq_read_avail() argument
817 ASSERT3U(idx, <, ring->vr_size); in vq_read_avail()
820 viona_ring_addr(ring, LEGACY_AVAIL_ENT_OFF(ring->vr_size, idx)); in vq_read_avail()
830 vq_map_desc_bufs(viona_vring_t *ring, const struct virtio_desc *desc, in vq_map_desc_bufs() argument
836 VIONA_PROBE2(desc_bad_len, viona_vring_t *, ring, in vq_map_desc_bufs()
838 VIONA_RING_STAT_INCR(ring, desc_bad_len); in vq_map_desc_bufs()
842 err = vq_region_hold(ring, desc->vd_addr, desc->vd_len, in vq_map_desc_bufs()
846 VIONA_PROBE1(too_many_desc, viona_vring_t *, ring); in vq_map_desc_bufs()
847 VIONA_RING_STAT_INCR(ring, too_many_desc); in vq_map_desc_bufs()
850 VIONA_PROBE_BAD_RING_ADDR(ring, desc->vd_addr); in vq_map_desc_bufs()
851 VIONA_RING_STAT_INCR(ring, bad_ring_addr); in vq_map_desc_bufs()
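The continuation of line 842 is elided, but the writable argument to vq_region_hold can only come from the descriptor's flags; VRING_DESC_F_WRITE is the standard bit. A sketch of the mapping step and how the visible error paths line up:

    /* The device may write the buffer only if the guest marked it so. */
    const bool writable = (desc->vd_flags & VRING_DESC_F_WRITE) != 0;

    err = vq_region_hold(ring, desc->vd_addr, desc->vd_len,
        writable, region);
    /* E2BIG feeds the too_many_desc stat; EFAULT feeds bad_ring_addr. */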
865 vq_map_indir_desc_bufs(viona_vring_t *ring, const struct virtio_desc *desc, in vq_map_indir_desc_bufs() argument
871 indir_count > ring->vr_size || in vq_map_indir_desc_bufs()
873 VIONA_PROBE2(indir_bad_len, viona_vring_t *, ring, in vq_map_indir_desc_bufs()
875 VIONA_RING_STAT_INCR(ring, indir_bad_len); in vq_map_indir_desc_bufs()
899 vmp = vq_page_hold(ring, indir_page, false); in vq_map_indir_desc_bufs()
901 VIONA_PROBE_BAD_RING_ADDR(ring, indir_page); in vq_map_indir_desc_bufs()
902 VIONA_RING_STAT_INCR(ring, bad_ring_addr); in vq_map_indir_desc_bufs()
919 VIONA_PROBE1(indir_bad_nest, viona_vring_t *, ring); in vq_map_indir_desc_bufs()
920 VIONA_RING_STAT_INCR(ring, indir_bad_nest); in vq_map_indir_desc_bufs()
924 VIONA_PROBE2(desc_bad_len, viona_vring_t *, ring, in vq_map_indir_desc_bufs()
926 VIONA_RING_STAT_INCR(ring, desc_bad_len); in vq_map_indir_desc_bufs()
931 err = vq_map_desc_bufs(ring, &vp, region); in vq_map_indir_desc_bufs()
941 VIONA_PROBE1(too_many_desc, viona_vring_t *, ring); in vq_map_indir_desc_bufs()
942 VIONA_RING_STAT_INCR(ring, too_many_desc); in vq_map_indir_desc_bufs()
949 VIONA_PROBE3(indir_bad_next, viona_vring_t *, ring, in vq_map_indir_desc_bufs()
951 VIONA_RING_STAT_INCR(ring, indir_bad_next); in vq_map_indir_desc_bufs()
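An indirect descriptor points at a guest-physical table of further descriptors. Line 871 implies indir_count is derived from vd_len, and the probes show three failure classes: a malformed table length (indir_bad_len), a nested indirect entry (indir_bad_nest), and a broken next pointer (indir_bad_next). A sketch of the validation arithmetic; the exact error value is assumed:

    const uint_t indir_count = desc->vd_len / sizeof (struct virtio_desc);

    /* Must be non-empty, fit the ring, and divide into 16-byte entries. */
    if (indir_count == 0 ||
        indir_count > ring->vr_size ||
        (desc->vd_len & 0xf) != 0) {
        return (EINVAL);
    }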
964 vq_popchain(viona_vring_t *ring, struct iovec *iov, uint_t niov, in vq_popchain() argument
978 mutex_enter(&ring->vr_a_mutex); in vq_popchain()
979 idx = ring->vr_cur_aidx; in vq_popchain()
980 ndesc = viona_ring_num_avail(ring); in vq_popchain()
983 mutex_exit(&ring->vr_a_mutex); in vq_popchain()
986 if (ndesc > ring->vr_size) { in vq_popchain()
994 VIONA_PROBE2(ndesc_too_high, viona_vring_t *, ring, in vq_popchain()
996 VIONA_RING_STAT_INCR(ring, ndesc_too_high); in vq_popchain()
999 head = vq_read_avail(ring, idx & ring->vr_mask); in vq_popchain()
1003 if (next >= ring->vr_size) { in vq_popchain()
1004 VIONA_PROBE2(bad_idx, viona_vring_t *, ring, in vq_popchain()
1006 VIONA_RING_STAT_INCR(ring, bad_idx); in vq_popchain()
1010 vq_read_desc(ring, next, &vdir); in vq_popchain()
1012 if (vq_map_desc_bufs(ring, &vdir, &region) != 0) { in vq_popchain()
1023 viona_vring_t *, ring, in vq_popchain()
1025 VIONA_RING_STAT_INCR(ring, indir_bad_next); in vq_popchain()
1029 if (vq_map_indir_desc_bufs(ring, &vdir, &region) != 0) { in vq_popchain()
1035 ring->vr_cur_aidx++; in vq_popchain()
1036 mutex_exit(&ring->vr_a_mutex); in vq_popchain()
1044 mutex_exit(&ring->vr_a_mutex); in vq_popchain()
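From the consumer side, the pop/push pair is used roughly as follows: pop an available chain into a bounded iovec array, perform the transfer, release the held pages, then hand the chain head (the cookie) back through the used ring. The parameters of vq_popchain past niov are elided above, so the cookie and page-chain out-parameters here are assumptions:

    struct iovec iov[8];        /* segment budget; the bound is illustrative */
    uint16_t cookie;
    vmm_page_t *chain = NULL;
    uint32_t done_len = 0;

    int n = vq_popchain(ring, iov, 8, &cookie, &chain);
    if (n > 0) {
        /* ... move packet data through iov[0..n-1], set done_len ... */
        vmm_drv_page_release_chain(chain);
        vq_pushchain(ring, done_len, cookie);
    }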
1057 vq_write_used_ent(viona_vring_t *ring, uint16_t idx, uint16_t cookie, in vq_write_used_ent() argument
1066 const uint_t used_id_off = LEGACY_USED_ENT_OFF(ring->vr_size, idx); in vq_write_used_ent()
1068 volatile uint32_t *idp = viona_ring_addr(ring, used_id_off); in vq_write_used_ent()
1069 volatile uint32_t *lenp = viona_ring_addr(ring, used_len_off); in vq_write_used_ent()
1071 ASSERT(MUTEX_HELD(&ring->vr_u_mutex)); in vq_write_used_ent()
1078 vq_write_used_idx(viona_vring_t *ring, uint16_t idx) in vq_write_used_idx() argument
1080 ASSERT(MUTEX_HELD(&ring->vr_u_mutex)); in vq_write_used_idx()
1083 viona_ring_addr(ring, LEGACY_USED_IDX_OFF(ring->vr_size)); in vq_write_used_idx()
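Each used-ring entry is a 32-bit descriptor id plus a 32-bit length, hence the two uint32_t pointers at lines 1068-1069; the elided stores are presumably just:

    *idp = cookie;      /* head descriptor of the completed chain */
    *lenp = len;        /* bytes the device wrote into the chain */

and vq_write_used_idx publishes the new index through the same mapping:

    volatile uint16_t *idxp =
        viona_ring_addr(ring, LEGACY_USED_IDX_OFF(ring->vr_size));
    *idxp = idx;

For the guest to see a consistent ring, the entry stores must become visible before the index store; the elided code presumably places a producer barrier (e.g. membar_producer()) between them.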
1088 vq_pushchain(viona_vring_t *ring, uint32_t len, uint16_t cookie) in vq_pushchain() argument
1092 mutex_enter(&ring->vr_u_mutex); in vq_pushchain()
1094 uidx = ring->vr_cur_uidx; in vq_pushchain()
1095 vq_write_used_ent(ring, uidx & ring->vr_mask, cookie, len); in vq_pushchain()
1099 vq_write_used_idx(ring, uidx); in vq_pushchain()
1100 ring->vr_cur_uidx = uidx; in vq_pushchain()
1102 mutex_exit(&ring->vr_u_mutex); in vq_pushchain()
1106 vq_pushchain_many(viona_vring_t *ring, uint_t num_bufs, used_elem_t *elem) in vq_pushchain_many() argument
1110 mutex_enter(&ring->vr_u_mutex); in vq_pushchain_many()
1112 uidx = ring->vr_cur_uidx; in vq_pushchain_many()
1115 vq_write_used_ent(ring, uidx & ring->vr_mask, elem[i].id, in vq_pushchain_many()
1120 vq_write_used_idx(ring, uidx); in vq_pushchain_many()
1121 ring->vr_cur_uidx = uidx; in vq_pushchain_many()
1123 mutex_exit(&ring->vr_u_mutex); in vq_pushchain_many()
1130 viona_ring_disable_notify(viona_vring_t *ring) in viona_ring_disable_notify() argument
1133 viona_ring_addr(ring, LEGACY_USED_FLAGS_OFF(ring->vr_size)); in viona_ring_disable_notify()
1142 viona_ring_enable_notify(viona_vring_t *ring) in viona_ring_enable_notify() argument
1145 viona_ring_addr(ring, LEGACY_USED_FLAGS_OFF(ring->vr_size)); in viona_ring_enable_notify()
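Both helpers toggle the standard VRING_USED_F_NO_NOTIFY bit in the used ring's flags word, which tells the guest whether the host wants doorbell kicks; a sketch of the two bodies, ignoring whatever memory-ordering the real driver adds:

    volatile uint16_t *used_flags =
        viona_ring_addr(ring, LEGACY_USED_FLAGS_OFF(ring->vr_size));

    *used_flags |= VRING_USED_F_NO_NOTIFY;     /* viona_ring_disable_notify */
    *used_flags &= ~VRING_USED_F_NO_NOTIFY;    /* viona_ring_enable_notify */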
1159 viona_ring_num_avail(viona_vring_t *ring) in viona_ring_num_avail() argument
1162 viona_ring_addr(ring, LEGACY_AVAIL_IDX_OFF(ring->vr_size)); in viona_ring_num_avail()
1164 return (*avail_idx - ring->vr_cur_aidx); in viona_ring_num_avail()
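The subtraction at line 1164 is deliberate uint16_t modular arithmetic: both indices increase without bound and wrap at 65536, so the difference counts unconsumed avail entries even across a wrap:

    uint16_t avail_idx = 3, cur_aidx = 65533;
    uint16_t pending = avail_idx - cur_aidx;   /* == 6, wrap handled for free */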