/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 Joyent, Inc.
 * Copyright (c) 2019 by Western Digital Corporation
 */

/*
 * Event Ring Management
 *
 * All activity in xHCI is reported to an event ring, which corresponds directly
 * with an interrupt. Whether a command is issued or an I/O is issued to a given
 * device endpoint, it will end up being acknowledged, positively or negatively,
 * on an event ring.
 *
 * Unlike other rings, the OS is a consumer of the event rings, not a producer.
 * For more information on how the ring is used, see xhci_ring.c. For more
 * information generally, see xhci.c.
 *
 * All of the rings are described in the ERST -- Event Ring Segment Table. As we
 * only have a single interrupt and a single event ring, we only write a single
 * entry here.
 */

#include <sys/usb/hcd/xhci/xhci.h>


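/*
 * Tear down the event ring and release the ERST memory that backs it.
 */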
void
xhci_event_fini(xhci_t *xhcip)
{
	xhci_event_ring_t *xev = &xhcip->xhci_event;
	xhci_ring_free(&xev->xev_ring);
	if (xev->xev_segs != NULL)
		xhci_dma_free(&xev->xev_dma);
	xev->xev_segs = NULL;
}

/*
 * Make sure that if we leave here we either have both the ring and table
 * addresses initialized or neither.
 */
static int
xhci_event_alloc(xhci_t *xhcip, xhci_event_ring_t *xev)
{
	int ret;
	ddi_dma_attr_t attr;
	ddi_device_acc_attr_t acc;

	/*
	 * This is allocating the segment table. It doesn't have any particular
	 * requirements. Though it could be larger, we can get away with our
	 * default data structure attributes unless we add a lot more entries.
	 */
	xhci_dma_acc_attr(xhcip, &acc);
	xhci_dma_dma_attr(xhcip, &attr);
	if (!xhci_dma_alloc(xhcip, &xev->xev_dma, &attr, &acc, B_FALSE,
	    sizeof (xhci_event_segment_t) * XHCI_EVENT_NSEGS, B_FALSE))
		return (ENOMEM);
	if ((ret = xhci_ring_alloc(xhcip, &xev->xev_ring)) != 0) {
		xhci_dma_free(&xev->xev_dma);
		return (ret);
	}

	xev->xev_segs = (void *)xev->xev_dma.xdb_va;
	return (0);
}

int
xhci_event_init(xhci_t *xhcip)
{
	int ret;
	uint32_t reg;
	xhci_event_ring_t *xev = &xhcip->xhci_event;

	if (xev->xev_segs == NULL) {
		if ((ret = xhci_event_alloc(xhcip, xev)) != 0)
			return (ret);
	}

	if ((ret = xhci_ring_reset(xhcip, &xev->xev_ring)) != 0) {
		xhci_event_fini(xhcip);
		return (ret);
	}

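	/*
	 * Fill in the lone ERST entry with the event ring's physical base
	 * address and its size in TRBs.
	 */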
	bzero(xev->xev_segs, sizeof (xhci_event_segment_t) * XHCI_EVENT_NSEGS);
	xev->xev_segs[0].xes_addr = LE_64(xhci_dma_pa(&xev->xev_ring.xr_dma));
	xev->xev_segs[0].xes_size = LE_16(xev->xev_ring.xr_ntrb);

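	/*
	 * Tell interrupter 0 how many segments are in the ERST.
	 */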
	reg = xhci_get32(xhcip, XHCI_R_RUN, XHCI_ERSTSZ(0));
	reg &= ~XHCI_ERSTS_MASK;
	reg |= XHCI_ERSTS_SET(XHCI_EVENT_NSEGS);
	xhci_put32(xhcip, XHCI_R_RUN, XHCI_ERSTSZ(0), reg);

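	/*
	 * Program the event ring dequeue pointer and then point the
	 * controller at the segment table itself.
	 */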
	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERDP(0),
	    xhci_dma_pa(&xev->xev_ring.xr_dma));
	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERSTBA(0),
	    xhci_dma_pa(&xev->xev_dma));
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_event_fini(xhcip);
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	return (0);
}

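/*
 * Handle a Port Status Change event by notifying the root hub. We return
 * B_FALSE only when the event names a port that cannot exist, which suggests
 * the controller has handed us bogus data.
 */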
static boolean_t
xhci_event_process_psc(xhci_t *xhcip, xhci_trb_t *trb)
{
	uint32_t port;

	if (XHCI_TRB_GET_CODE(LE_32(trb->trb_status)) != XHCI_CODE_SUCCESS) {
		return (B_TRUE);
	}

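	/*
	 * The port number is carried in the event TRB's parameter field and
	 * must fall within the range of ports the controller advertises.
	 */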
	port = XHCI_TRB_PORTID(LE_64(trb->trb_addr));
	if (port < 1 || port > xhcip->xhci_caps.xcap_max_ports) {
		/*
		 * At some point we may want to send a DDI_FM_DEVICE_INVAL_STATE
		 * ereport as part of this.
		 */
		return (B_FALSE);
	}

	xhci_root_hub_psc_callback(xhcip);
	return (B_TRUE);
}

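/*
 * Process a single event TRB, dispatching based on its type. Returns B_FALSE
 * if a fatal error was encountered and event processing should stop.
 */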
boolean_t
xhci_event_process_trb(xhci_t *xhcip, xhci_trb_t *trb)
{
	uint32_t type;

	type = LE_32(trb->trb_flags) & XHCI_TRB_TYPE_MASK;
	switch (type) {
	case XHCI_EVT_PORT_CHANGE:
		if (!xhci_event_process_psc(xhcip, trb))
			return (B_FALSE);
		break;
	case XHCI_EVT_CMD_COMPLETE:
		if (!xhci_command_event_callback(xhcip, trb))
			return (B_FALSE);
		break;
	case XHCI_EVT_DOORBELL:
		/*
		 * Because we don't have any VF hardware, this event
		 * should never happen. If it does, that probably means
		 * something bad has happened and we should reset the
		 * device.
		 */
		xhci_error(xhcip, "received xHCI VF interrupt even "
		    "though virtual functions are not supported, "
		    "resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	case XHCI_EVT_XFER:
		if (!xhci_endpoint_transfer_callback(xhcip, trb))
			return (B_FALSE);
		break;
	/*
	 * Ignore other events that come in.
	 */
	default:
		break;
	}

	return (B_TRUE);
}

/*
 * Process the event ring; note that we're in interrupt context while doing
 * this.
 */
boolean_t
xhci_event_process(xhci_t *xhcip)
{
	int nevents;
	uint64_t addr;
	xhci_ring_t *xrp = &xhcip->xhci_event.xev_ring;

	/*
	 * While it is possible for us to transition to an error state at any
	 * time, since we reasonably do not hold the xhci_t's lock for the
	 * entire interrupt (it doesn't protect any of the event ring's data),
	 * we still do an initial check to ensure that we don't go too far
	 * down the path.
	 */
	mutex_enter(&xhcip->xhci_lock);
	if (xhcip->xhci_state & XHCI_S_ERROR) {
		mutex_exit(&xhcip->xhci_lock);
		return (B_FALSE);
	}
	mutex_exit(&xhcip->xhci_lock);

	/*
	 * We've seen a few cases, particularly when dealing with controllers
	 * where BIOS takeover is involved, where an interrupt gets injected
	 * into the system before we've actually finished setting things up.
	 * If for some reason that happens and we don't actually have a ring
	 * yet, don't try to do anything.
	 */
	if (xhcip->xhci_event.xev_segs == NULL)
		return (B_TRUE);

	XHCI_DMA_SYNC(xrp->xr_dma, DDI_DMA_SYNC_FORKERNEL);
	if (xhci_check_dma_handle(xhcip, &xrp->xr_dma) != DDI_FM_OK) {
		xhci_error(xhcip, "encountered fatal FM error trying to "
		    "synchronize event ring: resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	/*
	 * Process at most a full ring's worth of events.
	 */
	for (nevents = 0; nevents < xrp->xr_ntrb; nevents++) {
		xhci_trb_t *trb;

		if ((trb = xhci_ring_event_advance(xrp)) == NULL)
			break;

		if (!xhci_event_process_trb(xhcip, trb))
			return (B_FALSE);
	}

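	/*
	 * Update the hardware's copy of the dequeue pointer to where we
	 * stopped. Setting the busy bit in this write acknowledges the Event
	 * Handler Busy flag, which is write-one-to-clear.
	 */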
	addr = xhci_dma_pa(&xrp->xr_dma) + sizeof (xhci_trb_t) * xrp->xr_tail;
	addr |= XHCI_ERDP_BUSY;
	xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERDP(0), addr);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write to event ring dequeue "
		    "pointer: encountered fatal FM error, resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	}

	return (B_TRUE);
}