xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c (revision ec82ef794c304d675af6962e1428b3b12ca2be8b)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright (c) 2018, Joyent, Inc.
14  * Copyright (c) 2019 by Western Digital Corporation
15  */
16 
17 /*
18  * xHCI Endpoint Initialization and Management
19  *
20  * Please see the big theory statement in xhci.c for more information.
21  */
22 
23 #include <sys/usb/hcd/xhci/xhci.h>
24 #include <sys/sdt.h>
25 
26 boolean_t
27 xhci_endpoint_is_periodic_in(xhci_endpoint_t *xep)
28 {
29 	usba_pipe_handle_data_t *ph;
30 
31 	ASSERT(xep != NULL);
32 	ph = xep->xep_pipe;
33 	ASSERT(ph != NULL);
34 
35 	return ((xep->xep_type == USB_EP_ATTR_INTR ||
36 	    xep->xep_type == USB_EP_ATTR_ISOCH) &&
37 	    (ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN);
38 }
39 
40 /*
41  * Endpoints are a bit weirdly numbered. Endpoint zero is the default control
42  * endpoint, so the direction doesn't matter. For all the others, they're
43  * arranged as ep 1 out, ep 1 in, ep 2 out, ep 2 in. This is based on the layout
44  * of the Device Context Structure in xHCI 1.1 / 6.2.1. Therefore to go from the
45  * endpoint number and direction, we know that endpoint n starts at entry
46  * 2n - 1, e.g. endpoint 1 starts at entry 1, endpoint 2 at entry 3, etc.
47  * The OUT direction comes first, followed by the IN direction, so an IN
48  * endpoint lands at entry 2n.
49  */
50 uint_t
51 xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *ph)
52 {
53 	int ep;
54 
55 	ep = ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK;
56 	if (ep == 0)
57 		return (ep);
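	/*
	 * Map to the device context index layout described above: endpoint n
	 * OUT lands at index 2n - 1 and endpoint n IN at 2n.
	 */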
58 	ep = ep * 2 - 1;
59 	if ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN)
60 		ep++;
61 
62 	VERIFY(ep < XHCI_NUM_ENDPOINTS);
63 	return (ep);
64 }
65 
66 /*
67  * The assumption is that someone calling this owns this endpoint / device and
68  * that it's in a state where it's safe to zero out that information.
69  */
70 void
71 xhci_endpoint_fini(xhci_device_t *xd, int endpoint)
72 {
73 	xhci_endpoint_t *xep = xd->xd_endpoints[endpoint];
74 
75 	VERIFY(xep != NULL);
76 	xd->xd_endpoints[endpoint] = NULL;
77 
78 	xhci_ring_free(&xep->xep_ring);
79 	cv_destroy(&xep->xep_state_cv);
80 	list_destroy(&xep->xep_transfers);
81 	kmem_free(xep, sizeof (xhci_endpoint_t));
82 }
83 
84 /*
85  * Set up the default control endpoint input context. This needs to be done
86  * before we address the device. Note, we separate out the default endpoint from
87  * others, as we must set this up before we have a pipe handle.
88  */
89 int
90 xhci_endpoint_setup_default_context(xhci_t *xhcip, xhci_device_t *xd,
91     xhci_endpoint_t *xep)
92 {
93 	uint_t mps;
94 	xhci_endpoint_context_t *ectx;
95 	uint64_t deq;
96 
97 	ectx = xd->xd_endin[xep->xep_num];
98 	VERIFY(ectx != NULL);
99 
100 	/*
101 	 * We may or may not have a device descriptor. This should match the
102 	 * same initial sizes that are done in hubd_create_child().
103 	 *
104 	 * Note, since we don't necessarily have an endpoint descriptor yet to
105 	 * base this on, we instead use the device's defaults if available. This
106 	 * is different from normal endpoints for which there's always a
107 	 * specific descriptor.
108 	 */
109 	switch (xd->xd_usbdev->usb_port_status) {
110 	case USBA_LOW_SPEED_DEV:
111 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
112 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
113 		} else {
114 			mps = 8;
115 		}
116 		break;
117 	case USBA_FULL_SPEED_DEV:
118 	case USBA_HIGH_SPEED_DEV:
119 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
120 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
121 		} else {
122 			mps = 64;
123 		}
124 		break;
125 	case USBA_SUPER_SPEED_DEV:
126 	default:
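		/*
		 * For SuperSpeed and newer devices, bMaxPacketSize0 is an
		 * exponent, so the actual maximum packet size is 2^n bytes.
		 */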
127 		if (xd->xd_usbdev->usb_dev_descr != NULL) {
128 			mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
129 			mps = 1 << mps;
130 		} else {
131 			mps = 512;
132 		}
133 		break;
134 	}
135 
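	/*
	 * Program the input context for the default control endpoint: an
	 * error count (CErr) of 3, the control endpoint type, no bursting,
	 * and the maximum packet size determined above. The TR dequeue
	 * pointer is the ring's current tail with the ring's cycle state in
	 * bit zero.
	 */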
136 	bzero(ectx, sizeof (xhci_endpoint_context_t));
137 	ectx->xec_info = LE_32(0);
138 	ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(3) |
139 	    XHCI_EPCTX_SET_EPTYPE(XHCI_EPCTX_TYPE_CTRL) |
140 	    XHCI_EPCTX_SET_MAXB(0) | XHCI_EPCTX_SET_MPS(mps));
141 	deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
142 	    xep->xep_ring.xr_tail;
143 	ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
144 	ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(0) |
145 	    XHCI_EPCTX_AVG_TRB_LEN(XHCI_CONTEXT_DEF_CTRL_ATL));
146 
147 	XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
148 	if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
149 		xhci_error(xhcip, "failed to initialize default device input "
150 		    "context on slot %d and port %d for endpoint %u:  "
151 		    "encountered fatal FM error synchronizing input context "
152 		    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
153 		xhci_fm_runtime_reset(xhcip);
154 		return (EIO);
155 	}
156 
157 	return (0);
158 }
159 
160 /*
161  * Determine if we need to update the maximum packet size of the default
162  * control endpoint. This may happen because we start with the default size
163  * before we have a descriptor and then it may change. For example, with
164  * full-speed devices that may have either an 8 or 64 byte maximum packet size.
165  */
166 int
167 xhci_endpoint_update_default(xhci_t *xhcip, xhci_device_t *xd,
168     xhci_endpoint_t *xep)
169 {
170 	int mps, desc, info, ret;
171 	ASSERT(xd->xd_usbdev != NULL);
172 
173 	mps = XHCI_EPCTX_GET_MPS(xd->xd_endout[xep->xep_num]->xec_info2);
174 	desc = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
175 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
176 		desc = 1 << desc;
177 	}
178 
179 	if (mps == desc)
180 		return (USB_SUCCESS);
181 
182 	/*
183 	 * Update only the context for the default control endpoint.
184 	 */
185 	mutex_enter(&xd->xd_imtx);
186 	info = LE_32(xd->xd_endout[xep->xep_num]->xec_info2);
187 	info &= ~XHCI_EPCTX_SET_MPS(mps);
188 	info |= XHCI_EPCTX_SET_MPS(desc);
189 	xd->xd_endin[xep->xep_num]->xec_info2 = LE_32(info);
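	/*
	 * Drop nothing and flag only DCI 1 (the default control endpoint) so
	 * that the controller re-evaluates just that context.
	 */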
190 	xd->xd_input->xic_drop_flags = LE_32(0);
191 	xd->xd_input->xic_add_flags = LE_32(XHCI_INCTX_MASK_DCI(1));
192 
193 	ret = xhci_command_evaluate_context(xhcip, xd);
194 	mutex_exit(&xd->xd_imtx);
195 
196 	return (ret);
197 }
198 
199 static uint_t
200 xhci_endpoint_epdesc_to_type(usb_ep_descr_t *ep)
201 {
202 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
203 	boolean_t in = (ep->bEndpointAddress & USB_EP_DIR_MASK) ==
204 	    USB_EP_DIR_IN;
205 
206 	switch (type) {
207 	case USB_EP_ATTR_CONTROL:
208 		return (XHCI_EPCTX_TYPE_CTRL);
209 	case USB_EP_ATTR_ISOCH:
210 		if (in == B_TRUE)
211 			return (XHCI_EPCTX_TYPE_ISOCH_IN);
212 		return (XHCI_EPCTX_TYPE_ISOCH_OUT);
213 	case USB_EP_ATTR_BULK:
214 		if (in == B_TRUE)
215 			return (XHCI_EPCTX_TYPE_BULK_IN);
216 		return (XHCI_EPCTX_TYPE_BULK_OUT);
217 	case USB_EP_ATTR_INTR:
218 		if (in == B_TRUE)
219 			return (XHCI_EPCTX_TYPE_INTR_IN);
220 		return (XHCI_EPCTX_TYPE_INTR_OUT);
221 	default:
222 		panic("bad USB attribute type: %d", type);
223 	}
224 
225 	/* LINTED: E_FUNC_NO_RET_VAL */
226 }
227 
228 static uint_t
229 xhci_endpoint_determine_burst(xhci_device_t *xd, xhci_endpoint_t *xep)
230 {
231 	switch (xd->xd_usbdev->usb_port_status) {
232 	case USBA_LOW_SPEED_DEV:
233 	case USBA_FULL_SPEED_DEV:
234 		/*
235 		 * Per xHCI 1.1 / 6.2.3.4, burst is always zero for these
236 		 * devices.
237 		 */
238 		return (0);
239 	case USBA_HIGH_SPEED_DEV:
240 		if (xep->xep_type == USB_EP_ATTR_CONTROL ||
241 		    xep->xep_type == USB_EP_ATTR_BULK)
242 			return (0);
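		/*
		 * For high-speed interrupt and isochronous endpoints, the high
		 * bits of wMaxPacketSize (bits 12:11) encode the number of
		 * additional transactions per microframe, which is the burst
		 * value the controller expects.
		 */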
243 		return ((xep->xep_pipe->p_xep.uex_ep.wMaxPacketSize &
244 		    XHCI_CONTEXT_BURST_MASK) >> XHCI_CONTEXT_BURST_SHIFT);
245 	default:
246 		/*
247 		 * For USB >= 3.0 devices, this comes from the companion
248 		 * descriptor.
249 		 */
250 		ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
251 		return (xep->xep_pipe->p_xep.uex_ep_ss.bMaxBurst);
252 	}
253 }
254 
255 /*
256  * Convert a linear mapping of values that are in the range of 1-255 into a
257  * 2^x value. Because we're supposed to round down for these calculations (see
258  * the note in xHCI 1.1 / 6.2.3.6) we can do this simply with a fls() and
259  * subtracting one.
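 * For example, a bInterval of 24 has its most significant bit at position 5,
 * so we return an exponent of 4 (16), which correctly rounds 24 down.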
260  */
261 static uint_t
262 xhci_endpoint_linear_interval(usb_ep_descr_t *ep)
263 {
264 	int exp;
265 	int ival = ep->bInterval;
266 	if (ival < 1)
267 		ival = 1;
268 	if (ival > 255)
269 		ival = 255;
270 	exp = ddi_fls(ival) - 1;
271 	ASSERT(exp >= 0 && exp <= 7);
272 	return (exp);
273 }
274 
275 /*
276  * Convert the set of values that use a 2^(x-1) value for interval into a 2^x
277  * range. Note the valid input range is 1-16, so we clamp values based on this.
278  * See xHCI 1.1 / 6.2.3.6 for more information.
279  */
280 static uint_t
281 xhci_endpoint_exponential_interval(usb_ep_descr_t *ep)
282 {
283 	int ival;
284 
285 	ival = ep->bInterval;
286 	if (ival < 1)
287 		ival = 1;
288 	if (ival > 16)
289 		ival = 16;
290 	ival--;
291 	ASSERT(ival >= 0 && ival <= 15);
292 	return (ival);
293 }
294 
295 
296 /*
297  * Determining the interval is unfortunately somewhat complicated as there are
298  * many different forms that things can take. This is all summarized in a
299  * somewhat helpful table, number 65, in xHCI 1.1 / 6.2.3.6. But here's
300  * basically the six different cases we have to consider:
301  *
302  * Case 1: Non-High Speed Bulk and Control Endpoints
303  *	Always return 0.
304  *
305  * Case 2: Super Speed and High Speed Isoch and Intr endpoints
306  *	Convert from a 2^(x-1) range to a 2^x range.
307  *
308  * Case 3: Full Speed Isochronous Endpoints
309  *	As case 2, but add 3 as its values are in frames and we need to convert
310  *	to microframes. Adding three to the result is the same as multiplying
311  *	the initial value by 8.
312  *
313  * Case 4: Full speed and Low Speed Interrupt Endpoints
314  *	These have a 1-255 ms range that we need to convert to a 2^x * 128 us
315  *	range. We use the linear conversion and then add 3 to account for the
316  *	multiplying by 8 conversion from frames to microframes.
317  *
318  * Case 5: High Speed Interrupt and Bulk Output
319  *	These are a bit of a weird case. The spec and other implementations make
320  *	it seem that it's similar to case 4, but without the fixed addition as
321  *	its interpreted differently due to NAKs.
322  *	it's interpreted differently due to NAKs.
323  * Case 6: Low Speed Isochronous Endpoints
324  *	These are not actually defined; however, like other implementations we
325  *	treat them like case 4.
326  */
327 static uint_t
328 xhci_endpoint_interval(xhci_device_t *xd, usb_ep_descr_t *ep)
329 {
330 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
331 	int speed = xd->xd_usbdev->usb_port_status;
332 
333 	/*
334 	 * Handle Cases 1 and 5 first.
335 	 */
336 	if (type == USB_EP_ATTR_CONTROL || type == USB_EP_ATTR_BULK) {
337 		if (speed != USBA_HIGH_SPEED_DEV)
338 			return (0);
339 		return (xhci_endpoint_linear_interval(ep));
340 	}
341 
342 	/*
343 	 * Handle Isoch and Intr cases next.
344 	 */
345 	switch (speed) {
346 	case USBA_LOW_SPEED_DEV:
347 		/*
348 		 * Interrupt endpoints at low speed are the same as full speed,
349 		 * hence the fall through.
350 		 */
351 		if (type == USB_EP_ATTR_ISOCH) {
352 			return (xhci_endpoint_exponential_interval(ep) + 3);
353 		}
354 		/* FALLTHROUGH */
355 	case USBA_FULL_SPEED_DEV:
356 		return (xhci_endpoint_linear_interval(ep) + 3);
357 	case USBA_HIGH_SPEED_DEV:
358 	case USBA_SUPER_SPEED_DEV:
359 	default:
360 		/*
361 		 * Case 2. Treat any newer and faster speeds as Super Speed by
362 		 * default as USB 3.1 is effectively treated the same here.
363 		 */
364 		return (xhci_endpoint_exponential_interval(ep));
365 	}
366 }
367 
368 /*
369  * The way to calculate the Maximum ESIT is described in xHCI 1.1 / 4.14.2.
370  * First off, this only applies to Interrupt and Isochronous descriptors. For
371  * Super Speed and newer things, it comes out of a descriptor. Otherwise we
372  * calculate it by doing 'Max Packet Size' * ('Max Burst' + 1).
373  */
374 static uint_t
375 xhci_endpoint_max_esit(xhci_device_t *xd, xhci_endpoint_t *xep, uint_t mps,
376     uint_t burst)
377 {
378 	if (xep->xep_type == USB_EP_ATTR_CONTROL ||
379 	    xep->xep_type == USB_EP_ATTR_BULK) {
380 		return (0);
381 	}
382 
383 	/*
384 	 * Note that this will need to be updated for SuperSpeedPlus ISOC
385 	 * devices to pull from the secondary companion descriptor they use.
386 	 */
387 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
388 		usb_ep_xdescr_t *ep_xdesc = &xep->xep_pipe->p_xep;
389 		ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
390 		return (ep_xdesc->uex_ep_ss.wBytesPerInterval);
391 	}
392 
393 	return (mps * (burst + 1));
394 }
395 
396 /*
397  * We've been asked to calculate and tell the xHCI controller an average TRB
398  * data length. This is talked about in an implementation note in xHCI 1.1 /
399  * 4.14.1.1. So, the reality is that it's hard to actually calculate this, as
400  * we're supposed to take into account all of the TRBs that we use on that ring.
401  *
402  * Surveying other xHCI drivers, they all agree on using the default of 8 for
403  * control endpoints; however, from there things get a little more fluid. For
404  * interrupt and isochronous endpoints, many drivers use the minimum of the max
405  * packet size and the device's pagesize. For bulk endpoints some folks punt and
406  * don't set anything while others try to set it to the pagesize. The xHCI
407  * implementation note suggests a 3k size here initially. For now, we'll just
408  * guess for bulk endpoints, using our page size as the determining factor,
409  * and use the BSD style for the others. Note that Linux only sets this value
410  * for control endpoints.
411  */
412 static uint_t
413 xhci_endpoint_avg_trb(xhci_t *xhcip, usb_ep_descr_t *ep, int mps)
414 {
415 	int type = ep->bmAttributes & USB_EP_ATTR_MASK;
416 
417 	switch (type) {
418 	case USB_EP_ATTR_ISOCH:
419 	case USB_EP_ATTR_INTR:
420 		return (MIN(xhcip->xhci_caps.xcap_pagesize, mps));
421 	case USB_EP_ATTR_CONTROL:
422 		return (XHCI_CONTEXT_DEF_CTRL_ATL);
423 	case USB_EP_ATTR_BULK:
424 		return (xhcip->xhci_caps.xcap_pagesize);
425 	default:
426 		panic("bad USB endpoint type: %d", type);
427 	}
428 
429 	/* LINTED: E_FUNC_NO_RET_VAL */
430 }
431 
432 int
433 xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
434     xhci_endpoint_t *xep)
435 {
436 	uint_t eptype, burst, ival, max_esit, avgtrb, mps, mult, cerr;
437 	xhci_endpoint_context_t *ectx;
438 	uint64_t deq;
439 
440 	/*
441 	 * For a USB >=3.0 device we should always have its companion descriptor
442 	 * provided for us by USBA. If it's not here, complain loudly and fail.
443 	 */
444 	if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV &&
445 	    (xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP) == 0) {
446 		const char *prod, *mfg;
447 
448 		prod = xd->xd_usbdev->usb_product_str;
449 		if (prod == NULL)
450 			prod = "Unknown Device";
451 		mfg = xd->xd_usbdev->usb_mfg_str;
452 		if (mfg == NULL)
453 			mfg = "Unknown Manufacturer";
454 
455 		xhci_log(xhcip, "Encountered USB >=3.0 device without endpoint "
456 		    "companion descriptor. Ensure driver %s is properly using "
457 		    "usb_pipe_xopen() for device %s %s",
458 		    ddi_driver_name(xd->xd_usbdev->usb_dip), prod, mfg);
459 		return (EINVAL);
460 	}
461 
462 	ectx = xd->xd_endin[xep->xep_num];
463 	VERIFY(ectx != NULL);
464 	VERIFY(xd->xd_usbdev->usb_dev_descr != NULL);
465 	VERIFY(xep->xep_pipe != NULL);
466 
467 	mps = xep->xep_pipe->p_ep.wMaxPacketSize & XHCI_CONTEXT_MPS_MASK;
468 	mult = XHCI_CONTEXT_DEF_MULT;
469 	cerr = XHCI_CONTEXT_DEF_CERR;
470 
471 	switch (xep->xep_type) {
472 	case USB_EP_ATTR_ISOCH:
473 		/*
474 		 * When we have support for USB 3.1 SuperSpeedPlus devices,
475 		 * we'll need to make sure that we also check for its secondary
476 		 * endpoint companion descriptor here.
477 		 */
478 		/*
479 		 * Super Speed devices describe the isochronous mult in their
480 		 * endpoint companion descriptor. We checked for its presence
481 		 * above, so assert that it's here and pull the mult out of
482 		 * its attributes.
483 		 */
484 		if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
485 			ASSERT(xep->xep_pipe->p_xep.uex_flags &
486 			    USB_EP_XFLAGS_SS_COMP);
487 			mult = xep->xep_pipe->p_xep.uex_ep_ss.bmAttributes &
488 			    USB_EP_SS_COMP_ISOC_MULT_MASK;
489 		}
490 
491 		mps &= XHCI_CONTEXT_MPS_MASK;
492 		cerr = XHCI_CONTEXT_ISOCH_CERR;
493 		break;
494 	default:
495 		/*
496 		 * No explicit changes needed for CONTROL, INTR, and BULK
497 		 * endpoints. They've been handled already and don't have any
498 		 * differences.
499 		 */
500 		break;
501 	}
502 
503 	eptype = xhci_endpoint_epdesc_to_type(&xep->xep_pipe->p_xep.uex_ep);
504 	burst = xhci_endpoint_determine_burst(xd, xep);
505 	ival = xhci_endpoint_interval(xd, &xep->xep_pipe->p_xep.uex_ep);
506 	max_esit = xhci_endpoint_max_esit(xd, xep, mps, burst);
507 	avgtrb = xhci_endpoint_avg_trb(xhcip, &xep->xep_pipe->p_xep.uex_ep,
508 	    mps);
509 
510 	/*
511 	 * The multi field may be reserved as zero if the LEC feature flag is
512 	 * set. See the description of mult in xHCI 1.1 / 6.2.3.
513 	 */
514 	if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
515 		mult = 0;
516 
517 	bzero(ectx, sizeof (xhci_endpoint_context_t));
518 
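	/*
	 * The first word of the context carries the mult and interval (plus,
	 * with LEC, the high bits of the max ESIT payload). The second word
	 * carries the error count, endpoint type, burst, and max packet size.
	 * The dequeue pointer is the ring's tail ORed with its cycle state,
	 * and the final word holds the max ESIT payload and average TRB
	 * length.
	 */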
519 	ectx->xec_info = LE_32(XHCI_EPCTX_SET_MULT(mult) |
520 	    XHCI_EPCTX_SET_IVAL(ival));
521 	if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
522 		ectx->xec_info |= LE_32(XHCI_EPCTX_SET_MAX_ESIT_HI(max_esit));
523 
524 	ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(cerr) |
525 	    XHCI_EPCTX_SET_EPTYPE(eptype) | XHCI_EPCTX_SET_MAXB(burst) |
526 	    XHCI_EPCTX_SET_MPS(mps));
527 
528 	deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
529 	    xep->xep_ring.xr_tail;
530 	ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
531 
532 	ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(max_esit) |
533 	    XHCI_EPCTX_AVG_TRB_LEN(avgtrb));
534 
535 	XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
536 	if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
537 		xhci_error(xhcip, "failed to initialize device input "
538 		    "context on slot %d and port %d for endpoint %u:  "
539 		    "encountered fatal FM error synchronizing input context "
540 		    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
541 		xhci_fm_runtime_reset(xhcip);
542 		return (EIO);
543 	}
544 
545 	return (0);
546 }
547 
548 /*
549  * Initialize the endpoint and its input context for a given device. This is
550  * called from two different contexts:
551  *
552  *   1. Initializing a device
553  *   2. Opening a USB pipe
554  *
555  * In the second case, we need to worry about locking around the device. We
556  * don't need to worry about the locking in the first case because the USBA
557  * doesn't know about it yet.
558  */
559 int
560 xhci_endpoint_init(xhci_t *xhcip, xhci_device_t *xd,
561     usba_pipe_handle_data_t *ph)
562 {
563 	int ret;
564 	uint_t epid;
565 	xhci_endpoint_t *xep;
566 
567 	if (ph == NULL) {
568 		epid = XHCI_DEFAULT_ENDPOINT;
569 	} else {
570 		ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
571 		epid = xhci_endpoint_pipe_to_epid(ph);
572 	}
573 	VERIFY(xd->xd_endpoints[epid] == NULL);
574 
575 	xep = kmem_zalloc(sizeof (xhci_endpoint_t), KM_SLEEP);
576 	list_create(&xep->xep_transfers, sizeof (xhci_transfer_t),
577 	    offsetof(xhci_transfer_t, xt_link));
578 	cv_init(&xep->xep_state_cv, NULL, CV_DRIVER, NULL);
579 	xep->xep_xd = xd;
580 	xep->xep_xhci = xhcip;
581 	xep->xep_num = epid;
582 	if (ph == NULL) {
583 		xep->xep_pipe = NULL;
584 		xep->xep_type = USB_EP_ATTR_CONTROL;
585 	} else {
586 		xep->xep_pipe = ph;
587 		xep->xep_type = ph->p_ep.bmAttributes & USB_EP_ATTR_MASK;
588 	}
589 
590 	if ((ret = xhci_ring_alloc(xhcip, &xep->xep_ring)) != 0) {
591 		cv_destroy(&xep->xep_state_cv);
592 		list_destroy(&xep->xep_transfers);
593 		kmem_free(xep, sizeof (xhci_endpoint_t));
594 		return (ret);
595 	}
596 
597 	if ((ret = xhci_ring_reset(xhcip, &xep->xep_ring)) != 0) {
598 		xhci_ring_free(&xep->xep_ring);
599 		cv_destroy(&xep->xep_state_cv);
600 		list_destroy(&xep->xep_transfers);
601 		kmem_free(xep, sizeof (xhci_endpoint_t));
602 		return (ret);
603 	}
604 
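	/*
	 * Publish the endpoint before setting up its context so that
	 * xhci_endpoint_fini() can find it and tear it down if context setup
	 * fails.
	 */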
605 	xd->xd_endpoints[epid] = xep;
606 	if (ph == NULL) {
607 		ret = xhci_endpoint_setup_default_context(xhcip, xd, xep);
608 	} else {
609 		ret = xhci_endpoint_setup_context(xhcip, xd, xep);
610 	}
611 	if (ret != 0) {
612 		xhci_endpoint_fini(xd, xep->xep_num);
613 		return (ret);
614 	}
615 
616 	return (0);
617 }
618 
619 /*
620  * Attempt to quiesce an endpoint. Depending on the state of the endpoint, we
621  * may need to simply stop it. Alternatively, we may need to explicitly reset
622  * the endpoint. Once done, this endpoint should be stopped and can be
623  * manipulated.
624  */
625 int
626 xhci_endpoint_quiesce(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
627 {
628 	int ret = USB_SUCCESS;
629 	xhci_endpoint_context_t *epctx = xd->xd_endout[xep->xep_num];
630 
631 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
632 	ASSERT(xep->xep_state & XHCI_ENDPOINT_QUIESCE);
633 
634 	/*
635 	 * First attempt to stop the endpoint, unless it's halted. We don't
636 	 * really care what state it is in. Note that because other activity
637 	 * could be going on, the state may change on us; however, if it's
638 	 * running, it will always transition to a stopped state and none of the
639 	 * other valid states will allow transitions without us taking an active
640 	 * action.
641 	 */
642 	if (!(xep->xep_state & XHCI_ENDPOINT_HALTED)) {
643 		mutex_exit(&xhcip->xhci_lock);
644 		ret = xhci_command_stop_endpoint(xhcip, xd, xep);
645 		mutex_enter(&xhcip->xhci_lock);
646 
647 		if (ret == USB_INVALID_CONTEXT) {
648 			XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);
649 		}
650 	}
651 
652 	/*
653 	 * Now, if we had the HALTED flag set or we failed to stop it due to a
654 	 * context error and we're in the HALTED state now, reset the endpoint.
655 	 */
656 	if ((xep->xep_state & XHCI_ENDPOINT_HALTED) ||
657 	    (ret == USB_INVALID_CONTEXT &&
658 	    XHCI_EPCTX_STATE(LE_32(epctx->xec_info)) == XHCI_EP_HALTED)) {
659 		mutex_exit(&xhcip->xhci_lock);
660 		ret = xhci_command_reset_endpoint(xhcip, xd, xep);
661 		mutex_enter(&xhcip->xhci_lock);
662 	}
663 
664 	/*
665 	 * Ideally, one of the two commands should have worked; however, we
666 	 * could have had a context error due to being in the wrong state.
667 	 * Verify that we're either in the ERROR or STOPPED state and treat both
668 	 * as success. All callers are assumed to be doing this so they can
669 	 * change the dequeue pointer.
670 	 */
671 	if (ret != USB_SUCCESS && ret != USB_INVALID_CONTEXT) {
672 		return (ret);
673 	}
674 
675 	if (ret == USB_INVALID_CONTEXT) {
676 		XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);
677 
678 		switch (XHCI_EPCTX_STATE(LE_32(epctx->xec_info))) {
679 		case XHCI_EP_STOPPED:
680 		case XHCI_EP_ERROR:
681 			/*
682 			 * This is where we wanted to go, so let's just take it.
683 			 */
684 			ret = USB_SUCCESS;
685 			break;
686 		case XHCI_EP_DISABLED:
687 		case XHCI_EP_RUNNING:
688 		case XHCI_EP_HALTED:
689 		default:
690 			/*
691 			 * If we're in any of these, something really weird has
692 			 * happened and it's not worth trying to recover at this
693 			 * point.
694 			 */
695 			xhci_error(xhcip, "!asked to stop endpoint %u on slot "
696 			    "%d and port %d: ended up in unexpected state %d",
697 			    xep->xep_num, xd->xd_slot, xd->xd_port,
698 			    XHCI_EPCTX_STATE(LE_32(epctx->xec_info)));
699 			return (ret);
700 		}
701 	}
702 
703 	/*
704 	 * Now that we're successful, we can clear any possible halted state
705 	 * tracking that we might have had.
706 	 */
707 	if (ret == USB_SUCCESS) {
708 		xep->xep_state &= ~XHCI_ENDPOINT_HALTED;
709 	}
710 
711 	return (ret);
712 }
713 
714 int
715 xhci_endpoint_ring(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
716 {
717 	/*
718 	 * The doorbell IDs are offset by one from the endpoint numbers that we
719 	 * keep.
720 	 */
721 	xhci_put32(xhcip, XHCI_R_DOOR, XHCI_DOORBELL(xd->xd_slot),
722 	    xep->xep_num + 1);
723 	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
724 		xhci_error(xhcip, "failed to ring doorbell for slot %d and "
725 		    "endpoint %u: encountered fatal FM register access error",
726 		    xd->xd_slot, xep->xep_num);
727 		xhci_fm_runtime_reset(xhcip);
728 		return (USB_HC_HARDWARE_ERROR);
729 	}
730 
731 	DTRACE_PROBE3(xhci__doorbell__ring, xhci_t *, xhcip, uint32_t,
732 	    XHCI_DOORBELL(xd->xd_slot), uint32_t, xep->xep_num + 1);
733 
734 	return (USB_SUCCESS);
735 }
736 
737 static void
738 xhci_endpoint_tick(void *arg)
739 {
740 	int ret;
741 	xhci_transfer_t *xt;
742 	xhci_endpoint_t *xep = arg;
743 	xhci_device_t *xd = xep->xep_xd;
744 	xhci_t *xhcip = xep->xep_xhci;
745 
746 	mutex_enter(&xhcip->xhci_lock);
747 
748 	/*
749 	 * If we have the teardown flag set, then this is going away; don't try
750 	 * to do anything. Also, if somehow a periodic endpoint has something
751 	 * scheduled, just quit now and don't bother.
752 	 */
753 	if (xep->xep_state & (XHCI_ENDPOINT_TEARDOWN |
754 	    XHCI_ENDPOINT_PERIODIC)) {
755 		xep->xep_timeout = 0;
756 		mutex_exit(&xhcip->xhci_lock);
757 		return;
758 	}
759 
760 	/*
761 	 * If something else has already kicked off, something potentially
762 	 * dangerous, just don't bother waiting for it and reschedule.
763 	 */
764 	if (xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) {
765 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
766 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
767 		mutex_exit(&xhcip->xhci_lock);
768 		return;
769 	}
770 
771 	/*
772 	 * At this point, we have an endpoint that we need to consider. See if
773 	 * there are any transfers on it, if none, we're done. If so, check if
774 	 * we have exceeded the timeout. If we have, then we have some work to
775 	 * do.
776 	 */
777 	xt = list_head(&xep->xep_transfers);
778 	if (xt == NULL) {
779 		xep->xep_timeout = 0;
780 		mutex_exit(&xhcip->xhci_lock);
781 		return;
782 	}
783 
784 	if (xt->xt_timeout > 0) {
785 		xt->xt_timeout--;
786 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
787 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
788 		mutex_exit(&xhcip->xhci_lock);
789 		return;
790 	}
791 
792 	/*
793 	 * This item has timed out. We need to stop the ring and take action.
794 	 */
795 	xep->xep_state |= XHCI_ENDPOINT_TIMED_OUT | XHCI_ENDPOINT_QUIESCE;
796 	ret = xhci_endpoint_quiesce(xhcip, xd, xep);
797 	if (ret != USB_SUCCESS) {
798 		/*
799 		 * If we fail to quiesce during the timeout, then remove the
800 		 * state flags and hope that we'll be able to do so the next
801 		 * time around, or that a subsequent reset or polling stop will
802 		 * be able to deal with it.
803 		 */
804 		xep->xep_state &= ~(XHCI_ENDPOINT_QUIESCE |
805 		    XHCI_ENDPOINT_TIMED_OUT);
806 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
807 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
808 		mutex_exit(&xhcip->xhci_lock);
809 		cv_broadcast(&xep->xep_state_cv);
810 		xhci_error(xhcip, "failed to successfully quiesce timed out "
811 		    "endpoint %u of device on slot %d and port %d: device "
812 		    "remains timed out", xep->xep_num, xd->xd_slot,
813 		    xd->xd_port);
814 		return;
815 	}
816 
817 	xhci_ring_skip_transfer(&xep->xep_ring, xt);
818 	(void) list_remove_head(&xep->xep_transfers);
819 	mutex_exit(&xhcip->xhci_lock);
820 
821 	/*
822 	 * At this point, we try to set the ring's dequeue pointer. If this
823 	 * fails, we're left in an awkward state. We've already adjusted the
824 	 * ring and removed the transfer. All we can really do is return the
825 	 * transfer and hope that a later attempt to reset the ring will
826 	 * succeed. Based on everything we've done to set things up, it'd be
827 	 * odd if this did fail.
828 	 */
829 	ret = xhci_command_set_tr_dequeue(xhcip, xd, xep);
830 	mutex_enter(&xhcip->xhci_lock);
831 	xep->xep_state &= ~XHCI_ENDPOINT_QUIESCE;
832 	if (ret == USB_SUCCESS) {
833 		xep->xep_state &= ~XHCI_ENDPOINT_TIMED_OUT;
834 	} else {
835 		xhci_error(xhcip, "failed to successfully set transfer ring "
836 		    "dequeue pointer of timed out endpoint %u of "
837 		    "device on slot %d and port %d: device remains timed out, "
838 		    "please use cfgadm to recover", xep->xep_num, xd->xd_slot,
839 		    xd->xd_port);
840 	}
841 	xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
842 	    drv_usectohz(XHCI_TICK_TIMEOUT_US));
843 	mutex_exit(&xhcip->xhci_lock);
844 	cv_broadcast(&xep->xep_state_cv);
845 
846 	/*
847 	 * Because we never time out periodic related activity, we will always
848 	 * have the request on the transfer.
849 	 */
850 	ASSERT(xt->xt_usba_req != NULL);
851 	usba_hcdi_cb(xep->xep_pipe, xt->xt_usba_req, USB_CR_TIMEOUT);
852 	xhci_transfer_free(xhcip, xt);
853 }
854 
855 /*
856  * We've been asked to schedule a series of frames onto the specified endpoint.
857  * We need to make sure that there is enough room, at which point we can queue
858  * it and then ring the doorbell. Note that we queue in reverse order so the
859  * controller won't see a correct cycle bit on a partially written transfer.
860  */
861 int
862 xhci_endpoint_schedule(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
863     xhci_transfer_t *xt, boolean_t ring)
864 {
865 	int i;
866 	xhci_ring_t *rp = &xep->xep_ring;
867 
868 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
869 	ASSERT(xt->xt_ntrbs > 0);
870 	ASSERT(xt->xt_trbs != NULL);
871 
872 	if ((xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) != 0)
873 		return (USB_FAILURE);
874 
875 	if (xhci_ring_trb_space(rp, xt->xt_ntrbs) == B_FALSE)
876 		return (USB_NO_RESOURCES);
877 
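	/*
	 * Fill in the trailing TRBs first; the leading TRB is written without
	 * a valid cycle bit and only becomes visible to the controller once
	 * xhci_ring_trb_produce() publishes it, so the controller can't start
	 * on a partially written transfer.
	 */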
878 	for (i = xt->xt_ntrbs - 1; i > 0; i--) {
879 		xhci_ring_trb_fill(rp, i, &xt->xt_trbs[i], &xt->xt_trbs_pa[i],
880 		    B_TRUE);
881 	}
882 	xhci_ring_trb_fill(rp, 0U, &xt->xt_trbs[0], &xt->xt_trbs_pa[0],
883 	    B_FALSE);
884 
885 	XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
886 	xhci_ring_trb_produce(rp, xt->xt_ntrbs);
887 	list_insert_tail(&xep->xep_transfers, xt);
888 
889 	XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
890 	if (xhci_check_dma_handle(xhcip, &rp->xr_dma) != DDI_FM_OK) {
891 		xhci_error(xhcip, "failed to write out TRB for device on slot "
892 		    "%d, port %d, and endpoint %u: encountered fatal FM error "
893 		    "synchronizing ring DMA memory", xd->xd_slot, xd->xd_port,
894 		    xep->xep_num);
895 		xhci_fm_runtime_reset(xhcip);
896 		return (USB_HC_HARDWARE_ERROR);
897 	}
898 
899 	if (xep->xep_timeout == 0 &&
900 	    !(xep->xep_state & XHCI_ENDPOINT_PERIODIC)) {
901 		xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
902 		    drv_usectohz(XHCI_TICK_TIMEOUT_US));
903 	}
904 
905 	xt->xt_sched_time = gethrtime();
906 
907 	if (ring == B_FALSE)
908 		return (USB_SUCCESS);
909 
910 	return (xhci_endpoint_ring(xhcip, xd, xep));
911 }
912 
913 xhci_transfer_t *
914 xhci_endpoint_determine_transfer(xhci_t *xhcip, xhci_endpoint_t *xep,
915     xhci_trb_t *trb, uint_t *offp)
916 {
917 	uint_t i;
918 	uint64_t addr;
919 	xhci_transfer_t *xt;
920 
921 	ASSERT(xhcip != NULL);
922 	ASSERT(offp != NULL);
923 	ASSERT(xep != NULL);
924 	ASSERT(trb != NULL);
925 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
926 
927 	if ((xt = list_head(&xep->xep_transfers)) == NULL)
928 		return (NULL);
929 
930 	addr = LE_64(trb->trb_addr);
931 
932 	/*
933 	 * Check if this is the simple case of an event data TRB. If it is, then
934 	 * all we need to do is check that its data pointer matches the address
935 	 * of the transfer.
936 	 */
937 	if (XHCI_TRB_GET_ED(LE_32(trb->trb_flags)) != 0) {
938 		if (LE_64(trb->trb_addr) != (uintptr_t)xt)
939 			return (NULL);
940 
941 		*offp = xt->xt_ntrbs - 1;
942 		return (xt);
943 	}
944 
945 	/*
946 	 * This represents an error that has occurred. We need to check two
947 	 * different things. The first is that the TRB PA maps to one of the
948 	 * TRBs in the transfer. Secondly, we need to make sure that it makes
949 	 * sense in the context of the ring and our notion of where the tail is.
950 	 */
951 	for (i = 0; i < xt->xt_ntrbs; i++) {
952 		if (xt->xt_trbs_pa[i] == addr)
953 			break;
954 	}
955 
956 	if (i == xt->xt_ntrbs)
957 		return (NULL);
958 
959 	if (xhci_ring_trb_valid_range(&xep->xep_ring, LE_64(trb->trb_addr),
960 	    xt->xt_ntrbs) == -1)
961 		return (NULL);
962 
963 	*offp = i;
964 	return (xt);
965 }
966 
967 static void
968 xhci_endpoint_reschedule_periodic(xhci_t *xhcip, xhci_device_t *xd,
969     xhci_endpoint_t *xep, xhci_transfer_t *xt)
970 {
971 	int ret;
972 	xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
973 	xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
974 
975 	ASSERT3U(xpp->xpp_tsize, >, 0);
976 
977 	xt->xt_short = 0;
978 	xt->xt_cr = USB_CR_OK;
979 
980 	mutex_enter(&xhcip->xhci_lock);
981 
982 	/*
983 	 * If we don't have an active poll, then we shouldn't bother trying to
984 	 * reschedule it. This means that we're trying to stop or we ran out of
985 	 * memory.
986 	 */
987 	if (xpp->xpp_poll_state != XHCI_PERIODIC_POLL_ACTIVE) {
988 		mutex_exit(&xhcip->xhci_lock);
989 		return;
990 	}
991 
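	/*
	 * Reset each isochronous packet descriptor to again assume a full,
	 * successful transfer; the completion path subtracts any short
	 * transfer residue from the actual length as events arrive.
	 */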
992 	if (xep->xep_type == USB_EP_ATTR_ISOCH) {
993 		int i;
994 		for (i = 0; i < xt->xt_ntrbs; i++) {
995 			xt->xt_isoc[i].isoc_pkt_actual_length =
996 			    xt->xt_isoc[i].isoc_pkt_length;
997 			xt->xt_isoc[i].isoc_pkt_status = USB_CR_OK;
998 		}
999 	}
1000 
1001 	/*
1002 	 * In general, there should always be space on the ring for this. The
1003 	 * only reason that rescheduling an existing transfer for a periodic
1004 	 * endpoint wouldn't work is because we have a hardware error, at which
1005 	 * point we're going to be going down hard anyways. We log and bump a
1006 	 * stat here to make this case discoverable in case our assumptions are
1007 	 * wrong.
1008 	 */
1009 	ret = xhci_endpoint_schedule(xhcip, xd, xep, xt, B_TRUE);
1010 	if (ret != 0) {
1011 		xhci_log(xhcip, "!failed to reschedule periodic endpoint %u "
1012 		    "(type %u) on slot %d: %d\n", xep->xep_num, xep->xep_type,
1013 		    xd->xd_slot, ret);
1014 	}
1015 	mutex_exit(&xhcip->xhci_lock);
1016 }
1017 
1018 /*
1019  * We're dealing with a message on a control endpoint. This may be a default
1020  * endpoint or otherwise. These usually come in groups of 3+ TRBs where you have
1021  * a setup stage, data stage (which may have one or more other TRBs) and then a
1022  * final status stage.
1023  *
1024  * We generally set ourselves up such that we get interrupted and notified only
1025  * on the status stage and for short transfers in the data stage. If we
1026  * encounter a short transfer in the data stage, then we need to go through and
1027  * check whether or not the short transfer is allowed. If it is, then there's
1028  * nothing to do. We'll update everything and call back the framework once we
1029  * get the status stage.
1030  */
1031 static boolean_t
1032 xhci_endpoint_control_callback(xhci_t *xhcip, xhci_device_t *xd,
1033     xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1034 {
1035 	int code;
1036 	usb_ctrl_req_t *ucrp;
1037 	xhci_transfer_t *rem;
1038 
1039 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1040 
1041 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1042 	ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1043 
1044 	/*
1045 	 * Now that we know what this TRB is for, determine whether it was for a
1046 	 * data/normal stage or for the status stage. We cheat by looking at the
1047 	 * last entry. If it's a data stage, then we must have gotten a short
1048 	 * transfer. We record this fact and whether we should consider the
1049 	 * transfer fatal for the subsequent status stage.
1050 	 */
1051 	if (off != xt->xt_ntrbs - 1) {
1052 		uint_t remain;
1053 		usb_ctrl_req_t *ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1054 
1055 		/*
1056 		 * This is a data stage TRB. The only reason we should have
1057 		 * gotten something for this is because it was short. Make sure
1058 		 * it's okay before we continue.
1059 		 */
1060 		VERIFY3S(code, ==, XHCI_CODE_SHORT_XFER);
1061 		if (!(ucrp->ctrl_attributes & USB_ATTRS_SHORT_XFER_OK)) {
1062 			xt->xt_cr = USB_CR_DATA_UNDERRUN;
1063 			mutex_exit(&xhcip->xhci_lock);
1064 			return (B_TRUE);
1065 		}
1066 
1067 		/*
1068 		 * The value in the resulting trb is how much data remained to
1069 		 * be transferred. Normalize that against the original buffer
1070 		 * size.
1071 		 */
1072 		remain = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1073 		xt->xt_short = xt->xt_buffer.xdb_len - remain;
1074 		mutex_exit(&xhcip->xhci_lock);
1075 		return (B_TRUE);
1076 	}
1077 
1078 	/*
1079 	 * Okay, this is a status stage trb that's in good health. We should
1080 	 * finally go ahead, sync data and try and finally do the callback. If
1081 	 * we have short data, then xt->xt_short will be non-zero.
1082 	 */
1083 	if (xt->xt_data_tohost == B_TRUE) {
1084 		size_t len;
1085 		if (xt->xt_short != 0) {
1086 			len = xt->xt_short;
1087 		} else {
1088 			len = xt->xt_buffer.xdb_len;
1089 		}
1090 
1091 		if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1092 		    DDI_FM_OK) {
1093 			xhci_error(xhcip, "failed to process control transfer "
1094 			    "callback for endpoint %u of device on slot %d and "
1095 			    "port %d: encountered fatal FM error synchronizing "
1096 			    "DMA memory, resetting device", xep->xep_num,
1097 			    xd->xd_slot, xd->xd_port);
1098 			xhci_fm_runtime_reset(xhcip);
1099 			mutex_exit(&xhcip->xhci_lock);
1100 			return (B_FALSE);
1101 		}
1102 
1103 		xhci_transfer_copy(xt, ucrp->ctrl_data->b_rptr, len, B_TRUE);
1104 		ucrp->ctrl_data->b_wptr += len;
1105 	}
1106 
1107 	/*
1108 	 * Now we're done. We can go ahead and bump the ring. Free the transfer
1109 	 * outside of the lock and call back into the framework.
1110 	 */
1111 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1112 	rem = list_remove_head(&xep->xep_transfers);
1113 	VERIFY3P(rem, ==, xt);
1114 	mutex_exit(&xhcip->xhci_lock);
1115 
1116 	usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)ucrp, xt->xt_cr);
1117 	xhci_transfer_free(xhcip, xt);
1118 
1119 	return (B_TRUE);
1120 }
1121 
1122 /*
1123  * Cons up a new usb request for the periodic data transfer if we can. If there
1124  * isn't one available, change the return code to NO_RESOURCES and stop polling
1125  * on this endpoint, thus using and consuming the original request.
1126  */
1127 static usb_opaque_t
1128 xhci_endpoint_dup_periodic(xhci_endpoint_t *xep, xhci_transfer_t *xt,
1129     usb_cr_t *cr)
1130 {
1131 	usb_opaque_t urp;
1132 
1133 	xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
1134 	xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
1135 
1136 	/*
1137 	 * In general, transfers shouldn't have a usb request. However, oneshot
1138 	 * Interrupt IN ones will, so we use this as a way to shortcut out of
1139 	 * here.
1140 	 */
1141 	if (xt->xt_usba_req != NULL)
1142 		return (xt->xt_usba_req);
1143 
1144 	if (xep->xep_type == USB_EP_ATTR_INTR) {
1145 		urp = (usb_opaque_t)usba_hcdi_dup_intr_req(xep->xep_pipe->p_dip,
1146 		    (usb_intr_req_t *)xpp->xpp_usb_req, xpp->xpp_tsize, 0);
1147 	} else {
1148 		urp = (usb_opaque_t)usba_hcdi_dup_isoc_req(xep->xep_pipe->p_dip,
1149 		    (usb_isoc_req_t *)xpp->xpp_usb_req, 0);
1150 	}
1151 	if (urp == NULL) {
1152 		xpp->xpp_poll_state = XHCI_PERIODIC_POLL_NOMEM;
1153 		urp = xpp->xpp_usb_req;
1154 		xpp->xpp_usb_req = NULL;
1155 		*cr = USB_CR_NO_RESOURCES;
1156 	} else {
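		/*
		 * The duplicated request is now outstanding on the pipe, so
		 * account for it in the pipe's request count.
		 */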
1157 		mutex_enter(&xep->xep_pipe->p_mutex);
1158 		xep->xep_pipe->p_req_count++;
1159 		mutex_exit(&xep->xep_pipe->p_mutex);
1160 	}
1161 
1162 	return (urp);
1163 }
1164 
1165 xhci_device_t *
1166 xhci_device_lookup_by_slot(xhci_t *xhcip, int slot)
1167 {
1168 	xhci_device_t *xd;
1169 
1170 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1171 
1172 	for (xd = list_head(&xhcip->xhci_usba.xa_devices); xd != NULL;
1173 	    xd = list_next(&xhcip->xhci_usba.xa_devices, xd)) {
1174 		if (xd->xd_slot == slot)
1175 			return (xd);
1176 	}
1177 
1178 	return (NULL);
1179 }
1180 
1181 /*
1182  * Handle things which consist solely of normal transfers, in other words, bulk
1183  * and interrupt transfers.
1184  */
1185 static boolean_t
1186 xhci_endpoint_norm_callback(xhci_t *xhcip, xhci_device_t *xd,
1187     xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1188 {
1189 	int code;
1190 	usb_cr_t cr;
1191 	xhci_transfer_t *rem;
1192 	int attrs;
1193 	mblk_t *mp;
1194 	boolean_t periodic = B_FALSE;
1195 	usb_opaque_t urp;
1196 
1197 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1198 	ASSERT(xep->xep_type == USB_EP_ATTR_BULK ||
1199 	    xep->xep_type == USB_EP_ATTR_INTR);
1200 
1201 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1202 
1203 	if (code == XHCI_CODE_SHORT_XFER) {
1204 		uint_t residue;
1205 		residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1206 
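		/*
		 * Bulk transfers complete through an event data TRB, whose
		 * transfer length field reports the bytes that were
		 * transferred; interrupt transfers complete on a normal TRB,
		 * where the field is instead the residue. Either way,
		 * xt_short ends up holding the number of bytes actually
		 * transferred.
		 */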
1207 		if (xep->xep_type == USB_EP_ATTR_BULK) {
1208 			VERIFY3U(XHCI_TRB_GET_ED(LE_32(trb->trb_flags)), !=, 0);
1209 			xt->xt_short = residue;
1210 		} else {
1211 			xt->xt_short = xt->xt_buffer.xdb_len - residue;
1212 		}
1213 	}
1214 
1215 	/*
1216 	 * If we have an interrupt from something that's not the last entry,
1217 	 * that must mean we had a short transfer, so there's nothing more for
1218 	 * us to do at the moment. We won't call back until everything's
1219 	 * finished for the general transfer.
1220 	 */
1221 	if (off < xt->xt_ntrbs - 1) {
1222 		mutex_exit(&xhcip->xhci_lock);
1223 		return (B_TRUE);
1224 	}
1225 
1226 	urp = xt->xt_usba_req;
1227 	if (xep->xep_type == USB_EP_ATTR_BULK) {
1228 		usb_bulk_req_t *ubrp = (usb_bulk_req_t *)xt->xt_usba_req;
1229 		attrs = ubrp->bulk_attributes;
1230 		mp = ubrp->bulk_data;
1231 	} else {
1232 		usb_intr_req_t *uirp = (usb_intr_req_t *)xt->xt_usba_req;
1233 
1234 		if (uirp == NULL) {
1235 			periodic = B_TRUE;
1236 			urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
1237 			uirp = (usb_intr_req_t *)urp;
1238 
1239 			/*
1240 			 * If we weren't able to duplicate the interrupt, then
1241 			 * we can't put any data in it.
1242 			 */
1243 			if (cr == USB_CR_NO_RESOURCES)
1244 				goto out;
1245 		}
1246 
1247 		attrs = uirp->intr_attributes;
1248 		mp = uirp->intr_data;
1249 	}
1250 
1251 	if (xt->xt_data_tohost == B_TRUE) {
1252 		size_t len;
1253 		if (xt->xt_short != 0) {
1254 			if (!(attrs & USB_ATTRS_SHORT_XFER_OK)) {
1255 				cr = USB_CR_DATA_UNDERRUN;
1256 				goto out;
1257 			}
1258 			len = xt->xt_short;
1259 		} else {
1260 			len = xt->xt_buffer.xdb_len;
1261 		}
1262 
1263 		if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1264 		    DDI_FM_OK) {
1265 			xhci_error(xhcip, "failed to process normal transfer "
1266 			    "callback for endpoint %u of device on slot %d and "
1267 			    "port %d: encountered fatal FM error synchronizing "
1268 			    "DMA memory, resetting device", xep->xep_num,
1269 			    xd->xd_slot, xd->xd_port);
1270 			xhci_fm_runtime_reset(xhcip);
1271 			mutex_exit(&xhcip->xhci_lock);
1272 			return (B_FALSE);
1273 		}
1274 
1275 		xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1276 		mp->b_wptr += len;
1277 	}
1278 	cr = USB_CR_OK;
1279 
1280 out:
1281 	/*
1282 	 * Don't use the address from the TRB here. When we're dealing with
1283 	 * event data, that will be entirely wrong.
1284 	 */
1285 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, xt->xt_trbs_pa[off]));
1286 	rem = list_remove_head(&xep->xep_transfers);
1287 	VERIFY3P(rem, ==, xt);
1288 	mutex_exit(&xhcip->xhci_lock);
1289 
1290 	usba_hcdi_cb(xep->xep_pipe, urp, cr);
1291 	if (periodic == B_TRUE) {
1292 		xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1293 	} else {
1294 		xhci_transfer_free(xhcip, xt);
1295 	}
1296 
1297 	return (B_TRUE);
1298 }
1299 
1300 static boolean_t
1301 xhci_endpoint_isoch_callback(xhci_t *xhcip, xhci_device_t *xd,
1302     xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1303 {
1304 	int code;
1305 	usb_cr_t cr;
1306 	xhci_transfer_t *rem;
1307 	usb_isoc_pkt_descr_t *desc;
1308 	usb_isoc_req_t *usrp;
1309 
1310 	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1311 	ASSERT3S(xep->xep_type, ==, USB_EP_ATTR_ISOCH);
1312 
1313 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1314 
1315 	/*
1316 	 * The descriptors that we copy the data from are set up to assume that
1317 	 * everything was OK and we transferred all the requested data.
1318 	 */
1319 	desc = &xt->xt_isoc[off];
1320 	if (code == XHCI_CODE_SHORT_XFER) {
1321 		int residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1322 		desc->isoc_pkt_actual_length -= residue;
1323 	}
1324 
1325 	/*
1326 	 * We don't perform the callback until the very last TRB is returned
1327 	 * here. If we have a TRB report on something else, that means that we
1328 	 * had a short transfer.
1329 	 */
1330 	if (off < xt->xt_ntrbs - 1) {
1331 		mutex_exit(&xhcip->xhci_lock);
1332 		return (B_TRUE);
1333 	}
1334 
1335 	VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1336 	rem = list_remove_head(&xep->xep_transfers);
1337 	VERIFY3P(rem, ==, xt);
1338 	mutex_exit(&xhcip->xhci_lock);
1339 
1340 	cr = USB_CR_OK;
1341 
1342 	if (xt->xt_data_tohost == B_TRUE) {
1343 		usb_opaque_t urp;
1344 		urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
1345 		usrp = (usb_isoc_req_t *)urp;
1346 
1347 		if (cr == USB_CR_OK) {
1348 			mblk_t *mp;
1349 			size_t len;
1350 			if (xhci_transfer_sync(xhcip, xt,
1351 			    DDI_DMA_SYNC_FORCPU) != DDI_FM_OK) {
1352 				xhci_error(xhcip, "failed to process "
1353 				    "isochronous transfer callback for "
1354 				    "endpoint %u of device on slot %d and port "
1355 				    "%d: encountered fatal FM error "
1356 				    "synchronizing DMA memory, resetting "
1357 				    "device",
1358 				    xep->xep_num, xd->xd_slot, xd->xd_port);
1359 				xhci_fm_runtime_reset(xhcip);
1360 				mutex_exit(&xhcip->xhci_lock);
1361 				return (B_FALSE);
1362 			}
1363 
1364 			mp = usrp->isoc_data;
1365 			len = xt->xt_buffer.xdb_len;
1366 			xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1367 			mp->b_wptr += len;
1368 		}
1369 	} else {
1370 		usrp = (usb_isoc_req_t *)xt->xt_usba_req;
1371 	}
1372 
1373 	if (cr == USB_CR_OK) {
1374 		bcopy(xt->xt_isoc, usrp->isoc_pkt_descr,
1375 		    sizeof (usb_isoc_pkt_descr_t) * usrp->isoc_pkts_count);
1376 	}
1377 
1378 	usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)usrp, cr);
1379 	if (xt->xt_data_tohost == B_TRUE) {
1380 		xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1381 	} else {
1382 		xhci_transfer_free(xhcip, xt);
1383 	}
1384 
1385 	return (B_TRUE);
1386 }
1387 
1388 boolean_t
1389 xhci_endpoint_transfer_callback(xhci_t *xhcip, xhci_trb_t *trb)
1390 {
1391 	boolean_t ret;
1392 	int slot, endpoint, code;
1393 	uint_t off;
1394 	xhci_device_t *xd;
1395 	xhci_endpoint_t *xep;
1396 	xhci_transfer_t *xt;
1397 	boolean_t transfer_done;
1398 
1399 	endpoint = XHCI_TRB_GET_EP(LE_32(trb->trb_flags));
1400 	slot = XHCI_TRB_GET_SLOT(LE_32(trb->trb_flags));
1401 	code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1402 
1403 	switch (code) {
1404 	case XHCI_CODE_RING_UNDERRUN:
1405 	case XHCI_CODE_RING_OVERRUN:
1406 		/*
1407 		 * If we have an ISOC overrun or underrun then there will be no
1408 		 * valid data pointer in the TRB associated with it. Just drive
1409 		 * on.
1410 		 */
1411 		return (B_TRUE);
1412 	case XHCI_CODE_UNDEFINED:
1413 		xhci_error(xhcip, "received transfer trb with undefined fatal "
1414 		    "error: resetting device");
1415 		xhci_fm_runtime_reset(xhcip);
1416 		return (B_FALSE);
1417 	case XHCI_CODE_XFER_STOPPED:
1418 	case XHCI_CODE_XFER_STOPINV:
1419 	case XHCI_CODE_XFER_STOPSHORT:
1420 		/*
1421 		 * This causes us to transition the endpoint to a stopped state.
1422 		 * Each of these indicate a different possible state that we
1423 		 * have to deal with. Effectively we're going to drop it and
1424 		 * leave it up to the consumers to figure out what to do. For
1425 		 * the moment, that's generally okay because stops are only used
1426 		 * in cases where we're cleaning up outstanding reqs, etc.
1427 		 *
1428 		 * We do this before we check for the corresponding transfer as
1429 		 * this will generally be generated by a command issued that's
1430 		 * stopping the ring.
1431 		 */
1432 		return (B_TRUE);
1433 	default:
1434 		break;
1435 	}
1436 
1437 	mutex_enter(&xhcip->xhci_lock);
1438 	xd = xhci_device_lookup_by_slot(xhcip, slot);
1439 	if (xd == NULL) {
1440 		xhci_error(xhcip, "received transfer trb with code %d for "
1441 		    "unknown slot %d and endpoint %d: resetting device", code,
1442 		    slot, endpoint);
1443 		mutex_exit(&xhcip->xhci_lock);
1444 		xhci_fm_runtime_reset(xhcip);
1445 		return (B_FALSE);
1446 	}
1447 
1448 	/*
1449 	 * Endpoint IDs are indexed based on their Device Context Index, which
1450 	 * means that we need to subtract one to get the actual ID that we use.
1451 	 */
1452 	xep = xd->xd_endpoints[endpoint - 1];
1453 	if (xep == NULL) {
1454 		xhci_error(xhcip, "received transfer trb with code %d, slot "
1455 		    "%d, and unknown endpoint %d: resetting device", code,
1456 		    slot, endpoint);
1457 		mutex_exit(&xhcip->xhci_lock);
1458 		xhci_fm_runtime_reset(xhcip);
1459 		return (B_FALSE);
1460 	}
1461 
1462 	/*
1463 	 * The TRB that we received may be an event data TRB for a bulk
1464 	 * endpoint, a normal or short completion for any other endpoint or an
1465 	 * error. In all cases, we need to figure out what transfer this
1466 	 * corresponds to. If this is an error, then we need to make sure that
1467 	 * the generating ring has been cleaned up.
1468 	 *
1469 	 * TRBs should be delivered in order, based on the ring. If for some
1470 	 * reason we find something that doesn't add up here, then we need to
1471 	 * assume that something has gone horribly wrong in the system and issue
1472 	 * a runtime reset. We issue the runtime reset rather than just trying
1473 	 * to stop and flush the ring, because it's unclear if we could stop
1474 	 * the ring in time.
1475 	 */
1476 	if ((xt = xhci_endpoint_determine_transfer(xhcip, xep, trb, &off)) ==
1477 	    NULL) {
1478 		xhci_error(xhcip, "received transfer trb with code %d, slot "
1479 		    "%d, and endpoint %d, but does not match current transfer "
1480 		    "for endpoint: resetting device", code, slot, endpoint);
1481 		mutex_exit(&xhcip->xhci_lock);
1482 		xhci_fm_runtime_reset(xhcip);
1483 		return (B_FALSE);
1484 	}
1485 
1486 	transfer_done = B_FALSE;
1487 
1488 	switch (code) {
1489 	case XHCI_CODE_SUCCESS:
1490 	case XHCI_CODE_SHORT_XFER:
1491 		/* Handled by endpoint logic */
1492 		break;
1493 	case XHCI_CODE_STALL:
1494 		/*
1495 		 * This causes us to transition to the halted state;
1496 		 * however, downstream clients are able to handle this just
1497 		 * fine.
1498 		 */
1499 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1500 		xt->xt_cr = USB_CR_STALL;
1501 		transfer_done = B_TRUE;
1502 		break;
1503 	case XHCI_CODE_BABBLE:
1504 		transfer_done = B_TRUE;
1505 		xt->xt_cr = USB_CR_DATA_OVERRUN;
1506 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1507 		break;
1508 	case XHCI_CODE_TXERR:
1509 	case XHCI_CODE_SPLITERR:
1510 		transfer_done = B_TRUE;
1511 		xt->xt_cr = USB_CR_DEV_NOT_RESP;
1512 		xep->xep_state |= XHCI_ENDPOINT_HALTED;
1513 		break;
1514 	case XHCI_CODE_BW_OVERRUN:
1515 		transfer_done = B_TRUE;
1516 		xt->xt_cr = USB_CR_DATA_OVERRUN;
1517 		break;
1518 	case XHCI_CODE_DATA_BUF:
1519 		transfer_done = B_TRUE;
1520 		if (xt->xt_data_tohost)
1521 			xt->xt_cr = USB_CR_DATA_OVERRUN;
1522 		else
1523 			xt->xt_cr = USB_CR_DATA_UNDERRUN;
1524 		break;
1525 	default:
1526 		/*
1527 		 * Treat these as general unspecified errors that don't cause a
1528 		 * stop of the ring. Even if it does, a subsequent timeout
1529 		 * should occur which causes us to end up driving a pipe reset
1530 		 * or at least issuing a reset of the device as part of
1531 		 * quiescing.
1532 		 */
1533 		transfer_done = B_TRUE;
1534 		xt->xt_cr = USB_CR_HC_HARDWARE_ERR;
1535 		break;
1536 	}
1537 
1538 	if (transfer_done == B_TRUE) {
1539 		xhci_transfer_t *alt;
1540 
1541 		alt = list_remove_head(&xep->xep_transfers);
1542 		VERIFY3P(alt, ==, xt);
1543 		mutex_exit(&xhcip->xhci_lock);
1544 		if (xt->xt_usba_req == NULL) {
1545 			usb_opaque_t urp;
1546 
1547 			urp = xhci_endpoint_dup_periodic(xep, xt, &xt->xt_cr);
1548 			usba_hcdi_cb(xep->xep_pipe, urp, xt->xt_cr);
1549 		} else {
1550 			usba_hcdi_cb(xep->xep_pipe,
1551 			    (usb_opaque_t)xt->xt_usba_req, xt->xt_cr);
1552 			xhci_transfer_free(xhcip, xt);
1553 		}
1554 		return (B_TRUE);
1555 	}
1556 
1557 	/*
1558 	 * Process the transfer callback based on the type of endpoint. Each of
1559 	 * these callback functions will end up calling back into USBA via
1560 	 * usba_hcdi_cb() to return transfer information (whether successful or
1561 	 * not). Because we can't hold any locks across a call to that function,
1562 	 * all of these callbacks will drop the xhci_t`xhci_lock by the time
1563 	 * they return. This is why there's no mutex_exit() call before we
1564 	 * return.
1565 	 */
1566 	switch (xep->xep_type) {
1567 	case USB_EP_ATTR_CONTROL:
1568 		ret = xhci_endpoint_control_callback(xhcip, xd, xep, xt, off,
1569 		    trb);
1570 		break;
1571 	case USB_EP_ATTR_BULK:
1572 		ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off, trb);
1573 		break;
1574 	case USB_EP_ATTR_INTR:
1575 		ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off,
1576 		    trb);
1577 		break;
1578 	case USB_EP_ATTR_ISOCH:
1579 		ret = xhci_endpoint_isoch_callback(xhcip, xd, xep, xt, off,
1580 		    trb);
1581 		break;
1582 	default:
1583 		panic("bad endpoint type: %u", xep->xep_type);
1584 	}
1585 
1586 	return (ret);
1587 }
1588