1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * EHCI Host Controller Driver (EHCI)
30  *
31  * The EHCI driver is a software driver which interfaces to the Universal
32  * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
33  * the Host Controller is defined by the EHCI Host Controller Interface.
34  *
 * This module contains the main EHCI driver code which handles all USB
 * transfers, bandwidth allocation, and other general functionality.
37  */
38 
39 #include <sys/usb/hcd/ehci/ehcid.h>
40 #include <sys/usb/hcd/ehci/ehci_intr.h>
41 #include <sys/usb/hcd/ehci/ehci_util.h>
42 #include <sys/usb/hcd/ehci/ehci_isoch.h>
43 
44 /* Adjustable variables for the size of the pools */
45 extern int ehci_qh_pool_size;
46 extern int ehci_qtd_pool_size;
47 
48 
49 /* Endpoint Descriptor (QH) related functions */
50 ehci_qh_t	*ehci_alloc_qh(
51 				ehci_state_t		*ehcip,
52 				usba_pipe_handle_data_t	*ph,
53 				uint_t			flag);
54 static void	ehci_unpack_endpoint(
55 				ehci_state_t		*ehcip,
56 				usba_pipe_handle_data_t	*ph,
57 				ehci_qh_t		*qh);
58 void		ehci_insert_qh(
59 				ehci_state_t		*ehcip,
60 				usba_pipe_handle_data_t	*ph);
61 static void	ehci_insert_async_qh(
62 				ehci_state_t		*ehcip,
63 				ehci_pipe_private_t	*pp);
64 static void	ehci_insert_intr_qh(
65 				ehci_state_t		*ehcip,
66 				ehci_pipe_private_t	*pp);
67 static void	ehci_modify_qh_status_bit(
68 				ehci_state_t		*ehcip,
69 				ehci_pipe_private_t	*pp,
70 				halt_bit_t		action);
71 static void	ehci_halt_hs_qh(
72 				ehci_state_t		*ehcip,
73 				ehci_pipe_private_t	*pp,
74 				ehci_qh_t		*qh);
75 static void	ehci_halt_fls_ctrl_and_bulk_qh(
76 				ehci_state_t		*ehcip,
77 				ehci_pipe_private_t	*pp,
78 				ehci_qh_t		*qh);
79 static void	ehci_clear_tt_buffer(
80 				ehci_state_t		*ehcip,
81 				usba_pipe_handle_data_t	*ph,
82 				ehci_qh_t		*qh);
83 static void	ehci_halt_fls_intr_qh(
84 				ehci_state_t		*ehcip,
85 				ehci_qh_t		*qh);
86 void		ehci_remove_qh(
87 				ehci_state_t		*ehcip,
88 				ehci_pipe_private_t	*pp,
89 				boolean_t		reclaim);
90 static void	ehci_remove_async_qh(
91 				ehci_state_t		*ehcip,
92 				ehci_pipe_private_t	*pp,
93 				boolean_t		reclaim);
94 static void	ehci_remove_intr_qh(
95 				ehci_state_t		*ehcip,
96 				ehci_pipe_private_t	*pp,
97 				boolean_t		reclaim);
98 static void	ehci_insert_qh_on_reclaim_list(
99 				ehci_state_t		*ehcip,
100 				ehci_pipe_private_t	*pp);
101 void		ehci_deallocate_qh(
102 				ehci_state_t		*ehcip,
103 				ehci_qh_t		*old_qh);
104 uint32_t	ehci_qh_cpu_to_iommu(
105 				ehci_state_t		*ehcip,
106 				ehci_qh_t		*addr);
107 ehci_qh_t	*ehci_qh_iommu_to_cpu(
108 				ehci_state_t		*ehcip,
109 				uintptr_t		addr);
110 
111 /* Transfer Descriptor (QTD) related functions */
112 static int	ehci_initialize_dummy(
113 				ehci_state_t		*ehcip,
114 				ehci_qh_t		*qh);
115 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
116 				ehci_state_t		*ehcip,
117 				ehci_pipe_private_t	*pp,
118 				usb_ctrl_req_t		*ctrl_reqp,
119 				usb_flags_t		usb_flags);
120 void		ehci_insert_ctrl_req(
121 				ehci_state_t		*ehcip,
122 				usba_pipe_handle_data_t	*ph,
123 				usb_ctrl_req_t		*ctrl_reqp,
124 				ehci_trans_wrapper_t	*tw,
125 				usb_flags_t		usb_flags);
126 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
127 				ehci_state_t		*ehcip,
128 				ehci_pipe_private_t	*pp,
129 				usb_bulk_req_t		*bulk_reqp,
130 				usb_flags_t		usb_flags);
131 void		ehci_insert_bulk_req(
132 				ehci_state_t		*ehcip,
133 				usba_pipe_handle_data_t	*ph,
134 				usb_bulk_req_t		*bulk_reqp,
135 				ehci_trans_wrapper_t	*tw,
136 				usb_flags_t		flags);
137 int		ehci_start_periodic_pipe_polling(
138 				ehci_state_t		*ehcip,
139 				usba_pipe_handle_data_t	*ph,
140 				usb_opaque_t		periodic_in_reqp,
141 				usb_flags_t		flags);
142 static int	ehci_start_pipe_polling(
143 				ehci_state_t		*ehcip,
144 				usba_pipe_handle_data_t	*ph,
145 				usb_flags_t		flags);
146 static int	ehci_start_intr_polling(
147 				ehci_state_t		*ehcip,
148 				usba_pipe_handle_data_t	*ph,
149 				usb_flags_t		flags);
150 static void	ehci_set_periodic_pipe_polling(
151 				ehci_state_t		*ehcip,
152 				usba_pipe_handle_data_t	*ph);
153 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
154 				ehci_state_t		*ehcip,
155 				usba_pipe_handle_data_t	*ph,
156 				usb_intr_req_t		*intr_reqp,
157 				usb_flags_t		usb_flags);
158 void		ehci_insert_intr_req(
159 				ehci_state_t		*ehcip,
160 				ehci_pipe_private_t	*pp,
161 				ehci_trans_wrapper_t	*tw,
162 				usb_flags_t		flags);
163 int		ehci_stop_periodic_pipe_polling(
164 				ehci_state_t		*ehcip,
165 				usba_pipe_handle_data_t	*ph,
166 				usb_flags_t		flags);
167 int		ehci_insert_qtd(
168 				ehci_state_t		*ehcip,
169 				uint32_t		qtd_ctrl,
170 				size_t			qtd_dma_offs,
171 				size_t			qtd_length,
172 				uint32_t		qtd_ctrl_phase,
173 				ehci_pipe_private_t	*pp,
174 				ehci_trans_wrapper_t	*tw);
175 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
176 				ehci_state_t		*ehcip);
177 static void	ehci_fill_in_qtd(
178 				ehci_state_t		*ehcip,
179 				ehci_qtd_t		*qtd,
180 				uint32_t		qtd_ctrl,
181 				size_t			qtd_dma_offs,
182 				size_t			qtd_length,
183 				uint32_t		qtd_ctrl_phase,
184 				ehci_pipe_private_t	*pp,
185 				ehci_trans_wrapper_t	*tw);
186 static void	ehci_insert_qtd_on_tw(
187 				ehci_state_t		*ehcip,
188 				ehci_trans_wrapper_t	*tw,
189 				ehci_qtd_t		*qtd);
190 static void	ehci_insert_qtd_into_active_qtd_list(
191 				ehci_state_t		*ehcip,
192 				ehci_qtd_t		*curr_qtd);
193 void		ehci_remove_qtd_from_active_qtd_list(
194 				ehci_state_t		*ehcip,
195 				ehci_qtd_t		*curr_qtd);
196 static void	ehci_traverse_qtds(
197 				ehci_state_t		*ehcip,
198 				usba_pipe_handle_data_t	*ph);
199 void		ehci_deallocate_qtd(
200 				ehci_state_t		*ehcip,
201 				ehci_qtd_t		*old_qtd);
202 uint32_t	ehci_qtd_cpu_to_iommu(
203 				ehci_state_t		*ehcip,
204 				ehci_qtd_t		*addr);
205 ehci_qtd_t	*ehci_qtd_iommu_to_cpu(
206 				ehci_state_t		*ehcip,
207 				uintptr_t		addr);
208 
209 /* Transfer Wrapper (TW) functions */
210 static ehci_trans_wrapper_t  *ehci_create_transfer_wrapper(
211 				ehci_state_t		*ehcip,
212 				ehci_pipe_private_t	*pp,
213 				size_t			length,
214 				uint_t			usb_flags);
215 int		ehci_allocate_tds_for_tw(
216 				ehci_state_t		*ehcip,
217 				ehci_pipe_private_t	*pp,
218 				ehci_trans_wrapper_t	*tw,
219 				size_t			qtd_count);
220 static ehci_trans_wrapper_t  *ehci_allocate_tw_resources(
221 				ehci_state_t		*ehcip,
222 				ehci_pipe_private_t	*pp,
223 				size_t			length,
224 				usb_flags_t		usb_flags,
225 				size_t			td_count);
226 static void	ehci_free_tw_td_resources(
227 				ehci_state_t		*ehcip,
228 				ehci_trans_wrapper_t	*tw);
229 static void	ehci_start_xfer_timer(
230 				ehci_state_t		*ehcip,
231 				ehci_pipe_private_t	*pp,
232 				ehci_trans_wrapper_t	*tw);
233 void		ehci_stop_xfer_timer(
234 				ehci_state_t		*ehcip,
235 				ehci_trans_wrapper_t	*tw,
236 				uint_t			flag);
237 static void	ehci_xfer_timeout_handler(void		*arg);
238 static void	ehci_remove_tw_from_timeout_list(
239 				ehci_state_t		*ehcip,
240 				ehci_trans_wrapper_t	*tw);
241 static void	ehci_start_timer(ehci_state_t		*ehcip,
242 				ehci_pipe_private_t	*pp);
243 void		ehci_deallocate_tw(
244 				ehci_state_t		*ehcip,
245 				ehci_pipe_private_t	*pp,
246 				ehci_trans_wrapper_t	*tw);
247 void		ehci_free_dma_resources(
248 				ehci_state_t		*ehcip,
249 				usba_pipe_handle_data_t	*ph);
250 static void	ehci_free_tw(
251 				ehci_state_t		*ehcip,
252 				ehci_pipe_private_t	*pp,
253 				ehci_trans_wrapper_t	*tw);
254 
255 /* Miscellaneous functions */
256 int		ehci_allocate_intr_in_resource(
257 				ehci_state_t		*ehcip,
258 				ehci_pipe_private_t	*pp,
259 				ehci_trans_wrapper_t	*tw,
260 				usb_flags_t		flags);
261 void		ehci_pipe_cleanup(
262 				ehci_state_t		*ehcip,
263 				usba_pipe_handle_data_t	*ph);
264 static void	ehci_wait_for_transfers_completion(
265 				ehci_state_t		*ehcip,
266 				ehci_pipe_private_t	*pp);
267 void		ehci_check_for_transfers_completion(
268 				ehci_state_t		*ehcip,
269 				ehci_pipe_private_t	*pp);
270 static void	ehci_save_data_toggle(
271 				ehci_state_t		*ehcip,
272 				usba_pipe_handle_data_t	*ph);
273 void		ehci_restore_data_toggle(
274 				ehci_state_t		*ehcip,
275 				usba_pipe_handle_data_t	*ph);
276 void		ehci_handle_outstanding_requests(
277 				ehci_state_t		*ehcip,
278 				ehci_pipe_private_t	*pp);
279 void		ehci_deallocate_intr_in_resource(
280 				ehci_state_t		*ehcip,
281 				ehci_pipe_private_t	*pp,
282 				ehci_trans_wrapper_t	*tw);
283 void		ehci_do_client_periodic_in_req_callback(
284 				ehci_state_t		*ehcip,
285 				ehci_pipe_private_t	*pp,
286 				usb_cr_t		completion_reason);
287 void		ehci_hcdi_callback(
288 				usba_pipe_handle_data_t	*ph,
289 				ehci_trans_wrapper_t	*tw,
290 				usb_cr_t		completion_reason);
291 
292 
293 /*
 * Endpoint Descriptor (QH) manipulation functions
295  */
296 
297 /*
298  * ehci_alloc_qh:
299  *
300  * Allocate an endpoint descriptor (QH)
301  *
302  * NOTE: This function is also called from POLLED MODE.
303  */
304 ehci_qh_t *
305 ehci_alloc_qh(
306 	ehci_state_t		*ehcip,
307 	usba_pipe_handle_data_t	*ph,
308 	uint_t			flag)
309 {
310 	int			i, state;
311 	ehci_qh_t		*qh;
312 
313 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
314 	    "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
315 
316 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
317 
	/*
	 * If this is for an isochronous (ISOC) endpoint, return NULL.
	 * Isochronous transfers use ITDs that are placed directly onto
	 * the Periodic Frame List (PFL) and do not need a QH.
	 */
322 	if (ph) {
323 		if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
324 
325 			return (NULL);
326 		}
327 	}
328 
	/*
	 * The first EHCI_NUM_STATIC_NODES (63) endpoints in the Endpoint
	 * Descriptor (QH) buffer pool are reserved for building the
	 * interrupt lattice tree. Search for a free endpoint descriptor
	 * in the rest of the QH buffer pool.
	 */
335 	for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
336 		state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
337 
338 		if (state == EHCI_QH_FREE) {
339 			break;
340 		}
341 	}
342 
343 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
344 	    "ehci_alloc_qh: Allocated %d", i);
345 
346 	if (i == ehci_qh_pool_size) {
347 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  ehcip->ehci_log_hdl,
348 		    "ehci_alloc_qh: QH exhausted");
349 
350 		return (NULL);
351 	} else {
352 		qh = &ehcip->ehci_qh_pool_addr[i];
353 
354 		USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
355 		    "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
356 
357 		/* Check polled mode flag */
358 		if (flag == EHCI_POLLED_MODE_FLAG) {
359 			Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
360 			Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
361 		}
362 
363 		/* Unpack the endpoint descriptor into a control field */
364 		if (ph) {
365 			if ((ehci_initialize_dummy(ehcip,
366 			    qh)) == USB_NO_RESOURCES) {
367 
368 				bzero((void *)qh, sizeof (ehci_qh_t));
369 				Set_QH(qh->qh_state, EHCI_QH_FREE);
370 
371 				return (NULL);
372 			}
373 
374 			ehci_unpack_endpoint(ehcip, ph, qh);
375 
376 			Set_QH(qh->qh_curr_qtd, NULL);
377 			Set_QH(qh->qh_alt_next_qtd,
378 			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
379 
			/* Change QH's state to Active */
381 			Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
382 		} else {
383 			Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
384 
			/* Change QH's state to Static */
386 			Set_QH(qh->qh_state, EHCI_QH_STATIC);
387 		}
388 
389 		ehci_print_qh(ehcip, qh);
390 
391 		return (qh);
392 	}
393 }
394 
395 
396 /*
397  * ehci_unpack_endpoint:
398  *
 * Unpack the information in the pipe handle and set up the control
 * fields of the Host Controller's (HC) Endpoint Descriptor (QH).
401  */
402 static void
403 ehci_unpack_endpoint(
404 	ehci_state_t		*ehcip,
405 	usba_pipe_handle_data_t	*ph,
406 	ehci_qh_t		*qh)
407 {
408 	usb_ep_descr_t		*endpoint = &ph->p_ep;
409 	uint_t			maxpacketsize, addr, xactions;
410 	uint_t			ctrl = 0, status = 0, split_ctrl = 0;
411 	usb_port_status_t	usb_port_status;
412 	usba_device_t		*usba_device = ph->p_usba_device;
413 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
414 
415 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
416 	    "ehci_unpack_endpoint:");
417 
418 	mutex_enter(&usba_device->usb_mutex);
419 	ctrl = usba_device->usb_addr;
420 	usb_port_status = usba_device->usb_port_status;
421 	mutex_exit(&usba_device->usb_mutex);
422 
423 	addr = endpoint->bEndpointAddress;
424 
425 	/* Assign the endpoint's address */
426 	ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
427 
428 	/* Assign the speed */
429 	switch (usb_port_status) {
430 	case USBA_LOW_SPEED_DEV:
431 		ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
432 		break;
433 	case USBA_FULL_SPEED_DEV:
434 		ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
435 		break;
436 	case USBA_HIGH_SPEED_DEV:
437 		ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
438 		break;
439 	}
440 
441 	switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
442 	case USB_EP_ATTR_CONTROL:
443 		/* Assign data toggle information */
444 		ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
445 
446 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
447 			ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
448 		}
449 		/* FALLTHRU */
450 	case USB_EP_ATTR_BULK:
451 		/* Maximum nak counter */
452 		ctrl |= EHCI_QH_CTRL_MAX_NC;
453 
454 		if (usb_port_status == USBA_HIGH_SPEED_DEV) {
455 			/*
456 			 * Perform ping before executing control
457 			 * and bulk transactions.
458 			 */
459 			status = EHCI_QH_STS_DO_PING;
460 		}
461 		break;
462 	case USB_EP_ATTR_INTR:
463 		/* Set start split mask */
464 		split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
465 
466 		/*
467 		 * Set complete split mask for low/full speed
468 		 * usb devices.
469 		 */
470 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
471 			split_ctrl |= ((pp->pp_cmask <<
472 			    EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
473 			    EHCI_QH_SPLIT_CTRL_COMP_MASK);
474 		}
475 		break;
476 	}
477 
478 	/* Get the max transactions per microframe */
479 	xactions = (endpoint->wMaxPacketSize &
480 	    USB_EP_MAX_XACTS_MASK) >>  USB_EP_MAX_XACTS_SHIFT;
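	/*
	 * Per the USB 2.0 spec, bits 12:11 of wMaxPacketSize give the
	 * number of additional transactions per microframe for a
	 * high-bandwidth high speed endpoint.  The switch below maps
	 * the raw values 0, 1 and 2 to 1, 2 and 3 transactions per
	 * microframe in the QH's split control field; any reserved
	 * value falls back to one transaction.
	 */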
481 
482 	switch (xactions) {
483 	case 0:
484 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
485 		break;
486 	case 1:
487 		split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
488 		break;
489 	case 2:
490 		split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
491 		break;
492 	default:
493 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
494 		break;
495 	}
496 
497 	/*
498 	 * For low/full speed devices, program high speed hub
499 	 * address and port number.
500 	 */
501 	if (usb_port_status != USBA_HIGH_SPEED_DEV) {
502 		mutex_enter(&usba_device->usb_mutex);
503 		split_ctrl |= ((usba_device->usb_hs_hub_addr
504 		    << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
505 		    EHCI_QH_SPLIT_CTRL_HUB_ADDR);
506 
507 		split_ctrl |= ((usba_device->usb_hs_hub_port
508 		    << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
509 		    EHCI_QH_SPLIT_CTRL_HUB_PORT);
510 
511 		mutex_exit(&usba_device->usb_mutex);
512 
513 		/* Set start split transaction state */
514 		status = EHCI_QH_STS_DO_START_SPLIT;
515 	}
516 
517 	/* Assign endpoint's maxpacketsize */
518 	maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
519 	maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
520 	ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
521 
522 	Set_QH(qh->qh_ctrl, ctrl);
523 	Set_QH(qh->qh_split_ctrl, split_ctrl);
524 	Set_QH(qh->qh_status, status);
525 }
526 
527 
528 /*
529  * ehci_insert_qh:
530  *
531  * Add the Endpoint Descriptor (QH) into the Host Controller's
532  * (HC) appropriate endpoint list.
533  */
534 void
535 ehci_insert_qh(
536 	ehci_state_t		*ehcip,
537 	usba_pipe_handle_data_t	*ph)
538 {
539 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
540 
541 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
542 	    "ehci_insert_qh: qh=0x%p", pp->pp_qh);
543 
544 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
545 
546 	switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
547 	case USB_EP_ATTR_CONTROL:
548 	case USB_EP_ATTR_BULK:
549 		ehci_insert_async_qh(ehcip, pp);
550 		ehcip->ehci_open_async_count++;
551 		break;
552 	case USB_EP_ATTR_INTR:
553 		ehci_insert_intr_qh(ehcip, pp);
554 		ehcip->ehci_open_periodic_count++;
555 		break;
556 	case USB_EP_ATTR_ISOCH:
		/* ISOC endpoints do not use a QH; just update the count */
558 		ehcip->ehci_open_periodic_count++;
559 		break;
560 	}
561 	ehci_toggle_scheduler(ehcip);
562 }
563 
564 
565 /*
566  * ehci_insert_async_qh:
567  *
568  * Insert a control/bulk endpoint into the Host Controller's (HC)
569  * Asynchronous schedule endpoint list.
570  */
571 static void
572 ehci_insert_async_qh(
573 	ehci_state_t		*ehcip,
574 	ehci_pipe_private_t	*pp)
575 {
576 	ehci_qh_t		*qh = pp->pp_qh;
577 	ehci_qh_t		*async_head_qh;
578 	ehci_qh_t		*next_qh;
579 	uintptr_t		qh_addr;
580 
581 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
582 	    "ehci_insert_async_qh:");
583 
584 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
585 
586 	/* Make sure this QH is not already in the list */
587 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
588 
589 	qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);
590 
591 	/* Obtain a ptr to the head of the Async schedule list */
592 	async_head_qh = ehcip->ehci_head_of_async_sched_list;
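	/*
	 * Per the EHCI spec, the asynchronous schedule is a circular list
	 * of QHs, exactly one of which must have the H (head of
	 * reclamation list) bit set; that QH is the one tracked in
	 * ehci_head_of_async_sched_list.
	 */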
593 
594 	if (async_head_qh == NULL) {
595 		/* Set this QH to be the "head" of the circular list */
596 		Set_QH(qh->qh_ctrl,
597 		    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));
598 
599 		/* Set new QH's link and previous pointer to itself */
600 		Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
601 		Set_QH(qh->qh_prev, qh_addr);
602 
603 		ehcip->ehci_head_of_async_sched_list = qh;
604 
605 		/* Set the head ptr to the new endpoint */
606 		Set_OpReg(ehci_async_list_addr, qh_addr);
607 	} else {
608 		ASSERT(Get_QH(async_head_qh->qh_ctrl) &
609 		    EHCI_QH_CTRL_RECLAIM_HEAD);
610 
611 		/* Ensure this QH's "H" bit is not set */
612 		Set_QH(qh->qh_ctrl,
613 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));
614 
615 		next_qh = ehci_qh_iommu_to_cpu(ehcip,
616 		    Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
617 
618 		/* Set new QH's link and previous pointers */
619 		Set_QH(qh->qh_link_ptr,
620 		    Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
621 		Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));
622 
623 		/* Set next QH's prev pointer */
624 		Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));
625 
		/* Point the QH head's link pointer at the new QH */
627 		Set_QH(async_head_qh->qh_link_ptr,
628 		    qh_addr | EHCI_QH_LINK_REF_QH);
629 	}
630 }
631 
632 
633 /*
634  * ehci_insert_intr_qh:
635  *
 * Insert an interrupt endpoint into the Host Controller's (HC) interrupt
637  * lattice tree.
638  */
639 static void
640 ehci_insert_intr_qh(
641 	ehci_state_t		*ehcip,
642 	ehci_pipe_private_t	*pp)
643 {
644 	ehci_qh_t		*qh = pp->pp_qh;
645 	ehci_qh_t		*next_lattice_qh, *lattice_qh;
646 	uint_t			hnode;
647 
648 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
649 	    "ehci_insert_intr_qh:");
650 
651 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
652 
653 	/* Make sure this QH is not already in the list */
654 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
655 
656 	/*
657 	 * The appropriate high speed node was found
658 	 * during the opening of the pipe.
659 	 */
660 	hnode = pp->pp_pnode;
661 
662 	/* Find the lattice endpoint */
663 	lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];
664 
665 	/* Find the next lattice endpoint */
666 	next_lattice_qh = ehci_qh_iommu_to_cpu(
667 	    ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));
668 
669 	/* Update the previous pointer */
670 	Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));
671 
672 	/* Check next_lattice_qh value */
673 	if (next_lattice_qh) {
674 		/* Update this qh to point to the next one in the lattice */
675 		Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));
676 
677 		/* Update the previous pointer of qh->qh_link_ptr */
678 		if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
679 			Set_QH(next_lattice_qh->qh_prev,
680 			    ehci_qh_cpu_to_iommu(ehcip, qh));
681 		}
682 	} else {
683 		/* Update qh's link pointer to terminate periodic list */
684 		Set_QH(qh->qh_link_ptr,
685 		    (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
686 	}
687 
688 	/* Insert this endpoint into the lattice */
689 	Set_QH(lattice_qh->qh_link_ptr,
690 	    (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
691 }
692 
693 
694 /*
695  * ehci_modify_qh_status_bit:
696  *
697  * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
698  *
699  * If several threads try to halt the same pipe, they will need to wait on
700  * a condition variable.  Only one thread is allowed to halt or unhalt the
701  * pipe at a time.
702  *
 * Usually an unhalt pipe follows soon after a halt pipe.  It is assumed
 * that an unhalt pipe will never occur without a preceding halt pipe.
705  */
706 static void
707 ehci_modify_qh_status_bit(
708 	ehci_state_t		*ehcip,
709 	ehci_pipe_private_t	*pp,
710 	halt_bit_t		action)
711 {
712 	ehci_qh_t		*qh = pp->pp_qh;
713 	uint_t			smask, eps, split_intr_qh;
714 	uint_t			status;
715 
716 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
717 	    "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
718 	    action, qh);
719 
720 	ehci_print_qh(ehcip, qh);
721 
722 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
723 
724 	/*
	 * If this pipe is in the middle of halting, don't allow another
726 	 * thread to come in and modify the same pipe.
727 	 */
728 	while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {
729 
		cv_wait(&pp->pp_halt_cmpl_cv,
		    &ehcip->ehci_int_mutex);
732 	}
733 
734 	/* Sync the QH QTD pool to get up to date information */
735 	Sync_QH_QTD_Pool(ehcip);
736 
737 
738 	if (action == CLEAR_HALT) {
739 		/*
740 		 * If the halt bit is to be cleared, just clear it.
		 * There shouldn't be any race condition problems.
742 		 * If the host controller reads the bit before the
743 		 * driver has a chance to set the bit, the bit will
744 		 * be reread on the next frame.
745 		 */
746 		Set_QH(qh->qh_ctrl,
747 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
748 		Set_QH(qh->qh_status,
749 		    Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));
750 
751 		goto success;
752 	}
753 
	/* Halt the QH, but first check to see if it is already halted */
755 	status = Get_QH(qh->qh_status);
756 	if (!(status & EHCI_QH_STS_HALTED)) {
757 		/* Indicate that this pipe is in the middle of halting. */
758 		pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;
759 
760 		/*
		 * Find out if this is a full/low speed interrupt endpoint.
		 * A non-zero interrupt schedule mask (S-mask) indicates that
		 * this QH is an interrupt endpoint.  Check the endpoint
		 * speed to see if it is either FULL or LOW.
765 		 */
766 		smask = Get_QH(qh->qh_split_ctrl) &
767 		    EHCI_QH_SPLIT_CTRL_INTR_MASK;
768 		eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
769 		split_intr_qh = ((smask != 0) &&
770 		    (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));
771 
772 		if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
773 			ehci_halt_hs_qh(ehcip, pp, qh);
774 		} else {
775 			if (split_intr_qh) {
776 				ehci_halt_fls_intr_qh(ehcip, qh);
777 			} else {
778 				ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
779 			}
780 		}
781 
782 		/* Indicate that this pipe is not in the middle of halting. */
783 		pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
784 	}
785 
786 	/* Sync the QH QTD pool again to get the most up to date information */
787 	Sync_QH_QTD_Pool(ehcip);
788 
789 	ehci_print_qh(ehcip, qh);
790 
791 	status = Get_QH(qh->qh_status);
792 	if (!(status & EHCI_QH_STS_HALTED)) {
793 		USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
794 		    "ehci_modify_qh_status_bit: Failed to halt qh=0x%p", qh);
795 
796 		ehci_print_qh(ehcip, qh);
797 
798 		/* Set host controller soft state to error */
799 		ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
800 
801 		ASSERT(status & EHCI_QH_STS_HALTED);
802 	}
803 
804 success:
805 	/* Wake up threads waiting for this pipe to be halted. */
806 	cv_signal(&pp->pp_halt_cmpl_cv);
807 }
808 
809 
810 /*
811  * ehci_halt_hs_qh:
812  *
813  * Halts all types of HIGH SPEED QHs.
814  */
815 static void
816 ehci_halt_hs_qh(
817 	ehci_state_t		*ehcip,
818 	ehci_pipe_private_t	*pp,
819 	ehci_qh_t		*qh)
820 {
821 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
822 
823 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
824 	    "ehci_halt_hs_qh:");
825 
826 	/* Remove this qh from the HCD's view, but do not reclaim it */
827 	ehci_remove_qh(ehcip, pp, B_FALSE);
828 
829 	/*
	 * Wait for at least one SOF, just in case the host controller
	 * is in the middle of accessing this QH.
832 	 */
833 	(void) ehci_wait_for_sof(ehcip);
834 
835 	/* Sync the QH QTD pool to get up to date information */
836 	Sync_QH_QTD_Pool(ehcip);
837 
838 	/* Modify the status bit and halt this QH. */
839 	Set_QH(qh->qh_status,
840 	    ((Get_QH(qh->qh_status) &
841 		~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
842 
843 	/* Insert this QH back into the HCD's view */
844 	ehci_insert_qh(ehcip, ph);
845 }
846 
847 
848 /*
849  * ehci_halt_fls_ctrl_and_bulk_qh:
850  *
851  * Halts FULL/LOW Ctrl and Bulk QHs only.
852  */
853 static void
854 ehci_halt_fls_ctrl_and_bulk_qh(
855 	ehci_state_t		*ehcip,
856 	ehci_pipe_private_t	*pp,
857 	ehci_qh_t		*qh)
858 {
859 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
860 	uint_t			status, split_status, bytes_left;
861 
862 
863 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
864 	    "ehci_halt_fls_ctrl_and_bulk_qh:");
865 
866 	/* Remove this qh from the HCD's view, but do not reclaim it */
867 	ehci_remove_qh(ehcip, pp, B_FALSE);
868 
869 	/*
	 * Wait for at least one SOF, just in case the host controller
	 * is in the middle of accessing this QH.
872 	 */
873 	(void) ehci_wait_for_sof(ehcip);
874 
875 	/* Sync the QH QTD pool to get up to date information */
876 	Sync_QH_QTD_Pool(ehcip);
877 
878 	/* Modify the status bit and halt this QH. */
879 	Set_QH(qh->qh_status,
880 	    ((Get_QH(qh->qh_status) &
881 		~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
882 
883 	/* Check to see if the QH was in the middle of a transaction */
884 	status = Get_QH(qh->qh_status);
885 	split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
886 	bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
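	/*
	 * If the QH was halted in the middle of a complete-split with
	 * bytes still left to transfer, the Transaction Translator (TT)
	 * in the parent 2.0 hub may still be holding state for this
	 * endpoint, so it is flushed with a Clear_TT_Buffer request
	 * (usb spec 11.24.2.3).
	 */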
887 	if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
888 	    (bytes_left != 0)) {
889 		/* send ClearTTBuffer to this device's parent 2.0 hub */
890 		ehci_clear_tt_buffer(ehcip, ph, qh);
891 	}
892 
893 	/* Insert this QH back into the HCD's view */
894 	ehci_insert_qh(ehcip, ph);
895 }
896 
897 
898 /*
 * ehci_clear_tt_buffer:
 *
 * This function sends a Clear_TT_Buffer request to the pipe's
902  * parent 2.0 hub.
903  */
904 static void
905 ehci_clear_tt_buffer(
906 	ehci_state_t		*ehcip,
907 	usba_pipe_handle_data_t	*ph,
908 	ehci_qh_t		*qh)
909 {
910 	usba_device_t		*usba_device;
911 	usba_device_t		*hub_usba_device;
912 	usb_pipe_handle_t	hub_def_ph;
913 	usb_ep_descr_t		*eptd;
914 	uchar_t			attributes;
915 	uint16_t		wValue;
916 	usb_ctrl_setup_t	setup;
917 	usb_cr_t		completion_reason;
918 	usb_cb_flags_t		cb_flags;
919 	int			retry;
920 
921 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
922 	    "ehci_clear_tt_buffer: ");
923 
924 	/* Get some information about the current pipe */
925 	usba_device = ph->p_usba_device;
926 	eptd = &ph->p_ep;
927 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
928 
929 	/*
	 * Create the wValue for this request (usb spec 11.24.2.3)
931 	 * 3..0		Endpoint Number
932 	 * 10..4	Device Address
933 	 * 12..11	Endpoint Type
934 	 * 14..13	Reserved (must be 0)
935 	 * 15		Direction 1 = IN, 0 = OUT
936 	 */
937 	wValue = 0;
938 	if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
939 		wValue |= 0x8000;
940 	}
941 	wValue |= attributes << 11;
942 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
943 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
944 	    EHCI_QH_CTRL_ED_NUMBER_SHIFT;
945 
946 	mutex_exit(&ehcip->ehci_int_mutex);
947 
948 	/* Manually fill in the request. */
949 	setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
950 	setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
951 	setup.wValue = wValue;
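	/*
	 * wIndex selects the TT port (usb spec 11.24.2.3); the value of
	 * one used here appears to assume a single-TT hub.
	 */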
952 	setup.wIndex = 1;
953 	setup.wLength = 0;
954 	setup.attrs = USB_ATTRS_NONE;
955 
956 	/* Get the usba_device of the parent 2.0 hub. */
957 	mutex_enter(&usba_device->usb_mutex);
958 	hub_usba_device = usba_device->usb_hs_hub_usba_dev;
959 	mutex_exit(&usba_device->usb_mutex);
960 
961 	/* Get the default ctrl pipe for the parent 2.0 hub */
962 	mutex_enter(&hub_usba_device->usb_mutex);
963 	hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
964 	mutex_exit(&hub_usba_device->usb_mutex);
965 
966 	for (retry = 0; retry < 3; retry++) {
967 
968 		/* sync send the request to the default pipe */
969 		if (usb_pipe_ctrl_xfer_wait(
970 		    hub_def_ph,
971 		    &setup,
972 		    NULL,
973 		    &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
974 
975 			break;
976 		}
977 
978 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_clear_tt_buffer: Failed to clear tt buffer, "
980 		    "retry = %d, cr = %d, cb_flags = 0x%x\n",
981 		    retry, completion_reason, cb_flags);
982 	}
983 
984 	if (retry >= 3) {
985 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
986 		dev_info_t *dip = hub_usba_device->usb_dip;
987 
988 		/*
989 		 * Ask the user to hotplug the 2.0 hub, to make sure that
		 * the TT buffer state is in sync since this command has failed.
991 		 */
992 		USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "Error recovery failure: Please hotplug the 2.0 hub at "
994 		    "%s", ddi_pathname(dip, path));
995 
996 		kmem_free(path, MAXPATHLEN);
997 	}
998 
999 	mutex_enter(&ehcip->ehci_int_mutex);
1000 }
1001 
1002 /*
1003  * ehci_halt_fls_intr_qh:
1004  *
1005  * Halts FULL/LOW speed Intr QHs.
1006  */
1007 static void
1008 ehci_halt_fls_intr_qh(
1009 	ehci_state_t		*ehcip,
1010 	ehci_qh_t		*qh)
1011 {
1012 	usb_frame_number_t	starting_frame;
1013 	usb_frame_number_t	frames_past;
1014 	uint_t			status, i;
1015 
1016 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1017 	    "ehci_halt_fls_intr_qh:");
1018 
1019 	/*
	 * Ask the HC to deactivate this full/low speed
	 * periodic QH.
1022 	 */
1023 	Set_QH(qh->qh_ctrl,
1024 	    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));
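	/*
	 * Per the EHCI spec, the "inactivate on next transaction" (I)
	 * bit is only defined for full/low speed QHs on the periodic
	 * schedule, which is why this halt path is separate from the
	 * high speed and ctrl/bulk paths.
	 */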
1025 
1026 	starting_frame = ehci_get_current_frame_number(ehcip);
1027 
1028 	/*
	 * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frames or until
1030 	 * the QH has been halted.
1031 	 */
1032 	Sync_QH_QTD_Pool(ehcip);
1033 	frames_past = 0;
1034 	status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1035 
1036 	while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
1037 	    (status != 0)) {
1038 
1039 		(void) ehci_wait_for_sof(ehcip);
1040 
1041 		Sync_QH_QTD_Pool(ehcip);
1042 		status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1043 		frames_past = ehci_get_current_frame_number(ehcip) -
1044 		    starting_frame;
1045 	}
1046 
1047 	/* Modify the status bit and halt this QH. */
1048 	Sync_QH_QTD_Pool(ehcip);
1049 
1050 	status = Get_QH(qh->qh_status);
1051 
1052 	for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
1053 		Set_QH(qh->qh_status,
1054 			((Get_QH(qh->qh_status) &
1055 			~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
1056 
1057 		Sync_QH_QTD_Pool(ehcip);
1058 
1059 		(void) ehci_wait_for_sof(ehcip);
1060 		Sync_QH_QTD_Pool(ehcip);
1061 
1062 		if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {
1063 
1064 			break;
1065 		}
1066 	}
1067 
1068 	Sync_QH_QTD_Pool(ehcip);
1069 
1070 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1071 	    "ehci_halt_fls_intr_qh: qh=0x%p frames past=%d, status=0x%x, 0x%x",
1072 	    qh, ehci_get_current_frame_number(ehcip) - starting_frame,
1073 	    status, Get_QH(qh->qh_status));
1074 }
1075 
1076 
1077 /*
1078  * ehci_remove_qh:
1079  *
1080  * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1081  * endpoint list.
1082  */
1083 void
1084 ehci_remove_qh(
1085 	ehci_state_t		*ehcip,
1086 	ehci_pipe_private_t	*pp,
1087 	boolean_t		reclaim)
1088 {
1089 	uchar_t			attributes;
1090 
1091 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1092 
1093 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1094 	    "ehci_remove_qh: qh=0x%p", pp->pp_qh);
1095 
1096 	attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1097 
1098 	switch (attributes) {
1099 	case USB_EP_ATTR_CONTROL:
1100 	case USB_EP_ATTR_BULK:
1101 		ehci_remove_async_qh(ehcip, pp, reclaim);
1102 		ehcip->ehci_open_async_count--;
1103 		break;
1104 	case USB_EP_ATTR_INTR:
1105 		ehci_remove_intr_qh(ehcip, pp, reclaim);
1106 		ehcip->ehci_open_periodic_count--;
1107 		break;
1108 	case USB_EP_ATTR_ISOCH:
		/* ISOC endpoints do not use a QH; just update the count */
1110 		ehcip->ehci_open_periodic_count--;
1111 		break;
1112 	}
1113 	ehci_toggle_scheduler(ehcip);
1114 }
1115 
1116 
1117 /*
1118  * ehci_remove_async_qh:
1119  *
 * Remove a control/bulk endpoint from the Host Controller's (HC)
1121  * Asynchronous schedule endpoint list.
1122  */
1123 static void
1124 ehci_remove_async_qh(
1125 	ehci_state_t		*ehcip,
1126 	ehci_pipe_private_t	*pp,
1127 	boolean_t		reclaim)
1128 {
1129 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1130 	ehci_qh_t		*prev_qh, *next_qh;
1131 
1132 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1133 	    "ehci_remove_async_qh:");
1134 
1135 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1136 
1137 	prev_qh = ehci_qh_iommu_to_cpu(ehcip,
1138 	    Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
1139 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1140 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1141 
1142 	/* Make sure this QH is in the list */
1143 	ASSERT(prev_qh != NULL);
1144 
1145 	/*
1146 	 * If next QH and current QH are the same, then this is the last
1147 	 * QH on the Asynchronous Schedule list.
1148 	 */
1149 	if (qh == next_qh) {
1150 		ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1151 		/*
1152 		 * Null our pointer to the async sched list, but do not
1153 		 * touch the host controller's list_addr.
1154 		 */
1155 		ehcip->ehci_head_of_async_sched_list = NULL;
1156 		ASSERT(ehcip->ehci_open_async_count == 1);
1157 	} else {
1158 		/* If this QH is the HEAD then find another one to replace it */
1159 		if (ehcip->ehci_head_of_async_sched_list == qh) {
1160 
1161 			ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1162 			ehcip->ehci_head_of_async_sched_list = next_qh;
1163 			Set_QH(next_qh->qh_ctrl,
1164 			    Get_QH(next_qh->qh_ctrl) |
1165 			    EHCI_QH_CTRL_RECLAIM_HEAD);
1166 		}
1167 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1168 		Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1169 	}
1170 
	/* Clear qh_prev to indicate it is no longer in the circular list */
1172 	Set_QH(qh->qh_prev, NULL);
1173 
1174 	if (reclaim) {
1175 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1176 	}
1177 }
1178 
1179 
1180 /*
1181  * ehci_remove_intr_qh:
1182  *
1183  * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1184  * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1185  * interrupt handler.
1186  */
1187 static void
1188 ehci_remove_intr_qh(
1189 	ehci_state_t		*ehcip,
1190 	ehci_pipe_private_t	*pp,
1191 	boolean_t		reclaim)
1192 {
1193 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1194 	ehci_qh_t		*prev_qh, *next_qh;
1195 
1196 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1197 	    "ehci_remove_intr_qh:");
1198 
1199 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1200 
1201 	prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
1202 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1203 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1204 
1205 	/* Make sure this QH is in the list */
1206 	ASSERT(prev_qh != NULL);
1207 
1208 	if (next_qh) {
1209 		/* Update previous qh's link pointer */
1210 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1211 
1212 		if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
1213 			/* Set the previous pointer of the next one */
1214 			Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1215 		}
1216 	} else {
1217 		/* Update previous qh's link pointer */
1218 		Set_QH(prev_qh->qh_link_ptr,
1219 		    (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
1220 	}
1221 
	/* Clear qh_prev to indicate it is no longer in the lattice */
1223 	Set_QH(qh->qh_prev, NULL);
1224 
1225 	if (reclaim) {
1226 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1227 	}
1228 }
1229 
1230 
1231 /*
1232  * ehci_insert_qh_on_reclaim_list:
1233  *
1234  * Insert Endpoint onto the reclaim list
1235  */
1236 static void
1237 ehci_insert_qh_on_reclaim_list(
1238 	ehci_state_t		*ehcip,
1239 	ehci_pipe_private_t	*pp)
1240 {
1241 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1242 	ehci_qh_t		*next_qh, *prev_qh;
1243 	usb_frame_number_t	frame_number;
1244 
1245 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1246 
1247 	/*
	 * Read the current usb frame number and add the number of usb
	 * frames that must elapse before reclaiming the current endpoint.
1250 	 */
1251 	frame_number =
1252 	    ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;
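	/*
	 * Reclamation is deferred by MAX_SOF_WAIT_COUNT frames,
	 * presumably so that the host controller can no longer be
	 * referencing this QH; the QH itself is freed later from the
	 * interrupt handler once this frame number has passed.
	 */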
1253 
1254 	/* Store 32-bit ID */
1255 	Set_QH(qh->qh_reclaim_frame,
1256 	    ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));
1257 
1258 	/* Insert the endpoint onto the reclamation list */
1259 	if (ehcip->ehci_reclaim_list) {
1260 		next_qh = ehcip->ehci_reclaim_list;
1261 
1262 		while (next_qh) {
1263 			prev_qh = next_qh;
1264 			next_qh = ehci_qh_iommu_to_cpu(ehcip,
1265 			    Get_QH(next_qh->qh_reclaim_next));
1266 		}
1267 
1268 		Set_QH(prev_qh->qh_reclaim_next,
1269 		    ehci_qh_cpu_to_iommu(ehcip, qh));
1270 	} else {
1271 		ehcip->ehci_reclaim_list = qh;
1272 	}
1273 
1274 	ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
1275 }
1276 
1277 
1278 /*
1279  * ehci_deallocate_qh:
1280  *
1281  * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1282  *
1283  * NOTE: This function is also called from POLLED MODE.
1284  */
1285 void
1286 ehci_deallocate_qh(
1287 	ehci_state_t	*ehcip,
1288 	ehci_qh_t	*old_qh)
1289 {
1290 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1291 
1292 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1293 	    "ehci_deallocate_qh:");
1294 
1295 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1296 
1297 	first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1298 	    (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));
1299 
1300 	if (first_dummy_qtd) {
1301 		ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);
1302 
1303 		second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1304 		    Get_QTD(first_dummy_qtd->qtd_next_qtd));
1305 
1306 		if (second_dummy_qtd) {
1307 			ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
1308 			    EHCI_QTD_DUMMY);
1309 
1310 			ehci_deallocate_qtd(ehcip, second_dummy_qtd);
1311 		}
1312 
1313 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1314 	}
1315 
1316 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1317 	    "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);
1318 
1319 	bzero((void *)old_qh, sizeof (ehci_qh_t));
1320 	Set_QH(old_qh->qh_state, EHCI_QH_FREE);
1321 }
1322 
1323 
1324 /*
1325  * ehci_qh_cpu_to_iommu:
1326  *
 * This function converts the given Endpoint Descriptor (QH) CPU address
 * to an IO (IOMMU) address.
1329  *
1330  * NOTE: This function is also called from POLLED MODE.
1331  */
1332 uint32_t
1333 ehci_qh_cpu_to_iommu(
1334 	ehci_state_t	*ehcip,
1335 	ehci_qh_t	*addr)
1336 {
1337 	uint32_t	qh;
1338 
1339 	qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1340 	    (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1341 
1342 	ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1343 	ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1344 	    sizeof (ehci_qh_t) * ehci_qh_pool_size);
1345 
1346 	return (qh);
1347 }
1348 
1349 
1350 /*
1351  * ehci_qh_iommu_to_cpu:
1352  *
 * This function converts the given Endpoint Descriptor (QH) IO (IOMMU)
 * address to a CPU address.
1355  */
1356 ehci_qh_t *
1357 ehci_qh_iommu_to_cpu(
1358 	ehci_state_t	*ehcip,
1359 	uintptr_t	addr)
1360 {
1361 	ehci_qh_t	*qh;
1362 
1363 	if (addr == NULL) {
1364 
1365 		return (NULL);
1366 	}
1367 
1368 	qh = (ehci_qh_t *)((uintptr_t)
1369 	    (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1370 	    (uintptr_t)ehcip->ehci_qh_pool_addr);
1371 
1372 	ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1373 	ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1374 	    (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1375 
1376 	return (qh);
1377 }
1378 
1379 
1380 /*
 * Transfer Descriptor manipulation functions
1382  */
1383 
1384 /*
1385  * ehci_initialize_dummy:
1386  *
 * An Endpoint Descriptor (QH) has a dummy Transfer Descriptor (QTD) at the
 * end of its QTD list. Initially, both the head and tail pointers of the QH
1389  * point to the dummy QTD.
1390  */
1391 static int
1392 ehci_initialize_dummy(
1393 	ehci_state_t	*ehcip,
1394 	ehci_qh_t	*qh)
1395 {
1396 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1397 
1398 	/* Allocate first dummy QTD */
1399 	first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1400 
1401 	if (first_dummy_qtd == NULL) {
1402 		return (USB_NO_RESOURCES);
1403 	}
1404 
1405 	/* Allocate second dummy QTD */
1406 	second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1407 
1408 	if (second_dummy_qtd == NULL) {
1409 		/* Deallocate first dummy QTD */
1410 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1411 
1412 		return (USB_NO_RESOURCES);
1413 	}
1414 
	/* Point the QH's next QTD pointer at the first dummy QTD */
1416 	Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
1417 	    first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);
1418 
1419 	/* Set qh's dummy qtd field */
1420 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));
1421 
1422 	/* Set first_dummy's next qtd pointer */
1423 	Set_QTD(first_dummy_qtd->qtd_next_qtd,
1424 	    ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));
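	/*
	 * At this point the QH's next QTD and dummy QTD fields both
	 * reference the first dummy, whose next pointer references the
	 * second dummy; ehci_deallocate_qh() later frees both dummies
	 * when the QH is torn down.
	 */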
1425 
1426 	return (USB_SUCCESS);
1427 }
1428 
1429 /*
1430  * ehci_allocate_ctrl_resources:
1431  *
 * Calculates the number of QTDs necessary for a control transfer and
 * allocates all the necessary resources.
 *
 * Returns NULL if there are insufficient resources, otherwise the TW.
1436  */
1437 ehci_trans_wrapper_t *
1438 ehci_allocate_ctrl_resources(
1439 	ehci_state_t		*ehcip,
1440 	ehci_pipe_private_t	*pp,
1441 	usb_ctrl_req_t		*ctrl_reqp,
1442 	usb_flags_t		usb_flags)
1443 {
1444 	size_t			qtd_count = 2;
1445 	size_t			ctrl_buf_size;
1446 	ehci_trans_wrapper_t	*tw;
1447 
	/* Add one more QTD for the data phase */
1449 	if (ctrl_reqp->ctrl_wLength) {
1450 		qtd_count += 1;
1451 	}
1452 
1453 	/*
1454 	 * If we have a control data phase, the data buffer starts
1455 	 * on the next 4K page boundary. So the TW buffer is allocated
1456 	 * to be larger than required. The buffer in the range of
1457 	 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
1458 	 * and not to be transferred.
1459 	 */
1460 	if (ctrl_reqp->ctrl_wLength) {
1461 		ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
1462 		    ctrl_reqp->ctrl_wLength;
1463 	} else {
1464 		ctrl_buf_size = SETUP_SIZE;
1465 	}
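
	/*
	 * For example, a control request with a wLength of 64 bytes gets
	 * a TW buffer of EHCI_MAX_QTD_BUF_SIZE + 64 bytes: the 8 byte
	 * setup packet at offset 0, padding up to EHCI_MAX_QTD_BUF_SIZE,
	 * and the 64 data bytes starting at that 4K aligned offset.
	 */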
1466 
1467 	tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
1468 	    usb_flags, qtd_count);
1469 
1470 	return (tw);
1471 }
1472 
1473 /*
1474  * ehci_insert_ctrl_req:
1475  *
1476  * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1477  */
1478 /* ARGSUSED */
1479 void
1480 ehci_insert_ctrl_req(
1481 	ehci_state_t		*ehcip,
1482 	usba_pipe_handle_data_t	*ph,
1483 	usb_ctrl_req_t		*ctrl_reqp,
1484 	ehci_trans_wrapper_t	*tw,
1485 	usb_flags_t		usb_flags)
1486 {
1487 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1488 	uchar_t			bmRequestType = ctrl_reqp->ctrl_bmRequestType;
1489 	uchar_t			bRequest = ctrl_reqp->ctrl_bRequest;
1490 	uint16_t		wValue = ctrl_reqp->ctrl_wValue;
1491 	uint16_t		wIndex = ctrl_reqp->ctrl_wIndex;
1492 	uint16_t		wLength = ctrl_reqp->ctrl_wLength;
1493 	mblk_t			*data = ctrl_reqp->ctrl_data;
1494 	uint32_t		ctrl = 0;
1495 	uint8_t			setup_packet[8];
1496 
1497 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1498 	    "ehci_insert_ctrl_req:");
1499 
1500 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1501 
1502 	/*
1503 	 * Save current control request pointer and timeout values
1504 	 * in transfer wrapper.
1505 	 */
1506 	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
1507 	tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
1508 	    ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;
1509 
1510 	/*
1511 	 * Initialize the callback and any callback data for when
1512 	 * the qtd completes.
1513 	 */
1514 	tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
1515 	tw->tw_handle_callback_value = NULL;
1516 
1517 	/*
1518 	 * swap the setup bytes where necessary since we specified
1519 	 * NEVERSWAP
1520 	 */
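	/*
	 * The 8 byte SETUP packet is little-endian on the wire:
	 * bmRequestType, bRequest, wValue, wIndex and wLength in that
	 * order.  Storing it byte by byte keeps the layout correct
	 * regardless of host endianness.
	 */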
1521 	setup_packet[0] = bmRequestType;
1522 	setup_packet[1] = bRequest;
1523 	setup_packet[2] = wValue;
1524 	setup_packet[3] = wValue >> 8;
1525 	setup_packet[4] = wIndex;
1526 	setup_packet[5] = wIndex >> 8;
1527 	setup_packet[6] = wLength;
1528 	setup_packet[7] = wLength >> 8;
1529 
1530 	bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);
1531 
1532 	Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);
1533 
1534 	ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);
1535 
1536 	/*
1537 	 * The QTD's are placed on the QH one at a time.
1538 	 * Once this QTD is placed on the done list, the
1539 	 * data or status phase QTD will be enqueued.
1540 	 */
1541 	(void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
1542 	    EHCI_CTRL_SETUP_PHASE, pp, tw);
1543 
1544 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1545 	    "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);
1546 
1547 	/*
1548 	 * If this control transfer has a data phase, record the
1549 	 * direction. If the data phase is an OUT transaction,
1550 	 * copy the data into the buffer of the transfer wrapper.
1551 	 */
1552 	if (wLength != 0) {
1553 		/* There is a data stage.  Find the direction */
1554 		if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
1555 			tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
1556 		} else {
1557 			tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
1558 
1559 			/* Copy the data into the message */
1560 			bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
1561 				wLength);
1562 
1563 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1564 				wLength + EHCI_MAX_QTD_BUF_SIZE);
1565 		}
1566 
1567 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);
1568 
1569 		/*
1570 		 * Create the QTD.  If this is an OUT transaction,
1571 		 * the data is already in the buffer of the TW.
1572 		 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
1573 		 * which is 4K aligned, though the ctrl phase only
1574 		 * transfers a length of SETUP_SIZE. The padding data
1575 		 * in the TW buffer are discarded.
1576 		 */
1577 		(void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
1578 		    tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
1579 		    EHCI_CTRL_DATA_PHASE, pp, tw);
1580 
1581 		/*
		 * The direction of the STATUS QTD depends on
1583 		 * the direction of the transfer.
1584 		 */
1585 		if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
1586 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1587 			    EHCI_QTD_CTRL_OUT_PID |
1588 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1589 		} else {
1590 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1591 			    EHCI_QTD_CTRL_IN_PID |
1592 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1593 		}
1594 	} else {
1595 		/*
		 * There is no data stage, so initiate the
		 * status phase from the host.
1598 		 */
1599 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
1600 		    EHCI_QTD_CTRL_IN_PID |
1601 		    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1602 	}
1603 
1604 
1605 	(void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
1606 	    EHCI_CTRL_STATUS_PHASE, pp,  tw);
1607 
1608 	/* Start the timer for this control transfer */
1609 	ehci_start_xfer_timer(ehcip, pp, tw);
1610 }
1611 
1612 
1613 /*
1614  * ehci_allocate_bulk_resources:
1615  *
 * Calculates the number of QTDs necessary for a bulk transfer and
 * allocates all the necessary resources.
 *
 * Returns NULL if there are insufficient resources, otherwise the TW.
1620  */
1621 ehci_trans_wrapper_t *
1622 ehci_allocate_bulk_resources(
1623 	ehci_state_t		*ehcip,
1624 	ehci_pipe_private_t	*pp,
1625 	usb_bulk_req_t		*bulk_reqp,
1626 	usb_flags_t		usb_flags)
1627 {
1628 	size_t			qtd_count = 0;
1629 	ehci_trans_wrapper_t	*tw;
1630 
1631 	/* Check the size of bulk request */
1632 	if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1633 
1634 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1635 		    "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1636 		    "more than 0x%x", bulk_reqp->bulk_len,
1637 		    EHCI_MAX_BULK_XFER_SIZE);
1638 
1639 		return (NULL);
1640 	}
1641 
	/* Compute the number of QTDs needed for this bulk request */
1643 	qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1644 	if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE) {
1645 		qtd_count += 1;
1646 	}
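	/*
	 * This is a ceiling division: one QTD per EHCI_MAX_QTD_XFER_SIZE
	 * bytes plus one more for any remainder.  Per the EHCI spec, a
	 * single QTD can address at most five 4K buffer pages, i.e.
	 * 20KB when the buffer is page aligned.
	 */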
1647 
1648 	tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1649 	    usb_flags, qtd_count);
1650 
1651 	return (tw);
1652 }
1653 
1654 /*
1655  * ehci_insert_bulk_req:
1656  *
1657  * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1658  * endpoint.
1659  */
1660 /* ARGSUSED */
1661 void
1662 ehci_insert_bulk_req(
1663 	ehci_state_t		*ehcip,
1664 	usba_pipe_handle_data_t	*ph,
1665 	usb_bulk_req_t		*bulk_reqp,
1666 	ehci_trans_wrapper_t	*tw,
1667 	usb_flags_t		flags)
1668 {
1669 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1670 	uint_t			bulk_pkt_size, count;
1671 	size_t			residue = 0, len = 0;
1672 	uint32_t		ctrl = 0;
1673 	int			pipe_dir;
1674 
1675 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1676 	    "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
1677 	    bulk_reqp, flags);
1678 
1679 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1680 
1681 	/* Get the bulk pipe direction */
1682 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
1683 
1684 	/* Get the required bulk packet size */
1685 	bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);
1686 
1687 	residue = tw->tw_length % bulk_pkt_size;
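	/*
	 * residue is the size of the final, possibly short, QTD.  If the
	 * transfer length is an exact multiple of the QTD size, residue
	 * is zero and the last QTD keeps the full bulk_pkt_size.
	 */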
1688 
1689 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1690 	    "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);
1691 
1692 	/*
1693 	 * Save current bulk request pointer and timeout values
1694 	 * in transfer wrapper.
1695 	 */
1696 	tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
1697 	tw->tw_timeout = bulk_reqp->bulk_timeout;
1698 
1699 	/*
1700 	 * Initialize the callback and any callback
1701 	 * data required when the qtd completes.
1702 	 */
1703 	tw->tw_handle_qtd = ehci_handle_bulk_qtd;
1704 	tw->tw_handle_callback_value = NULL;
1705 
1706 	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1707 	    EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;
1708 
1709 	if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {
1710 
1711 		ASSERT(bulk_reqp->bulk_data != NULL);
1712 
1713 		bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
1714 			bulk_reqp->bulk_len);
1715 
1716 		Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1717 			bulk_reqp->bulk_len);
1718 	}
1719 
1720 	ctrl = tw->tw_direction;
1721 
1722 	/* Insert all the bulk QTDs */
1723 	for (count = 0; count < tw->tw_num_qtds; count++) {
1724 
1725 		/* Check for last qtd */
1726 		if (count == (tw->tw_num_qtds - 1)) {
1727 
1728 			ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;
1729 
1730 			/* Check for inserting residue data */
1731 			if (residue) {
1732 				bulk_pkt_size = residue;
1733 			}
1734 		}
1735 
1736 		/* Insert the QTD onto the endpoint */
1737 		(void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
1738 		    0, pp, tw);
1739 
1740 		len = len + bulk_pkt_size;
1741 	}
1742 
1743 	/* Start the timer for this bulk transfer */
1744 	ehci_start_xfer_timer(ehcip, pp, tw);
1745 }
1746 
1747 
1748 /*
1749  * ehci_start_periodic_pipe_polling:
1750  *
1751  * NOTE: This function is also called from POLLED MODE.
1752  */
1753 int
1754 ehci_start_periodic_pipe_polling(
1755 	ehci_state_t		*ehcip,
1756 	usba_pipe_handle_data_t	*ph,
1757 	usb_opaque_t		periodic_in_reqp,
1758 	usb_flags_t		flags)
1759 {
1760 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1761 	usb_ep_descr_t		*eptd = &ph->p_ep;
1762 	int			error = USB_SUCCESS;
1763 
1764 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
1765 	    "ehci_start_periodic_pipe_polling: ep%d",
1766 	    ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);
1767 
1768 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1769 
1770 	/*
1771 	 * Check and handle start polling on root hub interrupt pipe.
1772 	 */
1773 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
1774 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
1775 	    USB_EP_ATTR_INTR)) {
1776 
1777 		error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
1778 		    (usb_intr_req_t *)periodic_in_reqp, flags);
1779 
1780 		return (error);
1781 	}
1782 
1783 	switch (pp->pp_state) {
1784 	case EHCI_PIPE_STATE_IDLE:
1785 		/* Save the Original client's Periodic IN request */
1786 		pp->pp_client_periodic_in_reqp = periodic_in_reqp;
1787 
1788 		/*
		 * If this pipe is uninitialized or a valid QTD is
		 * not found, then insert a QTD on the interrupt IN
		 * endpoint.
1792 		 */
1793 		error = ehci_start_pipe_polling(ehcip, ph, flags);
1794 
1795 		if (error != USB_SUCCESS) {
1796 			USB_DPRINTF_L2(PRINT_MASK_INTR,
1797 			    ehcip->ehci_log_hdl,
1798 			    "ehci_start_periodic_pipe_polling: "
1799 			    "Start polling failed");
1800 
1801 			pp->pp_client_periodic_in_reqp = NULL;
1802 
1803 			return (error);
1804 		}
1805 
1806 		USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
1807 		    "ehci_start_periodic_pipe_polling: PP = 0x%p", pp);
1808 
1809 #ifdef DEBUG
1810 		switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1811 		case USB_EP_ATTR_INTR:
1812 			ASSERT((pp->pp_tw_head != NULL) &&
1813 			    (pp->pp_tw_tail != NULL));
1814 			break;
1815 		case USB_EP_ATTR_ISOCH:
1816 			ASSERT((pp->pp_itw_head != NULL) &&
1817 			    (pp->pp_itw_tail != NULL));
1818 			break;
1819 		}
1820 #endif
1821 
1822 		break;
1823 	case EHCI_PIPE_STATE_ACTIVE:
1824 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1825 		    ehcip->ehci_log_hdl,
1826 		    "ehci_start_periodic_pipe_polling: "
1827 		    "Polling is already in progress");
1828 
1829 		error = USB_FAILURE;
1830 		break;
1831 	case EHCI_PIPE_STATE_ERROR:
1832 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1833 		    ehcip->ehci_log_hdl,
1834 		    "ehci_start_periodic_pipe_polling: "
		    "Pipe is halted; perform a reset "
		    "before restarting polling");
1837 
1838 		error = USB_FAILURE;
1839 		break;
1840 	default:
1841 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1842 		    ehcip->ehci_log_hdl,
1843 		    "ehci_start_periodic_pipe_polling: "
1844 		    "Undefined state");
1845 
1846 		error = USB_FAILURE;
1847 		break;
1848 	}
1849 
1850 	return (error);
1851 }
1852 
1853 
1854 /*
1855  * ehci_start_pipe_polling:
1856  *
 * Insert the number of periodic requests corresponding to the polling
 * interval as calculated during pipe open.
1859  */
1860 static int
1861 ehci_start_pipe_polling(
1862 	ehci_state_t		*ehcip,
1863 	usba_pipe_handle_data_t	*ph,
1864 	usb_flags_t		flags)
1865 {
1866 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1867 	usb_ep_descr_t		*eptd = &ph->p_ep;
1868 	int			error = USB_FAILURE;
1869 
1870 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1871 	    "ehci_start_pipe_polling:");
1872 
1873 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1874 
1875 	/*
1876 	 * For the start polling, pp_max_periodic_req_cnt will be zero
1877 	 * and for the restart polling request, it will be non zero.
1878 	 *
1879 	 * In case of start polling request, find out number of requests
1880 	 * required for the Interrupt IN endpoints corresponding to the
1881 	 * endpoint polling interval. For Isochronous IN endpoints, it is
1882 	 * always fixed since its polling interval will be one ms.
1883 	 */
1884 	if (pp->pp_max_periodic_req_cnt == 0) {
1885 
1886 		ehci_set_periodic_pipe_polling(ehcip, ph);
1887 	}
1888 
1889 	ASSERT(pp->pp_max_periodic_req_cnt != 0);
1890 
1891 	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1892 	case USB_EP_ATTR_INTR:
1893 		error = ehci_start_intr_polling(ehcip, ph, flags);
1894 		break;
1895 	case USB_EP_ATTR_ISOCH:
1896 		error = ehci_start_isoc_polling(ehcip, ph, flags);
1897 		break;
1898 	}
1899 
1900 	return (error);
1901 }
1902 
1903 static int
1904 ehci_start_intr_polling(
1905 	ehci_state_t		*ehcip,
1906 	usba_pipe_handle_data_t	*ph,
1907 	usb_flags_t		flags)
1908 {
1909 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1910 	ehci_trans_wrapper_t	*tw_list, *tw;
1911 	int			i, total_tws;
1912 	int			error = USB_SUCCESS;
1913 
1914 	/* Allocate all the necessary resources for the IN transfer */
1915 	tw_list = NULL;
1916 	total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
1917 	for (i = 0; i < total_tws; i += 1) {
1918 		tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
1919 		if (tw == NULL) {
1920 			error = USB_NO_RESOURCES;
1921 			/* There are not enough resources, deallocate the TWs */
1922 			tw = tw_list;
1923 			while (tw != NULL) {
1924 				tw_list = tw->tw_next;
1925 				ehci_deallocate_intr_in_resource(
1926 					ehcip, pp, tw);
1927 				ehci_deallocate_tw(ehcip, pp, tw);
1928 				tw = tw_list;
1929 			}
1930 
1931 			return (error);
1932 		} else {
1933 			if (tw_list == NULL) {
1934 				tw_list = tw;
1935 			}
1936 		}
1937 	}
1938 
1939 	while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {
1940 
1941 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1942 		    "ehci_start_pipe_polling: max = %d curr = %d tw = %p:",
1943 		    pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
1944 		    tw_list);
1945 
1946 		tw = tw_list;
1947 		tw_list = tw->tw_next;
1948 
1949 		ehci_insert_intr_req(ehcip, pp, tw, flags);
1950 
1951 		pp->pp_cur_periodic_req_cnt++;
1952 	}
1953 
1954 	return (error);
1955 }
1956 
1957 
1958 /*
1959  * ehci_set_periodic_pipe_polling:
1960  *
 * Calculate the number of periodic requests needed for the interrupt
 * IN endpoint's polling interval. The table below gives the number of
 * periodic requests needed according to the endpoint polling interval.
1965  *
1966  * Polling interval		Number of periodic requests
1967  *
1968  * 1ms				4
1969  * 2ms				2
1970  * 4ms to 32ms			1
1971  */
1972 static void
1973 ehci_set_periodic_pipe_polling(
1974 	ehci_state_t		*ehcip,
1975 	usba_pipe_handle_data_t	*ph)
1976 {
1977 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1978 	usb_ep_descr_t		*endpoint = &ph->p_ep;
1979 	uchar_t			ep_attr = endpoint->bmAttributes;
1980 	uint_t			interval;
1981 
1982 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1983 	    "ehci_set_periodic_pipe_polling:");
1984 
1985 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1986 
1987 	pp->pp_cur_periodic_req_cnt = 0;
1988 
1989 	/*
1990 	 * Check usb flag whether USB_FLAGS_ONE_TIME_POLL flag is
1991 	 * set and if so, set pp->pp_max_periodic_req_cnt to one.
1992 	 */
1993 	if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
1994 	    (pp->pp_client_periodic_in_reqp)) {
1995 		usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
1996 					pp->pp_client_periodic_in_reqp;
1997 
1998 		if (intr_reqp->intr_attributes &
1999 		    USB_ATTRS_ONE_XFER) {
2000 
2001 			pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2002 
2003 			return;
2004 		}
2005 	}
2006 
2007 	mutex_enter(&ph->p_usba_device->usb_mutex);
2008 
2009 	/*
2010 	 * The ehci_adjust_polling_interval function will not fail
2011 	 * at this instance since bandwidth allocation is already
2012 	 * done. Here we are getting only the periodic interval.
2013 	 */
2014 	interval = ehci_adjust_polling_interval(ehcip, endpoint,
2015 		ph->p_usba_device->usb_port_status);
2016 
2017 	mutex_exit(&ph->p_usba_device->usb_mutex);
2018 
2019 	switch (interval) {
2020 	case EHCI_INTR_1MS_POLL:
2021 		pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
2022 		break;
2023 	case EHCI_INTR_2MS_POLL:
2024 		pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
2025 		break;
2026 	default:
2027 		pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2028 		break;
2029 	}
2030 
2031 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2032 	    "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
2033 	    pp->pp_max_periodic_req_cnt);
2034 }
2035 
2036 /*
2037  * ehci_allocate_intr_resources:
2038  *
 * Calculates the number of QTDs necessary for an interrupt transfer, and
 * allocates all the necessary resources.
 *
 * Returns NULL if there are insufficient resources, otherwise the TW.
2043  */
2044 ehci_trans_wrapper_t *
2045 ehci_allocate_intr_resources(
2046 	ehci_state_t		*ehcip,
2047 	usba_pipe_handle_data_t	*ph,
2048 	usb_intr_req_t		*intr_reqp,
2049 	usb_flags_t		flags)
2050 {
2051 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2052 	int			pipe_dir;
2053 	size_t			qtd_count = 1;
2054 	size_t			tw_length;
2055 	ehci_trans_wrapper_t	*tw;
2056 
2057 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2058 	    "ehci_allocate_intr_resources:");
2059 
2060 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2061 
2062 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2063 
2064 	/* Get the length of interrupt transfer & alloc data */
2065 	if (intr_reqp) {
2066 		tw_length = intr_reqp->intr_len;
2067 	} else {
2068 		ASSERT(pipe_dir == USB_EP_DIR_IN);
2069 		tw_length = (pp->pp_client_periodic_in_reqp) ?
2070 		    (((usb_intr_req_t *)pp->
2071 		    pp_client_periodic_in_reqp)->intr_len) :
2072 		    ph->p_ep.wMaxPacketSize;
2073 	}
2074 
2075 	/* Check the size of interrupt request */
2076 	if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2077 
2078 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2079 		    "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2080 		    "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2081 
2082 		return (NULL);
2083 	}
2084 
2085 	if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2086 	    qtd_count)) == NULL) {
2087 
2088 		return (NULL);
2089 	}
2090 
	if (pipe_dir == USB_EP_DIR_IN) {
		if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
		    USB_SUCCESS) {
			/* Not enough resources; free the TW and fail */
			ehci_deallocate_tw(ehcip, pp, tw);

			return (NULL);
		}
		tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2097 	} else {
2098 		ASSERT(intr_reqp->intr_data != NULL);
2099 
2100 		/* Copy the data into the buffer */
2101 		bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2102 		    intr_reqp->intr_len);
2103 
2104 		Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2105 		    intr_reqp->intr_len);
2106 
2107 		tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2108 		tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2109 	}
2110 
2111 	if (intr_reqp) {
2112 		tw->tw_timeout = intr_reqp->intr_timeout;
2113 	}
2114 
2115 	/*
2116 	 * Initialize the callback and any callback
2117 	 * data required when the qtd completes.
2118 	 */
2119 	tw->tw_handle_qtd = ehci_handle_intr_qtd;
2120 	tw->tw_handle_callback_value = NULL;
2121 
2122 	return (tw);
2123 }
2124 
2125 
2126 /*
2127  * ehci_insert_intr_req:
2128  *
2129  * Insert an Interrupt request into the Host Controller's periodic list.
2130  */
2131 /* ARGSUSED */
2132 void
2133 ehci_insert_intr_req(
2134 	ehci_state_t		*ehcip,
2135 	ehci_pipe_private_t	*pp,
2136 	ehci_trans_wrapper_t	*tw,
2137 	usb_flags_t		flags)
2138 {
2139 	uint_t			ctrl = 0;
2140 
2141 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2142 
2143 	ASSERT(tw->tw_curr_xfer_reqp != NULL);
2144 
2145 	ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2146 
2147 	/* Insert another interrupt QTD */
2148 	(void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw);
2149 
2150 	/* Start the timer for this Interrupt transfer */
2151 	ehci_start_xfer_timer(ehcip, pp, tw);
2152 }
2153 
2154 
2155 /*
2156  * ehci_stop_periodic_pipe_polling:
2157  */
2158 /* ARGSUSED */
2159 int
2160 ehci_stop_periodic_pipe_polling(
2161 	ehci_state_t		*ehcip,
2162 	usba_pipe_handle_data_t	*ph,
2163 	usb_flags_t		flags)
2164 {
2165 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2166 	usb_ep_descr_t		*eptd = &ph->p_ep;
2167 
2168 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2169 	    "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2170 
2171 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2172 
2173 	/*
2174 	 * Check and handle stop polling on root hub interrupt pipe.
2175 	 */
2176 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2177 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2178 	    USB_EP_ATTR_INTR)) {
2179 
2180 		ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2181 
2182 		return (USB_SUCCESS);
2183 	}
2184 
2185 	if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2186 
2187 		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2188 		    "ehci_stop_periodic_pipe_polling: "
2189 		    "Polling already stopped");
2190 
2191 		return (USB_SUCCESS);
2192 	}
2193 
2194 	/* Set pipe state to pipe stop polling */
2195 	pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2196 
2197 	ehci_pipe_cleanup(ehcip, ph);
2198 
2199 	return (USB_SUCCESS);
2200 }
2201 
2202 
2203 /*
2204  * ehci_insert_qtd:
2205  *
2206  * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2207  * Always returns USB_SUCCESS for now.	Once Isoch has been implemented,
2208  * it may return USB_FAILURE.
2209  */
2210 int
2211 ehci_insert_qtd(
2212 	ehci_state_t		*ehcip,
2213 	uint32_t		qtd_ctrl,
2214 	size_t			qtd_dma_offs,
2215 	size_t			qtd_length,
2216 	uint32_t		qtd_ctrl_phase,
2217 	ehci_pipe_private_t	*pp,
2218 	ehci_trans_wrapper_t	*tw)
2219 {
2220 	ehci_qtd_t		*curr_dummy_qtd, *next_dummy_qtd;
2221 	ehci_qtd_t		*new_dummy_qtd;
2222 	ehci_qh_t		*qh = pp->pp_qh;
2223 	int			error = USB_SUCCESS;
2224 
2225 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2226 
2227 	/* Allocate new dummy QTD */
2228 	new_dummy_qtd = tw->tw_qtd_free_list;
2229 
2230 	ASSERT(new_dummy_qtd != NULL);
2231 	tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
2232 	    Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
2233 	Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);
2234 
2235 	/* Get the current and next dummy QTDs */
2236 	curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2237 	    Get_QH(qh->qh_dummy_qtd));
2238 	next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2239 	    Get_QTD(curr_dummy_qtd->qtd_next_qtd));
2240 
2241 	/* Update QH's dummy qtd field */
2242 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));
2243 
2244 	/* Update next dummy's next qtd pointer */
2245 	Set_QTD(next_dummy_qtd->qtd_next_qtd,
2246 	    ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));
2247 
2248 	/*
2249 	 * Fill in the current dummy qtd and
2250 	 * add the new dummy to the end.
2251 	 */
2252 	ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
2253 	    qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);
2254 
2255 	/* Insert this qtd onto the tw */
2256 	ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);
2257 
2258 	/*
2259 	 * Insert this qtd onto active qtd list.
2260 	 * Don't insert polled mode qtd here.
2261 	 */
2262 	if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
2263 		/* Insert this qtd onto active qtd list */
2264 		ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
2265 	}
2266 
2267 	/* Print qh and qtd */
2268 	ehci_print_qh(ehcip, qh);
2269 	ehci_print_qtd(ehcip, curr_dummy_qtd);
2270 
2271 	return (error);
2272 }
2273 
2274 
2275 /*
2276  * ehci_allocate_qtd_from_pool:
2277  *
2278  * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2279  */
2280 static ehci_qtd_t *
2281 ehci_allocate_qtd_from_pool(ehci_state_t	*ehcip)
2282 {
2283 	int		i, ctrl;
2284 	ehci_qtd_t	*qtd;
2285 
2286 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2287 
2288 	/*
2289 	 * Search for a blank Transfer Descriptor (QTD)
2290 	 * in the QTD buffer pool.
2291 	 */
2292 	for (i = 0; i < ehci_qtd_pool_size; i ++) {
2293 		ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2294 		if (ctrl == EHCI_QTD_FREE) {
2295 			break;
2296 		}
2297 	}
2298 
2299 	if (i >= ehci_qtd_pool_size) {
2300 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2301 		    "ehci_allocate_qtd_from_pool: QTD exhausted");
2302 
2303 		return (NULL);
2304 	}
2305 
2306 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2307 	    "ehci_allocate_qtd_from_pool: Allocated %d", i);
2308 
2309 	/* Create a new dummy for the end of the QTD list */
2310 	qtd = &ehcip->ehci_qtd_pool_addr[i];
2311 
2312 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2313 	    "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2314 
2315 	/* Mark the newly allocated QTD as a dummy */
2316 	Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2317 
2318 	/* Mark the status of this new QTD to halted state */
2319 	Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2320 
2321 	/* Disable dummy QTD's next and alternate next pointers */
2322 	Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2323 	Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2324 
2325 	return (qtd);
2326 }
2327 
2328 
2329 /*
2330  * ehci_fill_in_qtd:
2331  *
2332  * Fill in the fields of a Transfer Descriptor (QTD).
2333  * The "Buffer Pointer" fields of a QTD are retrieved from the TW
2334  * it is associated with.
2335  *
2336  * Note:
 * qtd_dma_offs - the starting offset into the TW buffer from which the QTD
 *                should transfer. It should be 4K aligned. When a TW has
 *                more than one QTD, the QTDs must be filled in increasing
 *                order of offset.
 * qtd_length - the total number of bytes to transfer.
2342  */
2343 /*ARGSUSED*/
2344 static void
2345 ehci_fill_in_qtd(
2346 	ehci_state_t		*ehcip,
2347 	ehci_qtd_t		*qtd,
2348 	uint32_t		qtd_ctrl,
2349 	size_t			qtd_dma_offs,
2350 	size_t			qtd_length,
2351 	uint32_t		qtd_ctrl_phase,
2352 	ehci_pipe_private_t	*pp,
2353 	ehci_trans_wrapper_t	*tw)
2354 {
2355 	uint32_t		buf_addr;
2356 	size_t			buf_len = qtd_length;
2357 	uint32_t		ctrl = qtd_ctrl;
2358 	uint_t			i = 0;
2359 	int			rem_len;
2360 
2361 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2362 	    "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
2363 	    "len 0x%lx", qtd, qtd_ctrl, qtd_dma_offs, qtd_length);
2364 
2365 	/* Assert that the qtd to be filled in is a dummy */
2366 	ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);
2367 
	/* Change QTD's state to Active */
2369 	Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);
2370 
	/* Set the total length of the data transfer */
2372 	ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
2373 	    & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);
2374 
2375 	/*
2376 	 * QTDs must be filled in increasing DMA offset order.
2377 	 * tw_dma_offs is initialized to be 0 at TW creation and
2378 	 * is only increased in this function.
2379 	 */
2380 	ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);
2381 
2382 	/*
2383 	 * Save the starting dma buffer offset used and
	 * the length of data that will be transferred in
2385 	 * the current QTD.
2386 	 */
2387 	Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
2388 	Set_QTD(qtd->qtd_xfer_len, buf_len);
2389 
2390 	while (buf_len) {
2391 		/*
2392 		 * Advance to the next DMA cookie until finding the cookie
2393 		 * that qtd_dma_offs falls in.
2394 		 * It is very likely this loop will never repeat more than
		 * once. It is here just to accommodate the case where
		 * qtd_dma_offs advances across multiple cookies between two
		 * consecutive calls into this function. In that case, the
		 * intervening DMA cookies are simply skipped.
2399 		 */
2400 		while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2401 		    qtd_dma_offs) {
2402 			/*
2403 			 * tw_dma_offs always points to the starting offset
2404 			 * of a cookie
2405 			 */
2406 			tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2407 			ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2408 			tw->tw_cookie_idx++;
2409 			ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2410 		}
2411 
2412 		/*
		 * Compute the remaining buffer length that can be filled
		 * into the QTD from the current DMA cookie.
2415 		 */
2416 		rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2417 		    qtd_dma_offs;
2418 
2419 		/* Update the beginning of the buffer */
2420 		buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
2421 		    tw->tw_cookie.dmac_address;
2422 		ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
2423 		Set_QTD(qtd->qtd_buf[i], buf_addr);
2424 
2425 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2426 		    "ehci_fill_in_qtd: dmac_addr 0x%p dmac_size "
2427 		    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2428 		    tw->tw_cookie_idx);
2429 
2430 		if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
2431 			ASSERT(buf_len <= rem_len);
2432 			break;
2433 		} else {
2434 			ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
2435 			buf_len -= EHCI_MAX_QTD_BUF_SIZE;
2436 			qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
2437 		}
2438 
2439 		i++;
2440 	}
2441 
2442 	/*
	 * Set up the alternate next qTD pointer if appropriate.  The alternate
	 * qtd currently points to a QTD that is not yet linked, but will be
	 * in the very near future.  If a short_xfer occurs in this situation,
	 * the HC will automatically skip this QH.  Eventually everything will
	 * be placed and the alternate qtd will be a valid QTD.  For more
	 * information on alternate qtds see section 3.5.2 of the
2449 	 * EHCI spec.
2450 	 */
2451 	if (tw->tw_alt_qtd != NULL) {
2452 		Set_QTD(qtd->qtd_alt_next_qtd,
2453 		    (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
2454 		    EHCI_QTD_ALT_NEXT_QTD_PTR));
2455 	}
2456 
2457 	/*
2458 	 * For control, bulk and interrupt QTD, now
2459 	 * enable current QTD by setting active bit.
2460 	 */
2461 	Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));
2462 
2463 	/*
	 * For a Control Xfer, qtd_ctrl_phase is a valid field.
2465 	 */
2466 	if (qtd_ctrl_phase) {
2467 		Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
2468 	}
2469 
2470 	/* Set the transfer wrapper */
2471 	ASSERT(tw != NULL);
2472 	ASSERT(tw->tw_id != NULL);
2473 
2474 	Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
2475 }
2476 
2477 
2478 /*
2479  * ehci_insert_qtd_on_tw:
2480  *
 * The transfer wrapper keeps a list of all Transfer Descriptors (QTDs) that
 * are allocated for this transfer. Insert a QTD onto this list. The list
 * of QTDs does not include the dummy QTD that is at the end of the list of
 * QTDs for the endpoint.
2485  */
2486 static void
2487 ehci_insert_qtd_on_tw(
2488 	ehci_state_t		*ehcip,
2489 	ehci_trans_wrapper_t	*tw,
2490 	ehci_qtd_t		*qtd)
2491 {
2492 	/*
2493 	 * Set the next pointer to NULL because
2494 	 * this is the last QTD on list.
2495 	 */
2496 	Set_QTD(qtd->qtd_tw_next_qtd, NULL);
2497 
2498 	if (tw->tw_qtd_head == NULL) {
2499 		ASSERT(tw->tw_qtd_tail == NULL);
2500 		tw->tw_qtd_head = qtd;
2501 		tw->tw_qtd_tail = qtd;
2502 	} else {
2503 		ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2504 
2505 		ASSERT(dummy != NULL);
2506 		ASSERT(dummy != qtd);
2507 		ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2508 
2509 		/* Add the qtd to the end of the list */
2510 		Set_QTD(dummy->qtd_tw_next_qtd,
2511 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2512 
2513 		tw->tw_qtd_tail = qtd;
2514 
2515 		ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
2516 	}
2517 }
2518 
2519 
2520 /*
2521  * ehci_insert_qtd_into_active_qtd_list:
2522  *
2523  * Insert current QTD into active QTD list.
2524  */
2525 static void
2526 ehci_insert_qtd_into_active_qtd_list(
2527 	ehci_state_t		*ehcip,
2528 	ehci_qtd_t		*qtd)
2529 {
2530 	ehci_qtd_t		*curr_qtd, *next_qtd;
2531 
2532 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2533 
2534 	curr_qtd = ehcip->ehci_active_qtd_list;
2535 
2536 	/* Insert this QTD into QTD Active List */
2537 	if (curr_qtd) {
2538 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2539 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2540 
2541 		while (next_qtd) {
2542 			curr_qtd = next_qtd;
2543 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2544 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2545 		}
2546 
2547 		Set_QTD(qtd->qtd_active_qtd_prev,
2548 		    ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2549 
2550 		Set_QTD(curr_qtd->qtd_active_qtd_next,
2551 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2552 	} else {
2553 		ehcip->ehci_active_qtd_list = qtd;
2554 		Set_QTD(qtd->qtd_active_qtd_next, NULL);
2555 		Set_QTD(qtd->qtd_active_qtd_prev, NULL);
2556 	}
2557 }
2558 
2559 
2560 /*
2561  * ehci_remove_qtd_from_active_qtd_list:
2562  *
2563  * Remove current QTD from the active QTD list.
2564  *
2565  * NOTE: This function is also called from POLLED MODE.
2566  */
2567 void
2568 ehci_remove_qtd_from_active_qtd_list(
2569 	ehci_state_t		*ehcip,
2570 	ehci_qtd_t		*qtd)
2571 {
2572 	ehci_qtd_t		*curr_qtd, *prev_qtd, *next_qtd;
2573 
2574 	ASSERT(qtd != NULL);
2575 
2576 	curr_qtd = ehcip->ehci_active_qtd_list;
2577 
2578 	while ((curr_qtd) && (curr_qtd != qtd)) {
2579 		curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2580 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2581 	}
2582 
2583 	if ((curr_qtd) && (curr_qtd == qtd)) {
2584 		prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2585 		    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2586 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2587 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2588 
2589 		if (prev_qtd) {
2590 			Set_QTD(prev_qtd->qtd_active_qtd_next,
2591 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2592 		} else {
2593 			ehcip->ehci_active_qtd_list = next_qtd;
2594 		}
2595 
2596 		if (next_qtd) {
2597 			Set_QTD(next_qtd->qtd_active_qtd_prev,
2598 			    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2599 		}
2600 	} else {
2601 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2602 		    "ehci_remove_qtd_from_active_qtd_list: "
2603 			"Unable to find QTD in active_qtd_list");
2604 	}
2605 }
2606 
2607 
2608 /*
2609  * ehci_traverse_qtds:
2610  *
 * Traverse the list of QTDs for the given pipe using the transfer wrapper.
 * Since the endpoint is marked as Halted, the Host Controller (HC) is no
 * longer accessing these QTDs. Remove all QTDs attached to the endpoint.
2614  */
2615 static void
2616 ehci_traverse_qtds(
2617 	ehci_state_t		*ehcip,
2618 	usba_pipe_handle_data_t	*ph)
2619 {
2620 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2621 	ehci_trans_wrapper_t	*next_tw;
2622 	ehci_qtd_t		*qtd;
2623 	ehci_qtd_t		*next_qtd;
2624 
2625 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2626 
2627 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2628 	    "ehci_traverse_qtds:");
2629 
2630 	/* Process the transfer wrappers for this pipe */
2631 	next_tw = pp->pp_tw_head;
2632 
2633 	while (next_tw) {
		/* Stop the transfer timer */
2635 		ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);
2636 
2637 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
2638 
2639 		/* Walk through each QTD for this transfer wrapper */
2640 		while (qtd) {
2641 			/* Remove this QTD from active QTD list */
2642 			ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);
2643 
2644 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2645 			    Get_QTD(qtd->qtd_tw_next_qtd));
2646 
2647 			/* Deallocate this QTD */
2648 			ehci_deallocate_qtd(ehcip, qtd);
2649 
2650 			qtd = next_qtd;
2651 		}
2652 
2653 		next_tw = next_tw->tw_next;
2654 	}
2655 
2656 	/* Clear current qtd pointer */
2657 	Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);
2658 
2659 	/* Update the next qtd pointer in the QH */
2660 	Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
2661 }
2662 
2663 
2664 /*
2665  * ehci_deallocate_qtd:
2666  *
2667  * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2668  *
2669  * NOTE: This function is also called from POLLED MODE.
2670  */
2671 void
2672 ehci_deallocate_qtd(
2673 	ehci_state_t		*ehcip,
2674 	ehci_qtd_t		*old_qtd)
2675 {
2676 	ehci_trans_wrapper_t	*tw = NULL;
2677 
2678 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2679 	    "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);
2680 
2681 	/*
	 * Obtain the transfer wrapper; tw will be
	 * NULL for dummy QTDs.
2684 	 */
2685 	if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
2686 		tw = (ehci_trans_wrapper_t *)
2687 		EHCI_LOOKUP_ID((uint32_t)
2688 		Get_QTD(old_qtd->qtd_trans_wrapper));
2689 
2690 		ASSERT(tw != NULL);
2691 	}
2692 
2693 	/*
	 * If the QTD's transfer wrapper is NULL, don't access its TW;
	 * just free the QTD.
2696 	 */
2697 	if (tw) {
2698 		ehci_qtd_t	*qtd, *next_qtd;
2699 
2700 		qtd = tw->tw_qtd_head;
2701 
2702 		if (old_qtd != qtd) {
2703 			next_qtd = ehci_qtd_iommu_to_cpu(
2704 				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2705 
2706 			while (next_qtd != old_qtd) {
2707 				qtd = next_qtd;
2708 				next_qtd = ehci_qtd_iommu_to_cpu(
2709 				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2710 			}
2711 
2712 			Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);
2713 
2714 			if (qtd->qtd_tw_next_qtd == NULL) {
2715 				tw->tw_qtd_tail = qtd;
2716 			}
2717 		} else {
2718 			tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
2719 			    ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));
2720 
2721 			if (tw->tw_qtd_head == NULL) {
2722 				tw->tw_qtd_tail = NULL;
2723 			}
2724 		}
2725 	}
2726 
2727 	bzero((void *)old_qtd, sizeof (ehci_qtd_t));
2728 	Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);
2729 
2730 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2731 	    "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
2732 }
2733 
2734 
2735 /*
2736  * ehci_qtd_cpu_to_iommu:
2737  *
 * This function converts the given Transfer Descriptor (QTD) CPU address
 * to an IO (IOMMU) address.
2740  *
2741  * NOTE: This function is also called from POLLED MODE.
2742  */
2743 uint32_t
2744 ehci_qtd_cpu_to_iommu(
2745 	ehci_state_t	*ehcip,
2746 	ehci_qtd_t	*addr)
2747 {
2748 	uint32_t	td;
2749 
2750 	td  = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2751 	    (uint32_t)((uintptr_t)addr -
2752 	    (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2753 
2754 	ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2755 	    (uint32_t) (sizeof (ehci_qtd_t) *
2756 	    (addr - ehcip->ehci_qtd_pool_addr))) ==
2757 	    (ehcip->ehci_qtd_pool_cookie.dmac_address +
2758 	    (uint32_t)((uintptr_t)addr - (uintptr_t)
2759 	    (ehcip->ehci_qtd_pool_addr))));
2760 
2761 	ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2762 	ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2763 	    sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2764 
2765 	return (td);
2766 }
2767 
2768 
2769 /*
2770  * ehci_qtd_iommu_to_cpu:
2771  *
 * This function converts the given Transfer Descriptor (QTD) IO (IOMMU)
 * address to a CPU address.
2774  *
2775  * NOTE: This function is also called from POLLED MODE.
2776  */
2777 ehci_qtd_t *
2778 ehci_qtd_iommu_to_cpu(
2779 	ehci_state_t	*ehcip,
2780 	uintptr_t	addr)
2781 {
2782 	ehci_qtd_t	*qtd;
2783 
2784 	if (addr == NULL) {
2785 
2786 		return (NULL);
2787 	}
2788 
2789 	qtd = (ehci_qtd_t *)((uintptr_t)
2790 	    (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2791 	    (uintptr_t)ehcip->ehci_qtd_pool_addr);
2792 
2793 	ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2794 	ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2795 	    (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2796 
2797 	return (qtd);
2798 }
2799 
2800 /*
 * ehci_allocate_tds_for_tw:
 *
 * Allocate n Transfer Descriptors (QTDs) from the QTD buffer pool and place
 * them into the TW.  Also choose the correct alternate qtd when required.
 * It is used for hardware short transfer support.  For more information on
 * alternate qtds see section 3.5.2 of the EHCI spec.
 * Here is how the alternate qtd is used for each transfer type:
 *
 * Bulk: used fully.
 * Intr: xfers only require 1 QTD, so alternate qtds are never used.
 * Ctrl: should not use an alternate QTD.
 * Isoch: doesn't support short_xfer nor does it use QTDs.
 *
 * Returns USB_NO_RESOURCES if it was not able to allocate all the requested
 * QTDs, otherwise USB_SUCCESS.
2816  */
2817 int
2818 ehci_allocate_tds_for_tw(
2819 	ehci_state_t		*ehcip,
2820 	ehci_pipe_private_t	*pp,
2821 	ehci_trans_wrapper_t	*tw,
2822 	size_t			qtd_count)
2823 {
2824 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
2825 	uchar_t			attributes;
2826 	ehci_qtd_t		*qtd;
2827 	uint32_t		qtd_addr;
2828 	int			i;
2829 	int			error = USB_SUCCESS;
2830 
2831 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2832 
2833 	for (i = 0; i < qtd_count; i += 1) {
2834 		qtd = ehci_allocate_qtd_from_pool(ehcip);
2835 		if (qtd == NULL) {
2836 			error = USB_NO_RESOURCES;
2837 			USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2838 			    "ehci_allocate_qtds_for_tw: "
2839 			    "Unable to allocate %lu QTDs",
2840 			    qtd_count);
2841 			break;
2842 		}
2843 		if (i > 0) {
2844 			qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2845 			    tw->tw_qtd_free_list);
2846 			Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2847 		}
2848 		tw->tw_qtd_free_list = qtd;
2849 
2850 		/*
2851 		 * Save the second one as a pointer to the new dummy 1.
2852 		 * It is used later for the alt_qtd_ptr.  Xfers with only
		 * one qtd do not need an alt_qtd_ptr.
		 * The QTDs are allocated and pushed onto a stack, which is
		 * why the second qtd allocated turns out to be the
		 * new dummy 1.
2857 		 */
2858 		if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2859 			tw->tw_alt_qtd = qtd;
2860 		}
2861 	}
2862 
2863 	return (error);
2864 }
2865 
2866 /*
2867  * ehci_allocate_tw_resources:
2868  *
 * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTDs)
 * from the QTD buffer pool and place them into the TW.  This is an all
 * or nothing operation.
 *
 * Returns NULL if there are insufficient resources, otherwise the TW.
2874  */
2875 static ehci_trans_wrapper_t *
2876 ehci_allocate_tw_resources(
2877 	ehci_state_t		*ehcip,
2878 	ehci_pipe_private_t	*pp,
2879 	size_t			tw_length,
2880 	usb_flags_t		usb_flags,
2881 	size_t			qtd_count)
2882 {
2883 	ehci_trans_wrapper_t	*tw;
2884 
2885 	tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2886 
2887 	if (tw == NULL) {
2888 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2889 		    "ehci_allocate_tw_resources: Unable to allocate TW");
2890 	} else {
2891 		if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2892 		    USB_SUCCESS) {
2893 			tw->tw_num_qtds = qtd_count;
2894 		} else {
2895 			ehci_deallocate_tw(ehcip, pp, tw);
2896 			tw = NULL;
2897 		}
2898 	}
2899 
2900 	return (tw);
2901 }
2902 
2903 
2904 /*
2905  * ehci_free_tw_td_resources:
2906  *
 * Free all QTD resources allocated to the Transaction Wrapper (TW).
 * Does not free the TW itself.
2911  */
2912 static void
2913 ehci_free_tw_td_resources(
2914 	ehci_state_t		*ehcip,
2915 	ehci_trans_wrapper_t	*tw)
2916 {
2917 	ehci_qtd_t		*qtd = NULL;
2918 	ehci_qtd_t		*temp_qtd = NULL;
2919 
2920 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2921 	    "ehci_free_tw_td_resources: tw = 0x%p", tw);
2922 
2923 	qtd = tw->tw_qtd_free_list;
2924 	while (qtd != NULL) {
2925 		/* Save the pointer to the next qtd before destroying it */
2926 		temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2927 		    Get_QTD(qtd->qtd_tw_next_qtd));
2928 		ehci_deallocate_qtd(ehcip, qtd);
2929 		qtd = temp_qtd;
2930 	}
2931 	tw->tw_qtd_free_list = NULL;
2932 }
2933 
2934 /*
2935  * Transfer Wrapper functions
2936  *
2937  * ehci_create_transfer_wrapper:
2938  *
 * Create a Transaction Wrapper (TW); this involves allocating DMA
 * resources.
2941  */
2942 static ehci_trans_wrapper_t *
2943 ehci_create_transfer_wrapper(
2944 	ehci_state_t		*ehcip,
2945 	ehci_pipe_private_t	*pp,
2946 	size_t			length,
2947 	uint_t			usb_flags)
2948 {
2949 	ddi_device_acc_attr_t	dev_attr;
2950 	ddi_dma_attr_t		dma_attr;
2951 	int			result;
2952 	size_t			real_length;
2953 	ehci_trans_wrapper_t	*tw;
2954 	int			kmem_flag;
2955 	int			(*dmamem_wait)(caddr_t);
2956 
2957 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2958 	    "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
2959 	    length, usb_flags);
2960 
2961 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2962 
2963 	/* SLEEP flag should not be used in interrupt context */
2964 	if (servicing_interrupt()) {
2965 		kmem_flag = KM_NOSLEEP;
2966 		dmamem_wait = DDI_DMA_DONTWAIT;
2967 	} else {
2968 		kmem_flag = KM_SLEEP;
2969 		dmamem_wait = DDI_DMA_SLEEP;
2970 	}
2971 
2972 	/* Allocate space for the transfer wrapper */
2973 	tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);
2974 
2975 	if (tw == NULL) {
2976 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
2977 		    "ehci_create_transfer_wrapper: kmem_zalloc failed");
2978 
2979 		return (NULL);
2980 	}
2981 
2982 	/* allow sg lists for transfer wrapper dma memory */
2983 	bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
2984 	dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
2985 	dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
2986 
2987 	/* Allocate the DMA handle */
2988 	result = ddi_dma_alloc_handle(ehcip->ehci_dip,
2989 	    &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);
2990 
2991 	if (result != DDI_SUCCESS) {
2992 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2993 		    "ehci_create_transfer_wrapper: Alloc handle failed");
2994 
2995 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
2996 
2997 		return (NULL);
2998 	}
2999 
3000 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
3001 
3002 	/* no need for swapping the raw data */
3003 	dev_attr.devacc_attr_endian_flags  = DDI_NEVERSWAP_ACC;
3004 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
3005 
3006 	/* Allocate the memory */
3007 	result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
3008 	    &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3009 	    (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);
3010 
3011 	if (result != DDI_SUCCESS) {
3012 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3013 		    "ehci_create_transfer_wrapper: dma_mem_alloc fail");
3014 
3015 		ddi_dma_free_handle(&tw->tw_dmahandle);
3016 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3017 
3018 		return (NULL);
3019 	}
3020 
3021 	ASSERT(real_length >= length);
3022 
3023 	/* Bind the handle */
3024 	result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
3025 	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
3026 	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);
3027 
3028 	if (result != DDI_DMA_MAPPED) {
3029 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
3030 
3031 		ddi_dma_mem_free(&tw->tw_accesshandle);
3032 		ddi_dma_free_handle(&tw->tw_dmahandle);
3033 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3034 
3035 		return (NULL);
3036 	}
3037 
3038 	tw->tw_cookie_idx = 0;
3039 	tw->tw_dma_offs = 0;
3040 
3041 	/*
3042 	 * Only allow one wrapper to be added at a time. Insert the
3043 	 * new transaction wrapper into the list for this pipe.
3044 	 */
3045 	if (pp->pp_tw_head == NULL) {
3046 		pp->pp_tw_head = tw;
3047 		pp->pp_tw_tail = tw;
3048 	} else {
3049 		pp->pp_tw_tail->tw_next = tw;
3050 		pp->pp_tw_tail = tw;
3051 	}
3052 
3053 	/* Store the transfer length */
3054 	tw->tw_length = length;
3055 
3056 	/* Store a back pointer to the pipe private structure */
3057 	tw->tw_pipe_private = pp;
3058 
3059 	/* Store the transfer type - synchronous or asynchronous */
3060 	tw->tw_flags = usb_flags;
3061 
3062 	/* Get and Store 32bit ID */
3063 	tw->tw_id = EHCI_GET_ID((void *)tw);
3064 
3065 	ASSERT(tw->tw_id != NULL);
3066 
3067 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3068 	    "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
3069 	    tw, tw->tw_ncookies);
3070 
3071 	return (tw);
3072 }
3073 
3074 
3075 /*
3076  * ehci_start_xfer_timer:
3077  *
 * Start the timer for control, bulk, and one-time interrupt
 * transfers.
3080  */
3081 /* ARGSUSED */
3082 static void
3083 ehci_start_xfer_timer(
3084 	ehci_state_t		*ehcip,
3085 	ehci_pipe_private_t	*pp,
3086 	ehci_trans_wrapper_t	*tw)
3087 {
3088 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3089 	    "ehci_start_xfer_timer: tw = 0x%p", tw);
3090 
3091 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3092 
3093 	/*
3094 	 * The timeout handling is done only for control, bulk and for
3095 	 * one time Interrupt transfers.
3096 	 *
3097 	 * NOTE: If timeout is zero; Assume infinite timeout and don't
3098 	 * insert this transfer on the timeout list.
3099 	 */
3100 	if (tw->tw_timeout) {
3101 		/*
3102 		 * Add this transfer wrapper to the head of the pipe's
3103 		 * tw timeout list.
3104 		 */
3105 		if (pp->pp_timeout_list) {
3106 			tw->tw_timeout_next = pp->pp_timeout_list;
3107 		}
3108 
3109 		pp->pp_timeout_list = tw;
3110 		ehci_start_timer(ehcip, pp);
3111 	}
3112 }
3113 
3114 
3115 /*
3116  * ehci_stop_xfer_timer:
3117  *
 * Stop the timer for control, bulk, and one-time interrupt
 * transfers.
3120  */
3121 void
3122 ehci_stop_xfer_timer(
3123 	ehci_state_t		*ehcip,
3124 	ehci_trans_wrapper_t	*tw,
3125 	uint_t			flag)
3126 {
3127 	ehci_pipe_private_t	*pp;
3128 	timeout_id_t		timer_id;
3129 
3130 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3131 	    "ehci_stop_xfer_timer: tw = 0x%p", tw);
3132 
3133 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3134 
3135 	/* Obtain the pipe private structure */
3136 	pp = tw->tw_pipe_private;
3137 
3138 	/* check if the timeout tw list is empty */
3139 	if (pp->pp_timeout_list == NULL) {
3140 
3141 		return;
3142 	}
3143 
3144 	switch (flag) {
3145 	case EHCI_REMOVE_XFER_IFLAST:
3146 		if (tw->tw_qtd_head != tw->tw_qtd_tail) {
3147 			break;
3148 		}
3149 
3150 		/* FALLTHRU */
3151 	case EHCI_REMOVE_XFER_ALWAYS:
3152 		ehci_remove_tw_from_timeout_list(ehcip, tw);
3153 
3154 		if ((pp->pp_timeout_list == NULL) &&
3155 		    (pp->pp_timer_id)) {
3156 
3157 			timer_id = pp->pp_timer_id;
3158 
3159 			/* Reset the timer id to zero */
3160 			pp->pp_timer_id = 0;
3161 
3162 			mutex_exit(&ehcip->ehci_int_mutex);
3163 
3164 			(void) untimeout(timer_id);
3165 
3166 			mutex_enter(&ehcip->ehci_int_mutex);
3167 		}
3168 		break;
3169 	default:
3170 		break;
3171 	}
3172 }
3173 
3174 
3175 /*
3176  * ehci_xfer_timeout_handler:
3177  *
3178  * Control or bulk transfer timeout handler.
3179  */
3180 static void
3181 ehci_xfer_timeout_handler(void *arg)
3182 {
3183 	usba_pipe_handle_data_t	*ph = (usba_pipe_handle_data_t *)arg;
3184 	ehci_state_t		*ehcip = ehci_obtain_state(
3185 				    ph->p_usba_device->usb_root_hub_dip);
3186 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3187 	ehci_trans_wrapper_t	*tw, *next;
3188 	ehci_trans_wrapper_t	*expire_xfer_list = NULL;
3189 	ehci_qtd_t		*qtd;
3190 
3191 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3192 	    "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p", ehcip, ph);
3193 
3194 	mutex_enter(&ehcip->ehci_int_mutex);
3195 
3196 	/*
	 * Check whether the timeout handler is still valid.
3198 	 */
3199 	if (pp->pp_timer_id != 0) {
3200 
3201 		/* Reset the timer id to zero */
3202 		pp->pp_timer_id = 0;
3203 	} else {
3204 		mutex_exit(&ehcip->ehci_int_mutex);
3205 
3206 		return;
3207 	}
3208 
3209 	/* Get the transfer timeout list head */
3210 	tw = pp->pp_timeout_list;
3211 
3212 	while (tw) {
3213 
3214 		/* Get the transfer on the timeout list */
3215 		next = tw->tw_timeout_next;
3216 
3217 		tw->tw_timeout--;
3218 
3219 		if (tw->tw_timeout <= 0) {
3220 
3221 			/* remove the tw from the timeout list */
3222 			ehci_remove_tw_from_timeout_list(ehcip, tw);
3223 
3224 			/* remove QTDs from active QTD list */
3225 			qtd = tw->tw_qtd_head;
3226 			while (qtd) {
3227 				ehci_remove_qtd_from_active_qtd_list(
3228 					ehcip, qtd);
3229 
3230 				/* Get the next QTD from the wrapper */
3231 				qtd = ehci_qtd_iommu_to_cpu(ehcip,
3232 				    Get_QTD(qtd->qtd_tw_next_qtd));
3233 			}
3234 
3235 			/*
			 * Preserve the order in which the requests
			 * were started.
3238 			 */
3239 			tw->tw_timeout_next = expire_xfer_list;
3240 			expire_xfer_list = tw;
3241 		}
3242 
3243 		tw = next;
3244 	}
3245 
3246 	/*
3247 	 * The timer should be started before the callbacks.
3248 	 * There is always a chance that ehci interrupts come
	 * in when we release the mutex during the tw callbacks.
	 * To keep the timeout accurate, the timer should be restarted
	 * as soon as possible.
3252 	 */
3253 	ehci_start_timer(ehcip, pp);
3254 
3255 	/* Get the expired transfer timeout list head */
3256 	tw = expire_xfer_list;
3257 
3258 	while (tw) {
3259 
3260 		/* Get the next tw on the expired transfer timeout list */
3261 		next = tw->tw_timeout_next;
3262 
3263 		/*
		 * The error handling routine will release the mutex when
		 * calling back into USBA, but this will not cause any race.
		 * We do the callback and rely on ehci_pipe_cleanup()
		 * to halt the queue head and clean up, since we should not
		 * block in timeout context.
3269 		 */
3270 		ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);
3271 
3272 		tw = next;
3273 	}
3274 	mutex_exit(&ehcip->ehci_int_mutex);
3275 }
3276 
3277 
3278 /*
3279  * ehci_remove_tw_from_timeout_list:
3280  *
 * Remove a control or bulk transfer from the timeout list.
3282  */
3283 static void
3284 ehci_remove_tw_from_timeout_list(
3285 	ehci_state_t		*ehcip,
3286 	ehci_trans_wrapper_t	*tw)
3287 {
3288 	ehci_pipe_private_t	*pp;
3289 	ehci_trans_wrapper_t	*prev, *next;
3290 
3291 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3292 	    "ehci_remove_tw_from_timeout_list: tw = 0x%p", tw);
3293 
3294 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3295 
3296 	/* Obtain the pipe private structure */
3297 	pp = tw->tw_pipe_private;
3298 
3299 	if (pp->pp_timeout_list) {
3300 		if (pp->pp_timeout_list == tw) {
3301 			pp->pp_timeout_list = tw->tw_timeout_next;
3302 
3303 			tw->tw_timeout_next = NULL;
3304 		} else {
3305 			prev = pp->pp_timeout_list;
3306 			next = prev->tw_timeout_next;
3307 
3308 			while (next && (next != tw)) {
3309 				prev = next;
3310 				next = next->tw_timeout_next;
3311 			}
3312 
3313 			if (next == tw) {
3314 				prev->tw_timeout_next =
3315 					next->tw_timeout_next;
3316 				tw->tw_timeout_next = NULL;
3317 			}
3318 		}
3319 	}
3320 }
3321 
3322 
3323 /*
3324  * ehci_start_timer:
3325  *
3326  * Start the pipe's timer
3327  */
3328 static void
3329 ehci_start_timer(
3330 	ehci_state_t		*ehcip,
3331 	ehci_pipe_private_t	*pp)
3332 {
3333 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3334 	    "ehci_start_timer: ehcip = 0x%p, pp = 0x%p", ehcip, pp);
3335 
3336 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3337 
3338 	/*
	 * Start the pipe's timer only if the timer is not currently
	 * running and there are transfers on the timeout list.
	 * This timer is per pipe.
3342 	 */
3343 	if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3344 		pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3345 		    (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3346 	}
3347 }
3348 
3349 /*
3350  * ehci_deallocate_tw:
3351  *
 * Deallocate a Transaction Wrapper (TW); this involves freeing its
 * DMA resources.
3354  */
3355 void
3356 ehci_deallocate_tw(
3357 	ehci_state_t		*ehcip,
3358 	ehci_pipe_private_t	*pp,
3359 	ehci_trans_wrapper_t	*tw)
3360 {
3361 	ehci_trans_wrapper_t	*prev, *next;
3362 
3363 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3364 	    "ehci_deallocate_tw: tw = 0x%p", tw);
3365 
3366 	/*
	 * If the transfer wrapper has no Host Controller (HC)
	 * Transfer Descriptors (QTDs) associated with it, then
	 * remove the transfer wrapper.
3370 	 */
3371 	if (tw->tw_qtd_head) {
3372 		ASSERT(tw->tw_qtd_tail != NULL);
3373 
3374 		return;
3375 	}
3376 
3377 	ASSERT(tw->tw_qtd_tail == NULL);
3378 
3379 	/* Make sure we return all the unused qtd's to the pool as well */
3380 	ehci_free_tw_td_resources(ehcip, tw);
3381 
3382 	/*
	 * If pp->pp_tw_head and pp->pp_tw_tail both point to the
	 * given TW, then set the head and tail to NULL.
	 * Otherwise, search for this TW in the pipe's linked list
	 * of TWs and remove it from the list.
3387 	 */
3388 	if (pp->pp_tw_head == tw) {
3389 		if (pp->pp_tw_tail == tw) {
3390 			pp->pp_tw_head = NULL;
3391 			pp->pp_tw_tail = NULL;
3392 		} else {
3393 			pp->pp_tw_head = tw->tw_next;
3394 		}
3395 	} else {
3396 		prev = pp->pp_tw_head;
3397 		next = prev->tw_next;
3398 
3399 		while (next && (next != tw)) {
3400 			prev = next;
3401 			next = next->tw_next;
3402 		}
3403 
3404 		if (next == tw) {
3405 			prev->tw_next = next->tw_next;
3406 
3407 			if (pp->pp_tw_tail == tw) {
3408 				pp->pp_tw_tail = prev;
3409 			}
3410 		}
3411 	}
3412 
3413 	/*
	 * Make sure that this TW has been removed
3415 	 * from the timeout list.
3416 	 */
3417 	ehci_remove_tw_from_timeout_list(ehcip, tw);
3418 
3419 	/* Deallocate this TW */
3420 	ehci_free_tw(ehcip, pp, tw);
3421 }
3422 
3423 
3424 /*
3425  * ehci_free_dma_resources:
3426  *
3427  * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3428  *
3429  * NOTE: This function is also called from POLLED MODE.
3430  */
3431 void
3432 ehci_free_dma_resources(
3433 	ehci_state_t		*ehcip,
3434 	usba_pipe_handle_data_t	*ph)
3435 {
3436 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3437 	ehci_trans_wrapper_t	*head_tw = pp->pp_tw_head;
3438 	ehci_trans_wrapper_t	*next_tw, *tw;
3439 
3440 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3441 	    "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3442 
3443 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3444 
3445 	/* Process the Transfer Wrappers */
3446 	next_tw = head_tw;
3447 	while (next_tw) {
3448 		tw = next_tw;
3449 		next_tw = tw->tw_next;
3450 
3451 		USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3452 		    "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3453 
3454 		ehci_free_tw(ehcip, pp, tw);
3455 	}
3456 
3457 	/* Adjust the head and tail pointers */
3458 	pp->pp_tw_head = NULL;
3459 	pp->pp_tw_tail = NULL;
3460 }
3461 
3462 
3463 /*
3464  * ehci_free_tw:
3465  *
3466  * Free the Transfer Wrapper (TW).
3467  */
3468 /*ARGSUSED*/
3469 static void
3470 ehci_free_tw(
3471 	ehci_state_t		*ehcip,
3472 	ehci_pipe_private_t	*pp,
3473 	ehci_trans_wrapper_t	*tw)
3474 {
3475 	int	rval;
3476 
3477 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3478 	    "ehci_free_tw: tw = 0x%p", tw);
3479 
3480 	ASSERT(tw != NULL);
3481 	ASSERT(tw->tw_id != NULL);
3482 
3483 	/* Free 32bit ID */
3484 	EHCI_FREE_ID((uint32_t)tw->tw_id);
3485 
3486 	rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
3487 	ASSERT(rval == DDI_SUCCESS);
3488 
3489 	ddi_dma_mem_free(&tw->tw_accesshandle);
3490 	ddi_dma_free_handle(&tw->tw_dmahandle);
3491 
3492 	/* Free transfer wrapper */
3493 	kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3494 }
3495 
3496 
3497 /*
3498  * Miscellaneous functions
3499  */
3500 
3501 /*
3502  * ehci_allocate_intr_in_resource
3503  *
3504  * Allocate interrupt request structure for the interrupt IN transfer.
3505  */
3506 /*ARGSUSED*/
3507 int
3508 ehci_allocate_intr_in_resource(
3509 	ehci_state_t		*ehcip,
3510 	ehci_pipe_private_t	*pp,
3511 	ehci_trans_wrapper_t	*tw,
3512 	usb_flags_t		flags)
3513 {
3514 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3515 	usb_intr_req_t		*curr_intr_reqp;
3516 	usb_opaque_t		client_periodic_in_reqp;
3517 	size_t			length = 0;
3518 
3519 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3520 	    "ehci_allocate_intr_in_resource:"
3521 	    "pp = 0x%p tw = 0x%p flags = 0x%x", pp, tw, flags);
3522 
3523 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3524 	ASSERT(tw->tw_curr_xfer_reqp == NULL);
3525 
3526 	/* Get the client periodic in request pointer */
3527 	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
3528 
3529 	/*
3530 	 * If it a periodic IN request and periodic request is NULL,
3531 	 * allocate corresponding usb periodic IN request for the
3532 	 * current periodic polling request and copy the information
3533 	 * from the saved periodic request structure.
3534 	 */
3535 	if (client_periodic_in_reqp) {
3536 
3537 		/* Get the interrupt transfer length */
3538 		length = ((usb_intr_req_t *)
3539 		    client_periodic_in_reqp)->intr_len;
3540 
3541 		curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
3542 		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
3543 	} else {
3544 		curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
3545 	}
3546 
3547 	if (curr_intr_reqp == NULL) {
3548 
3549 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3550 		    "ehci_allocate_intr_in_resource: Interrupt"
3551 		    "request structure allocation failed");
3552 
3553 		return (USB_NO_RESOURCES);
3554 	}
3555 
3556 	/* For polled mode */
3557 	if (client_periodic_in_reqp == NULL) {
3558 		curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
3559 		curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
3560 	} else {
3561 		/* Check and save the timeout value */
3562 		tw->tw_timeout = (curr_intr_reqp->intr_attributes &
3563 		    USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
3564 	}
3565 
3566 	tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
3567 	tw->tw_length = curr_intr_reqp->intr_len;
3568 
3569 	mutex_enter(&ph->p_mutex);
3570 	ph->p_req_count++;
3571 	mutex_exit(&ph->p_mutex);
3572 
3573 	pp->pp_state = EHCI_PIPE_STATE_ACTIVE;
3574 
3575 	return (USB_SUCCESS);
3576 }
3577 
3578 /*
3579  * ehci_pipe_cleanup
3580  *
3581  * Cleanup ehci pipe.
3582  */
3583 void
3584 ehci_pipe_cleanup(
3585 	ehci_state_t		*ehcip,
3586 	usba_pipe_handle_data_t	*ph)
3587 {
3588 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3589 	uint_t			pipe_state = pp->pp_state;
3590 	usb_cr_t		completion_reason;
3591 	usb_ep_descr_t		*eptd = &ph->p_ep;
3592 
3593 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3594 	    "ehci_pipe_cleanup: ph = 0x%p", ph);
3595 
3596 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3597 
3598 	if (EHCI_ISOC_ENDPOINT(eptd)) {
3599 		ehci_isoc_pipe_cleanup(ehcip, ph);
3600 
3601 		return;
3602 	}
3603 
3604 	ASSERT(!servicing_interrupt());
3605 
3606 	/*
	 * Set the QH's status to the Halt condition.
	 * If another thread is already halting, this function will
	 * automatically wait. If a pipe close happens at this time,
	 * we will be in lots of trouble.
3611 	 * If we are in an interrupt thread, don't halt, because it may
3612 	 * do a wait_for_sof.
3613 	 */
3614 	ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3615 
3616 	/*
	 * Wait for all completed transfers to be processed and
	 * their results sent upstream.
3619 	 */
3620 	ehci_wait_for_transfers_completion(ehcip, pp);
3621 
3622 	/* Save the data toggle information */
3623 	ehci_save_data_toggle(ehcip, ph);
3624 
3625 	/*
	 * Traverse the list of QTDs for this pipe using the transfer
	 * wrappers. Process these QTDs depending on their status,
	 * and stop the timer of this pipe.
3629 	 */
3630 	ehci_traverse_qtds(ehcip, ph);
3631 
3632 	/* Make sure the timer is not running */
3633 	ASSERT(pp->pp_timer_id == 0);
3634 
3635 	/* Do callbacks for all unfinished requests */
3636 	ehci_handle_outstanding_requests(ehcip, pp);
3637 
3638 	/* Free DMA resources */
3639 	ehci_free_dma_resources(ehcip, ph);
3640 
3641 	switch (pipe_state) {
3642 	case EHCI_PIPE_STATE_CLOSE:
3643 		completion_reason = USB_CR_PIPE_CLOSING;
3644 		break;
3645 	case EHCI_PIPE_STATE_RESET:
3646 	case EHCI_PIPE_STATE_STOP_POLLING:
3647 		/* Set completion reason */
3648 		completion_reason = (pipe_state ==
3649 		    EHCI_PIPE_STATE_RESET) ?
3650 		    USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3651 
3652 		/* Restore the data toggle information */
3653 		ehci_restore_data_toggle(ehcip, ph);
3654 
3655 		/*
3656 		 * Clear the halt bit to restart all the
3657 		 * transactions on this pipe.
3658 		 */
3659 		ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3660 
3661 		/* Set pipe state to idle */
3662 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
3663 
3664 		break;
3665 	}
3666 
3667 	/*
3668 	 * Do the callback for the original client
3669 	 * periodic IN request.
3670 	 */
3671 	if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3672 	    ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3673 	    USB_EP_DIR_IN)) {
3674 
3675 		ehci_do_client_periodic_in_req_callback(
3676 		    ehcip, pp, completion_reason);
3677 	}
3678 }
3679 
3680 
3681 /*
3682  * ehci_wait_for_transfers_completion:
3683  *
 * Wait for all completed transfers to be processed and their results
 * sent upstream.
3686  */
3687 static void
3688 ehci_wait_for_transfers_completion(
3689 	ehci_state_t		*ehcip,
3690 	ehci_pipe_private_t	*pp)
3691 {
3692 	ehci_trans_wrapper_t	*next_tw = pp->pp_tw_head;
3693 	clock_t			xfer_cmpl_time_wait;
3694 	ehci_qtd_t		*qtd;
3695 
3696 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3697 	    ehcip->ehci_log_hdl,
3698 	    "ehci_wait_for_transfers_completion: pp = 0x%p", pp);
3699 
3700 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3701 
3702 	if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {
3703 
3704 		return;
3705 	}
3706 
3707 	pp->pp_count_done_qtds = 0;
3708 
3709 	/* Process the transfer wrappers for this pipe */
3710 	while (next_tw) {
3711 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
3712 
3713 		/*
3714 		 * Walk through each QTD for this transfer wrapper.
		 * If a QTD still exists, then it is either on the done
		 * list or on the QH's list.
3717 		 */
3718 		while (qtd) {
3719 			if (!(Get_QTD(qtd->qtd_ctrl) &
3720 			    EHCI_QTD_CTRL_ACTIVE_XACT)) {
3721 				pp->pp_count_done_qtds++;
3722 			}
3723 
3724 			qtd = ehci_qtd_iommu_to_cpu(ehcip,
3725 			    Get_QTD(qtd->qtd_tw_next_qtd));
3726 		}
3727 
3728 		next_tw = next_tw->tw_next;
3729 	}
3730 
3731 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3732 	    "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
3733 	    pp->pp_count_done_qtds);
3734 
3735 	if (!pp->pp_count_done_qtds) {
3736 
3737 		return;
3738 	}
3739 
3740 	/* Get the number of clock ticks to wait */
3741 	xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);
3742 
3743 	(void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
3744 	    &ehcip->ehci_int_mutex,
3745 	    ddi_get_lbolt() + xfer_cmpl_time_wait);
3746 
3747 	if (pp->pp_count_done_qtds) {
3748 
3749 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3750 		    "ehci_wait_for_transfers_completion:"
3751 		    "No transfers completion confirmation received");
3752 	}
3753 }
3754 
3755 /*
3756  * ehci_check_for_transfers_completion:
3757  *
 * Check whether anybody is waiting for the transfer completion event. If so,
 * send this event and also stop initiating any new transfers on this pipe.
3760  */
3761 void
3762 ehci_check_for_transfers_completion(
3763 	ehci_state_t		*ehcip,
3764 	ehci_pipe_private_t	*pp)
3765 {
3766 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3767 	    ehcip->ehci_log_hdl,
3768 	    "ehci_check_for_transfers_completion: pp = 0x%p", pp);
3769 
3770 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3771 
3772 	if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
3773 	    (pp->pp_error == USB_CR_NO_RESOURCES) &&
3774 	    (pp->pp_cur_periodic_req_cnt == 0)) {
3775 
3776 		/* Reset pipe error to zero */
3777 		pp->pp_error = 0;
3778 
3779 		/* Do callback for original request */
3780 		ehci_do_client_periodic_in_req_callback(
3781 		    ehcip, pp, USB_CR_NO_RESOURCES);
3782 	}
3783 
3784 	if (pp->pp_count_done_qtds) {
3785 
3786 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_check_for_transfers_completion: "
		    "count_done_qtds = 0x%x", pp->pp_count_done_qtds);
3789 
3790 		/* Decrement the done qtd count */
3791 		pp->pp_count_done_qtds--;
3792 
3793 		if (!pp->pp_count_done_qtds) {
3794 
3795 			USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
			    "ehci_check_for_transfers_completion: "
			    "Sent transfer completion event, pp = 0x%p", pp);
3798 
3799 			/* Send the transfer completion signal */
3800 			cv_signal(&pp->pp_xfer_cmpl_cv);
3801 		}
3802 	}
3803 }
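
/*
 * Illustrative sketch (not part of the driver): the two routines above
 * form a simple "count down to zero" handshake.  The waiter snapshots
 * the number of retired (inactive) QTDs and sleeps; the completion path
 * decrements the count once per processed QTD and signals when it hits
 * zero.  Reduced to its essentials, with placeholder names and the
 * mutex held on both sides:
 *
 *	waiter:
 *		while (counter != 0)
 *			(void) cv_timedwait(&cv, &mutex, deadline);
 *
 *	completion path:
 *		if (counter != 0 && --counter == 0)
 *			cv_signal(&cv);
 */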
3804 
3805 
3806 /*
3807  * ehci_save_data_toggle:
3808  *
3809  * Save the data toggle information.
3810  */
3811 static void
3812 ehci_save_data_toggle(
3813 	ehci_state_t		*ehcip,
3814 	usba_pipe_handle_data_t	*ph)
3815 {
3816 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3817 	usb_ep_descr_t		*eptd = &ph->p_ep;
3818 	uint_t			data_toggle;
3819 	usb_cr_t		error = pp->pp_error;
3820 	ehci_qh_t		*qh = pp->pp_qh;
3821 
3822 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3823 	    ehcip->ehci_log_hdl,
3824 	    "ehci_save_data_toggle: ph = 0x%p", ph);
3825 
3826 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3827 
3828 	/* Reset the pipe error value */
3829 	pp->pp_error = USB_CR_OK;
3830 
3831 	/* Return immediately if it is a control pipe */
3832 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3833 	    USB_EP_ATTR_CONTROL) {
3834 
3835 		return;
3836 	}
3837 
3838 	/* Get the data toggle information from the endpoint (QH) */
3839 	data_toggle = (Get_QH(qh->qh_status) &
3840 	    EHCI_QH_STS_DATA_TOGGLE)? DATA1:DATA0;
3841 
3842 	/*
	 * If the error is STALL, reset the
	 * data toggle to DATA0.
3845 	 */
3846 	if (error == USB_CR_STALL) {
3847 		data_toggle = DATA0;
3848 	}
3849 
3850 	/*
3851 	 * Save the data toggle information
3852 	 * in the usb device structure.
3853 	 */
3854 	mutex_enter(&ph->p_mutex);
3855 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3856 	    data_toggle);
3857 	mutex_exit(&ph->p_mutex);
3858 }
3859 
3860 
3861 /*
3862  * ehci_restore_data_toggle:
3863  *
3864  * Restore the data toggle information.
3865  */
3866 void
3867 ehci_restore_data_toggle(
3868 	ehci_state_t		*ehcip,
3869 	usba_pipe_handle_data_t	*ph)
3870 {
3871 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3872 	usb_ep_descr_t		*eptd = &ph->p_ep;
3873 	uint_t			data_toggle = 0;
3874 
3875 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3876 	    ehcip->ehci_log_hdl,
3877 	    "ehci_restore_data_toggle: ph = 0x%p", ph);
3878 
3879 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3880 
3881 	/* Return immediately if it is a control pipe */
3882 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3883 	    USB_EP_ATTR_CONTROL) {
3884 
3885 		return;
3886 	}
3887 
3888 	mutex_enter(&ph->p_mutex);
3889 
3890 	data_toggle = usba_hcdi_get_data_toggle(ph->p_usba_device,
3891 	    ph->p_ep.bEndpointAddress);
3892 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3893 	    0);
3894 
3895 	mutex_exit(&ph->p_mutex);
3896 
3897 	/*
	 * Restore the data toggle bit in the QH from the
	 * previously saved data toggle information.
3900 	 */
3901 	if (data_toggle) {
3902 		Set_QH(pp->pp_qh->qh_status,
3903 		    Get_QH(pp->pp_qh->qh_status) | EHCI_QH_STS_DATA_TOGGLE);
3904 	} else {
3905 		Set_QH(pp->pp_qh->qh_status,
3906 		    Get_QH(pp->pp_qh->qh_status) & (~EHCI_QH_STS_DATA_TOGGLE));
3907 	}
3908 }
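
/*
 * Illustrative sketch (not part of the driver): together,
 * ehci_save_data_toggle() and ehci_restore_data_toggle() round-trip a
 * single bit between the QH status word and the per-endpoint toggle
 * kept by USBA.  Ignoring the control-pipe and STALL special cases,
 * the transformation is just:
 *
 *	save:
 *		toggle = (qh_status & EHCI_QH_STS_DATA_TOGGLE) ?
 *		    DATA1 : DATA0;
 *
 *	restore:
 *		if (toggle)
 *			qh_status |= EHCI_QH_STS_DATA_TOGGLE;
 *		else
 *			qh_status &= ~EHCI_QH_STS_DATA_TOGGLE;
 *
 * The Get_QH()/Set_QH() accessors used in the real code are assumed to
 * add DMA-handle aware access wrapping around these bit operations.
 */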
3909 
3910 
3911 /*
3912  * ehci_handle_outstanding_requests
3913  *
 * Deallocate the pre-allocated interrupt request structures for interrupt
 * IN transfers and do the callbacks for all other unfinished requests.
3916  *
3917  * NOTE: This function is also called from POLLED MODE.
3918  */
3919 void
3920 ehci_handle_outstanding_requests(
3921 	ehci_state_t		*ehcip,
3922 	ehci_pipe_private_t	*pp)
3923 {
3924 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3925 	usb_ep_descr_t		*eptd = &ph->p_ep;
3926 	ehci_trans_wrapper_t	*curr_tw;
3927 	ehci_trans_wrapper_t	*next_tw;
3928 	usb_opaque_t		curr_xfer_reqp;
3929 
3930 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3931 	    ehcip->ehci_log_hdl,
3932 	    "ehci_handle_outstanding_requests: pp = 0x%p", pp);
3933 
3934 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3935 
3936 	/* Deallocate all pre-allocated interrupt requests */
3937 	next_tw = pp->pp_tw_head;
3938 
3939 	while (next_tw) {
3940 		curr_tw = next_tw;
3941 		next_tw = curr_tw->tw_next;
3942 
3943 		curr_xfer_reqp = curr_tw->tw_curr_xfer_reqp;
3944 
3945 		/* Deallocate current interrupt request */
3946 		if (curr_xfer_reqp) {
3947 
3948 			if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3949 			    (curr_tw->tw_direction == EHCI_QTD_CTRL_IN_PID)) {
3950 
3951 				/* Decrement periodic in request count */
3952 				pp->pp_cur_periodic_req_cnt--;
3953 
3954 				ehci_deallocate_intr_in_resource(
3955 				    ehcip, pp, curr_tw);
3956 			} else {
3957 				ehci_hcdi_callback(ph, curr_tw, USB_CR_FLUSHED);
3958 			}
3959 		}
3960 	}
3961 }
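
/*
 * Illustrative sketch (not part of the driver): the loop above uses the
 * usual "capture the next pointer before processing the current node"
 * idiom, so the walk stays valid even if handling a transfer wrapper
 * ends up unlinking it from the pp_tw_head list:
 *
 *	for (curr_tw = pp->pp_tw_head; curr_tw != NULL; curr_tw = next_tw) {
 *		next_tw = curr_tw->tw_next;
 *		handle(curr_tw);
 *	}
 *
 * Here handle() is a placeholder for the periodic IN deallocation or
 * the USB_CR_FLUSHED callback performed in the body of the real loop.
 */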
3962 
3963 
3964 /*
3965  * ehci_deallocate_intr_in_resource
3966  *
 * Deallocate the interrupt request structure for an interrupt IN transfer.
3968  */
3969 void
3970 ehci_deallocate_intr_in_resource(
3971 	ehci_state_t		*ehcip,
3972 	ehci_pipe_private_t	*pp,
3973 	ehci_trans_wrapper_t	*tw)
3974 {
3975 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3976 	uchar_t			ep_attr = ph->p_ep.bmAttributes;
3977 	usb_opaque_t		curr_xfer_reqp;
3978 
3979 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3980 	    ehcip->ehci_log_hdl,
3981 	    "ehci_deallocate_intr_in_resource: "
3982 	    "pp = 0x%p tw = 0x%p", pp, tw);
3983 
3984 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3985 	ASSERT((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR);
3986 
3987 	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
3988 
3989 	/* Check the current periodic in request pointer */
3990 	if (curr_xfer_reqp) {
3991 
3992 		tw->tw_curr_xfer_reqp = NULL;
3993 
3994 		mutex_enter(&ph->p_mutex);
3995 		ph->p_req_count--;
3996 		mutex_exit(&ph->p_mutex);
3997 
3998 		/* Free pre-allocated interrupt requests */
3999 		usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4000 
4001 		/* Set periodic in pipe state to idle */
4002 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
4003 	}
4004 }
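
/*
 * Illustrative sketch (not part of the driver): a pre-allocated periodic
 * IN request is counted against the pipe handle (p_req_count), so
 * releasing one pairs the usb_free_intr_req() call with a count drop
 * taken under the pipe handle's own mutex; reqp below stands for the
 * request pointer detached from the transfer wrapper:
 *
 *	mutex_enter(&ph->p_mutex);
 *	ph->p_req_count--;
 *	mutex_exit(&ph->p_mutex);
 *	usb_free_intr_req((usb_intr_req_t *)reqp);
 *
 * What matters is that p_req_count is only manipulated while p_mutex
 * is held.
 */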
4005 
4006 
4007 /*
4008  * ehci_do_client_periodic_in_req_callback
4009  *
4010  * Do callback for the original client periodic IN request.
4011  */
4012 void
4013 ehci_do_client_periodic_in_req_callback(
4014 	ehci_state_t		*ehcip,
4015 	ehci_pipe_private_t	*pp,
4016 	usb_cr_t		completion_reason)
4017 {
4018 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4019 	usb_ep_descr_t		*eptd = &ph->p_ep;
4020 
4021 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4022 	    ehcip->ehci_log_hdl,
4023 	    "ehci_do_client_periodic_in_req_callback: "
4024 	    "pp = 0x%p cc = 0x%x", pp, completion_reason);
4025 
4026 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4027 
4028 	/*
	 * For interrupt/isochronous IN pipes, check whether we need to
	 * do the callback for the original client's periodic IN request.
4031 	 */
4032 	if (pp->pp_client_periodic_in_reqp) {
4033 		ASSERT(pp->pp_cur_periodic_req_cnt == 0);
4034 		if (EHCI_ISOC_ENDPOINT(eptd)) {
4035 			ehci_hcdi_isoc_callback(ph, NULL, completion_reason);
4036 		} else {
4037 			ehci_hcdi_callback(ph, NULL, completion_reason);
4038 		}
4039 	}
4040 }
4041 
4042 
4043 /*
4044  * ehci_hcdi_callback()
4045  *
 * Convenience wrapper around usba_hcdi_cb() for pipes other than
 * the root hub pipe.
4047  */
4048 void
4049 ehci_hcdi_callback(
4050 	usba_pipe_handle_data_t	*ph,
4051 	ehci_trans_wrapper_t	*tw,
4052 	usb_cr_t		completion_reason)
4053 {
4054 	ehci_state_t		*ehcip = ehci_obtain_state(
4055 				    ph->p_usba_device->usb_root_hub_dip);
4056 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
4057 	usb_opaque_t		curr_xfer_reqp;
4058 	uint_t			pipe_state = 0;
4059 
4060 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
4061 	    "ehci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4062 	    ph, tw, completion_reason);
4063 
4064 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4065 
4066 	/* Set the pipe state as per completion reason */
4067 	switch (completion_reason) {
4068 	case USB_CR_OK:
4069 		pipe_state = pp->pp_state;
4070 		break;
4071 	case USB_CR_NO_RESOURCES:
4072 	case USB_CR_NOT_SUPPORTED:
4073 	case USB_CR_PIPE_RESET:
4074 	case USB_CR_STOPPED_POLLING:
4075 		pipe_state = EHCI_PIPE_STATE_IDLE;
4076 		break;
4077 	case USB_CR_PIPE_CLOSING:
4078 		break;
4079 	default:
4080 		/* Set the pipe state to error */
4081 		pipe_state = EHCI_PIPE_STATE_ERROR;
4082 		pp->pp_error = completion_reason;
		break;
	}
4086 
4087 	pp->pp_state = pipe_state;
4088 
4089 	if (tw && tw->tw_curr_xfer_reqp) {
4090 		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4091 		tw->tw_curr_xfer_reqp = NULL;
4092 	} else {
4093 		ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4094 
4095 		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4096 		pp->pp_client_periodic_in_reqp = NULL;
4097 	}
4098 
4099 	ASSERT(curr_xfer_reqp != NULL);
4100 
4101 	mutex_exit(&ehcip->ehci_int_mutex);
4102 
4103 	usba_hcdi_cb(ph, curr_xfer_reqp, completion_reason);
4104 
4105 	mutex_enter(&ehcip->ehci_int_mutex);
4106 }
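
/*
 * Illustrative sketch (not part of the driver): ehci_hcdi_callback()
 * follows the usual "drop the lock across an upcall" pattern.  All HCD
 * state (the pipe state and the request detached from the transfer
 * wrapper or pipe) is updated while ehci_int_mutex is held; the mutex
 * is then released around usba_hcdi_cb(), since the callback path may
 * re-enter the host controller driver and must not deadlock on the
 * same mutex:
 *
 *	mutex_enter(&ehcip->ehci_int_mutex);
 *	... update pp_state, take ownership of the request ...
 *	mutex_exit(&ehcip->ehci_int_mutex);
 *	usba_hcdi_cb(ph, reqp, completion_reason);
 *	mutex_enter(&ehcip->ehci_int_mutex);
 */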
4107