1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * EHCI Host Controller Driver (EHCI)
31  *
32  * The EHCI driver is a software driver which interfaces to the Universal
33  * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
34  * the Host Controller is defined by the EHCI Host Controller Interface.
35  *
36  * This module contains the main EHCI driver code which handles all USB
37  * transfers, bandwidth allocations and other general functionalities.
38  */
39 
40 #include <sys/usb/hcd/ehci/ehcid.h>
41 #include <sys/usb/hcd/ehci/ehci_intr.h>
42 #include <sys/usb/hcd/ehci/ehci_util.h>
43 #include <sys/usb/hcd/ehci/ehci_isoch.h>
44 
45 /* Adjustable variables for the size of the pools */
46 extern int ehci_qh_pool_size;
47 extern int ehci_qtd_pool_size;
48 
49 
50 /* Endpoint Descriptor (QH) related functions */
51 ehci_qh_t	*ehci_alloc_qh(
52 				ehci_state_t		*ehcip,
53 				usba_pipe_handle_data_t	*ph,
54 				uint_t			flag);
55 static void	ehci_unpack_endpoint(
56 				ehci_state_t		*ehcip,
57 				usba_pipe_handle_data_t	*ph,
58 				ehci_qh_t		*qh);
59 void		ehci_insert_qh(
60 				ehci_state_t		*ehcip,
61 				usba_pipe_handle_data_t	*ph);
62 static void	ehci_insert_async_qh(
63 				ehci_state_t		*ehcip,
64 				ehci_pipe_private_t	*pp);
65 static void	ehci_insert_intr_qh(
66 				ehci_state_t		*ehcip,
67 				ehci_pipe_private_t	*pp);
68 static void	ehci_modify_qh_status_bit(
69 				ehci_state_t		*ehcip,
70 				ehci_pipe_private_t	*pp,
71 				halt_bit_t		action);
72 static void	ehci_halt_hs_qh(
73 				ehci_state_t		*ehcip,
74 				ehci_pipe_private_t	*pp,
75 				ehci_qh_t		*qh);
76 static void	ehci_halt_fls_ctrl_and_bulk_qh(
77 				ehci_state_t		*ehcip,
78 				ehci_pipe_private_t	*pp,
79 				ehci_qh_t		*qh);
80 static void	ehci_clear_tt_buffer(
81 				ehci_state_t		*ehcip,
82 				usba_pipe_handle_data_t	*ph,
83 				ehci_qh_t		*qh);
84 static void	ehci_halt_fls_intr_qh(
85 				ehci_state_t		*ehcip,
86 				ehci_qh_t		*qh);
87 void		ehci_remove_qh(
88 				ehci_state_t		*ehcip,
89 				ehci_pipe_private_t	*pp,
90 				boolean_t		reclaim);
91 static void	ehci_remove_async_qh(
92 				ehci_state_t		*ehcip,
93 				ehci_pipe_private_t	*pp,
94 				boolean_t		reclaim);
95 static void	ehci_remove_intr_qh(
96 				ehci_state_t		*ehcip,
97 				ehci_pipe_private_t	*pp,
98 				boolean_t		reclaim);
99 static void	ehci_insert_qh_on_reclaim_list(
100 				ehci_state_t		*ehcip,
101 				ehci_pipe_private_t	*pp);
102 void		ehci_deallocate_qh(
103 				ehci_state_t		*ehcip,
104 				ehci_qh_t		*old_qh);
105 uint32_t	ehci_qh_cpu_to_iommu(
106 				ehci_state_t		*ehcip,
107 				ehci_qh_t		*addr);
108 ehci_qh_t	*ehci_qh_iommu_to_cpu(
109 				ehci_state_t		*ehcip,
110 				uintptr_t		addr);
111 
112 /* Transfer Descriptor (QTD) related functions */
113 static int	ehci_initialize_dummy(
114 				ehci_state_t		*ehcip,
115 				ehci_qh_t		*qh);
116 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
117 				ehci_state_t		*ehcip,
118 				ehci_pipe_private_t	*pp,
119 				usb_ctrl_req_t		*ctrl_reqp,
120 				usb_flags_t		usb_flags);
121 void		ehci_insert_ctrl_req(
122 				ehci_state_t		*ehcip,
123 				usba_pipe_handle_data_t	*ph,
124 				usb_ctrl_req_t		*ctrl_reqp,
125 				ehci_trans_wrapper_t	*tw,
126 				usb_flags_t		usb_flags);
127 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
128 				ehci_state_t		*ehcip,
129 				ehci_pipe_private_t	*pp,
130 				usb_bulk_req_t		*bulk_reqp,
131 				usb_flags_t		usb_flags);
132 void		ehci_insert_bulk_req(
133 				ehci_state_t		*ehcip,
134 				usba_pipe_handle_data_t	*ph,
135 				usb_bulk_req_t		*bulk_reqp,
136 				ehci_trans_wrapper_t	*tw,
137 				usb_flags_t		flags);
138 int		ehci_start_periodic_pipe_polling(
139 				ehci_state_t		*ehcip,
140 				usba_pipe_handle_data_t	*ph,
141 				usb_opaque_t		periodic_in_reqp,
142 				usb_flags_t		flags);
143 static int	ehci_start_pipe_polling(
144 				ehci_state_t		*ehcip,
145 				usba_pipe_handle_data_t	*ph,
146 				usb_flags_t		flags);
147 static int	ehci_start_intr_polling(
148 				ehci_state_t		*ehcip,
149 				usba_pipe_handle_data_t	*ph,
150 				usb_flags_t		flags);
151 static void	ehci_set_periodic_pipe_polling(
152 				ehci_state_t		*ehcip,
153 				usba_pipe_handle_data_t	*ph);
154 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
155 				ehci_state_t		*ehcip,
156 				usba_pipe_handle_data_t	*ph,
157 				usb_intr_req_t		*intr_reqp,
158 				usb_flags_t		usb_flags);
159 void		ehci_insert_intr_req(
160 				ehci_state_t		*ehcip,
161 				ehci_pipe_private_t	*pp,
162 				ehci_trans_wrapper_t	*tw,
163 				usb_flags_t		flags);
164 int		ehci_stop_periodic_pipe_polling(
165 				ehci_state_t		*ehcip,
166 				usba_pipe_handle_data_t	*ph,
167 				usb_flags_t		flags);
168 int		ehci_insert_qtd(
169 				ehci_state_t		*ehcip,
170 				uint32_t		qtd_ctrl,
171 				uint32_t		qtd_iommu_cbp,
172 				size_t			qtd_length,
173 				uint32_t		qtd_ctrl_phase,
174 				ehci_pipe_private_t	*pp,
175 				ehci_trans_wrapper_t	*tw);
176 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
177 				ehci_state_t		*ehcip);
178 static void	ehci_fill_in_qtd(
179 				ehci_state_t		*ehcip,
180 				ehci_qtd_t		*qtd,
181 				uint32_t		qtd_ctrl,
182 				uint32_t		qtd_iommu_cbp,
183 				size_t			qtd_length,
184 				uint32_t		qtd_ctrl_phase,
185 				ehci_pipe_private_t	*pp,
186 				ehci_trans_wrapper_t	*tw);
187 static void	ehci_insert_qtd_on_tw(
188 				ehci_state_t		*ehcip,
189 				ehci_trans_wrapper_t	*tw,
190 				ehci_qtd_t		*qtd);
191 static void	ehci_insert_qtd_into_active_qtd_list(
192 				ehci_state_t		*ehcip,
193 				ehci_qtd_t		*curr_qtd);
194 void		ehci_remove_qtd_from_active_qtd_list(
195 				ehci_state_t		*ehcip,
196 				ehci_qtd_t		*curr_qtd);
197 static void	ehci_traverse_qtds(
198 				ehci_state_t		*ehcip,
199 				usba_pipe_handle_data_t	*ph);
200 void		ehci_deallocate_qtd(
201 				ehci_state_t		*ehcip,
202 				ehci_qtd_t		*old_qtd);
203 uint32_t	ehci_qtd_cpu_to_iommu(
204 				ehci_state_t		*ehcip,
205 				ehci_qtd_t		*addr);
206 ehci_qtd_t	*ehci_qtd_iommu_to_cpu(
207 				ehci_state_t		*ehcip,
208 				uintptr_t		addr);
209 
210 /* Transfer Wrapper (TW) functions */
211 static ehci_trans_wrapper_t  *ehci_create_transfer_wrapper(
212 				ehci_state_t		*ehcip,
213 				ehci_pipe_private_t	*pp,
214 				size_t			length,
215 				uint_t			usb_flags);
216 int		ehci_allocate_tds_for_tw(
217 				ehci_state_t		*ehcip,
218 				ehci_pipe_private_t	*pp,
219 				ehci_trans_wrapper_t	*tw,
220 				size_t			qtd_count);
221 static ehci_trans_wrapper_t  *ehci_allocate_tw_resources(
222 				ehci_state_t		*ehcip,
223 				ehci_pipe_private_t	*pp,
224 				size_t			length,
225 				usb_flags_t		usb_flags,
226 				size_t			td_count);
227 static void	ehci_free_tw_td_resources(
228 				ehci_state_t		*ehcip,
229 				ehci_trans_wrapper_t	*tw);
230 static void	ehci_start_xfer_timer(
231 				ehci_state_t		*ehcip,
232 				ehci_pipe_private_t	*pp,
233 				ehci_trans_wrapper_t	*tw);
234 void		ehci_stop_xfer_timer(
235 				ehci_state_t		*ehcip,
236 				ehci_trans_wrapper_t	*tw,
237 				uint_t			flag);
238 static void	ehci_xfer_timeout_handler(void		*arg);
239 static void	ehci_remove_tw_from_timeout_list(
240 				ehci_state_t		*ehcip,
241 				ehci_trans_wrapper_t	*tw);
242 static void	ehci_start_timer(ehci_state_t		*ehcip,
243 				ehci_pipe_private_t	*pp);
244 void		ehci_deallocate_tw(
245 				ehci_state_t		*ehcip,
246 				ehci_pipe_private_t	*pp,
247 				ehci_trans_wrapper_t	*tw);
248 void		ehci_free_dma_resources(
249 				ehci_state_t		*ehcip,
250 				usba_pipe_handle_data_t	*ph);
251 static void	ehci_free_tw(
252 				ehci_state_t		*ehcip,
253 				ehci_pipe_private_t	*pp,
254 				ehci_trans_wrapper_t	*tw);
255 
256 /* Miscellaneous functions */
257 int		ehci_allocate_intr_in_resource(
258 				ehci_state_t		*ehcip,
259 				ehci_pipe_private_t	*pp,
260 				ehci_trans_wrapper_t	*tw,
261 				usb_flags_t		flags);
262 void		ehci_pipe_cleanup(
263 				ehci_state_t		*ehcip,
264 				usba_pipe_handle_data_t	*ph);
265 static void	ehci_wait_for_transfers_completion(
266 				ehci_state_t		*ehcip,
267 				ehci_pipe_private_t	*pp);
268 void		ehci_check_for_transfers_completion(
269 				ehci_state_t		*ehcip,
270 				ehci_pipe_private_t	*pp);
271 static void	ehci_save_data_toggle(
272 				ehci_state_t		*ehcip,
273 				usba_pipe_handle_data_t	*ph);
274 void		ehci_restore_data_toggle(
275 				ehci_state_t		*ehcip,
276 				usba_pipe_handle_data_t	*ph);
277 void		ehci_handle_outstanding_requests(
278 				ehci_state_t		*ehcip,
279 				ehci_pipe_private_t	*pp);
280 void		ehci_deallocate_intr_in_resource(
281 				ehci_state_t		*ehcip,
282 				ehci_pipe_private_t	*pp,
283 				ehci_trans_wrapper_t	*tw);
284 void		ehci_do_client_periodic_in_req_callback(
285 				ehci_state_t		*ehcip,
286 				ehci_pipe_private_t	*pp,
287 				usb_cr_t		completion_reason);
288 void		ehci_hcdi_callback(
289 				usba_pipe_handle_data_t	*ph,
290 				ehci_trans_wrapper_t	*tw,
291 				usb_cr_t		completion_reason);
292 
293 
294 /*
295  * Endpoint Descriptor (QH) manipulations functions
296  */
297 
298 /*
299  * ehci_alloc_qh:
300  *
301  * Allocate an endpoint descriptor (QH)
302  *
303  * NOTE: This function is also called from POLLED MODE.
304  */
305 ehci_qh_t *
306 ehci_alloc_qh(
307 	ehci_state_t		*ehcip,
308 	usba_pipe_handle_data_t	*ph,
309 	uint_t			flag)
310 {
311 	int			i, state;
312 	ehci_qh_t		*qh;
313 
314 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
315 	    "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
316 
317 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
318 
319 	/*
320 	 * If this is for a ISOC endpoint return null.
321 	 * Isochronous uses ITD put directly onto the PFL.
322 	 */
323 	if (ph) {
324 		if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
325 
326 			return (NULL);
327 		}
328 	}
329 
330 	/*
331 	 * The first 63 endpoints in the Endpoint Descriptor (QH)
332 	 * buffer pool are reserved for building interrupt lattice
333 	 * tree. Search for a blank endpoint descriptor in the QH
334 	 * buffer pool.
335 	 */
336 	for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
337 		state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
338 
339 		if (state == EHCI_QH_FREE) {
340 			break;
341 		}
342 	}
343 
344 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
345 	    "ehci_alloc_qh: Allocated %d", i);
346 
347 	if (i == ehci_qh_pool_size) {
348 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  ehcip->ehci_log_hdl,
349 		    "ehci_alloc_qh: QH exhausted");
350 
351 		return (NULL);
352 	} else {
353 		qh = &ehcip->ehci_qh_pool_addr[i];
354 
355 		USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
356 		    "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
357 
358 		/* Check polled mode flag */
359 		if (flag == EHCI_POLLED_MODE_FLAG) {
360 			Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
361 			Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
362 		}
363 
364 		/* Unpack the endpoint descriptor into a control field */
365 		if (ph) {
366 			if ((ehci_initialize_dummy(ehcip,
367 			    qh)) == USB_NO_RESOURCES) {
368 
369 				bzero((void *)qh, sizeof (ehci_qh_t));
370 				Set_QH(qh->qh_state, EHCI_QH_FREE);
371 
372 				return (NULL);
373 			}
374 
375 			ehci_unpack_endpoint(ehcip, ph, qh);
376 
377 			Set_QH(qh->qh_curr_qtd, NULL);
378 			Set_QH(qh->qh_alt_next_qtd,
379 			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
380 
381 			/* Change QH's state Active */
382 			Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
383 		} else {
384 			Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
385 
386 			/* Change QH's state Static */
387 			Set_QH(qh->qh_state, EHCI_QH_STATIC);
388 		}
389 
390 		ehci_print_qh(ehcip, qh);
391 
392 		return (qh);
393 	}
394 }
395 
396 
397 /*
398  * ehci_unpack_endpoint:
399  *
400  * Unpack the information in the pipe handle and create the first byte
401  * of the Host Controller's (HC) Endpoint Descriptor (QH).
402  */
403 static void
404 ehci_unpack_endpoint(
405 	ehci_state_t		*ehcip,
406 	usba_pipe_handle_data_t	*ph,
407 	ehci_qh_t		*qh)
408 {
409 	usb_ep_descr_t		*endpoint = &ph->p_ep;
410 	uint_t			maxpacketsize, addr, xactions;
411 	uint_t			ctrl = 0, status = 0, split_ctrl = 0;
412 	usb_port_status_t	usb_port_status;
413 	usba_device_t		*usba_device = ph->p_usba_device;
414 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
415 
416 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
417 	    "ehci_unpack_endpoint:");
418 
419 	mutex_enter(&usba_device->usb_mutex);
420 	ctrl = usba_device->usb_addr;
421 	usb_port_status = usba_device->usb_port_status;
422 	mutex_exit(&usba_device->usb_mutex);
423 
424 	addr = endpoint->bEndpointAddress;
425 
426 	/* Assign the endpoint's address */
427 	ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
428 
429 	/* Assign the speed */
430 	switch (usb_port_status) {
431 	case USBA_LOW_SPEED_DEV:
432 		ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
433 		break;
434 	case USBA_FULL_SPEED_DEV:
435 		ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
436 		break;
437 	case USBA_HIGH_SPEED_DEV:
438 		ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
439 		break;
440 	}
441 
442 	switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
443 	case USB_EP_ATTR_CONTROL:
444 		/* Assign data toggle information */
445 		ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
446 
447 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
448 			ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
449 		}
450 		/* FALLTHRU */
451 	case USB_EP_ATTR_BULK:
452 		/* Maximum nak counter */
453 		ctrl |= EHCI_QH_CTRL_MAX_NC;
454 
455 		if (usb_port_status == USBA_HIGH_SPEED_DEV) {
456 			/*
457 			 * Perform ping before executing control
458 			 * and bulk transactions.
459 			 */
460 			status = EHCI_QH_STS_DO_PING;
461 		}
462 		break;
463 	case USB_EP_ATTR_INTR:
464 		/* Set start split mask */
465 		split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
466 
467 		/*
468 		 * Set complete split mask for low/full speed
469 		 * usb devices.
470 		 */
471 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
472 			split_ctrl |= ((pp->pp_cmask <<
473 			    EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
474 			    EHCI_QH_SPLIT_CTRL_COMP_MASK);
475 		}
476 		break;
477 	}
478 
479 	/* Get the max transactions per microframe */
480 	xactions = (endpoint->wMaxPacketSize &
481 	    USB_EP_MAX_XACTS_MASK) >>  USB_EP_MAX_XACTS_SHIFT;
482 
483 	switch (xactions) {
484 	case 0:
485 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
486 		break;
487 	case 1:
488 		split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
489 		break;
490 	case 2:
491 		split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
492 		break;
493 	default:
494 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
495 		break;
496 	}
497 
498 	/*
499 	 * For low/full speed devices, program high speed hub
500 	 * address and port number.
501 	 */
502 	if (usb_port_status != USBA_HIGH_SPEED_DEV) {
503 		mutex_enter(&usba_device->usb_mutex);
504 		split_ctrl |= ((usba_device->usb_hs_hub_addr
505 		    << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
506 		    EHCI_QH_SPLIT_CTRL_HUB_ADDR);
507 
508 		split_ctrl |= ((usba_device->usb_hs_hub_port
509 		    << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
510 		    EHCI_QH_SPLIT_CTRL_HUB_PORT);
511 
512 		mutex_exit(&usba_device->usb_mutex);
513 
514 		/* Set start split transaction state */
515 		status = EHCI_QH_STS_DO_START_SPLIT;
516 	}
517 
518 	/* Assign endpoint's maxpacketsize */
519 	maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
520 	maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
521 	ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
522 
523 	Set_QH(qh->qh_ctrl, ctrl);
524 	Set_QH(qh->qh_split_ctrl, split_ctrl);
525 	Set_QH(qh->qh_status, status);
526 }
527 
528 
529 /*
530  * ehci_insert_qh:
531  *
532  * Add the Endpoint Descriptor (QH) into the Host Controller's
533  * (HC) appropriate endpoint list.
534  */
535 void
536 ehci_insert_qh(
537 	ehci_state_t		*ehcip,
538 	usba_pipe_handle_data_t	*ph)
539 {
540 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
541 
542 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
543 	    "ehci_insert_qh: qh=0x%p", pp->pp_qh);
544 
545 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
546 
547 	switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
548 	case USB_EP_ATTR_CONTROL:
549 	case USB_EP_ATTR_BULK:
550 		ehci_insert_async_qh(ehcip, pp);
551 		ehcip->ehci_open_async_count++;
552 		break;
553 	case USB_EP_ATTR_INTR:
554 		ehci_insert_intr_qh(ehcip, pp);
555 		ehcip->ehci_open_periodic_count++;
556 		break;
557 	case USB_EP_ATTR_ISOCH:
558 		/* ISOCH does not use QH, don't do anything but update count */
559 		ehcip->ehci_open_periodic_count++;
560 		break;
561 	}
562 	ehci_toggle_scheduler(ehcip);
563 }
564 
565 
566 /*
567  * ehci_insert_async_qh:
568  *
569  * Insert a control/bulk endpoint into the Host Controller's (HC)
570  * Asynchronous schedule endpoint list.
571  */
572 static void
573 ehci_insert_async_qh(
574 	ehci_state_t		*ehcip,
575 	ehci_pipe_private_t	*pp)
576 {
577 	ehci_qh_t		*qh = pp->pp_qh;
578 	ehci_qh_t		*async_head_qh;
579 	ehci_qh_t		*next_qh;
580 	uintptr_t		qh_addr;
581 
582 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
583 	    "ehci_insert_async_qh:");
584 
585 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
586 
587 	/* Make sure this QH is not already in the list */
588 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
589 
590 	qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);
591 
592 	/* Obtain a ptr to the head of the Async schedule list */
593 	async_head_qh = ehcip->ehci_head_of_async_sched_list;
594 
595 	if (async_head_qh == NULL) {
596 		/* Set this QH to be the "head" of the circular list */
597 		Set_QH(qh->qh_ctrl,
598 		    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));
599 
600 		/* Set new QH's link and previous pointer to itself */
601 		Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
602 		Set_QH(qh->qh_prev, qh_addr);
603 
604 		ehcip->ehci_head_of_async_sched_list = qh;
605 
606 		/* Set the head ptr to the new endpoint */
607 		Set_OpReg(ehci_async_list_addr, qh_addr);
608 	} else {
609 		ASSERT(Get_QH(async_head_qh->qh_ctrl) &
610 		    EHCI_QH_CTRL_RECLAIM_HEAD);
611 
612 		/* Ensure this QH's "H" bit is not set */
613 		Set_QH(qh->qh_ctrl,
614 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));
615 
616 		next_qh = ehci_qh_iommu_to_cpu(ehcip,
617 		    Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
618 
619 		/* Set new QH's link and previous pointers */
620 		Set_QH(qh->qh_link_ptr,
621 		    Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
622 		Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));
623 
624 		/* Set next QH's prev pointer */
625 		Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));
626 
627 		/* Set QH Head's link pointer points to new QH */
628 		Set_QH(async_head_qh->qh_link_ptr,
629 		    qh_addr | EHCI_QH_LINK_REF_QH);
630 	}
631 }
632 
633 
634 /*
635  * ehci_insert_intr_qh:
636  *
637  * Insert a interrupt endpoint into the Host Controller's (HC) interrupt
638  * lattice tree.
639  */
640 static void
641 ehci_insert_intr_qh(
642 	ehci_state_t		*ehcip,
643 	ehci_pipe_private_t	*pp)
644 {
645 	ehci_qh_t		*qh = pp->pp_qh;
646 	ehci_qh_t		*next_lattice_qh, *lattice_qh;
647 	uint_t			hnode;
648 
649 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
650 	    "ehci_insert_intr_qh:");
651 
652 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
653 
654 	/* Make sure this QH is not already in the list */
655 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
656 
657 	/*
658 	 * The appropriate high speed node was found
659 	 * during the opening of the pipe.
660 	 */
661 	hnode = pp->pp_pnode;
662 
663 	/* Find the lattice endpoint */
664 	lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];
665 
666 	/* Find the next lattice endpoint */
667 	next_lattice_qh = ehci_qh_iommu_to_cpu(
668 	    ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));
669 
670 	/* Update the previous pointer */
671 	Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));
672 
673 	/* Check next_lattice_qh value */
674 	if (next_lattice_qh) {
675 		/* Update this qh to point to the next one in the lattice */
676 		Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));
677 
678 		/* Update the previous pointer of qh->qh_link_ptr */
679 		if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
680 			Set_QH(next_lattice_qh->qh_prev,
681 			    ehci_qh_cpu_to_iommu(ehcip, qh));
682 		}
683 	} else {
684 		/* Update qh's link pointer to terminate periodic list */
685 		Set_QH(qh->qh_link_ptr,
686 		    (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
687 	}
688 
689 	/* Insert this endpoint into the lattice */
690 	Set_QH(lattice_qh->qh_link_ptr,
691 	    (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
692 }
693 
694 
695 /*
696  * ehci_modify_qh_status_bit:
697  *
698  * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
699  *
700  * If several threads try to halt the same pipe, they will need to wait on
701  * a condition variable.  Only one thread is allowed to halt or unhalt the
702  * pipe at a time.
703  *
704  * Usually after a halt pipe, an unhalt pipe will follow soon after.  There
705  * is an assumption that an Unhalt pipe will never occur without a halt pipe.
706  */
707 static void
708 ehci_modify_qh_status_bit(
709 	ehci_state_t		*ehcip,
710 	ehci_pipe_private_t	*pp,
711 	halt_bit_t		action)
712 {
713 	ehci_qh_t		*qh = pp->pp_qh;
714 	uint_t			smask, eps, split_intr_qh;
715 	uint_t			status;
716 
717 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
718 	    "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
719 	    action, qh);
720 
721 	ehci_print_qh(ehcip, qh);
722 
723 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
724 
725 	/*
726 	 * If this pipe is in the middle of halting don't allow another
727 	 * thread to come in and modify the same pipe.
728 	 */
729 	while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {
730 
731 		cv_wait(&pp->pp_halt_cmpl_cv,
732 			    &ehcip->ehci_int_mutex);
733 	}
734 
735 	/* Sync the QH QTD pool to get up to date information */
736 	Sync_QH_QTD_Pool(ehcip);
737 
738 
739 	if (action == CLEAR_HALT) {
740 		/*
741 		 * If the halt bit is to be cleared, just clear it.
742 		 * there shouldn't be any race condition problems.
743 		 * If the host controller reads the bit before the
744 		 * driver has a chance to set the bit, the bit will
745 		 * be reread on the next frame.
746 		 */
747 		Set_QH(qh->qh_ctrl,
748 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
749 		Set_QH(qh->qh_status,
750 		    Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));
751 
752 		goto success;
753 	}
754 
755 	/* Halt the the QH, but first check to see if it is already halted */
756 	status = Get_QH(qh->qh_status);
757 	if (!(status & EHCI_QH_STS_HALTED)) {
758 		/* Indicate that this pipe is in the middle of halting. */
759 		pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;
760 
761 		/*
762 		 * Find out if this is an full/low speed interrupt endpoint.
763 		 * A non-zero Cmask indicates that this QH is an interrupt
764 		 * endpoint.  Check the endpoint speed to see if it is either
765 		 * FULL or LOW .
766 		 */
767 		smask = Get_QH(qh->qh_split_ctrl) &
768 		    EHCI_QH_SPLIT_CTRL_INTR_MASK;
769 		eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
770 		split_intr_qh = ((smask != 0) &&
771 		    (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));
772 
773 		if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
774 			ehci_halt_hs_qh(ehcip, pp, qh);
775 		} else {
776 			if (split_intr_qh) {
777 				ehci_halt_fls_intr_qh(ehcip, qh);
778 			} else {
779 				ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
780 			}
781 		}
782 
783 		/* Indicate that this pipe is not in the middle of halting. */
784 		pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
785 	}
786 
787 	/* Sync the QH QTD pool again to get the most up to date information */
788 	Sync_QH_QTD_Pool(ehcip);
789 
790 	ehci_print_qh(ehcip, qh);
791 
792 	status = Get_QH(qh->qh_status);
793 	if (!(status & EHCI_QH_STS_HALTED)) {
794 		USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
795 		    "ehci_modify_qh_status_bit: Failed to halt qh=0x%p", qh);
796 
797 		ehci_print_qh(ehcip, qh);
798 
799 		/* Set host controller soft state to error */
800 		ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
801 
802 		ASSERT(status & EHCI_QH_STS_HALTED);
803 	}
804 
805 success:
806 	/* Wake up threads waiting for this pipe to be halted. */
807 	cv_signal(&pp->pp_halt_cmpl_cv);
808 }
809 
810 
811 /*
812  * ehci_halt_hs_qh:
813  *
814  * Halts all types of HIGH SPEED QHs.
815  */
816 static void
817 ehci_halt_hs_qh(
818 	ehci_state_t		*ehcip,
819 	ehci_pipe_private_t	*pp,
820 	ehci_qh_t		*qh)
821 {
822 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
823 
824 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
825 	    "ehci_halt_hs_qh:");
826 
827 	/* Remove this qh from the HCD's view, but do not reclaim it */
828 	ehci_remove_qh(ehcip, pp, B_FALSE);
829 
830 	/*
831 	 * Wait for atleast one SOF, just in case the HCD is in the
832 	 * middle accessing this QH.
833 	 */
834 	(void) ehci_wait_for_sof(ehcip);
835 
836 	/* Sync the QH QTD pool to get up to date information */
837 	Sync_QH_QTD_Pool(ehcip);
838 
839 	/* Modify the status bit and halt this QH. */
840 	Set_QH(qh->qh_status,
841 	    ((Get_QH(qh->qh_status) &
842 		~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
843 
844 	/* Insert this QH back into the HCD's view */
845 	ehci_insert_qh(ehcip, ph);
846 }
847 
848 
849 /*
850  * ehci_halt_fls_ctrl_and_bulk_qh:
851  *
852  * Halts FULL/LOW Ctrl and Bulk QHs only.
853  */
854 static void
855 ehci_halt_fls_ctrl_and_bulk_qh(
856 	ehci_state_t		*ehcip,
857 	ehci_pipe_private_t	*pp,
858 	ehci_qh_t		*qh)
859 {
860 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
861 	uint_t			status, split_status, bytes_left;
862 
863 
864 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
865 	    "ehci_halt_fls_ctrl_and_bulk_qh:");
866 
867 	/* Remove this qh from the HCD's view, but do not reclaim it */
868 	ehci_remove_qh(ehcip, pp, B_FALSE);
869 
870 	/*
871 	 * Wait for atleast one SOF, just in case the HCD is in the
872 	 * middle accessing this QH.
873 	 */
874 	(void) ehci_wait_for_sof(ehcip);
875 
876 	/* Sync the QH QTD pool to get up to date information */
877 	Sync_QH_QTD_Pool(ehcip);
878 
879 	/* Modify the status bit and halt this QH. */
880 	Set_QH(qh->qh_status,
881 	    ((Get_QH(qh->qh_status) &
882 		~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
883 
884 	/* Check to see if the QH was in the middle of a transaction */
885 	status = Get_QH(qh->qh_status);
886 	split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
887 	bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
888 	if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
889 	    (bytes_left != 0)) {
890 		/* send ClearTTBuffer to this device's parent 2.0 hub */
891 		ehci_clear_tt_buffer(ehcip, ph, qh);
892 	}
893 
894 	/* Insert this QH back into the HCD's view */
895 	ehci_insert_qh(ehcip, ph);
896 }
897 
898 
899 /*
900  * ehci_clear_tt_buffer
901  *
902  * This function will sent a Clear_TT_Buffer request to the pipe's
903  * parent 2.0 hub.
904  */
905 static void
906 ehci_clear_tt_buffer(
907 	ehci_state_t		*ehcip,
908 	usba_pipe_handle_data_t	*ph,
909 	ehci_qh_t		*qh)
910 {
911 	usba_device_t		*usba_device;
912 	usba_device_t		*hub_usba_device;
913 	usb_pipe_handle_t	hub_def_ph;
914 	usb_ep_descr_t		*eptd;
915 	uchar_t			attributes;
916 	uint16_t		wValue;
917 	usb_ctrl_setup_t	setup;
918 	usb_cr_t		completion_reason;
919 	usb_cb_flags_t		cb_flags;
920 	int			retry;
921 
922 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
923 	    "ehci_clear_tt_buffer: ");
924 
925 	/* Get some information about the current pipe */
926 	usba_device = ph->p_usba_device;
927 	eptd = &ph->p_ep;
928 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
929 
930 	/*
931 	 * Create the wIndex for this request (usb spec 11.24.2.3)
932 	 * 3..0		Endpoint Number
933 	 * 10..4	Device Address
934 	 * 12..11	Endpoint Type
935 	 * 14..13	Reserved (must be 0)
936 	 * 15		Direction 1 = IN, 0 = OUT
937 	 */
938 	wValue = 0;
939 	if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
940 		wValue |= 0x8000;
941 	}
942 	wValue |= attributes << 11;
943 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
944 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
945 	    EHCI_QH_CTRL_ED_NUMBER_SHIFT;
946 
947 	mutex_exit(&ehcip->ehci_int_mutex);
948 
949 	/* Manually fill in the request. */
950 	setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
951 	setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
952 	setup.wValue = wValue;
953 	setup.wIndex = 1;
954 	setup.wLength = 0;
955 	setup.attrs = USB_ATTRS_NONE;
956 
957 	/* Get the usba_device of the parent 2.0 hub. */
958 	mutex_enter(&usba_device->usb_mutex);
959 	hub_usba_device = usba_device->usb_hs_hub_usba_dev;
960 	mutex_exit(&usba_device->usb_mutex);
961 
962 	/* Get the default ctrl pipe for the parent 2.0 hub */
963 	mutex_enter(&hub_usba_device->usb_mutex);
964 	hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
965 	mutex_exit(&hub_usba_device->usb_mutex);
966 
967 	for (retry = 0; retry < 3; retry++) {
968 
969 		/* sync send the request to the default pipe */
970 		if (usb_pipe_ctrl_xfer_wait(
971 		    hub_def_ph,
972 		    &setup,
973 		    NULL,
974 		    &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
975 
976 			break;
977 		}
978 
979 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
980 		    "ehci_clear_tt_buffer: Failed to clear tt buffer,"
981 		    "retry = %d, cr = %d, cb_flags = 0x%x\n",
982 		    retry, completion_reason, cb_flags);
983 	}
984 
985 	if (retry >= 3) {
986 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
987 		dev_info_t *dip = hub_usba_device->usb_dip;
988 
989 		/*
990 		 * Ask the user to hotplug the 2.0 hub, to make sure that
991 		 * all the buffer is in sync since this command has failed.
992 		 */
993 		USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
994 		    "Error recovery failure: Please hotplug the 2.0 hub at"
995 		    "%s", ddi_pathname(dip, path));
996 
997 		kmem_free(path, MAXPATHLEN);
998 	}
999 
1000 	mutex_enter(&ehcip->ehci_int_mutex);
1001 }
1002 
1003 /*
1004  * ehci_halt_fls_intr_qh:
1005  *
1006  * Halts FULL/LOW speed Intr QHs.
1007  */
1008 static void
1009 ehci_halt_fls_intr_qh(
1010 	ehci_state_t		*ehcip,
1011 	ehci_qh_t		*qh)
1012 {
1013 	usb_frame_number_t	starting_frame;
1014 	usb_frame_number_t	frames_past;
1015 	uint_t			status, i;
1016 
1017 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1018 	    "ehci_halt_fls_intr_qh:");
1019 
1020 	/*
1021 	 * Ask the HC to deactivate the QH in a
1022 	 * full/low periodic QH.
1023 	 */
1024 	Set_QH(qh->qh_ctrl,
1025 	    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));
1026 
1027 	starting_frame = ehci_get_current_frame_number(ehcip);
1028 
1029 	/*
1030 	 * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frame or until
1031 	 * the QH has been halted.
1032 	 */
1033 	Sync_QH_QTD_Pool(ehcip);
1034 	frames_past = 0;
1035 	status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1036 
1037 	while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
1038 	    (status != 0)) {
1039 
1040 		(void) ehci_wait_for_sof(ehcip);
1041 
1042 		Sync_QH_QTD_Pool(ehcip);
1043 		status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1044 		frames_past = ehci_get_current_frame_number(ehcip) -
1045 		    starting_frame;
1046 	}
1047 
1048 	/* Modify the status bit and halt this QH. */
1049 	Sync_QH_QTD_Pool(ehcip);
1050 
1051 	status = Get_QH(qh->qh_status);
1052 
1053 	for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
1054 		Set_QH(qh->qh_status,
1055 			((Get_QH(qh->qh_status) &
1056 			~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
1057 
1058 		Sync_QH_QTD_Pool(ehcip);
1059 
1060 		(void) ehci_wait_for_sof(ehcip);
1061 		Sync_QH_QTD_Pool(ehcip);
1062 
1063 		if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {
1064 
1065 			break;
1066 		}
1067 	}
1068 
1069 	Sync_QH_QTD_Pool(ehcip);
1070 
1071 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1072 	    "ehci_halt_fls_intr_qh: qh=0x%p frames past=%d, status=0x%x, 0x%x",
1073 	    qh, ehci_get_current_frame_number(ehcip) - starting_frame,
1074 	    status, Get_QH(qh->qh_status));
1075 }
1076 
1077 
1078 /*
1079  * ehci_remove_qh:
1080  *
1081  * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1082  * endpoint list.
1083  */
1084 void
1085 ehci_remove_qh(
1086 	ehci_state_t		*ehcip,
1087 	ehci_pipe_private_t	*pp,
1088 	boolean_t		reclaim)
1089 {
1090 	uchar_t			attributes;
1091 
1092 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1093 
1094 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1095 	    "ehci_remove_qh: qh=0x%p", pp->pp_qh);
1096 
1097 	attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1098 
1099 	switch (attributes) {
1100 	case USB_EP_ATTR_CONTROL:
1101 	case USB_EP_ATTR_BULK:
1102 		ehci_remove_async_qh(ehcip, pp, reclaim);
1103 		ehcip->ehci_open_async_count--;
1104 		break;
1105 	case USB_EP_ATTR_INTR:
1106 		ehci_remove_intr_qh(ehcip, pp, reclaim);
1107 		ehcip->ehci_open_periodic_count--;
1108 		break;
1109 	case USB_EP_ATTR_ISOCH:
1110 		/* ISOCH does not use QH, don't do anything but update count */
1111 		ehcip->ehci_open_periodic_count--;
1112 		break;
1113 	}
1114 	ehci_toggle_scheduler(ehcip);
1115 }
1116 
1117 
1118 /*
1119  * ehci_remove_async_qh:
1120  *
1121  * Remove a control/bulk endpoint into the Host Controller's (HC)
1122  * Asynchronous schedule endpoint list.
1123  */
1124 static void
1125 ehci_remove_async_qh(
1126 	ehci_state_t		*ehcip,
1127 	ehci_pipe_private_t	*pp,
1128 	boolean_t		reclaim)
1129 {
1130 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1131 	ehci_qh_t		*prev_qh, *next_qh;
1132 
1133 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1134 	    "ehci_remove_async_qh:");
1135 
1136 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1137 
1138 	prev_qh = ehci_qh_iommu_to_cpu(ehcip,
1139 	    Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
1140 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1141 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1142 
1143 	/* Make sure this QH is in the list */
1144 	ASSERT(prev_qh != NULL);
1145 
1146 	/*
1147 	 * If next QH and current QH are the same, then this is the last
1148 	 * QH on the Asynchronous Schedule list.
1149 	 */
1150 	if (qh == next_qh) {
1151 		ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1152 		/*
1153 		 * Null our pointer to the async sched list, but do not
1154 		 * touch the host controller's list_addr.
1155 		 */
1156 		ehcip->ehci_head_of_async_sched_list = NULL;
1157 		ASSERT(ehcip->ehci_open_async_count == 1);
1158 	} else {
1159 		/* If this QH is the HEAD then find another one to replace it */
1160 		if (ehcip->ehci_head_of_async_sched_list == qh) {
1161 
1162 			ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1163 			ehcip->ehci_head_of_async_sched_list = next_qh;
1164 			Set_QH(next_qh->qh_ctrl,
1165 			    Get_QH(next_qh->qh_ctrl) |
1166 			    EHCI_QH_CTRL_RECLAIM_HEAD);
1167 		}
1168 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1169 		Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1170 	}
1171 
1172 	/* qh_prev to indicate it is no longer in the circular list */
1173 	Set_QH(qh->qh_prev, NULL);
1174 
1175 	if (reclaim) {
1176 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1177 	}
1178 }
1179 
1180 
1181 /*
1182  * ehci_remove_intr_qh:
1183  *
1184  * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1185  * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1186  * interrupt handler.
1187  */
1188 static void
1189 ehci_remove_intr_qh(
1190 	ehci_state_t		*ehcip,
1191 	ehci_pipe_private_t	*pp,
1192 	boolean_t		reclaim)
1193 {
1194 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1195 	ehci_qh_t		*prev_qh, *next_qh;
1196 
1197 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1198 	    "ehci_remove_intr_qh:");
1199 
1200 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1201 
1202 	prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
1203 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1204 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1205 
1206 	/* Make sure this QH is in the list */
1207 	ASSERT(prev_qh != NULL);
1208 
1209 	if (next_qh) {
1210 		/* Update previous qh's link pointer */
1211 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1212 
1213 		if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
1214 			/* Set the previous pointer of the next one */
1215 			Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1216 		}
1217 	} else {
1218 		/* Update previous qh's link pointer */
1219 		Set_QH(prev_qh->qh_link_ptr,
1220 		    (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
1221 	}
1222 
1223 	/* qh_prev to indicate it is no longer in the circular list */
1224 	Set_QH(qh->qh_prev, NULL);
1225 
1226 	if (reclaim) {
1227 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1228 	}
1229 }
1230 
1231 
1232 /*
1233  * ehci_insert_qh_on_reclaim_list:
1234  *
1235  * Insert Endpoint onto the reclaim list
1236  */
1237 static void
1238 ehci_insert_qh_on_reclaim_list(
1239 	ehci_state_t		*ehcip,
1240 	ehci_pipe_private_t	*pp)
1241 {
1242 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1243 	ehci_qh_t		*next_qh, *prev_qh;
1244 	usb_frame_number_t	frame_number;
1245 
1246 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1247 
1248 	/*
1249 	 * Read current usb frame number and add appropriate number of
1250 	 * usb frames needs to wait before reclaiming current endpoint.
1251 	 */
1252 	frame_number =
1253 	    ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;
1254 
1255 	/* Store 32-bit ID */
1256 	Set_QH(qh->qh_reclaim_frame,
1257 	    ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));
1258 
1259 	/* Insert the endpoint onto the reclamation list */
1260 	if (ehcip->ehci_reclaim_list) {
1261 		next_qh = ehcip->ehci_reclaim_list;
1262 
1263 		while (next_qh) {
1264 			prev_qh = next_qh;
1265 			next_qh = ehci_qh_iommu_to_cpu(ehcip,
1266 			    Get_QH(next_qh->qh_reclaim_next));
1267 		}
1268 
1269 		Set_QH(prev_qh->qh_reclaim_next,
1270 		    ehci_qh_cpu_to_iommu(ehcip, qh));
1271 	} else {
1272 		ehcip->ehci_reclaim_list = qh;
1273 	}
1274 
1275 	ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
1276 }
1277 
1278 
1279 /*
1280  * ehci_deallocate_qh:
1281  *
1282  * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1283  *
1284  * NOTE: This function is also called from POLLED MODE.
1285  */
1286 void
1287 ehci_deallocate_qh(
1288 	ehci_state_t	*ehcip,
1289 	ehci_qh_t	*old_qh)
1290 {
1291 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1292 
1293 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1294 	    "ehci_deallocate_qh:");
1295 
1296 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1297 
1298 	first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1299 	    (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));
1300 
1301 	if (first_dummy_qtd) {
1302 		ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);
1303 
1304 		second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1305 		    Get_QTD(first_dummy_qtd->qtd_next_qtd));
1306 
1307 		if (second_dummy_qtd) {
1308 			ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
1309 			    EHCI_QTD_DUMMY);
1310 
1311 			ehci_deallocate_qtd(ehcip, second_dummy_qtd);
1312 		}
1313 
1314 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1315 	}
1316 
1317 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1318 	    "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);
1319 
1320 	bzero((void *)old_qh, sizeof (ehci_qh_t));
1321 	Set_QH(old_qh->qh_state, EHCI_QH_FREE);
1322 }
1323 
1324 
1325 /*
1326  * ehci_qh_cpu_to_iommu:
1327  *
1328  * This function converts for the given Endpoint Descriptor (QH) CPU address
1329  * to IO address.
1330  *
1331  * NOTE: This function is also called from POLLED MODE.
1332  */
1333 uint32_t
1334 ehci_qh_cpu_to_iommu(
1335 	ehci_state_t	*ehcip,
1336 	ehci_qh_t	*addr)
1337 {
1338 	uint32_t	qh;
1339 
1340 	qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1341 	    (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1342 
1343 	ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1344 	ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1345 	    sizeof (ehci_qh_t) * ehci_qh_pool_size);
1346 
1347 	return (qh);
1348 }
1349 
1350 
1351 /*
1352  * ehci_qh_iommu_to_cpu:
1353  *
1354  * This function converts for the given Endpoint Descriptor (QH) IO address
1355  * to CPU address.
1356  */
1357 ehci_qh_t *
1358 ehci_qh_iommu_to_cpu(
1359 	ehci_state_t	*ehcip,
1360 	uintptr_t	addr)
1361 {
1362 	ehci_qh_t	*qh;
1363 
1364 	if (addr == NULL) {
1365 
1366 		return (NULL);
1367 	}
1368 
1369 	qh = (ehci_qh_t *)((uintptr_t)
1370 	    (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1371 	    (uintptr_t)ehcip->ehci_qh_pool_addr);
1372 
1373 	ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1374 	ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1375 	    (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1376 
1377 	return (qh);
1378 }
1379 
1380 
1381 /*
1382  * Transfer Descriptor manipulations functions
1383  */
1384 
1385 /*
1386  * ehci_initialize_dummy:
1387  *
1388  * An Endpoint Descriptor (QH) has a  dummy Transfer Descriptor (QTD) on the
1389  * end of its QTD list. Initially, both the head and tail pointers of the QH
1390  * point to the dummy QTD.
1391  */
1392 static int
1393 ehci_initialize_dummy(
1394 	ehci_state_t	*ehcip,
1395 	ehci_qh_t	*qh)
1396 {
1397 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1398 
1399 	/* Allocate first dummy QTD */
1400 	first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1401 
1402 	if (first_dummy_qtd == NULL) {
1403 		return (USB_NO_RESOURCES);
1404 	}
1405 
1406 	/* Allocate second dummy QTD */
1407 	second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1408 
1409 	if (second_dummy_qtd == NULL) {
1410 		/* Deallocate first dummy QTD */
1411 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1412 
1413 		return (USB_NO_RESOURCES);
1414 	}
1415 
1416 	/* Next QTD pointer of an QH point to this new dummy QTD */
1417 	Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
1418 	    first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);
1419 
1420 	/* Set qh's dummy qtd field */
1421 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));
1422 
1423 	/* Set first_dummy's next qtd pointer */
1424 	Set_QTD(first_dummy_qtd->qtd_next_qtd,
1425 	    ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));
1426 
1427 	return (USB_SUCCESS);
1428 }
1429 
1430 /*
1431  * ehci_allocate_ctrl_resources:
1432  *
1433  * Calculates the number of tds necessary for a ctrl transfer, and allocates
1434  * all the resources necessary.
1435  *
1436  * Returns NULL if there is insufficient resources otherwise TW.
1437  */
1438 ehci_trans_wrapper_t *
1439 ehci_allocate_ctrl_resources(
1440 	ehci_state_t		*ehcip,
1441 	ehci_pipe_private_t	*pp,
1442 	usb_ctrl_req_t		*ctrl_reqp,
1443 	usb_flags_t		usb_flags)
1444 {
1445 	size_t			qtd_count = 2;
1446 	ehci_trans_wrapper_t	*tw;
1447 
1448 	/* Add one more td for data phase */
1449 	if (ctrl_reqp->ctrl_wLength) {
1450 		qtd_count += 1;
1451 	}
1452 
1453 	tw = ehci_allocate_tw_resources(ehcip, pp,
1454 	    ctrl_reqp->ctrl_wLength + SETUP_SIZE,
1455 	    usb_flags, qtd_count);
1456 
1457 	return (tw);
1458 }
1459 
1460 /*
1461  * ehci_insert_ctrl_req:
1462  *
1463  * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1464  */
1465 /* ARGSUSED */
1466 void
1467 ehci_insert_ctrl_req(
1468 	ehci_state_t		*ehcip,
1469 	usba_pipe_handle_data_t	*ph,
1470 	usb_ctrl_req_t		*ctrl_reqp,
1471 	ehci_trans_wrapper_t	*tw,
1472 	usb_flags_t		usb_flags)
1473 {
1474 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1475 	uchar_t			bmRequestType = ctrl_reqp->ctrl_bmRequestType;
1476 	uchar_t			bRequest = ctrl_reqp->ctrl_bRequest;
1477 	uint16_t		wValue = ctrl_reqp->ctrl_wValue;
1478 	uint16_t		wIndex = ctrl_reqp->ctrl_wIndex;
1479 	uint16_t		wLength = ctrl_reqp->ctrl_wLength;
1480 	mblk_t			*data = ctrl_reqp->ctrl_data;
1481 	uint32_t		ctrl = 0;
1482 	uint8_t			setup_packet[8];
1483 
1484 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1485 	    "ehci_insert_ctrl_req:");
1486 
1487 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1488 
1489 	/*
1490 	 * Save current control request pointer and timeout values
1491 	 * in transfer wrapper.
1492 	 */
1493 	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
1494 	tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
1495 	    ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;
1496 
1497 	/*
1498 	 * Initialize the callback and any callback data for when
1499 	 * the qtd completes.
1500 	 */
1501 	tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
1502 	tw->tw_handle_callback_value = NULL;
1503 
1504 	/*
1505 	 * swap the setup bytes where necessary since we specified
1506 	 * NEVERSWAP
1507 	 */
1508 	setup_packet[0] = bmRequestType;
1509 	setup_packet[1] = bRequest;
1510 	setup_packet[2] = wValue;
1511 	setup_packet[3] = wValue >> 8;
1512 	setup_packet[4] = wIndex;
1513 	setup_packet[5] = wIndex >> 8;
1514 	setup_packet[6] = wLength;
1515 	setup_packet[7] = wLength >> 8;
1516 
1517 	bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);
1518 
1519 	Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);
1520 
1521 	ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);
1522 
1523 	/*
1524 	 * The QTD's are placed on the QH one at a time.
1525 	 * Once this QTD is placed on the done list, the
1526 	 * data or status phase QTD will be enqueued.
1527 	 */
1528 	(void) ehci_insert_qtd(ehcip, ctrl,
1529 	    tw->tw_cookie.dmac_address, SETUP_SIZE,
1530 	    EHCI_CTRL_SETUP_PHASE, pp, tw);
1531 
1532 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1533 	    "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);
1534 
1535 	/*
1536 	 * If this control transfer has a data phase, record the
1537 	 * direction. If the data phase is an OUT transaction,
1538 	 * copy the data into the buffer of the transfer wrapper.
1539 	 */
1540 	if (wLength != 0) {
1541 		/* There is a data stage.  Find the direction */
1542 		if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
1543 			tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
1544 		} else {
1545 			tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
1546 
1547 			/* Copy the data into the message */
1548 			bcopy(data->b_rptr, tw->tw_buf + SETUP_SIZE,
1549 						wLength);
1550 
1551 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1552 						wLength + SETUP_SIZE);
1553 		}
1554 
1555 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);
1556 
1557 		/*
1558 		 * Create the QTD.  If this is an OUT transaction,
1559 		 * the data is already in the buffer of the TW.
1560 		 */
1561 		(void) ehci_insert_qtd(ehcip, ctrl,
1562 		    tw->tw_cookie.dmac_address + SETUP_SIZE,
1563 		    tw->tw_length - SETUP_SIZE, EHCI_CTRL_DATA_PHASE,
1564 		    pp, tw);
1565 
1566 		/*
1567 		 * The direction of the STATUS QTD depends  on
1568 		 * the direction of the transfer.
1569 		 */
1570 		if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
1571 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1572 			    EHCI_QTD_CTRL_OUT_PID |
1573 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1574 		} else {
1575 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1576 			    EHCI_QTD_CTRL_IN_PID |
1577 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1578 		}
1579 	} else {
1580 		/*
1581 		 * There is no data stage,  then initiate
1582 		 * status phase from the host.
1583 		 */
1584 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
1585 		    EHCI_QTD_CTRL_IN_PID |
1586 		    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1587 	}
1588 
1589 
1590 	(void) ehci_insert_qtd(ehcip, ctrl, NULL, 0,
1591 	    EHCI_CTRL_STATUS_PHASE, pp,  tw);
1592 
1593 	/* Start the timer for this control transfer */
1594 	ehci_start_xfer_timer(ehcip, pp, tw);
1595 }
1596 
1597 
1598 /*
1599  * ehci_allocate_bulk_resources:
1600  *
1601  * Calculates the number of tds necessary for a ctrl transfer, and allocates
1602  * all the resources necessary.
1603  *
1604  * Returns NULL if there is insufficient resources otherwise TW.
1605  */
1606 ehci_trans_wrapper_t *
1607 ehci_allocate_bulk_resources(
1608 	ehci_state_t		*ehcip,
1609 	ehci_pipe_private_t	*pp,
1610 	usb_bulk_req_t		*bulk_reqp,
1611 	usb_flags_t		usb_flags)
1612 {
1613 	size_t			qtd_count = 0;
1614 	ehci_trans_wrapper_t	*tw;
1615 
1616 	/* Check the size of bulk request */
1617 	if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1618 
1619 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1620 		    "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1621 		    "more than 0x%x", bulk_reqp->bulk_len,
1622 		    EHCI_MAX_BULK_XFER_SIZE);
1623 
1624 		return (NULL);
1625 	}
1626 
1627 	/* Get the required bulk packet size */
1628 	qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1629 	if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE) {
1630 		qtd_count += 1;
1631 	}
1632 
1633 	tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1634 	    usb_flags, qtd_count);
1635 
1636 	return (tw);
1637 }
1638 
1639 /*
1640  * ehci_insert_bulk_req:
1641  *
1642  * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1643  * endpoint.
1644  */
1645 /* ARGSUSED */
1646 void
1647 ehci_insert_bulk_req(
1648 	ehci_state_t		*ehcip,
1649 	usba_pipe_handle_data_t	*ph,
1650 	usb_bulk_req_t		*bulk_reqp,
1651 	ehci_trans_wrapper_t	*tw,
1652 	usb_flags_t		flags)
1653 {
1654 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1655 	uint_t			bulk_pkt_size, count;
1656 	size_t			residue = 0, len = 0;
1657 	uint32_t		ctrl = 0;
1658 	int			pipe_dir;
1659 
1660 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1661 	    "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
1662 	    bulk_reqp, flags);
1663 
1664 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1665 
1666 	/* Get the bulk pipe direction */
1667 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
1668 
1669 	/* Get the required bulk packet size */
1670 	bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);
1671 
1672 	residue = tw->tw_length % bulk_pkt_size;
1673 
1674 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1675 	    "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);
1676 
1677 	/*
1678 	 * Save current bulk request pointer and timeout values
1679 	 * in transfer wrapper.
1680 	 */
1681 	tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
1682 	tw->tw_timeout = bulk_reqp->bulk_timeout;
1683 
1684 	/*
1685 	 * Initialize the callback and any callback
1686 	 * data required when the qtd completes.
1687 	 */
1688 	tw->tw_handle_qtd = ehci_handle_bulk_qtd;
1689 	tw->tw_handle_callback_value = NULL;
1690 
1691 	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1692 	    EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;
1693 
1694 	if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {
1695 
1696 		ASSERT(bulk_reqp->bulk_data != NULL);
1697 
1698 		bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
1699 			bulk_reqp->bulk_len);
1700 
1701 		Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1702 			bulk_reqp->bulk_len);
1703 	}
1704 
1705 	ctrl = tw->tw_direction;
1706 
1707 	/* Insert all the bulk QTDs */
1708 	for (count = 0; count < tw->tw_num_qtds; count++) {
1709 
1710 		/* Check for last qtd */
1711 		if (count == (tw->tw_num_qtds - 1)) {
1712 
1713 			ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;
1714 
1715 			/* Check for inserting residue data */
1716 			if (residue) {
1717 				bulk_pkt_size = residue;
1718 			}
1719 		}
1720 
1721 		/* Insert the QTD onto the endpoint */
1722 		(void) ehci_insert_qtd(ehcip, ctrl,
1723 		    tw->tw_cookie.dmac_address + len,
1724 		    bulk_pkt_size, 0, pp, tw);
1725 
1726 		len = len + bulk_pkt_size;
1727 	}
1728 
1729 	/* Start the timer for this bulk transfer */
1730 	ehci_start_xfer_timer(ehcip, pp, tw);
1731 }
1732 
1733 
1734 /*
1735  * ehci_start_periodic_pipe_polling:
1736  *
1737  * NOTE: This function is also called from POLLED MODE.
1738  */
1739 int
1740 ehci_start_periodic_pipe_polling(
1741 	ehci_state_t		*ehcip,
1742 	usba_pipe_handle_data_t	*ph,
1743 	usb_opaque_t		periodic_in_reqp,
1744 	usb_flags_t		flags)
1745 {
1746 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1747 	usb_ep_descr_t		*eptd = &ph->p_ep;
1748 	int			error = USB_SUCCESS;
1749 
1750 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
1751 	    "ehci_start_periodic_pipe_polling: ep%d",
1752 	    ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);
1753 
1754 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1755 
1756 	/*
1757 	 * Check and handle start polling on root hub interrupt pipe.
1758 	 */
1759 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
1760 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
1761 	    USB_EP_ATTR_INTR)) {
1762 
1763 		error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
1764 		    (usb_intr_req_t *)periodic_in_reqp, flags);
1765 
1766 		return (error);
1767 	}
1768 
1769 	switch (pp->pp_state) {
1770 	case EHCI_PIPE_STATE_IDLE:
1771 		/* Save the Original client's Periodic IN request */
1772 		pp->pp_client_periodic_in_reqp = periodic_in_reqp;
1773 
1774 		/*
1775 		 * This pipe is uninitialized or if a valid QTD is
1776 		 * not found then insert a QTD on the interrupt IN
1777 		 * endpoint.
1778 		 */
1779 		error = ehci_start_pipe_polling(ehcip, ph, flags);
1780 
1781 		if (error != USB_SUCCESS) {
1782 			USB_DPRINTF_L2(PRINT_MASK_INTR,
1783 			    ehcip->ehci_log_hdl,
1784 			    "ehci_start_periodic_pipe_polling: "
1785 			    "Start polling failed");
1786 
1787 			pp->pp_client_periodic_in_reqp = NULL;
1788 
1789 			return (error);
1790 		}
1791 
1792 		USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
1793 		    "ehci_start_periodic_pipe_polling: PP = 0x%p", pp);
1794 
1795 #ifdef DEBUG
1796 		switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1797 		case USB_EP_ATTR_INTR:
1798 			ASSERT((pp->pp_tw_head != NULL) &&
1799 			    (pp->pp_tw_tail != NULL));
1800 			break;
1801 		case USB_EP_ATTR_ISOCH:
1802 			ASSERT((pp->pp_itw_head != NULL) &&
1803 			    (pp->pp_itw_tail != NULL));
1804 			break;
1805 		}
1806 #endif
1807 
1808 		break;
1809 	case EHCI_PIPE_STATE_ACTIVE:
1810 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1811 		    ehcip->ehci_log_hdl,
1812 		    "ehci_start_periodic_pipe_polling: "
1813 		    "Polling is already in progress");
1814 
1815 		error = USB_FAILURE;
1816 		break;
1817 	case EHCI_PIPE_STATE_ERROR:
1818 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1819 		    ehcip->ehci_log_hdl,
1820 		    "ehci_start_periodic_pipe_polling: "
1821 		    "Pipe is halted and perform reset"
1822 		    "before restart polling");
1823 
1824 		error = USB_FAILURE;
1825 		break;
1826 	default:
1827 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1828 		    ehcip->ehci_log_hdl,
1829 		    "ehci_start_periodic_pipe_polling: "
1830 		    "Undefined state");
1831 
1832 		error = USB_FAILURE;
1833 		break;
1834 	}
1835 
1836 	return (error);
1837 }
1838 
1839 
1840 /*
1841  * ehci_start_pipe_polling:
1842  *
1843  * Insert the number of periodic requests corresponding to polling
1844  * interval as calculated during pipe open.
1845  */
1846 static int
1847 ehci_start_pipe_polling(
1848 	ehci_state_t		*ehcip,
1849 	usba_pipe_handle_data_t	*ph,
1850 	usb_flags_t		flags)
1851 {
1852 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1853 	usb_ep_descr_t		*eptd = &ph->p_ep;
1854 	int			error = USB_FAILURE;
1855 
1856 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1857 	    "ehci_start_pipe_polling:");
1858 
1859 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1860 
1861 	/*
1862 	 * For the start polling, pp_max_periodic_req_cnt will be zero
1863 	 * and for the restart polling request, it will be non zero.
1864 	 *
1865 	 * In case of start polling request, find out number of requests
1866 	 * required for the Interrupt IN endpoints corresponding to the
1867 	 * endpoint polling interval. For Isochronous IN endpoints, it is
1868 	 * always fixed since its polling interval will be one ms.
1869 	 */
1870 	if (pp->pp_max_periodic_req_cnt == 0) {
1871 
1872 		ehci_set_periodic_pipe_polling(ehcip, ph);
1873 	}
1874 
1875 	ASSERT(pp->pp_max_periodic_req_cnt != 0);
1876 
1877 	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1878 	case USB_EP_ATTR_INTR:
1879 		error = ehci_start_intr_polling(ehcip, ph, flags);
1880 		break;
1881 	case USB_EP_ATTR_ISOCH:
1882 		error = ehci_start_isoc_polling(ehcip, ph, flags);
1883 		break;
1884 	}
1885 
1886 	return (error);
1887 }
1888 
/*
 * ehci_start_intr_polling:
 *
 * Pre-allocate one transfer wrapper per outstanding periodic request
 * (up to pp_max_periodic_req_cnt), then queue them all.  On allocation
 * failure every TW allocated so far is released and USB_NO_RESOURCES
 * is returned.
 */
static int
ehci_start_intr_polling(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_flags_t		flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*tw_list, *tw;
	int			i, total_tws;
	int			error = USB_SUCCESS;

	/* Allocate all the necessary resources for the IN transfer */
	tw_list = NULL;
	total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
	for (i = 0; i < total_tws; i += 1) {
		tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
		if (tw == NULL) {
			error = USB_NO_RESOURCES;
			/*
			 * There are not enough resources, deallocate the TWs.
			 * NOTE(review): the cleanup walks tw->tw_next from
			 * the first allocated TW — this relies on
			 * ehci_allocate_intr_resources linking each new TW
			 * after the previous one; confirm against
			 * ehci_allocate_tw_resources.
			 */
			tw = tw_list;
			while (tw != NULL) {
				tw_list = tw->tw_next;
				ehci_deallocate_intr_in_resource(
					ehcip, pp, tw);
				ehci_deallocate_tw(ehcip, pp, tw);
				tw = tw_list;
			}

			return (error);
		} else {
			/* Remember the first TW as the head of the batch */
			if (tw_list == NULL) {
				tw_list = tw;
			}
		}
	}

	/* Queue every pre-allocated TW until the pipe is fully primed */
	while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {

		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_start_pipe_polling: max = %d curr = %d tw = %p:",
		    pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
		    tw_list);

		tw = tw_list;
		tw_list = tw->tw_next;

		ehci_insert_intr_req(ehcip, pp, tw, flags);

		pp->pp_cur_periodic_req_cnt++;
	}

	return (error);
}
1942 
1943 
1944 /*
1945  * ehci_set_periodic_pipe_polling:
1946  *
1947  * Calculate the number of periodic requests needed corresponding to the
1948  * interrupt IN endpoints polling interval. Table below gives the number
1949  * of periodic requests needed for the interrupt IN endpoints  according
1950  * to endpoint polling interval.
1951  *
1952  * Polling interval		Number of periodic requests
1953  *
1954  * 1ms				4
1955  * 2ms				2
1956  * 4ms to 32ms			1
1957  */
1958 static void
1959 ehci_set_periodic_pipe_polling(
1960 	ehci_state_t		*ehcip,
1961 	usba_pipe_handle_data_t	*ph)
1962 {
1963 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1964 	usb_ep_descr_t		*endpoint = &ph->p_ep;
1965 	uchar_t			ep_attr = endpoint->bmAttributes;
1966 	uint_t			interval;
1967 
1968 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1969 	    "ehci_set_periodic_pipe_polling:");
1970 
1971 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1972 
1973 	pp->pp_cur_periodic_req_cnt = 0;
1974 
1975 	/*
1976 	 * Check usb flag whether USB_FLAGS_ONE_TIME_POLL flag is
1977 	 * set and if so, set pp->pp_max_periodic_req_cnt to one.
1978 	 */
1979 	if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
1980 	    (pp->pp_client_periodic_in_reqp)) {
1981 		usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
1982 					pp->pp_client_periodic_in_reqp;
1983 
1984 		if (intr_reqp->intr_attributes &
1985 		    USB_ATTRS_ONE_XFER) {
1986 
1987 			pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
1988 
1989 			return;
1990 		}
1991 	}
1992 
1993 	mutex_enter(&ph->p_usba_device->usb_mutex);
1994 
1995 	/*
1996 	 * The ehci_adjust_polling_interval function will not fail
1997 	 * at this instance since bandwidth allocation is already
1998 	 * done. Here we are getting only the periodic interval.
1999 	 */
2000 	interval = ehci_adjust_polling_interval(ehcip, endpoint,
2001 		ph->p_usba_device->usb_port_status);
2002 
2003 	mutex_exit(&ph->p_usba_device->usb_mutex);
2004 
2005 	switch (interval) {
2006 	case EHCI_INTR_1MS_POLL:
2007 		pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
2008 		break;
2009 	case EHCI_INTR_2MS_POLL:
2010 		pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
2011 		break;
2012 	default:
2013 		pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2014 		break;
2015 	}
2016 
2017 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2018 	    "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
2019 	    pp->pp_max_periodic_req_cnt);
2020 }
2021 
2022 /*
2023  * ehci_allocate_intr_resources:
2024  *
2025  * Calculates the number of tds necessary for a intr transfer, and allocates
2026  * all the necessary resources.
2027  *
2028  * Returns NULL if there is insufficient resources otherwise TW.
2029  */
2030 ehci_trans_wrapper_t *
2031 ehci_allocate_intr_resources(
2032 	ehci_state_t		*ehcip,
2033 	usba_pipe_handle_data_t	*ph,
2034 	usb_intr_req_t		*intr_reqp,
2035 	usb_flags_t		flags)
2036 {
2037 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2038 	int			pipe_dir;
2039 	size_t			qtd_count = 1;
2040 	size_t			tw_length;
2041 	ehci_trans_wrapper_t	*tw;
2042 
2043 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2044 	    "ehci_allocate_intr_resources:");
2045 
2046 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2047 
2048 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2049 
2050 	/* Get the length of interrupt transfer & alloc data */
2051 	if (intr_reqp) {
2052 		tw_length = intr_reqp->intr_len;
2053 	} else {
2054 		ASSERT(pipe_dir == USB_EP_DIR_IN);
2055 		tw_length = (pp->pp_client_periodic_in_reqp) ?
2056 		    (((usb_intr_req_t *)pp->
2057 		    pp_client_periodic_in_reqp)->intr_len) :
2058 		    ph->p_ep.wMaxPacketSize;
2059 	}
2060 
2061 	/* Check the size of interrupt request */
2062 	if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2063 
2064 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2065 		    "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2066 		    "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2067 
2068 		return (NULL);
2069 	}
2070 
2071 	if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2072 	    qtd_count)) == NULL) {
2073 
2074 		return (NULL);
2075 	}
2076 
2077 	if (pipe_dir == USB_EP_DIR_IN) {
2078 		if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
2079 		    USB_SUCCESS) {
2080 			ehci_deallocate_tw(ehcip, pp, tw);
2081 		}
2082 		tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2083 	} else {
2084 		ASSERT(intr_reqp->intr_data != NULL);
2085 
2086 		/* Copy the data into the buffer */
2087 		bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2088 		    intr_reqp->intr_len);
2089 
2090 		Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2091 		    intr_reqp->intr_len);
2092 
2093 		tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2094 		tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2095 	}
2096 
2097 	if (intr_reqp) {
2098 		tw->tw_timeout = intr_reqp->intr_timeout;
2099 	}
2100 
2101 	/*
2102 	 * Initialize the callback and any callback
2103 	 * data required when the qtd completes.
2104 	 */
2105 	tw->tw_handle_qtd = ehci_handle_intr_qtd;
2106 	tw->tw_handle_callback_value = NULL;
2107 
2108 	return (tw);
2109 }
2110 
2111 
2112 /*
2113  * ehci_insert_intr_req:
2114  *
2115  * Insert an Interrupt request into the Host Controller's periodic list.
2116  */
2117 /* ARGSUSED */
2118 void
2119 ehci_insert_intr_req(
2120 	ehci_state_t		*ehcip,
2121 	ehci_pipe_private_t	*pp,
2122 	ehci_trans_wrapper_t	*tw,
2123 	usb_flags_t		flags)
2124 {
2125 	uint_t			ctrl = 0;
2126 
2127 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2128 
2129 	ASSERT(tw->tw_curr_xfer_reqp != NULL);
2130 
2131 	ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2132 
2133 	/* Insert another interrupt QTD */
2134 	(void) ehci_insert_qtd(ehcip, ctrl,
2135 	    tw->tw_cookie.dmac_address, tw->tw_length, 0, pp, tw);
2136 
2137 	/* Start the timer for this Interrupt transfer */
2138 	ehci_start_xfer_timer(ehcip, pp, tw);
2139 }
2140 
2141 
2142 /*
2143  * ehci_stop_periodic_pipe_polling:
2144  */
2145 /* ARGSUSED */
2146 int
2147 ehci_stop_periodic_pipe_polling(
2148 	ehci_state_t		*ehcip,
2149 	usba_pipe_handle_data_t	*ph,
2150 	usb_flags_t		flags)
2151 {
2152 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2153 	usb_ep_descr_t		*eptd = &ph->p_ep;
2154 
2155 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2156 	    "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2157 
2158 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2159 
2160 	/*
2161 	 * Check and handle stop polling on root hub interrupt pipe.
2162 	 */
2163 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2164 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2165 	    USB_EP_ATTR_INTR)) {
2166 
2167 		ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2168 
2169 		return (USB_SUCCESS);
2170 	}
2171 
2172 	if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2173 
2174 		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2175 		    "ehci_stop_periodic_pipe_polling: "
2176 		    "Polling already stopped");
2177 
2178 		return (USB_SUCCESS);
2179 	}
2180 
2181 	/* Set pipe state to pipe stop polling */
2182 	pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2183 
2184 	ehci_pipe_cleanup(ehcip, ph);
2185 
2186 	return (USB_SUCCESS);
2187 }
2188 
2189 
2190 /*
2191  * ehci_insert_qtd:
2192  *
2193  * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2194  * Always returns USB_SUCCESS for now.	Once Isoch has been implemented,
2195  * it may return USB_FAILURE.
2196  */
int
ehci_insert_qtd(
	ehci_state_t		*ehcip,
	uint32_t		qtd_ctrl,
	uint32_t		qtd_iommu_cbp,
	size_t			qtd_length,
	uint32_t		qtd_ctrl_phase,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	ehci_qtd_t		*curr_dummy_qtd, *next_dummy_qtd;
	ehci_qtd_t		*new_dummy_qtd;
	ehci_qh_t		*qh = pp->pp_qh;
	int			error = USB_SUCCESS;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Pop a pre-allocated QTD off the transfer wrapper's free list;
	 * it becomes the new dummy at the tail of the QH's QTD chain.
	 * The dummy-rotation scheme lets the transfer be filled into the
	 * existing tail dummy without the HC ever seeing a broken chain.
	 */
	/* Allocate new dummy QTD */
	new_dummy_qtd = tw->tw_qtd_free_list;

	ASSERT(new_dummy_qtd != NULL);
	tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
	    Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
	Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);

	/* Get the current and next dummy QTDs */
	curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
	    Get_QH(qh->qh_dummy_qtd));
	next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
	    Get_QTD(curr_dummy_qtd->qtd_next_qtd));

	/* Update QH's dummy qtd field */
	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));

	/* Update next dummy's next qtd pointer */
	Set_QTD(next_dummy_qtd->qtd_next_qtd,
	    ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));

	/*
	 * Fill in the current dummy qtd and
	 * add the new dummy to the end.
	 */
	ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
	    qtd_iommu_cbp, qtd_length, qtd_ctrl_phase, pp, tw);

	/* Insert this qtd onto the tw */
	ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);

	/*
	 * Insert this qtd onto active qtd list.
	 * Don't insert polled mode qtd here.
	 */
	if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
		/* Insert this qtd onto active qtd list */
		ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
	}

	/* Print qh and qtd */
	ehci_print_qh(ehcip, qh);
	ehci_print_qtd(ehcip, curr_dummy_qtd);

	/* Always USB_SUCCESS today; kept for future failure modes. */
	return (error);
}
2260 
2261 
2262 /*
2263  * ehci_allocate_qtd_from_pool:
2264  *
2265  * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2266  */
2267 static ehci_qtd_t *
2268 ehci_allocate_qtd_from_pool(ehci_state_t	*ehcip)
2269 {
2270 	int		i, ctrl;
2271 	ehci_qtd_t	*qtd;
2272 
2273 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2274 
2275 	/*
2276 	 * Search for a blank Transfer Descriptor (QTD)
2277 	 * in the QTD buffer pool.
2278 	 */
2279 	for (i = 0; i < ehci_qtd_pool_size; i ++) {
2280 		ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2281 		if (ctrl == EHCI_QTD_FREE) {
2282 			break;
2283 		}
2284 	}
2285 
2286 	if (i >= ehci_qtd_pool_size) {
2287 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2288 		    "ehci_allocate_qtd_from_pool: QTD exhausted");
2289 
2290 		return (NULL);
2291 	}
2292 
2293 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2294 	    "ehci_allocate_qtd_from_pool: Allocated %d", i);
2295 
2296 	/* Create a new dummy for the end of the QTD list */
2297 	qtd = &ehcip->ehci_qtd_pool_addr[i];
2298 
2299 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2300 	    "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2301 
2302 	/* Mark the newly allocated QTD as a dummy */
2303 	Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2304 
2305 	/* Mark the status of this new QTD to halted state */
2306 	Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2307 
2308 	/* Disable dummy QTD's next and alternate next pointers */
2309 	Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2310 	Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2311 
2312 	return (qtd);
2313 }
2314 
2315 
2316 /*
2317  * ehci_fill_in_qtd:
2318  *
2319  * Fill in the fields of a Transfer Descriptor (QTD).
2320  */
/*ARGSUSED*/
static void
ehci_fill_in_qtd(
	ehci_state_t		*ehcip,
	ehci_qtd_t		*qtd,
	uint32_t		qtd_ctrl,
	uint32_t		qtd_iommu_cbp,
	size_t			qtd_length,
	uint32_t		qtd_ctrl_phase,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	uint32_t		buf_addr = qtd_iommu_cbp;
	size_t			buf_len = qtd_length;
	uint32_t		ctrl = qtd_ctrl;
	uint_t			i = 0;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x buf 0x%x "
	    "len 0x%lx", qtd, qtd_ctrl, qtd_iommu_cbp, qtd_length);

	/* Assert that the qtd to be filled in is a dummy */
	ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);

	/* Change QTD's state Active */
	Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);

	/* Set the total length data transfer */
	ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
	    & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);

	/*
	 * Save the starting buffer address used and
	 * length of data that will be transfered in
	 * the current QTD.
	 */
	Set_QTD(qtd->qtd_xfer_addr, buf_addr);
	Set_QTD(qtd->qtd_xfer_len, buf_len);

	/*
	 * Spread the buffer across the QTD's buffer pointers, one
	 * EHCI_MAX_QTD_BUF_SIZE chunk per pointer.  No bounds check on
	 * i here: callers limit qtd_length to EHCI_MAX_QTD_XFER_SIZE,
	 * which presumably fits the QTD's buffer-pointer array —
	 * TODO(review): confirm against the ehci_qtd_t definition.
	 */
	while (buf_len) {
		/* Update the beginning of the buffer */
		Set_QTD(qtd->qtd_buf[i], buf_addr);

		if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
			break;
		} else {
			buf_len -= EHCI_MAX_QTD_BUF_SIZE;
			buf_addr += EHCI_MAX_QTD_BUF_SIZE;
		}

		i++;
	}

	/*
	 * Setup the alternate next qTD pointer if appropriate.  The alternate
	 * qtd is currently pointing to a QTD that is not yet linked, but will
	 * be in the very near future.	If a short_xfer occurs in this
	 * situation , the HC will automatically skip this QH.	Eventually
	 * everything will be placed and the alternate_qtd will be valid QTD.
	 * For more information on alternate qtds look at section 3.5.2 in the
	 * EHCI spec.
	 */
	if (tw->tw_alt_qtd != NULL) {
		Set_QTD(qtd->qtd_alt_next_qtd,
		    (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
		    EHCI_QTD_ALT_NEXT_QTD_PTR));
	}

	/*
	 * For control, bulk and interrupt QTD, now
	 * enable current QTD by setting active bit.
	 */
	Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));

	/*
	 * For Control Xfer, qtd_ctrl_phase is a valid filed.
	 */
	if (qtd_ctrl_phase) {
		Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
	}

	/* Set the transfer wrapper */
	ASSERT(tw != NULL);
	ASSERT(tw->tw_id != NULL);

	/* Record the owning wrapper's 32-bit ID for completion lookup. */
	Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
}
2408 
2409 
2410 /*
2411  * ehci_insert_qtd_on_tw:
2412  *
2413  * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
2414  * are allocated for this transfer. Insert a QTD  onto this list. The  list
2415  * of QTD's does not include the dummy QTD that is at the end of the list of
2416  * QTD's for the endpoint.
2417  */
2418 static void
2419 ehci_insert_qtd_on_tw(
2420 	ehci_state_t		*ehcip,
2421 	ehci_trans_wrapper_t	*tw,
2422 	ehci_qtd_t		*qtd)
2423 {
2424 	/*
2425 	 * Set the next pointer to NULL because
2426 	 * this is the last QTD on list.
2427 	 */
2428 	Set_QTD(qtd->qtd_tw_next_qtd, NULL);
2429 
2430 	if (tw->tw_qtd_head == NULL) {
2431 		ASSERT(tw->tw_qtd_tail == NULL);
2432 		tw->tw_qtd_head = qtd;
2433 		tw->tw_qtd_tail = qtd;
2434 	} else {
2435 		ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2436 
2437 		ASSERT(dummy != NULL);
2438 		ASSERT(dummy != qtd);
2439 		ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2440 
2441 		/* Add the qtd to the end of the list */
2442 		Set_QTD(dummy->qtd_tw_next_qtd,
2443 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2444 
2445 		tw->tw_qtd_tail = qtd;
2446 
2447 		ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
2448 	}
2449 }
2450 
2451 
2452 /*
2453  * ehci_insert_qtd_into_active_qtd_list:
2454  *
2455  * Insert current QTD into active QTD list.
2456  */
2457 static void
2458 ehci_insert_qtd_into_active_qtd_list(
2459 	ehci_state_t		*ehcip,
2460 	ehci_qtd_t		*qtd)
2461 {
2462 	ehci_qtd_t		*curr_qtd, *next_qtd;
2463 
2464 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2465 
2466 	curr_qtd = ehcip->ehci_active_qtd_list;
2467 
2468 	/* Insert this QTD into QTD Active List */
2469 	if (curr_qtd) {
2470 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2471 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2472 
2473 		while (next_qtd) {
2474 			curr_qtd = next_qtd;
2475 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2476 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2477 		}
2478 
2479 		Set_QTD(qtd->qtd_active_qtd_prev,
2480 		    ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2481 
2482 		Set_QTD(curr_qtd->qtd_active_qtd_next,
2483 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2484 	} else {
2485 		ehcip->ehci_active_qtd_list = qtd;
2486 		Set_QTD(qtd->qtd_active_qtd_next, NULL);
2487 		Set_QTD(qtd->qtd_active_qtd_prev, NULL);
2488 	}
2489 }
2490 
2491 
2492 /*
2493  * ehci_remove_qtd_from_active_qtd_list:
2494  *
2495  * Remove current QTD from the active QTD list.
2496  *
2497  * NOTE: This function is also called from POLLED MODE.
2498  */
void
ehci_remove_qtd_from_active_qtd_list(
	ehci_state_t		*ehcip,
	ehci_qtd_t		*qtd)
{
	ehci_qtd_t		*curr_qtd, *prev_qtd, *next_qtd;

	/* No mutex assertion: this path is also used in polled mode. */
	ASSERT(qtd != NULL);

	curr_qtd = ehcip->ehci_active_qtd_list;

	/* Linear search for the QTD; links are IO addresses, hence the
	 * iommu-to-cpu translation on every hop. */
	while ((curr_qtd) && (curr_qtd != qtd)) {
		curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(curr_qtd->qtd_active_qtd_next));
	}

	if ((curr_qtd) && (curr_qtd == qtd)) {
		prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(curr_qtd->qtd_active_qtd_prev));
		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(curr_qtd->qtd_active_qtd_next));

		/* Splice curr_qtd out of the doubly-linked list. */
		if (prev_qtd) {
			Set_QTD(prev_qtd->qtd_active_qtd_next,
			    Get_QTD(curr_qtd->qtd_active_qtd_next));
		} else {
			/* Removing the head: advance the list anchor. */
			ehcip->ehci_active_qtd_list = next_qtd;
		}

		if (next_qtd) {
			Set_QTD(next_qtd->qtd_active_qtd_prev,
			    Get_QTD(curr_qtd->qtd_active_qtd_prev));
		}
	} else {
		/* Not found is non-fatal: just log it. */
		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_remove_qtd_from_active_qtd_list: "
			"Unable to find QTD in active_qtd_list");
	}
}
2538 
2539 
2540 /*
2541  * ehci_traverse_qtds:
2542  *
2543  * Traverse the list of QTDs for given pipe using transfer wrapper.  Since
2544  * the endpoint is marked as Halted, the Host Controller (HC) is no longer
2545  * accessing these QTDs. Remove all the QTDs that are attached to endpoint.
2546  */
static void
ehci_traverse_qtds(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*next_tw;
	ehci_qtd_t		*qtd;
	ehci_qtd_t		*next_qtd;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_traverse_qtds:");

	/* Process the transfer wrappers for this pipe */
	next_tw = pp->pp_tw_head;

	while (next_tw) {
		/* Stop the transfer timer */
		ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);

		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;

		/* Walk through each QTD for this transfer wrapper */
		while (qtd) {
			/* Remove this QTD from active QTD list */
			ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);

			/* Fetch the successor before the QTD is freed. */
			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
			    Get_QTD(qtd->qtd_tw_next_qtd));

			/* Deallocate this QTD */
			ehci_deallocate_qtd(ehcip, qtd);

			qtd = next_qtd;
		}

		next_tw = next_tw->tw_next;
	}

	/* Clear current qtd pointer */
	Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);

	/* Point the QH at its dummy so the HC sees an empty queue. */
	Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
}
2594 
2595 
2596 /*
2597  * ehci_deallocate_qtd:
2598  *
2599  * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2600  *
2601  * NOTE: This function is also called from POLLED MODE.
2602  */
void
ehci_deallocate_qtd(
	ehci_state_t		*ehcip,
	ehci_qtd_t		*old_qtd)
{
	ehci_trans_wrapper_t	*tw = NULL;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);

	/*
	 * Obtain the transaction wrapper and tw will be
	 * NULL for the dummy QTDs.
	 */
	if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
		tw = (ehci_trans_wrapper_t *)
		EHCI_LOOKUP_ID((uint32_t)
		Get_QTD(old_qtd->qtd_trans_wrapper));

		ASSERT(tw != NULL);
	}

	/*
	 * If QTD's transfer wrapper is NULL, don't access its TW.
	 * Just free the QTD.
	 */
	if (tw) {
		ehci_qtd_t	*qtd, *next_qtd;

		qtd = tw->tw_qtd_head;

		if (old_qtd != qtd) {
			/*
			 * Removing from the middle/tail: walk the singly
			 * linked tw list until next_qtd is old_qtd, so qtd
			 * is its predecessor.
			 */
			next_qtd = ehci_qtd_iommu_to_cpu(
				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));

			while (next_qtd != old_qtd) {
				qtd = next_qtd;
				next_qtd = ehci_qtd_iommu_to_cpu(
				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
			}

			/* Unlink old_qtd from the predecessor. */
			Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);

			if (qtd->qtd_tw_next_qtd == NULL) {
				tw->tw_qtd_tail = qtd;
			}
		} else {
			/* Removing the head: advance head (and fix tail). */
			tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
			    ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));

			if (tw->tw_qtd_head == NULL) {
				tw->tw_qtd_tail = NULL;
			}
		}
	}

	/* Scrub the descriptor and return it to the pool as free. */
	bzero((void *)old_qtd, sizeof (ehci_qtd_t));
	Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
}
2665 
2666 
2667 /*
2668  * ehci_qtd_cpu_to_iommu:
2669  *
2670  * This function converts for the given Transfer Descriptor (QTD) CPU address
2671  * to IO address.
2672  *
2673  * NOTE: This function is also called from POLLED MODE.
2674  */
2675 uint32_t
2676 ehci_qtd_cpu_to_iommu(
2677 	ehci_state_t	*ehcip,
2678 	ehci_qtd_t	*addr)
2679 {
2680 	uint32_t	td;
2681 
2682 	td  = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2683 	    (uint32_t)((uintptr_t)addr -
2684 	    (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2685 
2686 	ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2687 	    (uint32_t) (sizeof (ehci_qtd_t) *
2688 	    (addr - ehcip->ehci_qtd_pool_addr))) ==
2689 	    (ehcip->ehci_qtd_pool_cookie.dmac_address +
2690 	    (uint32_t)((uintptr_t)addr - (uintptr_t)
2691 	    (ehcip->ehci_qtd_pool_addr))));
2692 
2693 	ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2694 	ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2695 	    sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2696 
2697 	return (td);
2698 }
2699 
2700 
2701 /*
2702  * ehci_qtd_iommu_to_cpu:
2703  *
2704  * This function converts for the given Transfer Descriptor (QTD) IO address
2705  * to CPU address.
2706  *
2707  * NOTE: This function is also called from POLLED MODE.
2708  */
2709 ehci_qtd_t *
2710 ehci_qtd_iommu_to_cpu(
2711 	ehci_state_t	*ehcip,
2712 	uintptr_t	addr)
2713 {
2714 	ehci_qtd_t	*qtd;
2715 
2716 	if (addr == NULL) {
2717 
2718 		return (NULL);
2719 	}
2720 
2721 	qtd = (ehci_qtd_t *)((uintptr_t)
2722 	    (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2723 	    (uintptr_t)ehcip->ehci_qtd_pool_addr);
2724 
2725 	ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2726 	ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2727 	    (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2728 
2729 	return (qtd);
2730 }
2731 
2732 /*
2733  * ehci_allocate_tds_for_tw_resources:
2734  *
2735  * Allocate n Transfer Descriptors (TD) from the TD buffer pool and places it
2736  * into the TW.  Also chooses the correct alternate qtd when required.	It is
2737  * used for hardware short transfer support.  For more information on
2738  * alternate qtds look at section 3.5.2 in the EHCI spec.
2739  * Here is how each alternate qtd's are used:
2740  *
2741  * Bulk: used fully.
2742  * Intr: xfers only require 1 QTD, so alternate qtds are never used.
2743  * Ctrl: Should not use alternate QTD
2744  * Isoch: Doesn't support short_xfer nor does it use QTD
2745  *
2746  * Returns USB_NO_RESOURCES if it was not able to allocate all the requested TD
2747  * otherwise USB_SUCCESS.
2748  */
2749 int
2750 ehci_allocate_tds_for_tw(
2751 	ehci_state_t		*ehcip,
2752 	ehci_pipe_private_t	*pp,
2753 	ehci_trans_wrapper_t	*tw,
2754 	size_t			qtd_count)
2755 {
2756 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
2757 	uchar_t			attributes;
2758 	ehci_qtd_t		*qtd;
2759 	uint32_t		qtd_addr;
2760 	int			i;
2761 	int			error = USB_SUCCESS;
2762 
2763 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2764 
2765 	for (i = 0; i < qtd_count; i += 1) {
2766 		qtd = ehci_allocate_qtd_from_pool(ehcip);
2767 		if (qtd == NULL) {
2768 			error = USB_NO_RESOURCES;
2769 			USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2770 			    "ehci_allocate_qtds_for_tw: "
2771 			    "Unable to allocate %lu QTDs",
2772 			    qtd_count);
2773 			break;
2774 		}
2775 		if (i > 0) {
2776 			qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2777 			    tw->tw_qtd_free_list);
2778 			Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2779 		}
2780 		tw->tw_qtd_free_list = qtd;
2781 
2782 		/*
2783 		 * Save the second one as a pointer to the new dummy 1.
2784 		 * It is used later for the alt_qtd_ptr.  Xfers with only
2785 		 * one qtd do not need alt_qtd_ptr.
2786 		 * The tds's are allocated and put into a stack, that is
2787 		 * why the second qtd allocated will turn out to be the
2788 		 * new dummy 1.
2789 		 */
2790 		if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2791 			tw->tw_alt_qtd = qtd;
2792 		}
2793 	}
2794 
2795 	return (error);
2796 }
2797 
2798 /*
2799  * ehci_allocate_tw_resources:
2800  *
2801  * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD)
2802  * from the QTD buffer pool and places it into the TW.	It does an all
2803  * or nothing transaction.
2804  *
2805  * Returns NULL if there is insufficient resources otherwise TW.
2806  */
2807 static ehci_trans_wrapper_t *
2808 ehci_allocate_tw_resources(
2809 	ehci_state_t		*ehcip,
2810 	ehci_pipe_private_t	*pp,
2811 	size_t			tw_length,
2812 	usb_flags_t		usb_flags,
2813 	size_t			qtd_count)
2814 {
2815 	ehci_trans_wrapper_t	*tw;
2816 
2817 	tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2818 
2819 	if (tw == NULL) {
2820 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2821 		    "ehci_allocate_tw_resources: Unable to allocate TW");
2822 	} else {
2823 		if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2824 		    USB_SUCCESS) {
2825 			tw->tw_num_qtds = qtd_count;
2826 		} else {
2827 			ehci_deallocate_tw(ehcip, pp, tw);
2828 			tw = NULL;
2829 		}
2830 	}
2831 
2832 	return (tw);
2833 }
2834 
2835 
2836 /*
2837  * ehci_free_tw_td_resources:
2838  *
2839  * Free all allocated resources for Transaction Wrapper (TW).
2840  * Does not free the TW itself.
2841  *
2842  * Returns NULL if there is insufficient resources otherwise TW.
2843  */
2844 static void
2845 ehci_free_tw_td_resources(
2846 	ehci_state_t		*ehcip,
2847 	ehci_trans_wrapper_t	*tw)
2848 {
2849 	ehci_qtd_t		*qtd = NULL;
2850 	ehci_qtd_t		*temp_qtd = NULL;
2851 
2852 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2853 	    "ehci_free_tw_td_resources: tw = 0x%p", tw);
2854 
2855 	qtd = tw->tw_qtd_free_list;
2856 	while (qtd != NULL) {
2857 		/* Save the pointer to the next qtd before destroying it */
2858 		temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2859 		    Get_QTD(qtd->qtd_tw_next_qtd));
2860 		ehci_deallocate_qtd(ehcip, qtd);
2861 		qtd = temp_qtd;
2862 	}
2863 	tw->tw_qtd_free_list = NULL;
2864 }
2865 
2866 /*
2867  * Transfer Wrapper functions
2868  *
2869  * ehci_create_transfer_wrapper:
2870  *
2871  * Create a Transaction Wrapper (TW) and this involves the allocating of DMA
2872  * resources.
2873  */
static ehci_trans_wrapper_t *
ehci_create_transfer_wrapper(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	size_t			length,
	uint_t			usb_flags)
{
	ddi_device_acc_attr_t	dev_attr;
	int			result;
	size_t			real_length;
	uint_t			ccount;	/* Cookie count */
	ehci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Allocate space for the transfer wrapper */
	tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), KM_NOSLEEP);

	if (tw == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
		    "ehci_create_transfer_wrapper: kmem_zalloc failed");

		return (NULL);
	}

	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	/* Allocate the DMA handle */
	result = ddi_dma_alloc_handle(ehcip->ehci_dip,
	    &ehcip->ehci_dma_attr, DDI_DMA_DONTWAIT, 0, &tw->tw_dmahandle);

	if (result != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_create_transfer_wrapper: Alloc handle failed");

		/* Unwind: only the wrapper itself exists so far. */
		kmem_free(tw, sizeof (ehci_trans_wrapper_t));

		return (NULL);
	}

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;

	/* no need for swapping the raw data */
	dev_attr.devacc_attr_endian_flags  = DDI_NEVERSWAP_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the memory */
	result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);

	if (result != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_create_transfer_wrapper: dma_mem_alloc fail");

		/* Unwind: DMA handle, then wrapper. */
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (ehci_trans_wrapper_t));

		return (NULL);
	}

	/* DDI may round the allocation up; never down. */
	ASSERT(real_length >= length);

	/* Bind the handle */
	result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &tw->tw_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/*
		 * The driver stores only one cookie (tw_cookie), so a
		 * multi-cookie binding cannot be used.
		 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
			    "ehci_create_transfer_wrapper: More than 1 cookie");

			/* Unwind: unbind, free memory, handle, wrapper. */
			result = ddi_dma_unbind_handle(tw->tw_dmahandle);
			ASSERT(result == DDI_SUCCESS);

			ddi_dma_mem_free(&tw->tw_accesshandle);
			ddi_dma_free_handle(&tw->tw_dmahandle);
			kmem_free(tw, sizeof (ehci_trans_wrapper_t));

			return (NULL);
		}
	} else {
		/* Bind failed outright: decode the error, then unwind. */
		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		ddi_dma_mem_free(&tw->tw_accesshandle);
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (ehci_trans_wrapper_t));

		return (NULL);
	}

	/*
	 * Only allow one wrapper to be added at a time. Insert the
	 * new transaction wrapper into the list for this pipe.
	 */
	if (pp->pp_tw_head == NULL) {
		pp->pp_tw_head = tw;
		pp->pp_tw_tail = tw;
	} else {
		pp->pp_tw_tail->tw_next = tw;
		pp->pp_tw_tail = tw;
	}

	/* Store the transfer length */
	tw->tw_length = length;

	/* Store a back pointer to the pipe private structure */
	tw->tw_pipe_private = pp;

	/* Store the transfer type - synchronous or asynchronous */
	tw->tw_flags = usb_flags;

	/* Get and Store 32bit ID */
	tw->tw_id = EHCI_GET_ID((void *)tw);

	ASSERT(tw->tw_id != NULL);

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_create_transfer_wrapper: tw = 0x%p", tw);

	return (tw);
}
3002 
3003 
3004 /*
3005  * ehci_start_xfer_timer:
3006  *
3007  * Start the timer for the control, bulk and for one time interrupt
3008  * transfers.
3009  */
3010 /* ARGSUSED */
3011 static void
3012 ehci_start_xfer_timer(
3013 	ehci_state_t		*ehcip,
3014 	ehci_pipe_private_t	*pp,
3015 	ehci_trans_wrapper_t	*tw)
3016 {
3017 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3018 	    "ehci_start_xfer_timer: tw = 0x%p", tw);
3019 
3020 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3021 
3022 	/*
3023 	 * The timeout handling is done only for control, bulk and for
3024 	 * one time Interrupt transfers.
3025 	 *
3026 	 * NOTE: If timeout is zero; Assume infinite timeout and don't
3027 	 * insert this transfer on the timeout list.
3028 	 */
3029 	if (tw->tw_timeout) {
3030 		/*
3031 		 * Add this transfer wrapper to the head of the pipe's
3032 		 * tw timeout list.
3033 		 */
3034 		if (pp->pp_timeout_list) {
3035 			tw->tw_timeout_next = pp->pp_timeout_list;
3036 		}
3037 
3038 		pp->pp_timeout_list = tw;
3039 		ehci_start_timer(ehcip, pp);
3040 	}
3041 }
3042 
3043 
3044 /*
3045  * ehci_stop_xfer_timer:
3046  *
 * Stop the timer for the control, bulk and for one time interrupt
 * transfers.
3049  */
void
ehci_stop_xfer_timer(
	ehci_state_t		*ehcip,
	ehci_trans_wrapper_t	*tw,
	uint_t			flag)
{
	ehci_pipe_private_t	*pp;
	timeout_id_t		timer_id;

	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
	    "ehci_stop_xfer_timer: tw = 0x%p", tw);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Obtain the pipe private structure */
	pp = tw->tw_pipe_private;

	/* check if the timeout tw list is empty */
	if (pp->pp_timeout_list == NULL) {

		return;
	}

	switch (flag) {
	case EHCI_REMOVE_XFER_IFLAST:
		/* Only remove when this is the tw's last remaining QTD. */
		if (tw->tw_qtd_head != tw->tw_qtd_tail) {
			break;
		}

		/* FALLTHRU */
	case EHCI_REMOVE_XFER_ALWAYS:
		ehci_remove_tw_from_timeout_list(ehcip, tw);

		/* If that emptied the list, cancel the pipe's timeout. */
		if ((pp->pp_timeout_list == NULL) &&
		    (pp->pp_timer_id)) {

			timer_id = pp->pp_timer_id;

			/* Reset the timer id to zero */
			pp->pp_timer_id = 0;

			/*
			 * Drop the mutex around untimeout(): the timeout
			 * handler takes this mutex, so holding it here
			 * could deadlock.  Clearing pp_timer_id first
			 * tells a concurrently-firing handler to bail out.
			 */
			mutex_exit(&ehcip->ehci_int_mutex);

			(void) untimeout(timer_id);

			mutex_enter(&ehcip->ehci_int_mutex);
		}
		break;
	default:
		break;
	}
}
3102 
3103 
3104 /*
3105  * ehci_xfer_timeout_handler:
3106  *
3107  * Control or bulk transfer timeout handler.
3108  */
static void
ehci_xfer_timeout_handler(void *arg)
{
	usba_pipe_handle_data_t	*ph = (usba_pipe_handle_data_t *)arg;
	ehci_state_t		*ehcip = ehci_obtain_state(
				    ph->p_usba_device->usb_root_hub_dip);
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*tw, *next;
	ehci_trans_wrapper_t	*expire_xfer_list = NULL;
	ehci_qtd_t		*qtd;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
	    "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p", ehcip, ph);

	mutex_enter(&ehcip->ehci_int_mutex);

	/*
	 * Check whether still timeout handler is valid.
	 * (ehci_stop_xfer_timer() zeroes pp_timer_id before calling
	 * untimeout(); a zero id here means we raced with cancellation.)
	 */
	if (pp->pp_timer_id != 0) {

		/* Reset the timer id to zero */
		pp->pp_timer_id = 0;
	} else {
		mutex_exit(&ehcip->ehci_int_mutex);

		return;
	}

	/* Get the transfer timeout list head */
	tw = pp->pp_timeout_list;

	/*
	 * First pass: age every pending transfer by one tick and move
	 * the expired ones to a private list; do not call back yet.
	 */
	while (tw) {

		/* Get the transfer on the timeout list */
		next = tw->tw_timeout_next;

		tw->tw_timeout--;

		if (tw->tw_timeout <= 0) {

			/* remove the tw from the timeout list */
			ehci_remove_tw_from_timeout_list(ehcip, tw);

			/* remove QTDs from active QTD list */
			qtd = tw->tw_qtd_head;
			while (qtd) {
				ehci_remove_qtd_from_active_qtd_list(
					ehcip, qtd);

				/* Get the next QTD from the wrapper */
				qtd = ehci_qtd_iommu_to_cpu(ehcip,
				    Get_QTD(qtd->qtd_tw_next_qtd));
			}

			/*
			 * Preserve the order to the requests
			 * started time sequence.
			 */
			tw->tw_timeout_next = expire_xfer_list;
			expire_xfer_list = tw;
		}

		tw = next;
	}

	/*
	 * The timer should be started before the callbacks.
	 * There is always a chance that ehci interrupts come
	 * in when we release the mutex while calling the tw back.
	 * To keep an accurate timeout it should be restarted
	 * as soon as possible.
	 */
	ehci_start_timer(ehcip, pp);

	/* Get the expired transfer timeout list head */
	tw = expire_xfer_list;

	/*
	 * Second pass: report USB_CR_TIMEOUT for each expired transfer.
	 */
	while (tw) {

		/* Get the next tw on the expired transfer timeout list */
		next = tw->tw_timeout_next;

		/*
		 * The error handle routine will release the mutex when
		 * calling back to USBA. But this will not cause any race.
		 * We do the callback and are relying on ehci_pipe_cleanup()
		 * to halt the queue head and clean up since we should not
		 * block in timeout context.
		 */
		ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);

		tw = next;
	}
	mutex_exit(&ehcip->ehci_int_mutex);
}
3205 
3206 
3207 /*
3208  * ehci_remove_tw_from_timeout_list:
3209  *
3210  * Remove Control or bulk transfer from the timeout list.
3211  */
3212 static void
3213 ehci_remove_tw_from_timeout_list(
3214 	ehci_state_t		*ehcip,
3215 	ehci_trans_wrapper_t	*tw)
3216 {
3217 	ehci_pipe_private_t	*pp;
3218 	ehci_trans_wrapper_t	*prev, *next;
3219 
3220 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3221 	    "ehci_remove_tw_from_timeout_list: tw = 0x%p", tw);
3222 
3223 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3224 
3225 	/* Obtain the pipe private structure */
3226 	pp = tw->tw_pipe_private;
3227 
3228 	if (pp->pp_timeout_list) {
3229 		if (pp->pp_timeout_list == tw) {
3230 			pp->pp_timeout_list = tw->tw_timeout_next;
3231 
3232 			tw->tw_timeout_next = NULL;
3233 		} else {
3234 			prev = pp->pp_timeout_list;
3235 			next = prev->tw_timeout_next;
3236 
3237 			while (next && (next != tw)) {
3238 				prev = next;
3239 				next = next->tw_timeout_next;
3240 			}
3241 
3242 			if (next == tw) {
3243 				prev->tw_timeout_next =
3244 					next->tw_timeout_next;
3245 				tw->tw_timeout_next = NULL;
3246 			}
3247 		}
3248 	}
3249 }
3250 
3251 
3252 /*
3253  * ehci_start_timer:
3254  *
3255  * Start the pipe's timer
3256  */
3257 static void
3258 ehci_start_timer(
3259 	ehci_state_t		*ehcip,
3260 	ehci_pipe_private_t	*pp)
3261 {
3262 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3263 	    "ehci_start_timer: ehcip = 0x%p, pp = 0x%p", ehcip, pp);
3264 
3265 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3266 
3267 	/*
3268 	 * Start the pipe's timer only if currently timer is not
3269 	 * running and if there are any transfers on the timeout
3270 	 * list. This timer will be per pipe.
3271 	 */
3272 	if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3273 		pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3274 		    (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3275 	}
3276 }
3277 
3278 /*
3279  * ehci_deallocate_tw:
3280  *
 * Deallocate a Transfer Wrapper (TW); this involves the freeing of its
 * DMA resources.
3283  */
void
ehci_deallocate_tw(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	ehci_trans_wrapper_t	*prev, *next;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_deallocate_tw: tw = 0x%p", tw);

	/*
	 * Only remove the transfer wrapper if it has no Host
	 * Controller (HC) Transfer Descriptors (QTD) associated
	 * with it.  If QTDs are still queued on this TW, keep it:
	 * it will be deallocated on a later call once the last
	 * QTD has been reclaimed.
	 */
	if (tw->tw_qtd_head) {
		ASSERT(tw->tw_qtd_tail != NULL);

		return;
	}

	ASSERT(tw->tw_qtd_tail == NULL);

	/* Make sure we return all the unused qtd's to the pool as well */
	ehci_free_tw_td_resources(ehcip, tw);

	/*
	 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to
	 * given TW then set the head and  tail  equal to NULL.
	 * Otherwise search for this TW in the linked TW's list
	 * and then remove this TW from the list.
	 */
	if (pp->pp_tw_head == tw) {
		if (pp->pp_tw_tail == tw) {
			/* tw was the only element; the list becomes empty */
			pp->pp_tw_head = NULL;
			pp->pp_tw_tail = NULL;
		} else {
			pp->pp_tw_head = tw->tw_next;
		}
	} else {
		/* Walk the singly linked list to find tw's predecessor */
		prev = pp->pp_tw_head;
		next = prev->tw_next;

		while (next && (next != tw)) {
			prev = next;
			next = next->tw_next;
		}

		if (next == tw) {
			prev->tw_next = next->tw_next;

			/* Fix the tail pointer if tw was the last element */
			if (pp->pp_tw_tail == tw) {
				pp->pp_tw_tail = prev;
			}
		}
	}

	/*
	 * Make sure that, this TW has been removed
	 * from the timeout list.
	 */
	ehci_remove_tw_from_timeout_list(ehcip, tw);

	/* Deallocate this TW */
	ehci_free_tw(ehcip, pp, tw);
}
3351 
3352 
3353 /*
3354  * ehci_free_dma_resources:
3355  *
3356  * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3357  *
3358  * NOTE: This function is also called from POLLED MODE.
3359  */
3360 void
3361 ehci_free_dma_resources(
3362 	ehci_state_t		*ehcip,
3363 	usba_pipe_handle_data_t	*ph)
3364 {
3365 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3366 	ehci_trans_wrapper_t	*head_tw = pp->pp_tw_head;
3367 	ehci_trans_wrapper_t	*next_tw, *tw;
3368 
3369 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3370 	    "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3371 
3372 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3373 
3374 	/* Process the Transfer Wrappers */
3375 	next_tw = head_tw;
3376 	while (next_tw) {
3377 		tw = next_tw;
3378 		next_tw = tw->tw_next;
3379 
3380 		USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3381 		    "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3382 
3383 		ehci_free_tw(ehcip, pp, tw);
3384 	}
3385 
3386 	/* Adjust the head and tail pointers */
3387 	pp->pp_tw_head = NULL;
3388 	pp->pp_tw_tail = NULL;
3389 }
3390 
3391 
3392 /*
3393  * ehci_free_tw:
3394  *
3395  * Free the Transfer Wrapper (TW).
3396  */
/*ARGSUSED*/
static void
ehci_free_tw(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	int	rval;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_free_tw: tw = 0x%p", tw);

	ASSERT(tw != NULL);
	ASSERT(tw->tw_id != NULL);

	/* Free 32bit ID */
	EHCI_FREE_ID((uint32_t)tw->tw_id);

	/* rval is only consumed by the ASSERT on DEBUG builds */
	rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
	ASSERT(rval == DDI_SUCCESS);

	/* Release the DMA memory first, then the DMA handle itself */
	ddi_dma_mem_free(&tw->tw_accesshandle);
	ddi_dma_free_handle(&tw->tw_dmahandle);

	/* Free transfer wrapper */
	kmem_free(tw, sizeof (ehci_trans_wrapper_t));
}
3424 
3425 
3426 /*
3427  * Miscellaneous functions
3428  */
3429 
3430 /*
3431  * ehci_allocate_intr_in_resource
3432  *
3433  * Allocate interrupt request structure for the interrupt IN transfer.
3434  */
/*ARGSUSED*/
int
ehci_allocate_intr_in_resource(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
	usb_intr_req_t		*curr_intr_reqp;
	usb_opaque_t		client_periodic_in_reqp;
	size_t			length = 0;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_allocate_intr_in_resource:"
	    "pp = 0x%p tw = 0x%p flags = 0x%x", pp, tw, flags);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
	/* The TW must not already own a transfer request */
	ASSERT(tw->tw_curr_xfer_reqp == NULL);

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it a periodic IN request and periodic request is NULL,
	 * allocate corresponding usb periodic IN request for the
	 * current periodic polling request and copy the information
	 * from the saved periodic request structure.
	 */
	if (client_periodic_in_reqp) {

		/* Get the interrupt transfer length */
		length = ((usb_intr_req_t *)
		    client_periodic_in_reqp)->intr_len;

		/* Duplicate the client's saved request for this poll */
		curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
	} else {
		/*
		 * No saved client request (polled mode): allocate a
		 * request with a zero-length buffer; intr_len is
		 * patched to wMaxPacketSize below.
		 */
		curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
	}

	if (curr_intr_reqp == NULL) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_allocate_intr_in_resource: Interrupt"
		    "request structure allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* For polled mode */
	if (client_periodic_in_reqp == NULL) {
		curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
		curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
	} else {
		/* Check and save the timeout value */
		tw->tw_timeout = (curr_intr_reqp->intr_attributes &
		    USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
	}

	tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
	tw->tw_length = curr_intr_reqp->intr_len;

	/* Account for the new request on the pipe handle */
	mutex_enter(&ph->p_mutex);
	ph->p_req_count++;
	mutex_exit(&ph->p_mutex);

	pp->pp_state = EHCI_PIPE_STATE_ACTIVE;

	return (USB_SUCCESS);
}
3506 
3507 /*
3508  * ehci_pipe_cleanup
3509  *
3510  * Cleanup ehci pipe.
3511  */
3512 void
3513 ehci_pipe_cleanup(
3514 	ehci_state_t		*ehcip,
3515 	usba_pipe_handle_data_t	*ph)
3516 {
3517 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3518 	uint_t			pipe_state = pp->pp_state;
3519 	usb_cr_t		completion_reason;
3520 	usb_ep_descr_t		*eptd = &ph->p_ep;
3521 
3522 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3523 	    "ehci_pipe_cleanup: ph = 0x%p", ph);
3524 
3525 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3526 
3527 	if (EHCI_ISOC_ENDPOINT(eptd)) {
3528 		ehci_isoc_pipe_cleanup(ehcip, ph);
3529 
3530 		return;
3531 	}
3532 
3533 	ASSERT(!servicing_interrupt());
3534 
3535 	/*
3536 	 * Set the QH's status to Halt condition.
3537 	 * If another thread is halting this function will automatically
3538 	 * wait. If a pipe close happens at this time
3539 	 * we will be in lots of trouble.
3540 	 * If we are in an interrupt thread, don't halt, because it may
3541 	 * do a wait_for_sof.
3542 	 */
3543 	ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3544 
3545 	/*
3546 	 * Wait for processing all completed transfers and
3547 	 * to send results to upstream.
3548 	 */
3549 	ehci_wait_for_transfers_completion(ehcip, pp);
3550 
3551 	/* Save the data toggle information */
3552 	ehci_save_data_toggle(ehcip, ph);
3553 
3554 	/*
3555 	 * Traverse the list of QTDs for this pipe using transfer
3556 	 * wrapper. Process these QTDs depending on their status.
3557 	 * And stop the timer of this pipe.
3558 	 */
3559 	ehci_traverse_qtds(ehcip, ph);
3560 
3561 	/* Make sure the timer is not running */
3562 	ASSERT(pp->pp_timer_id == 0);
3563 
3564 	/* Do callbacks for all unfinished requests */
3565 	ehci_handle_outstanding_requests(ehcip, pp);
3566 
3567 	/* Free DMA resources */
3568 	ehci_free_dma_resources(ehcip, ph);
3569 
3570 	switch (pipe_state) {
3571 	case EHCI_PIPE_STATE_CLOSE:
3572 		completion_reason = USB_CR_PIPE_CLOSING;
3573 		break;
3574 	case EHCI_PIPE_STATE_RESET:
3575 	case EHCI_PIPE_STATE_STOP_POLLING:
3576 		/* Set completion reason */
3577 		completion_reason = (pipe_state ==
3578 		    EHCI_PIPE_STATE_RESET) ?
3579 		    USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3580 
3581 		/* Restore the data toggle information */
3582 		ehci_restore_data_toggle(ehcip, ph);
3583 
3584 		/*
3585 		 * Clear the halt bit to restart all the
3586 		 * transactions on this pipe.
3587 		 */
3588 		ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3589 
3590 		/* Set pipe state to idle */
3591 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
3592 
3593 		break;
3594 	}
3595 
3596 	/*
3597 	 * Do the callback for the original client
3598 	 * periodic IN request.
3599 	 */
3600 	if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3601 	    ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3602 	    USB_EP_DIR_IN)) {
3603 
3604 		ehci_do_client_periodic_in_req_callback(
3605 		    ehcip, pp, completion_reason);
3606 	}
3607 }
3608 
3609 
3610 /*
3611  * ehci_wait_for_transfers_completion:
3612  *
3613  * Wait for processing all completed transfers and to send results
3614  * to upstream.
3615  */
static void
ehci_wait_for_transfers_completion(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	ehci_trans_wrapper_t	*next_tw = pp->pp_tw_head;
	clock_t			xfer_cmpl_time_wait;
	ehci_qtd_t		*qtd;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl,
	    "ehci_wait_for_transfers_completion: pp = 0x%p", pp);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* No completions will arrive if the HC is not operational */
	if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {

		return;
	}

	pp->pp_count_done_qtds = 0;

	/* Process the transfer wrappers for this pipe */
	while (next_tw) {
		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;

		/*
		 * Walk through each QTD for this transfer wrapper.
		 * If a QTD still exists, then it is either on done
		 * list or on the QH's list.
		 */
		while (qtd) {
			/* An inactive QTD has completed but not been reaped */
			if (!(Get_QTD(qtd->qtd_ctrl) &
			    EHCI_QTD_CTRL_ACTIVE_XACT)) {
				pp->pp_count_done_qtds++;
			}

			qtd = ehci_qtd_iommu_to_cpu(ehcip,
			    Get_QTD(qtd->qtd_tw_next_qtd));
		}

		next_tw = next_tw->tw_next;
	}

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
	    pp->pp_count_done_qtds);

	/* Nothing outstanding: no need to block */
	if (!pp->pp_count_done_qtds) {

		return;
	}

	/* Get the number of clock ticks to wait */
	xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);

	/*
	 * Block until ehci_check_for_transfers_completion() signals
	 * that all counted QTDs were processed, or until the timeout
	 * expires.  cv_timedwait drops ehci_int_mutex while asleep.
	 */
	(void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
	    &ehcip->ehci_int_mutex,
	    ddi_get_lbolt() + xfer_cmpl_time_wait);

	/* Non-zero count here means we timed out rather than completed */
	if (pp->pp_count_done_qtds) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_wait_for_transfers_completion:"
		    "No transfers completion confirmation received");
	}
}
3683 
3684 /*
3685  * ehci_check_for_transfers_completion:
3686  *
3687  * Check whether anybody is waiting for transfers completion event. If so, send
3688  * this event and also stop initiating any new transfers on this pipe.
3689  */
3690 void
3691 ehci_check_for_transfers_completion(
3692 	ehci_state_t		*ehcip,
3693 	ehci_pipe_private_t	*pp)
3694 {
3695 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3696 	    ehcip->ehci_log_hdl,
3697 	    "ehci_check_for_transfers_completion: pp = 0x%p", pp);
3698 
3699 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3700 
3701 	if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
3702 	    (pp->pp_error == USB_CR_NO_RESOURCES) &&
3703 	    (pp->pp_cur_periodic_req_cnt == 0)) {
3704 
3705 		/* Reset pipe error to zero */
3706 		pp->pp_error = 0;
3707 
3708 		/* Do callback for original request */
3709 		ehci_do_client_periodic_in_req_callback(
3710 		    ehcip, pp, USB_CR_NO_RESOURCES);
3711 	}
3712 
3713 	if (pp->pp_count_done_qtds) {
3714 
3715 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3716 		    "ehci_check_for_transfers_completion:"
3717 		    "count_done_qtds = 0x%x", pp->pp_count_done_qtds);
3718 
3719 		/* Decrement the done qtd count */
3720 		pp->pp_count_done_qtds--;
3721 
3722 		if (!pp->pp_count_done_qtds) {
3723 
3724 			USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3725 			    "ehci_check_for_transfers_completion:"
3726 			    "Sent transfers completion event pp = 0x%p", pp);
3727 
3728 			/* Send the transfer completion signal */
3729 			cv_signal(&pp->pp_xfer_cmpl_cv);
3730 		}
3731 	}
3732 }
3733 
3734 
3735 /*
3736  * ehci_save_data_toggle:
3737  *
3738  * Save the data toggle information.
3739  */
3740 static void
3741 ehci_save_data_toggle(
3742 	ehci_state_t		*ehcip,
3743 	usba_pipe_handle_data_t	*ph)
3744 {
3745 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3746 	usb_ep_descr_t		*eptd = &ph->p_ep;
3747 	uint_t			data_toggle;
3748 	usb_cr_t		error = pp->pp_error;
3749 	ehci_qh_t		*qh = pp->pp_qh;
3750 
3751 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3752 	    ehcip->ehci_log_hdl,
3753 	    "ehci_save_data_toggle: ph = 0x%p", ph);
3754 
3755 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3756 
3757 	/* Reset the pipe error value */
3758 	pp->pp_error = USB_CR_OK;
3759 
3760 	/* Return immediately if it is a control pipe */
3761 	if ((eptd->bmAttributes &