1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  * EHCI Host Controller Driver (EHCI)
29  *
30  * The EHCI driver is a software driver which interfaces to the Universal
31  * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
32  * the Host Controller is defined by the EHCI Host Controller Interface.
33  *
34  * This module contains the main EHCI driver code which handles all USB
35  * transfers, bandwidth allocations and other general functionalities.
36  */
37 
38 #include <sys/usb/hcd/ehci/ehcid.h>
39 #include <sys/usb/hcd/ehci/ehci_intr.h>
40 #include <sys/usb/hcd/ehci/ehci_util.h>
41 #include <sys/usb/hcd/ehci/ehci_isoch.h>
42 
43 /* Adjustable variables for the size of the pools */
44 extern int ehci_qh_pool_size;
45 extern int ehci_qtd_pool_size;
46 
47 
48 /* Endpoint Descriptor (QH) related functions */
49 ehci_qh_t	*ehci_alloc_qh(
50 				ehci_state_t		*ehcip,
51 				usba_pipe_handle_data_t	*ph,
52 				uint_t			flag);
53 static void	ehci_unpack_endpoint(
54 				ehci_state_t		*ehcip,
55 				usba_pipe_handle_data_t	*ph,
56 				ehci_qh_t		*qh);
57 void		ehci_insert_qh(
58 				ehci_state_t		*ehcip,
59 				usba_pipe_handle_data_t	*ph);
60 static void	ehci_insert_async_qh(
61 				ehci_state_t		*ehcip,
62 				ehci_pipe_private_t	*pp);
63 static void	ehci_insert_intr_qh(
64 				ehci_state_t		*ehcip,
65 				ehci_pipe_private_t	*pp);
66 static void	ehci_modify_qh_status_bit(
67 				ehci_state_t		*ehcip,
68 				ehci_pipe_private_t	*pp,
69 				halt_bit_t		action);
70 static void	ehci_halt_hs_qh(
71 				ehci_state_t		*ehcip,
72 				ehci_pipe_private_t	*pp,
73 				ehci_qh_t		*qh);
74 static void	ehci_halt_fls_ctrl_and_bulk_qh(
75 				ehci_state_t		*ehcip,
76 				ehci_pipe_private_t	*pp,
77 				ehci_qh_t		*qh);
78 static void	ehci_clear_tt_buffer(
79 				ehci_state_t		*ehcip,
80 				usba_pipe_handle_data_t	*ph,
81 				ehci_qh_t		*qh);
82 static void	ehci_halt_fls_intr_qh(
83 				ehci_state_t		*ehcip,
84 				ehci_qh_t		*qh);
85 void		ehci_remove_qh(
86 				ehci_state_t		*ehcip,
87 				ehci_pipe_private_t	*pp,
88 				boolean_t		reclaim);
89 static void	ehci_remove_async_qh(
90 				ehci_state_t		*ehcip,
91 				ehci_pipe_private_t	*pp,
92 				boolean_t		reclaim);
93 static void	ehci_remove_intr_qh(
94 				ehci_state_t		*ehcip,
95 				ehci_pipe_private_t	*pp,
96 				boolean_t		reclaim);
97 static void	ehci_insert_qh_on_reclaim_list(
98 				ehci_state_t		*ehcip,
99 				ehci_pipe_private_t	*pp);
100 void		ehci_deallocate_qh(
101 				ehci_state_t		*ehcip,
102 				ehci_qh_t		*old_qh);
103 uint32_t	ehci_qh_cpu_to_iommu(
104 				ehci_state_t		*ehcip,
105 				ehci_qh_t		*addr);
106 ehci_qh_t	*ehci_qh_iommu_to_cpu(
107 				ehci_state_t		*ehcip,
108 				uintptr_t		addr);
109 
110 /* Transfer Descriptor (QTD) related functions */
111 static int	ehci_initialize_dummy(
112 				ehci_state_t		*ehcip,
113 				ehci_qh_t		*qh);
114 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
115 				ehci_state_t		*ehcip,
116 				ehci_pipe_private_t	*pp,
117 				usb_ctrl_req_t		*ctrl_reqp,
118 				usb_flags_t		usb_flags);
119 void		ehci_insert_ctrl_req(
120 				ehci_state_t		*ehcip,
121 				usba_pipe_handle_data_t	*ph,
122 				usb_ctrl_req_t		*ctrl_reqp,
123 				ehci_trans_wrapper_t	*tw,
124 				usb_flags_t		usb_flags);
125 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
126 				ehci_state_t		*ehcip,
127 				ehci_pipe_private_t	*pp,
128 				usb_bulk_req_t		*bulk_reqp,
129 				usb_flags_t		usb_flags);
130 void		ehci_insert_bulk_req(
131 				ehci_state_t		*ehcip,
132 				usba_pipe_handle_data_t	*ph,
133 				usb_bulk_req_t		*bulk_reqp,
134 				ehci_trans_wrapper_t	*tw,
135 				usb_flags_t		flags);
136 int		ehci_start_periodic_pipe_polling(
137 				ehci_state_t		*ehcip,
138 				usba_pipe_handle_data_t	*ph,
139 				usb_opaque_t		periodic_in_reqp,
140 				usb_flags_t		flags);
141 static int	ehci_start_pipe_polling(
142 				ehci_state_t		*ehcip,
143 				usba_pipe_handle_data_t	*ph,
144 				usb_flags_t		flags);
145 static int	ehci_start_intr_polling(
146 				ehci_state_t		*ehcip,
147 				usba_pipe_handle_data_t	*ph,
148 				usb_flags_t		flags);
149 static void	ehci_set_periodic_pipe_polling(
150 				ehci_state_t		*ehcip,
151 				usba_pipe_handle_data_t	*ph);
152 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
153 				ehci_state_t		*ehcip,
154 				usba_pipe_handle_data_t	*ph,
155 				usb_intr_req_t		*intr_reqp,
156 				usb_flags_t		usb_flags);
157 void		ehci_insert_intr_req(
158 				ehci_state_t		*ehcip,
159 				ehci_pipe_private_t	*pp,
160 				ehci_trans_wrapper_t	*tw,
161 				usb_flags_t		flags);
162 int		ehci_stop_periodic_pipe_polling(
163 				ehci_state_t		*ehcip,
164 				usba_pipe_handle_data_t	*ph,
165 				usb_flags_t		flags);
166 int		ehci_insert_qtd(
167 				ehci_state_t		*ehcip,
168 				uint32_t		qtd_ctrl,
169 				size_t			qtd_dma_offs,
170 				size_t			qtd_length,
171 				uint32_t		qtd_ctrl_phase,
172 				ehci_pipe_private_t	*pp,
173 				ehci_trans_wrapper_t	*tw);
174 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
175 				ehci_state_t		*ehcip);
176 static void	ehci_fill_in_qtd(
177 				ehci_state_t		*ehcip,
178 				ehci_qtd_t		*qtd,
179 				uint32_t		qtd_ctrl,
180 				size_t			qtd_dma_offs,
181 				size_t			qtd_length,
182 				uint32_t		qtd_ctrl_phase,
183 				ehci_pipe_private_t	*pp,
184 				ehci_trans_wrapper_t	*tw);
185 static void	ehci_insert_qtd_on_tw(
186 				ehci_state_t		*ehcip,
187 				ehci_trans_wrapper_t	*tw,
188 				ehci_qtd_t		*qtd);
189 static void	ehci_insert_qtd_into_active_qtd_list(
190 				ehci_state_t		*ehcip,
191 				ehci_qtd_t		*curr_qtd);
192 void		ehci_remove_qtd_from_active_qtd_list(
193 				ehci_state_t		*ehcip,
194 				ehci_qtd_t		*curr_qtd);
195 static void	ehci_traverse_qtds(
196 				ehci_state_t		*ehcip,
197 				usba_pipe_handle_data_t	*ph);
198 void		ehci_deallocate_qtd(
199 				ehci_state_t		*ehcip,
200 				ehci_qtd_t		*old_qtd);
201 uint32_t	ehci_qtd_cpu_to_iommu(
202 				ehci_state_t		*ehcip,
203 				ehci_qtd_t		*addr);
204 ehci_qtd_t	*ehci_qtd_iommu_to_cpu(
205 				ehci_state_t		*ehcip,
206 				uintptr_t		addr);
207 
208 /* Transfer Wrapper (TW) functions */
209 static ehci_trans_wrapper_t  *ehci_create_transfer_wrapper(
210 				ehci_state_t		*ehcip,
211 				ehci_pipe_private_t	*pp,
212 				size_t			length,
213 				uint_t			usb_flags);
214 int		ehci_allocate_tds_for_tw(
215 				ehci_state_t		*ehcip,
216 				ehci_pipe_private_t	*pp,
217 				ehci_trans_wrapper_t	*tw,
218 				size_t			qtd_count);
219 static ehci_trans_wrapper_t  *ehci_allocate_tw_resources(
220 				ehci_state_t		*ehcip,
221 				ehci_pipe_private_t	*pp,
222 				size_t			length,
223 				usb_flags_t		usb_flags,
224 				size_t			td_count);
225 static void	ehci_free_tw_td_resources(
226 				ehci_state_t		*ehcip,
227 				ehci_trans_wrapper_t	*tw);
228 static void	ehci_start_xfer_timer(
229 				ehci_state_t		*ehcip,
230 				ehci_pipe_private_t	*pp,
231 				ehci_trans_wrapper_t	*tw);
232 void		ehci_stop_xfer_timer(
233 				ehci_state_t		*ehcip,
234 				ehci_trans_wrapper_t	*tw,
235 				uint_t			flag);
236 static void	ehci_xfer_timeout_handler(void		*arg);
237 static void	ehci_remove_tw_from_timeout_list(
238 				ehci_state_t		*ehcip,
239 				ehci_trans_wrapper_t	*tw);
240 static void	ehci_start_timer(ehci_state_t		*ehcip,
241 				ehci_pipe_private_t	*pp);
242 void		ehci_deallocate_tw(
243 				ehci_state_t		*ehcip,
244 				ehci_pipe_private_t	*pp,
245 				ehci_trans_wrapper_t	*tw);
246 void		ehci_free_dma_resources(
247 				ehci_state_t		*ehcip,
248 				usba_pipe_handle_data_t	*ph);
249 static void	ehci_free_tw(
250 				ehci_state_t		*ehcip,
251 				ehci_pipe_private_t	*pp,
252 				ehci_trans_wrapper_t	*tw);
253 
254 /* Miscellaneous functions */
255 int		ehci_allocate_intr_in_resource(
256 				ehci_state_t		*ehcip,
257 				ehci_pipe_private_t	*pp,
258 				ehci_trans_wrapper_t	*tw,
259 				usb_flags_t		flags);
260 void		ehci_pipe_cleanup(
261 				ehci_state_t		*ehcip,
262 				usba_pipe_handle_data_t	*ph);
263 static void	ehci_wait_for_transfers_completion(
264 				ehci_state_t		*ehcip,
265 				ehci_pipe_private_t	*pp);
266 void		ehci_check_for_transfers_completion(
267 				ehci_state_t		*ehcip,
268 				ehci_pipe_private_t	*pp);
269 static void	ehci_save_data_toggle(
270 				ehci_state_t		*ehcip,
271 				usba_pipe_handle_data_t	*ph);
272 void		ehci_restore_data_toggle(
273 				ehci_state_t		*ehcip,
274 				usba_pipe_handle_data_t	*ph);
275 void		ehci_handle_outstanding_requests(
276 				ehci_state_t		*ehcip,
277 				ehci_pipe_private_t	*pp);
278 void		ehci_deallocate_intr_in_resource(
279 				ehci_state_t		*ehcip,
280 				ehci_pipe_private_t	*pp,
281 				ehci_trans_wrapper_t	*tw);
282 void		ehci_do_client_periodic_in_req_callback(
283 				ehci_state_t		*ehcip,
284 				ehci_pipe_private_t	*pp,
285 				usb_cr_t		completion_reason);
286 void		ehci_hcdi_callback(
287 				usba_pipe_handle_data_t	*ph,
288 				ehci_trans_wrapper_t	*tw,
289 				usb_cr_t		completion_reason);
290 
291 
292 /*
293  * Endpoint Descriptor (QH) manipulations functions
294  */
295 
296 /*
297  * ehci_alloc_qh:
298  *
299  * Allocate an endpoint descriptor (QH)
300  *
301  * NOTE: This function is also called from POLLED MODE.
302  */
303 ehci_qh_t *
304 ehci_alloc_qh(
305 	ehci_state_t		*ehcip,
306 	usba_pipe_handle_data_t	*ph,
307 	uint_t			flag)
308 {
309 	int			i, state;
310 	ehci_qh_t		*qh;
311 
312 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
313 	    "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
314 
315 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
316 
317 	/*
318 	 * If this is for a ISOC endpoint return null.
319 	 * Isochronous uses ITD put directly onto the PFL.
320 	 */
321 	if (ph) {
322 		if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
323 
324 			return (NULL);
325 		}
326 	}
327 
328 	/*
329 	 * The first 63 endpoints in the Endpoint Descriptor (QH)
330 	 * buffer pool are reserved for building interrupt lattice
331 	 * tree. Search for a blank endpoint descriptor in the QH
332 	 * buffer pool.
333 	 */
334 	for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
335 		state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
336 
337 		if (state == EHCI_QH_FREE) {
338 			break;
339 		}
340 	}
341 
342 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
343 	    "ehci_alloc_qh: Allocated %d", i);
344 
345 	if (i == ehci_qh_pool_size) {
346 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  ehcip->ehci_log_hdl,
347 		    "ehci_alloc_qh: QH exhausted");
348 
349 		return (NULL);
350 	} else {
351 		qh = &ehcip->ehci_qh_pool_addr[i];
352 		bzero((void *)qh, sizeof (ehci_qh_t));
353 
354 		USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
355 		    "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
356 
357 		/* Check polled mode flag */
358 		if (flag == EHCI_POLLED_MODE_FLAG) {
359 			Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
360 			Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
361 		}
362 
363 		/* Unpack the endpoint descriptor into a control field */
364 		if (ph) {
365 			if ((ehci_initialize_dummy(ehcip,
366 			    qh)) == USB_NO_RESOURCES) {
367 
368 				Set_QH(qh->qh_state, EHCI_QH_FREE);
369 
370 				return (NULL);
371 			}
372 
373 			ehci_unpack_endpoint(ehcip, ph, qh);
374 
375 			Set_QH(qh->qh_curr_qtd, NULL);
376 			Set_QH(qh->qh_alt_next_qtd,
377 			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
378 
379 			/* Change QH's state Active */
380 			Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
381 		} else {
382 			Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
383 
384 			/* Change QH's state Static */
385 			Set_QH(qh->qh_state, EHCI_QH_STATIC);
386 		}
387 
388 		ehci_print_qh(ehcip, qh);
389 
390 		return (qh);
391 	}
392 }
393 
394 
395 /*
396  * ehci_unpack_endpoint:
397  *
398  * Unpack the information in the pipe handle and create the first byte
399  * of the Host Controller's (HC) Endpoint Descriptor (QH).
400  */
401 static void
402 ehci_unpack_endpoint(
403 	ehci_state_t		*ehcip,
404 	usba_pipe_handle_data_t	*ph,
405 	ehci_qh_t		*qh)
406 {
407 	usb_ep_descr_t		*endpoint = &ph->p_ep;
408 	uint_t			maxpacketsize, addr, xactions;
409 	uint_t			ctrl = 0, status = 0, split_ctrl = 0;
410 	usb_port_status_t	usb_port_status;
411 	usba_device_t		*usba_device = ph->p_usba_device;
412 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
413 
414 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
415 	    "ehci_unpack_endpoint:");
416 
417 	mutex_enter(&usba_device->usb_mutex);
418 	ctrl = usba_device->usb_addr;
419 	usb_port_status = usba_device->usb_port_status;
420 	mutex_exit(&usba_device->usb_mutex);
421 
422 	addr = endpoint->bEndpointAddress;
423 
424 	/* Assign the endpoint's address */
425 	ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
426 
427 	/* Assign the speed */
428 	switch (usb_port_status) {
429 	case USBA_LOW_SPEED_DEV:
430 		ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
431 		break;
432 	case USBA_FULL_SPEED_DEV:
433 		ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
434 		break;
435 	case USBA_HIGH_SPEED_DEV:
436 		ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
437 		break;
438 	}
439 
440 	switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
441 	case USB_EP_ATTR_CONTROL:
442 		/* Assign data toggle information */
443 		ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
444 
445 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
446 			ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
447 		}
448 		/* FALLTHRU */
449 	case USB_EP_ATTR_BULK:
450 		/* Maximum nak counter */
451 		ctrl |= EHCI_QH_CTRL_MAX_NC;
452 
453 		if (usb_port_status == USBA_HIGH_SPEED_DEV) {
454 			/*
455 			 * Perform ping before executing control
456 			 * and bulk transactions.
457 			 */
458 			status = EHCI_QH_STS_DO_PING;
459 		}
460 		break;
461 	case USB_EP_ATTR_INTR:
462 		/* Set start split mask */
463 		split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
464 
465 		/*
466 		 * Set complete split mask for low/full speed
467 		 * usb devices.
468 		 */
469 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
470 			split_ctrl |= ((pp->pp_cmask <<
471 			    EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
472 			    EHCI_QH_SPLIT_CTRL_COMP_MASK);
473 		}
474 		break;
475 	}
476 
477 	/* Get the max transactions per microframe */
478 	xactions = (endpoint->wMaxPacketSize &
479 	    USB_EP_MAX_XACTS_MASK) >>  USB_EP_MAX_XACTS_SHIFT;
480 
481 	switch (xactions) {
482 	case 0:
483 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
484 		break;
485 	case 1:
486 		split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
487 		break;
488 	case 2:
489 		split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
490 		break;
491 	default:
492 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
493 		break;
494 	}
495 
496 	/*
497 	 * For low/full speed devices, program high speed hub
498 	 * address and port number.
499 	 */
500 	if (usb_port_status != USBA_HIGH_SPEED_DEV) {
501 		mutex_enter(&usba_device->usb_mutex);
502 		split_ctrl |= ((usba_device->usb_hs_hub_addr
503 		    << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
504 		    EHCI_QH_SPLIT_CTRL_HUB_ADDR);
505 
506 		split_ctrl |= ((usba_device->usb_hs_hub_port
507 		    << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
508 		    EHCI_QH_SPLIT_CTRL_HUB_PORT);
509 
510 		mutex_exit(&usba_device->usb_mutex);
511 
512 		/* Set start split transaction state */
513 		status = EHCI_QH_STS_DO_START_SPLIT;
514 	}
515 
516 	/* Assign endpoint's maxpacketsize */
517 	maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
518 	maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
519 	ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
520 
521 	Set_QH(qh->qh_ctrl, ctrl);
522 	Set_QH(qh->qh_split_ctrl, split_ctrl);
523 	Set_QH(qh->qh_status, status);
524 }
525 
526 
527 /*
528  * ehci_insert_qh:
529  *
530  * Add the Endpoint Descriptor (QH) into the Host Controller's
531  * (HC) appropriate endpoint list.
532  */
533 void
534 ehci_insert_qh(
535 	ehci_state_t		*ehcip,
536 	usba_pipe_handle_data_t	*ph)
537 {
538 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
539 
540 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
541 	    "ehci_insert_qh: qh=0x%p", (void *)pp->pp_qh);
542 
543 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
544 
545 	switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
546 	case USB_EP_ATTR_CONTROL:
547 	case USB_EP_ATTR_BULK:
548 		ehci_insert_async_qh(ehcip, pp);
549 		ehcip->ehci_open_async_count++;
550 		break;
551 	case USB_EP_ATTR_INTR:
552 		ehci_insert_intr_qh(ehcip, pp);
553 		ehcip->ehci_open_periodic_count++;
554 		break;
555 	case USB_EP_ATTR_ISOCH:
556 		/* ISOCH does not use QH, don't do anything but update count */
557 		ehcip->ehci_open_periodic_count++;
558 		break;
559 	}
560 	ehci_toggle_scheduler(ehcip);
561 }
562 
563 
/*
 * ehci_insert_async_qh:
 *
 * Insert a control/bulk endpoint into the Host Controller's (HC)
 * Asynchronous schedule endpoint list.
 *
 * If the async schedule is empty, the new QH becomes the reclamation
 * head (its "H" bit set) and ASYNCLISTADDR is pointed at it; otherwise
 * the QH is linked into the circular list directly after the current
 * head.  Caller must hold ehci_int_mutex.
 */
static void
ehci_insert_async_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	ehci_qh_t		*qh = pp->pp_qh;
	ehci_qh_t		*async_head_qh;
	ehci_qh_t		*next_qh;
	uintptr_t		qh_addr;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_async_qh:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Make sure this QH is not already in the list */
	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);

	/* Physical (IOMMU) address the controller will use for this QH */
	qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);

	/* Obtain a ptr to the head of the Async schedule list */
	async_head_qh = ehcip->ehci_head_of_async_sched_list;

	if (async_head_qh == NULL) {
		/* Set this QH to be the "head" of the circular list */
		Set_QH(qh->qh_ctrl,
		    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));

		/* Set new QH's link and previous pointer to itself */
		Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
		Set_QH(qh->qh_prev, qh_addr);

		ehcip->ehci_head_of_async_sched_list = qh;

		/* Set the head ptr to the new endpoint */
		Set_OpReg(ehci_async_list_addr, qh_addr);

		/*
		 * For some reason this register might get nulled out by
		 * the Uli M1575 South Bridge. To workaround the hardware
		 * problem, check the value after write and retry if the
		 * last write fails.
		 *
		 * If the ASYNCLISTADDR remains "stuck" after
		 * EHCI_MAX_RETRY retries, then the M1575 is broken
		 * and is stuck in an inconsistent state and is about
		 * to crash the machine with a trn_oor panic when it
		 * does a DMA read from 0x0.  It is better to panic
		 * now rather than wait for the trn_oor crash; this
		 * way Customer Service will have a clean signature
		 * that indicts the M1575 chip rather than a
		 * mysterious and hard-to-diagnose trn_oor panic.
		 */
		if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
		    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
		    (qh_addr != Get_OpReg(ehci_async_list_addr))) {
			int retry = 0;

			Set_OpRegRetry(ehci_async_list_addr, qh_addr, retry);
			if (retry >= EHCI_MAX_RETRY)
				cmn_err(CE_PANIC, "ehci_insert_async_qh:"
				    " ASYNCLISTADDR write failed.");

			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_insert_async_qh: ASYNCLISTADDR "
			    "write failed, retry=%d", retry);
		}
	} else {
		/* The existing list head must still carry the "H" bit */
		ASSERT(Get_QH(async_head_qh->qh_ctrl) &
		    EHCI_QH_CTRL_RECLAIM_HEAD);

		/* Ensure this QH's "H" bit is not set */
		Set_QH(qh->qh_ctrl,
		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));

		/* QH currently following the head, i.e. our new successor */
		next_qh = ehci_qh_iommu_to_cpu(ehcip,
		    Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);

		/* Set new QH's link and previous pointers */
		Set_QH(qh->qh_link_ptr,
		    Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
		Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));

		/* Set next QH's prev pointer */
		Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));

		/*
		 * Set QH Head's link pointer points to new QH.  This is
		 * done last, so the controller never follows a link to a
		 * half-initialized QH while traversing the schedule.
		 */
		Set_QH(async_head_qh->qh_link_ptr,
		    qh_addr | EHCI_QH_LINK_REF_QH);
	}
}
661 
662 
/*
 * ehci_insert_intr_qh:
 *
 * Insert a interrupt endpoint into the Host Controller's (HC) interrupt
 * lattice tree, at the lattice node (pp_pnode) chosen when the pipe was
 * opened.  Caller must hold ehci_int_mutex.
 */
static void
ehci_insert_intr_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	ehci_qh_t		*qh = pp->pp_qh;
	ehci_qh_t		*next_lattice_qh, *lattice_qh;
	uint_t			hnode;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_intr_qh:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Make sure this QH is not already in the list */
	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);

	/*
	 * The appropriate high speed node was found
	 * during the opening of the pipe.
	 */
	hnode = pp->pp_pnode;

	/* Find the lattice endpoint */
	lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];

	/* Find the next lattice endpoint */
	next_lattice_qh = ehci_qh_iommu_to_cpu(
	    ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));

	/* Update the previous pointer */
	Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));

	/* Check next_lattice_qh value */
	if (next_lattice_qh) {
		/* Update this qh to point to the next one in the lattice */
		Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));

		/*
		 * Update the previous pointer of qh->qh_link_ptr.
		 * Static lattice nodes keep their original prev value.
		 */
		if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
			Set_QH(next_lattice_qh->qh_prev,
			    ehci_qh_cpu_to_iommu(ehcip, qh));
		}
	} else {
		/* Update qh's link pointer to terminate periodic list */
		Set_QH(qh->qh_link_ptr,
		    (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
	}

	/*
	 * Insert this endpoint into the lattice.  The lattice node's
	 * link pointer is written last so the controller never sees a
	 * partially linked QH.
	 */
	Set_QH(lattice_qh->qh_link_ptr,
	    (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
}
722 
723 
/*
 * ehci_modify_qh_status_bit:
 *
 * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
 * 'action' selects either clearing the halt (CLEAR_HALT) or halting the
 * endpoint, using the halt strategy appropriate to its speed and type.
 *
 * If several threads try to halt the same pipe, they will need to wait on
 * a condition variable.  Only one thread is allowed to halt or unhalt the
 * pipe at a time.
 *
 * Usually after a halt pipe, an unhalt pipe will follow soon after.  There
 * is an assumption that an Unhalt pipe will never occur without a halt pipe.
 */
static void
ehci_modify_qh_status_bit(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	halt_bit_t		action)
{
	ehci_qh_t		*qh = pp->pp_qh;
	uint_t			smask, eps, split_intr_qh;
	uint_t			status;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
	    action, (void *)qh);

	ehci_print_qh(ehcip, qh);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * If this pipe is in the middle of halting don't allow another
	 * thread to come in and modify the same pipe.
	 */
	while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {

		/* cv_wait drops and re-acquires ehci_int_mutex */
		cv_wait(&pp->pp_halt_cmpl_cv,
		    &ehcip->ehci_int_mutex);
	}

	/* Sync the QH QTD pool to get up to date information */
	Sync_QH_QTD_Pool(ehcip);


	if (action == CLEAR_HALT) {
		/*
		 * If the halt bit is to be cleared, just clear it.
		 * there shouldn't be any race condition problems.
		 * If the host controller reads the bit before the
		 * driver has a chance to set the bit, the bit will
		 * be reread on the next frame.
		 */
		Set_QH(qh->qh_ctrl,
		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
		Set_QH(qh->qh_status,
		    Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));

		goto success;
	}

	/* Halt the QH, but first check to see if it is already halted */
	status = Get_QH(qh->qh_status);
	if (!(status & EHCI_QH_STS_HALTED)) {
		/* Indicate that this pipe is in the middle of halting. */
		pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;

		/*
		 * Find out if this is an full/low speed interrupt endpoint.
		 * A non-zero Cmask indicates that this QH is an interrupt
		 * endpoint.  Check the endpoint speed to see if it is either
		 * FULL or LOW .
		 */
		smask = Get_QH(qh->qh_split_ctrl) &
		    EHCI_QH_SPLIT_CTRL_INTR_MASK;
		eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
		split_intr_qh = ((smask != 0) &&
		    (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));

		/* Dispatch to the halt routine matching speed/type */
		if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
			ehci_halt_hs_qh(ehcip, pp, qh);
		} else {
			if (split_intr_qh) {
				ehci_halt_fls_intr_qh(ehcip, qh);
			} else {
				ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
			}
		}

		/* Indicate that this pipe is not in the middle of halting. */
		pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
	}

	/* Sync the QH QTD pool again to get the most up to date information */
	Sync_QH_QTD_Pool(ehcip);

	ehci_print_qh(ehcip, qh);

	/* Verify the halt actually took effect */
	status = Get_QH(qh->qh_status);
	if (!(status & EHCI_QH_STS_HALTED)) {
		USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_modify_qh_status_bit: Failed to halt qh=0x%p",
		    (void *)qh);

		ehci_print_qh(ehcip, qh);

		/* Set host controller soft state to error */
		ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

		ASSERT(status & EHCI_QH_STS_HALTED);
	}

success:
	/* Wake up threads waiting for this pipe to be halted. */
	cv_signal(&pp->pp_halt_cmpl_cv);
}
839 
840 
/*
 * ehci_halt_hs_qh:
 *
 * Halts all types of HIGH SPEED QHs.
 *
 * The QH is temporarily unlinked from the controller's schedule so the
 * status word can be modified without the HC racing against the write,
 * then relinked.
 */
static void
ehci_halt_hs_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_qh_t		*qh)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_halt_hs_qh:");

	/* Remove this qh from the HCD's view, but do not reclaim it */
	ehci_remove_qh(ehcip, pp, B_FALSE);

	/*
	 * Wait for at least one SOF, just in case the HCD is in the
	 * middle accessing this QH.
	 */
	(void) ehci_wait_for_sof(ehcip);

	/* Sync the QH QTD pool to get up to date information */
	Sync_QH_QTD_Pool(ehcip);

	/* Modify the status bit and halt this QH. */
	Set_QH(qh->qh_status,
	    ((Get_QH(qh->qh_status) &
	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));

	/* Insert this QH back into the HCD's view */
	ehci_insert_qh(ehcip, ph);
}
877 
878 
/*
 * ehci_halt_fls_ctrl_and_bulk_qh:
 *
 * Halts FULL/LOW Ctrl and Bulk QHs only.
 *
 * Like ehci_halt_hs_qh(), the QH is unlinked, halted, and relinked;
 * additionally, if the endpoint was stopped in the middle of a split
 * transaction, the parent 2.0 hub's TT buffer is cleared.
 */
static void
ehci_halt_fls_ctrl_and_bulk_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_qh_t		*qh)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
	uint_t			status, split_status, bytes_left;


	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_halt_fls_ctrl_and_bulk_qh:");

	/* Remove this qh from the HCD's view, but do not reclaim it */
	ehci_remove_qh(ehcip, pp, B_FALSE);

	/*
	 * Wait for at least one SOF, just in case the HCD is in the
	 * middle accessing this QH.
	 */
	(void) ehci_wait_for_sof(ehcip);

	/* Sync the QH QTD pool to get up to date information */
	Sync_QH_QTD_Pool(ehcip);

	/* Modify the status bit and halt this QH. */
	Set_QH(qh->qh_status,
	    ((Get_QH(qh->qh_status) &
	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));

	/*
	 * Check to see if the QH was in the middle of a transaction:
	 * a pending complete-split with bytes still to transfer means
	 * the TT in the parent hub is holding stale state.
	 */
	status = Get_QH(qh->qh_status);
	split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
	bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
	if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
	    (bytes_left != 0)) {
		/* send ClearTTBuffer to this device's parent 2.0 hub */
		ehci_clear_tt_buffer(ehcip, ph, qh);
	}

	/* Insert this QH back into the HCD's view */
	ehci_insert_qh(ehcip, ph);
}
927 
928 
929 /*
930  * ehci_clear_tt_buffer
931  *
932  * This function will sent a Clear_TT_Buffer request to the pipe's
933  * parent 2.0 hub.
934  */
935 static void
936 ehci_clear_tt_buffer(
937 	ehci_state_t		*ehcip,
938 	usba_pipe_handle_data_t	*ph,
939 	ehci_qh_t		*qh)
940 {
941 	usba_device_t		*usba_device;
942 	usba_device_t		*hub_usba_device;
943 	usb_pipe_handle_t	hub_def_ph;
944 	usb_ep_descr_t		*eptd;
945 	uchar_t			attributes;
946 	uint16_t		wValue;
947 	usb_ctrl_setup_t	setup;
948 	usb_cr_t		completion_reason;
949 	usb_cb_flags_t		cb_flags;
950 	int			retry;
951 
952 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
953 	    "ehci_clear_tt_buffer: ");
954 
955 	/* Get some information about the current pipe */
956 	usba_device = ph->p_usba_device;
957 	eptd = &ph->p_ep;
958 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
959 
960 	/*
961 	 * Create the wIndex for this request (usb spec 11.24.2.3)
962 	 * 3..0		Endpoint Number
963 	 * 10..4	Device Address
964 	 * 12..11	Endpoint Type
965 	 * 14..13	Reserved (must be 0)
966 	 * 15		Direction 1 = IN, 0 = OUT
967 	 */
968 	wValue = 0;
969 	if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
970 		wValue |= 0x8000;
971 	}
972 	wValue |= attributes << 11;
973 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
974 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
975 	    EHCI_QH_CTRL_ED_NUMBER_SHIFT;
976 
977 	mutex_exit(&ehcip->ehci_int_mutex);
978 
979 	/* Manually fill in the request. */
980 	setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
981 	setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
982 	setup.wValue = wValue;
983 	setup.wIndex = 1;
984 	setup.wLength = 0;
985 	setup.attrs = USB_ATTRS_NONE;
986 
987 	/* Get the usba_device of the parent 2.0 hub. */
988 	mutex_enter(&usba_device->usb_mutex);
989 	hub_usba_device = usba_device->usb_hs_hub_usba_dev;
990 	mutex_exit(&usba_device->usb_mutex);
991 
992 	/* Get the default ctrl pipe for the parent 2.0 hub */
993 	mutex_enter(&hub_usba_device->usb_mutex);
994 	hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
995 	mutex_exit(&hub_usba_device->usb_mutex);
996 
997 	for (retry = 0; retry < 3; retry++) {
998 
999 		/* sync send the request to the default pipe */
1000 		if (usb_pipe_ctrl_xfer_wait(
1001 		    hub_def_ph,
1002 		    &setup,
1003 		    NULL,
1004 		    &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
1005 
1006 			break;
1007 		}
1008 
1009 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1010 		    "ehci_clear_tt_buffer: Failed to clear tt buffer,"
1011 		    "retry = %d, cr = %d, cb_flags = 0x%x\n",
1012 		    retry, completion_reason, cb_flags);
1013 	}
1014 
1015 	if (retry >= 3) {
1016 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1017 		dev_info_t *dip = hub_usba_device->usb_dip;
1018 
1019 		/*
1020 		 * Ask the user to hotplug the 2.0 hub, to make sure that
1021 		 * all the buffer is in sync since this command has failed.
1022 		 */
1023 		USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1024 		    "Error recovery failure: Please hotplug the 2.0 hub at"
1025 		    "%s", ddi_pathname(dip, path));
1026 
1027 		kmem_free(path, MAXPATHLEN);
1028 	}
1029 
1030 	mutex_enter(&ehcip->ehci_int_mutex);
1031 }
1032 
1033 /*
1034  * ehci_halt_fls_intr_qh:
1035  *
1036  * Halts FULL/LOW speed Intr QHs.
1037  */
static void
ehci_halt_fls_intr_qh(
	ehci_state_t		*ehcip,
	ehci_qh_t		*qh)
{
	usb_frame_number_t	starting_frame;
	usb_frame_number_t	frames_past;
	uint_t			status, i;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_halt_fls_intr_qh:");

	/*
	 * Ask the HC to deactivate the QH in a
	 * full/low periodic QH.
	 */
	Set_QH(qh->qh_ctrl,
	    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));

	starting_frame = ehci_get_current_frame_number(ehcip);

	/*
	 * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frames or until
	 * the QH has been halted.
	 */
	Sync_QH_QTD_Pool(ehcip);
	frames_past = 0;
	status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;

	while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
	    (status != 0)) {

		/* Wait one frame, then re-sync and re-read the status */
		(void) ehci_wait_for_sof(ehcip);

		Sync_QH_QTD_Pool(ehcip);
		status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
		frames_past = ehci_get_current_frame_number(ehcip) -
		    starting_frame;
	}

	/* Modify the status bit and halt this QH. */
	Sync_QH_QTD_Pool(ehcip);

	/* Snapshot the pre-halt status for the debug message below */
	status = Get_QH(qh->qh_status);

	/*
	 * Force the halt: clear ACTIVE, set HALTED, wait one frame and
	 * verify the HC did not re-activate the QH.  Retry up to
	 * EHCI_NUM_INTR_QH_LISTS times.
	 */
	for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		Set_QH(qh->qh_status,
		    ((Get_QH(qh->qh_status) &
		    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));

		Sync_QH_QTD_Pool(ehcip);

		(void) ehci_wait_for_sof(ehcip);
		Sync_QH_QTD_Pool(ehcip);

		if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {

			break;
		}
	}

	Sync_QH_QTD_Pool(ehcip);

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_halt_fls_intr_qh: qh=0x%p frames past=%llu,"
	    " status=0x%x, 0x%x", (void *)qh,
	    (unsigned long long)(ehci_get_current_frame_number(ehcip) -
	    starting_frame), status, Get_QH(qh->qh_status));
}
1107 
1108 
1109 /*
1110  * ehci_remove_qh:
1111  *
1112  * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1113  * endpoint list.
1114  */
1115 void
1116 ehci_remove_qh(
1117 	ehci_state_t		*ehcip,
1118 	ehci_pipe_private_t	*pp,
1119 	boolean_t		reclaim)
1120 {
1121 	uchar_t			attributes;
1122 
1123 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1124 
1125 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1126 	    "ehci_remove_qh: qh=0x%p", (void *)pp->pp_qh);
1127 
1128 	attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1129 
1130 	switch (attributes) {
1131 	case USB_EP_ATTR_CONTROL:
1132 	case USB_EP_ATTR_BULK:
1133 		ehci_remove_async_qh(ehcip, pp, reclaim);
1134 		ehcip->ehci_open_async_count--;
1135 		break;
1136 	case USB_EP_ATTR_INTR:
1137 		ehci_remove_intr_qh(ehcip, pp, reclaim);
1138 		ehcip->ehci_open_periodic_count--;
1139 		break;
1140 	case USB_EP_ATTR_ISOCH:
1141 		/* ISOCH does not use QH, don't do anything but update count */
1142 		ehcip->ehci_open_periodic_count--;
1143 		break;
1144 	}
1145 	ehci_toggle_scheduler(ehcip);
1146 }
1147 
1148 
1149 /*
1150  * ehci_remove_async_qh:
1151  *
1152  * Remove a control/bulk endpoint into the Host Controller's (HC)
1153  * Asynchronous schedule endpoint list.
1154  */
static void
ehci_remove_async_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	boolean_t		reclaim)
{
	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
	ehci_qh_t		*prev_qh, *next_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_remove_async_qh:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Resolve both neighbours from the QH's IO-addressed link fields */
	prev_qh = ehci_qh_iommu_to_cpu(ehcip,
	    Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
	next_qh = ehci_qh_iommu_to_cpu(ehcip,
	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);

	/* Make sure this QH is in the list */
	ASSERT(prev_qh != NULL);

	/*
	 * If next QH and current QH are the same, then this is the last
	 * QH on the Asynchronous Schedule list.
	 */
	if (qh == next_qh) {
		ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
		/*
		 * Null our pointer to the async sched list, but do not
		 * touch the host controller's list_addr.
		 */
		ehcip->ehci_head_of_async_sched_list = NULL;
		ASSERT(ehcip->ehci_open_async_count == 1);
	} else {
		/* If this QH is the HEAD then find another one to replace it */
		if (ehcip->ehci_head_of_async_sched_list == qh) {

			ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
			ehcip->ehci_head_of_async_sched_list = next_qh;
			/* The new head takes over the reclaim-head (H) bit */
			Set_QH(next_qh->qh_ctrl,
			    Get_QH(next_qh->qh_ctrl) |
			    EHCI_QH_CTRL_RECLAIM_HEAD);
		}
		/* Unlink this QH from the circular doubly-linked list */
		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
		Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
	}

	/* qh_prev to indicate it is no longer in the circular list */
	Set_QH(qh->qh_prev, NULL);

	if (reclaim) {
		/* Defer freeing until the HC can no longer reference it */
		ehci_insert_qh_on_reclaim_list(ehcip, pp);
	}
}
1210 
1211 
1212 /*
1213  * ehci_remove_intr_qh:
1214  *
1215  * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1216  * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1217  * interrupt handler.
1218  */
static void
ehci_remove_intr_qh(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	boolean_t		reclaim)
{
	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
	ehci_qh_t		*prev_qh, *next_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_remove_intr_qh:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * NOTE(review): unlike ehci_remove_async_qh(), qh_prev is not
	 * masked with EHCI_QH_LINK_PTR here -- presumably qh_prev is
	 * stored unmasked on the interrupt lattice; confirm against the
	 * code that sets qh_prev.
	 */
	prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
	next_qh = ehci_qh_iommu_to_cpu(ehcip,
	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);

	/* Make sure this QH is in the list */
	ASSERT(prev_qh != NULL);

	if (next_qh) {
		/* Update previous qh's link pointer */
		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));

		if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
			/* Set the previous pointer of the next one */
			Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
		}
	} else {
		/*
		 * No successor: update previous qh's link pointer with
		 * EHCI_QH_LINK_PTR_VALID ORed in.
		 */
		Set_QH(prev_qh->qh_link_ptr,
		    (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
	}

	/* qh_prev to indicate it is no longer in the circular list */
	Set_QH(qh->qh_prev, NULL);

	if (reclaim) {
		/* Defer freeing until the HC can no longer reference it */
		ehci_insert_qh_on_reclaim_list(ehcip, pp);
	}
}
1261 
1262 
1263 /*
1264  * ehci_insert_qh_on_reclaim_list:
1265  *
1266  * Insert Endpoint onto the reclaim list
1267  */
static void
ehci_insert_qh_on_reclaim_list(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
	ehci_qh_t		*next_qh, *prev_qh;
	usb_frame_number_t	frame_number;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Read current usb frame number and add appropriate number of
	 * usb frames needs to wait before reclaiming current endpoint.
	 */
	frame_number =
	    ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;

	/* Store 32-bit ID */
	Set_QH(qh->qh_reclaim_frame,
	    ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));

	/* Insert the endpoint onto the reclamation list */
	if (ehcip->ehci_reclaim_list) {
		/* Walk to the tail of the singly-linked reclaim list */
		next_qh = ehcip->ehci_reclaim_list;

		while (next_qh) {
			prev_qh = next_qh;
			next_qh = ehci_qh_iommu_to_cpu(ehcip,
			    Get_QH(next_qh->qh_reclaim_next));
		}

		/* Append this QH (as an IO address) after the tail */
		Set_QH(prev_qh->qh_reclaim_next,
		    ehci_qh_cpu_to_iommu(ehcip, qh));
	} else {
		/* Empty list: this QH becomes the head */
		ehcip->ehci_reclaim_list = qh;
	}

	/* The newly inserted QH must itself terminate the list */
	ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
}
1308 
1309 
1310 /*
1311  * ehci_deallocate_qh:
1312  *
1313  * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1314  *
1315  * NOTE: This function is also called from POLLED MODE.
1316  */
void
ehci_deallocate_qh(
	ehci_state_t	*ehcip,
	ehci_qh_t	*old_qh)
{
	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_deallocate_qh:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Free the two dummy QTDs attached to the QH (set up by
	 * ehci_initialize_dummy()), if they are still present.
	 */
	first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
	    (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));

	if (first_dummy_qtd) {
		ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);

		second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
		    Get_QTD(first_dummy_qtd->qtd_next_qtd));

		if (second_dummy_qtd) {
			ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
			    EHCI_QTD_DUMMY);

			ehci_deallocate_qtd(ehcip, second_dummy_qtd);
		}

		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
	}

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);

	/* Mark the QH free; the pool slot can now be reused */
	Set_QH(old_qh->qh_state, EHCI_QH_FREE);
}
1353 
1354 
1355 /*
1356  * ehci_qh_cpu_to_iommu:
1357  *
1358  * This function converts for the given Endpoint Descriptor (QH) CPU address
1359  * to IO address.
1360  *
1361  * NOTE: This function is also called from POLLED MODE.
1362  */
1363 uint32_t
1364 ehci_qh_cpu_to_iommu(
1365 	ehci_state_t	*ehcip,
1366 	ehci_qh_t	*addr)
1367 {
1368 	uint32_t	qh;
1369 
1370 	qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1371 	    (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1372 
1373 	ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1374 	ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1375 	    sizeof (ehci_qh_t) * ehci_qh_pool_size);
1376 
1377 	return (qh);
1378 }
1379 
1380 
1381 /*
1382  * ehci_qh_iommu_to_cpu:
1383  *
1384  * This function converts for the given Endpoint Descriptor (QH) IO address
1385  * to CPU address.
1386  */
1387 ehci_qh_t *
1388 ehci_qh_iommu_to_cpu(
1389 	ehci_state_t	*ehcip,
1390 	uintptr_t	addr)
1391 {
1392 	ehci_qh_t	*qh;
1393 
1394 	if (addr == NULL) {
1395 
1396 		return (NULL);
1397 	}
1398 
1399 	qh = (ehci_qh_t *)((uintptr_t)
1400 	    (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1401 	    (uintptr_t)ehcip->ehci_qh_pool_addr);
1402 
1403 	ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1404 	ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1405 	    (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1406 
1407 	return (qh);
1408 }
1409 
1410 
1411 /*
1412  * Transfer Descriptor manipulations functions
1413  */
1414 
1415 /*
1416  * ehci_initialize_dummy:
1417  *
1418  * An Endpoint Descriptor (QH) has a  dummy Transfer Descriptor (QTD) on the
1419  * end of its QTD list. Initially, both the head and tail pointers of the QH
1420  * point to the dummy QTD.
1421  */
static int
ehci_initialize_dummy(
	ehci_state_t	*ehcip,
	ehci_qh_t	*qh)
{
	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;

	/* Allocate first dummy QTD */
	first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);

	if (first_dummy_qtd == NULL) {
		return (USB_NO_RESOURCES);
	}

	/* Allocate second dummy QTD */
	second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);

	if (second_dummy_qtd == NULL) {
		/* Deallocate first dummy QTD */
		ehci_deallocate_qtd(ehcip, first_dummy_qtd);

		return (USB_NO_RESOURCES);
	}

	/* Next QTD pointer of an QH point to this new dummy QTD */
	Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
	    first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);

	/* Set qh's dummy qtd field */
	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));

	/* Set first_dummy's next qtd pointer */
	Set_QTD(first_dummy_qtd->qtd_next_qtd,
	    ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));

	/*
	 * Returns USB_SUCCESS with both dummies chained off the QH, or
	 * USB_NO_RESOURCES with nothing allocated.
	 */
	return (USB_SUCCESS);
}
1459 
1460 /*
1461  * ehci_allocate_ctrl_resources:
1462  *
1463  * Calculates the number of tds necessary for a ctrl transfer, and allocates
1464  * all the resources necessary.
1465  *
1466  * Returns NULL if there is insufficient resources otherwise TW.
1467  */
1468 ehci_trans_wrapper_t *
1469 ehci_allocate_ctrl_resources(
1470 	ehci_state_t		*ehcip,
1471 	ehci_pipe_private_t	*pp,
1472 	usb_ctrl_req_t		*ctrl_reqp,
1473 	usb_flags_t		usb_flags)
1474 {
1475 	size_t			qtd_count = 2;
1476 	size_t			ctrl_buf_size;
1477 	ehci_trans_wrapper_t	*tw;
1478 
1479 	/* Add one more td for data phase */
1480 	if (ctrl_reqp->ctrl_wLength) {
1481 		qtd_count += 1;
1482 	}
1483 
1484 	/*
1485 	 * If we have a control data phase, the data buffer starts
1486 	 * on the next 4K page boundary. So the TW buffer is allocated
1487 	 * to be larger than required. The buffer in the range of
1488 	 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
1489 	 * and not to be transferred.
1490 	 */
1491 	if (ctrl_reqp->ctrl_wLength) {
1492 		ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
1493 		    ctrl_reqp->ctrl_wLength;
1494 	} else {
1495 		ctrl_buf_size = SETUP_SIZE;
1496 	}
1497 
1498 	tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
1499 	    usb_flags, qtd_count);
1500 
1501 	return (tw);
1502 }
1503 
1504 /*
1505  * ehci_insert_ctrl_req:
1506  *
1507  * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1508  */
/* ARGSUSED */
void
ehci_insert_ctrl_req(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_ctrl_req_t		*ctrl_reqp,
	ehci_trans_wrapper_t	*tw,
	usb_flags_t		usb_flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	uchar_t			bmRequestType = ctrl_reqp->ctrl_bmRequestType;
	uchar_t			bRequest = ctrl_reqp->ctrl_bRequest;
	uint16_t		wValue = ctrl_reqp->ctrl_wValue;
	uint16_t		wIndex = ctrl_reqp->ctrl_wIndex;
	uint16_t		wLength = ctrl_reqp->ctrl_wLength;
	mblk_t			*data = ctrl_reqp->ctrl_data;
	uint32_t		ctrl = 0;
	uint8_t			setup_packet[8];

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_ctrl_req:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Save current control request pointer and timeout values
	 * in transfer wrapper.
	 */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
	tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
	    ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;

	/*
	 * Initialize the callback and any callback data for when
	 * the qtd completes.
	 */
	tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
	tw->tw_handle_callback_value = NULL;

	/*
	 * Build the 8-byte setup packet in little-endian byte order;
	 * swap the setup bytes where necessary since we specified
	 * NEVERSWAP
	 */
	setup_packet[0] = bmRequestType;
	setup_packet[1] = bRequest;
	setup_packet[2] = (uint8_t)wValue;
	setup_packet[3] = wValue >> 8;
	setup_packet[4] = (uint8_t)wIndex;
	setup_packet[5] = wIndex >> 8;
	setup_packet[6] = (uint8_t)wLength;
	setup_packet[7] = wLength >> 8;

	bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);

	Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);

	/* The setup phase always starts with data toggle 0 */
	ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);

	/*
	 * The QTD's are placed on the QH one at a time.
	 * Once this QTD is placed on the done list, the
	 * data or status phase QTD will be enqueued.
	 */
	(void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
	    EHCI_CTRL_SETUP_PHASE, pp, tw);

	USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);

	/*
	 * If this control transfer has a data phase, record the
	 * direction. If the data phase is an OUT transaction,
	 * copy the data into the buffer of the transfer wrapper.
	 */
	if (wLength != 0) {
		/* There is a data stage.  Find the direction */
		if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
			tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
		} else {
			tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;

			/* Copy the data into the message */
			bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
			    wLength);

			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
			    wLength + EHCI_MAX_QTD_BUF_SIZE);
		}

		/* The data phase starts with data toggle 1 */
		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);

		/*
		 * Create the QTD.  If this is an OUT transaction,
		 * the data is already in the buffer of the TW.
		 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
		 * which is 4K aligned, though the ctrl phase only
		 * transfers a length of SETUP_SIZE. The padding data
		 * in the TW buffer are discarded.
		 */
		(void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
		    tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
		    EHCI_CTRL_DATA_PHASE, pp, tw);

		/*
		 * The direction of the STATUS QTD depends  on
		 * the direction of the transfer: it is always the
		 * opposite of the data phase.
		 */
		if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
			    EHCI_QTD_CTRL_OUT_PID |
			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
		} else {
			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
			    EHCI_QTD_CTRL_IN_PID |
			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
		}
	} else {
		/*
		 * There is no data stage,  then initiate
		 * status phase from the host.
		 */
		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
		    EHCI_QTD_CTRL_IN_PID |
		    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
	}


	/* Zero-length status phase; interrupt on completion is set above */
	(void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
	    EHCI_CTRL_STATUS_PHASE, pp,  tw);

	/* Start the timer for this control transfer */
	ehci_start_xfer_timer(ehcip, pp, tw);
}
1642 
1643 
1644 /*
1645  * ehci_allocate_bulk_resources:
1646  *
1647  * Calculates the number of tds necessary for a ctrl transfer, and allocates
1648  * all the resources necessary.
1649  *
1650  * Returns NULL if there is insufficient resources otherwise TW.
1651  */
1652 ehci_trans_wrapper_t *
1653 ehci_allocate_bulk_resources(
1654 	ehci_state_t		*ehcip,
1655 	ehci_pipe_private_t	*pp,
1656 	usb_bulk_req_t		*bulk_reqp,
1657 	usb_flags_t		usb_flags)
1658 {
1659 	size_t			qtd_count = 0;
1660 	ehci_trans_wrapper_t	*tw;
1661 
1662 	/* Check the size of bulk request */
1663 	if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1664 
1665 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1666 		    "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1667 		    "more than 0x%x", bulk_reqp->bulk_len,
1668 		    EHCI_MAX_BULK_XFER_SIZE);
1669 
1670 		return (NULL);
1671 	}
1672 
1673 	/* Get the required bulk packet size */
1674 	qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1675 	if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE ||
1676 	    bulk_reqp->bulk_len == 0) {
1677 		qtd_count += 1;
1678 	}
1679 
1680 	tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1681 	    usb_flags, qtd_count);
1682 
1683 	return (tw);
1684 }
1685 
1686 /*
1687  * ehci_insert_bulk_req:
1688  *
1689  * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1690  * endpoint.
1691  */
/* ARGSUSED */
void
ehci_insert_bulk_req(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_bulk_req_t		*bulk_reqp,
	ehci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	uint_t			bulk_pkt_size, count;
	size_t			residue = 0, len = 0;
	uint32_t		ctrl = 0;
	int			pipe_dir;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
	    (void *)bulk_reqp, flags);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get the bulk pipe direction */
	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;

	/* Get the required bulk packet size */
	bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);

	/* Bytes left over after the full-size QTD chunks */
	if (bulk_pkt_size) {
		residue = tw->tw_length % bulk_pkt_size;
	}

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);

	/*
	 * Save current bulk request pointer and timeout values
	 * in transfer wrapper.
	 */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
	tw->tw_timeout = bulk_reqp->bulk_timeout;

	/*
	 * Initialize the callback and any callback
	 * data required when the qtd completes.
	 */
	tw->tw_handle_qtd = ehci_handle_bulk_qtd;
	tw->tw_handle_callback_value = NULL;

	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
	    EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;

	/* For OUT transfers, stage the client data in the TW buffer */
	if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {

		if (bulk_reqp->bulk_len) {
			ASSERT(bulk_reqp->bulk_data != NULL);

			bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
			    bulk_reqp->bulk_len);

			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
			    bulk_reqp->bulk_len);
		}
	}

	ctrl = tw->tw_direction;

	/* Insert all the bulk QTDs */
	for (count = 0; count < tw->tw_num_qtds; count++) {

		/* Check for last qtd */
		if (count == (tw->tw_num_qtds - 1)) {

			/* Only the last QTD interrupts on completion */
			ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;

			/* Check for inserting residue data */
			if (residue) {
				bulk_pkt_size = (uint_t)residue;
			}
		}

		/* Insert the QTD onto the endpoint */
		(void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
		    0, pp, tw);

		/* Advance the buffer offset for the next QTD */
		len = len + bulk_pkt_size;
	}

	/* Start the timer for this bulk transfer */
	ehci_start_xfer_timer(ehcip, pp, tw);
}
1782 
1783 
1784 /*
1785  * ehci_start_periodic_pipe_polling:
1786  *
1787  * NOTE: This function is also called from POLLED MODE.
1788  */
int
ehci_start_periodic_pipe_polling(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_opaque_t		periodic_in_reqp,
	usb_flags_t		flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*eptd = &ph->p_ep;
	int			error = USB_SUCCESS;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
	    "ehci_start_periodic_pipe_polling: ep%d",
	    ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/*
	 * Check and handle start polling on root hub interrupt pipe.
	 * The root hub is emulated in software and takes a separate path.
	 */
	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
	    USB_EP_ATTR_INTR)) {

		error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
		    (usb_intr_req_t *)periodic_in_reqp, flags);

		return (error);
	}

	/* Polling may only start from the IDLE pipe state */
	switch (pp->pp_state) {
	case EHCI_PIPE_STATE_IDLE:
		/* Save the Original client's Periodic IN request */
		pp->pp_client_periodic_in_reqp = periodic_in_reqp;

		/*
		 * This pipe is uninitialized or if a valid QTD is
		 * not found then insert a QTD on the interrupt IN
		 * endpoint.
		 */
		error = ehci_start_pipe_polling(ehcip, ph, flags);

		if (error != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_INTR,
			    ehcip->ehci_log_hdl,
			    "ehci_start_periodic_pipe_polling: "
			    "Start polling failed");

			/* Drop the saved request on failure */
			pp->pp_client_periodic_in_reqp = NULL;

			return (error);
		}

		USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
		    "ehci_start_periodic_pipe_polling: PP = 0x%p", (void *)pp);

#ifdef DEBUG
		/* On success the pipe must have transfer wrappers queued */
		switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
		case USB_EP_ATTR_INTR:
			ASSERT((pp->pp_tw_head != NULL) &&
			    (pp->pp_tw_tail != NULL));
			break;
		case USB_EP_ATTR_ISOCH:
			ASSERT((pp->pp_itw_head != NULL) &&
			    (pp->pp_itw_tail != NULL));
			break;
		}
#endif

		break;
	case EHCI_PIPE_STATE_ACTIVE:
		USB_DPRINTF_L2(PRINT_MASK_INTR,
		    ehcip->ehci_log_hdl,
		    "ehci_start_periodic_pipe_polling: "
		    "Polling is already in progress");

		error = USB_FAILURE;
		break;
	case EHCI_PIPE_STATE_ERROR:
		USB_DPRINTF_L2(PRINT_MASK_INTR,
		    ehcip->ehci_log_hdl,
		    "ehci_start_periodic_pipe_polling: "
		    "Pipe is halted and perform reset"
		    "before restart polling");

		error = USB_FAILURE;
		break;
	default:
		USB_DPRINTF_L2(PRINT_MASK_INTR,
		    ehcip->ehci_log_hdl,
		    "ehci_start_periodic_pipe_polling: "
		    "Undefined state");

		error = USB_FAILURE;
		break;
	}

	return (error);
}
1888 
1889 
1890 /*
1891  * ehci_start_pipe_polling:
1892  *
1893  * Insert the number of periodic requests corresponding to polling
1894  * interval as calculated during pipe open.
1895  */
1896 static int
1897 ehci_start_pipe_polling(
1898 	ehci_state_t		*ehcip,
1899 	usba_pipe_handle_data_t	*ph,
1900 	usb_flags_t		flags)
1901 {
1902 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1903 	usb_ep_descr_t		*eptd = &ph->p_ep;
1904 	int			error = USB_FAILURE;
1905 
1906 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1907 	    "ehci_start_pipe_polling:");
1908 
1909 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1910 
1911 	/*
1912 	 * For the start polling, pp_max_periodic_req_cnt will be zero
1913 	 * and for the restart polling request, it will be non zero.
1914 	 *
1915 	 * In case of start polling request, find out number of requests
1916 	 * required for the Interrupt IN endpoints corresponding to the
1917 	 * endpoint polling interval. For Isochronous IN endpoints, it is
1918 	 * always fixed since its polling interval will be one ms.
1919 	 */
1920 	if (pp->pp_max_periodic_req_cnt == 0) {
1921 
1922 		ehci_set_periodic_pipe_polling(ehcip, ph);
1923 	}
1924 
1925 	ASSERT(pp->pp_max_periodic_req_cnt != 0);
1926 
1927 	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1928 	case USB_EP_ATTR_INTR:
1929 		error = ehci_start_intr_polling(ehcip, ph, flags);
1930 		break;
1931 	case USB_EP_ATTR_ISOCH:
1932 		error = ehci_start_isoc_polling(ehcip, ph, flags);
1933 		break;
1934 	}
1935 
1936 	return (error);
1937 }
1938 
/*
 * ehci_start_intr_polling:
 *
 * Pre-allocate the transfer wrappers needed to bring the pipe up to
 * pp_max_periodic_req_cnt outstanding requests, then queue an interrupt
 * IN request for each.  Returns USB_SUCCESS or USB_NO_RESOURCES.
 */
static int
ehci_start_intr_polling(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_flags_t		flags)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*tw_list, *tw;
	int			i, total_tws;
	int			error = USB_SUCCESS;

	/* Allocate all the necessary resources for the IN transfer */
	tw_list = NULL;
	total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
	for (i = 0; i < total_tws; i += 1) {
		tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
		if (tw == NULL) {
			error = USB_NO_RESOURCES;
			/* There are not enough resources, deallocate the TWs */
			tw = tw_list;
			while (tw != NULL) {
				tw_list = tw->tw_next;
				ehci_deallocate_intr_in_resource(
				    ehcip, pp, tw);
				ehci_deallocate_tw(ehcip, pp, tw);
				tw = tw_list;
			}

			return (error);
		} else {
			/*
			 * NOTE(review): only the first TW is remembered
			 * here; the rest are presumably chained through
			 * tw->tw_next by the allocator -- confirm in
			 * ehci_allocate_tw_resources().
			 */
			if (tw_list == NULL) {
				tw_list = tw;
			}
		}
	}

	/* Queue the pre-allocated TWs until the pipe is fully primed */
	while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {

		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_start_pipe_polling: max = %d curr = %d tw = %p:",
		    pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
		    (void *)tw_list);

		tw = tw_list;
		tw_list = tw->tw_next;

		ehci_insert_intr_req(ehcip, pp, tw, flags);

		pp->pp_cur_periodic_req_cnt++;
	}

	return (error);
}
1992 
1993 
1994 /*
1995  * ehci_set_periodic_pipe_polling:
1996  *
1997  * Calculate the number of periodic requests needed corresponding to the
1998  * interrupt IN endpoints polling interval. Table below gives the number
1999  * of periodic requests needed for the interrupt IN endpoints  according
2000  * to endpoint polling interval.
2001  *
2002  * Polling interval		Number of periodic requests
2003  *
2004  * 1ms				4
2005  * 2ms				2
2006  * 4ms to 32ms			1
2007  */
static void
ehci_set_periodic_pipe_polling(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph)
{
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	uchar_t			ep_attr = endpoint->bmAttributes;
	uint_t			interval;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_set_periodic_pipe_polling:");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	pp->pp_cur_periodic_req_cnt = 0;

	/*
	 * If the client's interrupt IN request carries the
	 * USB_ATTRS_ONE_XFER attribute, a single outstanding request
	 * suffices, so use the minimum count (EHCI_INTR_XMS_REQS).
	 */
	if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
	    (pp->pp_client_periodic_in_reqp)) {
		usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
		    pp->pp_client_periodic_in_reqp;

		if (intr_reqp->intr_attributes &
		    USB_ATTRS_ONE_XFER) {

			pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;

			return;
		}
	}

	mutex_enter(&ph->p_usba_device->usb_mutex);

	/*
	 * The ehci_adjust_polling_interval function will not fail
	 * at this instance since bandwidth allocation is already
	 * done. Here we are getting only the periodic interval.
	 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint,
	    ph->p_usba_device->usb_port_status);

	mutex_exit(&ph->p_usba_device->usb_mutex);

	/* Map the polling interval to the request count (see table above) */
	switch (interval) {
	case EHCI_INTR_1MS_POLL:
		pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
		break;
	case EHCI_INTR_2MS_POLL:
		pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
		break;
	default:
		pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
		break;
	}

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
	    pp->pp_max_periodic_req_cnt);
}
2071 
2072 /*
2073  * ehci_allocate_intr_resources:
2074  *
2075  * Calculates the number of tds necessary for a intr transfer, and allocates
2076  * all the necessary resources.
2077  *
2078  * Returns NULL if there is insufficient resources otherwise TW.
2079  */
2080 ehci_trans_wrapper_t *
2081 ehci_allocate_intr_resources(
2082 	ehci_state_t		*ehcip,
2083 	usba_pipe_handle_data_t	*ph,
2084 	usb_intr_req_t		*intr_reqp,
2085 	usb_flags_t		flags)
2086 {
2087 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2088 	int			pipe_dir;
2089 	size_t			qtd_count = 1;
2090 	size_t			tw_length;
2091 	ehci_trans_wrapper_t	*tw;
2092 
2093 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2094 	    "ehci_allocate_intr_resources:");
2095 
2096 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2097 
2098 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2099 
2100 	/* Get the length of interrupt transfer & alloc data */
2101 	if (intr_reqp) {
2102 		tw_length = intr_reqp->intr_len;
2103 	} else {
2104 		ASSERT(pipe_dir == USB_EP_DIR_IN);
2105 		tw_length = (pp->pp_client_periodic_in_reqp) ?
2106 		    (((usb_intr_req_t *)pp->
2107 		    pp_client_periodic_in_reqp)->intr_len) :
2108 		    ph->p_ep.wMaxPacketSize;
2109 	}
2110 
2111 	/* Check the size of interrupt request */
2112 	if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2113 
2114 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2115 		    "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2116 		    "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2117 
2118 		return (NULL);
2119 	}
2120 
2121 	if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2122 	    qtd_count)) == NULL) {
2123 
2124 		return (NULL);
2125 	}
2126 
2127 	if (pipe_dir == USB_EP_DIR_IN) {
2128 		if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
2129 		    USB_SUCCESS) {
2130 			ehci_deallocate_tw(ehcip, pp, tw);
2131 		}
2132 		tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2133 	} else {
2134 		if (tw_length) {
2135 			ASSERT(intr_reqp->intr_data != NULL);
2136 
2137 			/* Copy the data into the buffer */
2138 			bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2139 			    intr_reqp->intr_len);
2140 
2141 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2142 			    intr_reqp->intr_len);
2143 		}
2144 
2145 		tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2146 		tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2147 	}
2148 
2149 	if (intr_reqp) {
2150 		tw->tw_timeout = intr_reqp->intr_timeout;
2151 	}
2152 
2153 	/*
2154 	 * Initialize the callback and any callback
2155 	 * data required when the qtd completes.
2156 	 */
2157 	tw->tw_handle_qtd = ehci_handle_intr_qtd;
2158 	tw->tw_handle_callback_value = NULL;
2159 
2160 	return (tw);
2161 }
2162 
2163 
2164 /*
2165  * ehci_insert_intr_req:
2166  *
2167  * Insert an Interrupt request into the Host Controller's periodic list.
2168  */
2169 /* ARGSUSED */
2170 void
2171 ehci_insert_intr_req(
2172 	ehci_state_t		*ehcip,
2173 	ehci_pipe_private_t	*pp,
2174 	ehci_trans_wrapper_t	*tw,
2175 	usb_flags_t		flags)
2176 {
2177 	uint_t			ctrl = 0;
2178 
2179 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2180 
2181 	ASSERT(tw->tw_curr_xfer_reqp != NULL);
2182 
2183 	ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2184 
2185 	/* Insert another interrupt QTD */
2186 	(void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw);
2187 
2188 	/* Start the timer for this Interrupt transfer */
2189 	ehci_start_xfer_timer(ehcip, pp, tw);
2190 }
2191 
2192 
2193 /*
2194  * ehci_stop_periodic_pipe_polling:
2195  */
2196 /* ARGSUSED */
2197 int
2198 ehci_stop_periodic_pipe_polling(
2199 	ehci_state_t		*ehcip,
2200 	usba_pipe_handle_data_t	*ph,
2201 	usb_flags_t		flags)
2202 {
2203 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2204 	usb_ep_descr_t		*eptd = &ph->p_ep;
2205 
2206 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2207 	    "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2208 
2209 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2210 
2211 	/*
2212 	 * Check and handle stop polling on root hub interrupt pipe.
2213 	 */
2214 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2215 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2216 	    USB_EP_ATTR_INTR)) {
2217 
2218 		ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2219 
2220 		return (USB_SUCCESS);
2221 	}
2222 
2223 	if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2224 
2225 		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2226 		    "ehci_stop_periodic_pipe_polling: "
2227 		    "Polling already stopped");
2228 
2229 		return (USB_SUCCESS);
2230 	}
2231 
2232 	/* Set pipe state to pipe stop polling */
2233 	pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2234 
2235 	ehci_pipe_cleanup(ehcip, ph);
2236 
2237 	return (USB_SUCCESS);
2238 }
2239 
2240 
2241 /*
2242  * ehci_insert_qtd:
2243  *
2244  * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2245  * Always returns USB_SUCCESS for now.	Once Isoch has been implemented,
2246  * it may return USB_FAILURE.
2247  */
2248 int
2249 ehci_insert_qtd(
2250 	ehci_state_t		*ehcip,
2251 	uint32_t		qtd_ctrl,
2252 	size_t			qtd_dma_offs,
2253 	size_t			qtd_length,
2254 	uint32_t		qtd_ctrl_phase,
2255 	ehci_pipe_private_t	*pp,
2256 	ehci_trans_wrapper_t	*tw)
2257 {
2258 	ehci_qtd_t		*curr_dummy_qtd, *next_dummy_qtd;
2259 	ehci_qtd_t		*new_dummy_qtd;
2260 	ehci_qh_t		*qh = pp->pp_qh;
2261 	int			error = USB_SUCCESS;
2262 
2263 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2264 
2265 	/* Allocate new dummy QTD */
2266 	new_dummy_qtd = tw->tw_qtd_free_list;
2267 
2268 	ASSERT(new_dummy_qtd != NULL);
2269 	tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
2270 	    Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
2271 	Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);
2272 
2273 	/* Get the current and next dummy QTDs */
2274 	curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2275 	    Get_QH(qh->qh_dummy_qtd));
2276 	next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2277 	    Get_QTD(curr_dummy_qtd->qtd_next_qtd));
2278 
2279 	/* Update QH's dummy qtd field */
2280 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));
2281 
2282 	/* Update next dummy's next qtd pointer */
2283 	Set_QTD(next_dummy_qtd->qtd_next_qtd,
2284 	    ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));
2285 
2286 	/*
2287 	 * Fill in the current dummy qtd and
2288 	 * add the new dummy to the end.
2289 	 */
2290 	ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
2291 	    qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);
2292 
2293 	/* Insert this qtd onto the tw */
2294 	ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);
2295 
2296 	/*
2297 	 * Insert this qtd onto active qtd list.
2298 	 * Don't insert polled mode qtd here.
2299 	 */
2300 	if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
2301 		/* Insert this qtd onto active qtd list */
2302 		ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
2303 	}
2304 
2305 	/* Print qh and qtd */
2306 	ehci_print_qh(ehcip, qh);
2307 	ehci_print_qtd(ehcip, curr_dummy_qtd);
2308 
2309 	return (error);
2310 }
2311 
2312 
2313 /*
2314  * ehci_allocate_qtd_from_pool:
2315  *
2316  * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2317  */
2318 static ehci_qtd_t *
2319 ehci_allocate_qtd_from_pool(ehci_state_t	*ehcip)
2320 {
2321 	int		i, ctrl;
2322 	ehci_qtd_t	*qtd;
2323 
2324 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2325 
2326 	/*
2327 	 * Search for a blank Transfer Descriptor (QTD)
2328 	 * in the QTD buffer pool.
2329 	 */
2330 	for (i = 0; i < ehci_qtd_pool_size; i ++) {
2331 		ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2332 		if (ctrl == EHCI_QTD_FREE) {
2333 			break;
2334 		}
2335 	}
2336 
2337 	if (i >= ehci_qtd_pool_size) {
2338 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2339 		    "ehci_allocate_qtd_from_pool: QTD exhausted");
2340 
2341 		return (NULL);
2342 	}
2343 
2344 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2345 	    "ehci_allocate_qtd_from_pool: Allocated %d", i);
2346 
2347 	/* Create a new dummy for the end of the QTD list */
2348 	qtd = &ehcip->ehci_qtd_pool_addr[i];
2349 
2350 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2351 	    "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2352 
2353 	/* Mark the newly allocated QTD as a dummy */
2354 	Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2355 
2356 	/* Mark the status of this new QTD to halted state */
2357 	Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2358 
2359 	/* Disable dummy QTD's next and alternate next pointers */
2360 	Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2361 	Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2362 
2363 	return (qtd);
2364 }
2365 
2366 
2367 /*
2368  * ehci_fill_in_qtd:
2369  *
2370  * Fill in the fields of a Transfer Descriptor (QTD).
2371  * The "Buffer Pointer" fields of a QTD are retrieved from the TW
2372  * it is associated with.
2373  *
2374  * Note:
2375  * qtd_dma_offs - the starting offset into the TW buffer, where the QTD
2376  *		  should transfer from. It should be 4K aligned. And when
2377  *		  a TW has more than one QTDs, the QTDs must be filled in
2378  *		  increasing order.
2379  * qtd_length - the total bytes to transfer.
2380  */
2381 /*ARGSUSED*/
2382 static void
2383 ehci_fill_in_qtd(
2384 	ehci_state_t		*ehcip,
2385 	ehci_qtd_t		*qtd,
2386 	uint32_t		qtd_ctrl,
2387 	size_t			qtd_dma_offs,
2388 	size_t			qtd_length,
2389 	uint32_t		qtd_ctrl_phase,
2390 	ehci_pipe_private_t	*pp,
2391 	ehci_trans_wrapper_t	*tw)
2392 {
2393 	uint32_t		buf_addr;
2394 	size_t			buf_len = qtd_length;
2395 	uint32_t		ctrl = qtd_ctrl;
2396 	uint_t			i = 0;
2397 	int			rem_len;
2398 
2399 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2400 	    "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
2401 	    "len 0x%lx", (void *)qtd, qtd_ctrl, qtd_dma_offs, qtd_length);
2402 
2403 	/* Assert that the qtd to be filled in is a dummy */
2404 	ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);
2405 
2406 	/* Change QTD's state Active */
2407 	Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);
2408 
2409 	/* Set the total length data transfer */
2410 	ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
2411 	    & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);
2412 
2413 	/*
2414 	 * QTDs must be filled in increasing DMA offset order.
2415 	 * tw_dma_offs is initialized to be 0 at TW creation and
2416 	 * is only increased in this function.
2417 	 */
2418 	ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);
2419 
2420 	/*
2421 	 * Save the starting dma buffer offset used and
2422 	 * length of data that will be transfered in
2423 	 * the current QTD.
2424 	 */
2425 	Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
2426 	Set_QTD(qtd->qtd_xfer_len, buf_len);
2427 
2428 	while (buf_len) {
2429 		/*
2430 		 * Advance to the next DMA cookie until finding the cookie
2431 		 * that qtd_dma_offs falls in.
2432 		 * It is very likely this loop will never repeat more than
2433 		 * once. It is here just to accommodate the case qtd_dma_offs
2434 		 * is increased by multiple cookies during two consecutive
2435 		 * calls into this function. In that case, the interim DMA
2436 		 * buffer is allowed to be skipped.
2437 		 */
2438 		while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2439 		    qtd_dma_offs) {
2440 			/*
2441 			 * tw_dma_offs always points to the starting offset
2442 			 * of a cookie
2443 			 */
2444 			tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2445 			ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2446 			tw->tw_cookie_idx++;
2447 			ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2448 		}
2449 
2450 		/*
2451 		 * Counting the remained buffer length to be filled in
2452 		 * the QTD for current DMA cookie
2453 		 */
2454 		rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2455 		    qtd_dma_offs;
2456 
2457 		/* Update the beginning of the buffer */
2458 		buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
2459 		    tw->tw_cookie.dmac_address;
2460 		ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
2461 		Set_QTD(qtd->qtd_buf[i], buf_addr);
2462 
2463 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2464 		    "ehci_fill_in_qtd: dmac_addr 0x%x dmac_size "
2465 		    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2466 		    tw->tw_cookie_idx);
2467 
2468 		if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
2469 			ASSERT(buf_len <= rem_len);
2470 			break;
2471 		} else {
2472 			ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
2473 			buf_len -= EHCI_MAX_QTD_BUF_SIZE;
2474 			qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
2475 		}
2476 
2477 		i++;
2478 	}
2479 
2480 	/*
2481 	 * Setup the alternate next qTD pointer if appropriate.  The alternate
2482 	 * qtd is currently pointing to a QTD that is not yet linked, but will
2483 	 * be in the very near future.	If a short_xfer occurs in this
2484 	 * situation , the HC will automatically skip this QH.	Eventually
2485 	 * everything will be placed and the alternate_qtd will be valid QTD.
2486 	 * For more information on alternate qtds look at section 3.5.2 in the
2487 	 * EHCI spec.
2488 	 */
2489 	if (tw->tw_alt_qtd != NULL) {
2490 		Set_QTD(qtd->qtd_alt_next_qtd,
2491 		    (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
2492 		    EHCI_QTD_ALT_NEXT_QTD_PTR));
2493 	}
2494 
2495 	/*
2496 	 * For control, bulk and interrupt QTD, now
2497 	 * enable current QTD by setting active bit.
2498 	 */
2499 	Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));
2500 
2501 	/*
2502 	 * For Control Xfer, qtd_ctrl_phase is a valid filed.
2503 	 */
2504 	if (qtd_ctrl_phase) {
2505 		Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
2506 	}
2507 
2508 	/* Set the transfer wrapper */
2509 	ASSERT(tw != NULL);
2510 	ASSERT(tw->tw_id != NULL);
2511 
2512 	Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
2513 }
2514 
2515 
2516 /*
2517  * ehci_insert_qtd_on_tw:
2518  *
2519  * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
2520  * are allocated for this transfer. Insert a QTD  onto this list. The  list
2521  * of QTD's does not include the dummy QTD that is at the end of the list of
2522  * QTD's for the endpoint.
2523  */
2524 static void
2525 ehci_insert_qtd_on_tw(
2526 	ehci_state_t		*ehcip,
2527 	ehci_trans_wrapper_t	*tw,
2528 	ehci_qtd_t		*qtd)
2529 {
2530 	/*
2531 	 * Set the next pointer to NULL because
2532 	 * this is the last QTD on list.
2533 	 */
2534 	Set_QTD(qtd->qtd_tw_next_qtd, NULL);
2535 
2536 	if (tw->tw_qtd_head == NULL) {
2537 		ASSERT(tw->tw_qtd_tail == NULL);
2538 		tw->tw_qtd_head = qtd;
2539 		tw->tw_qtd_tail = qtd;
2540 	} else {
2541 		ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2542 
2543 		ASSERT(dummy != NULL);
2544 		ASSERT(dummy != qtd);
2545 		ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2546 
2547 		/* Add the qtd to the end of the list */
2548 		Set_QTD(dummy->qtd_tw_next_qtd,
2549 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2550 
2551 		tw->tw_qtd_tail = qtd;
2552 
2553 		ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
2554 	}
2555 }
2556 
2557 
2558 /*
2559  * ehci_insert_qtd_into_active_qtd_list:
2560  *
2561  * Insert current QTD into active QTD list.
2562  */
2563 static void
2564 ehci_insert_qtd_into_active_qtd_list(
2565 	ehci_state_t		*ehcip,
2566 	ehci_qtd_t		*qtd)
2567 {
2568 	ehci_qtd_t		*curr_qtd, *next_qtd;
2569 
2570 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2571 
2572 	curr_qtd = ehcip->ehci_active_qtd_list;
2573 
2574 	/* Insert this QTD into QTD Active List */
2575 	if (curr_qtd) {
2576 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2577 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2578 
2579 		while (next_qtd) {
2580 			curr_qtd = next_qtd;
2581 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2582 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2583 		}
2584 
2585 		Set_QTD(qtd->qtd_active_qtd_prev,
2586 		    ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2587 
2588 		Set_QTD(curr_qtd->qtd_active_qtd_next,
2589 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2590 	} else {
2591 		ehcip->ehci_active_qtd_list = qtd;
2592 		Set_QTD(qtd->qtd_active_qtd_next, NULL);
2593 		Set_QTD(qtd->qtd_active_qtd_prev, NULL);
2594 	}
2595 }
2596 
2597 
2598 /*
2599  * ehci_remove_qtd_from_active_qtd_list:
2600  *
2601  * Remove current QTD from the active QTD list.
2602  *
2603  * NOTE: This function is also called from POLLED MODE.
2604  */
2605 void
2606 ehci_remove_qtd_from_active_qtd_list(
2607 	ehci_state_t		*ehcip,
2608 	ehci_qtd_t		*qtd)
2609 {
2610 	ehci_qtd_t		*curr_qtd, *prev_qtd, *next_qtd;
2611 
2612 	ASSERT(qtd != NULL);
2613 
2614 	curr_qtd = ehcip->ehci_active_qtd_list;
2615 
2616 	while ((curr_qtd) && (curr_qtd != qtd)) {
2617 		curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2618 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2619 	}
2620 
2621 	if ((curr_qtd) && (curr_qtd == qtd)) {
2622 		prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2623 		    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2624 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2625 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2626 
2627 		if (prev_qtd) {
2628 			Set_QTD(prev_qtd->qtd_active_qtd_next,
2629 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2630 		} else {
2631 			ehcip->ehci_active_qtd_list = next_qtd;
2632 		}
2633 
2634 		if (next_qtd) {
2635 			Set_QTD(next_qtd->qtd_active_qtd_prev,
2636 			    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2637 		}
2638 	} else {
2639 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2640 		    "ehci_remove_qtd_from_active_qtd_list: "
2641 		    "Unable to find QTD in active_qtd_list");
2642 	}
2643 }
2644 
2645 
2646 /*
2647  * ehci_traverse_qtds:
2648  *
2649  * Traverse the list of QTDs for given pipe using transfer wrapper.  Since
2650  * the endpoint is marked as Halted, the Host Controller (HC) is no longer
2651  * accessing these QTDs. Remove all the QTDs that are attached to endpoint.
2652  */
2653 static void
2654 ehci_traverse_qtds(
2655 	ehci_state_t		*ehcip,
2656 	usba_pipe_handle_data_t	*ph)
2657 {
2658 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2659 	ehci_trans_wrapper_t	*next_tw;
2660 	ehci_qtd_t		*qtd;
2661 	ehci_qtd_t		*next_qtd;
2662 
2663 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2664 
2665 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2666 	    "ehci_traverse_qtds:");
2667 
2668 	/* Process the transfer wrappers for this pipe */
2669 	next_tw = pp->pp_tw_head;
2670 
2671 	while (next_tw) {
2672 		/* Stop the the transfer timer */
2673 		ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);
2674 
2675 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
2676 
2677 		/* Walk through each QTD for this transfer wrapper */
2678 		while (qtd) {
2679 			/* Remove this QTD from active QTD list */
2680 			ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);
2681 
2682 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2683 			    Get_QTD(qtd->qtd_tw_next_qtd));
2684 
2685 			/* Deallocate this QTD */
2686 			ehci_deallocate_qtd(ehcip, qtd);
2687 
2688 			qtd = next_qtd;
2689 		}
2690 
2691 		next_tw = next_tw->tw_next;
2692 	}
2693 
2694 	/* Clear current qtd pointer */
2695 	Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);
2696 
2697 	/* Update the next qtd pointer in the QH */
2698 	Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
2699 }
2700 
2701 
2702 /*
2703  * ehci_deallocate_qtd:
2704  *
2705  * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2706  *
2707  * NOTE: This function is also called from POLLED MODE.
2708  */
2709 void
2710 ehci_deallocate_qtd(
2711 	ehci_state_t		*ehcip,
2712 	ehci_qtd_t		*old_qtd)
2713 {
2714 	ehci_trans_wrapper_t	*tw = NULL;
2715 
2716 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2717 	    "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);
2718 
2719 	/*
2720 	 * Obtain the transaction wrapper and tw will be
2721 	 * NULL for the dummy QTDs.
2722 	 */
2723 	if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
2724 		tw = (ehci_trans_wrapper_t *)
2725 		    EHCI_LOOKUP_ID((uint32_t)
2726 		    Get_QTD(old_qtd->qtd_trans_wrapper));
2727 
2728 		ASSERT(tw != NULL);
2729 	}
2730 
2731 	/*
2732 	 * If QTD's transfer wrapper is NULL, don't access its TW.
2733 	 * Just free the QTD.
2734 	 */
2735 	if (tw) {
2736 		ehci_qtd_t	*qtd, *next_qtd;
2737 
2738 		qtd = tw->tw_qtd_head;
2739 
2740 		if (old_qtd != qtd) {
2741 			next_qtd = ehci_qtd_iommu_to_cpu(
2742 			    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2743 
2744 			while (next_qtd != old_qtd) {
2745 				qtd = next_qtd;
2746 				next_qtd = ehci_qtd_iommu_to_cpu(
2747 				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2748 			}
2749 
2750 			Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);
2751 
2752 			if (qtd->qtd_tw_next_qtd == NULL) {
2753 				tw->tw_qtd_tail = qtd;
2754 			}
2755 		} else {
2756 			tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
2757 			    ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));
2758 
2759 			if (tw->tw_qtd_head == NULL) {
2760 				tw->tw_qtd_tail = NULL;
2761 			}
2762 		}
2763 	}
2764 
2765 	bzero((void *)old_qtd, sizeof (ehci_qtd_t));
2766 	Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);
2767 
2768 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2769 	    "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
2770 }
2771 
2772 
2773 /*
2774  * ehci_qtd_cpu_to_iommu:
2775  *
2776  * This function converts for the given Transfer Descriptor (QTD) CPU address
2777  * to IO address.
2778  *
2779  * NOTE: This function is also called from POLLED MODE.
2780  */
2781 uint32_t
2782 ehci_qtd_cpu_to_iommu(
2783 	ehci_state_t	*ehcip,
2784 	ehci_qtd_t	*addr)
2785 {
2786 	uint32_t	td;
2787 
2788 	td  = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2789 	    (uint32_t)((uintptr_t)addr -
2790 	    (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2791 
2792 	ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2793 	    (uint32_t) (sizeof (ehci_qtd_t) *
2794 	    (addr - ehcip->ehci_qtd_pool_addr))) ==
2795 	    (ehcip->ehci_qtd_pool_cookie.dmac_address +
2796 	    (uint32_t)((uintptr_t)addr - (uintptr_t)
2797 	    (ehcip->ehci_qtd_pool_addr))));
2798 
2799 	ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2800 	ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2801 	    sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2802 
2803 	return (td);
2804 }
2805 
2806 
2807 /*
2808  * ehci_qtd_iommu_to_cpu:
2809  *
2810  * This function converts for the given Transfer Descriptor (QTD) IO address
2811  * to CPU address.
2812  *
2813  * NOTE: This function is also called from POLLED MODE.
2814  */
2815 ehci_qtd_t *
2816 ehci_qtd_iommu_to_cpu(
2817 	ehci_state_t	*ehcip,
2818 	uintptr_t	addr)
2819 {
2820 	ehci_qtd_t	*qtd;
2821 
2822 	if (addr == NULL) {
2823 
2824 		return (NULL);
2825 	}
2826 
2827 	qtd = (ehci_qtd_t *)((uintptr_t)
2828 	    (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2829 	    (uintptr_t)ehcip->ehci_qtd_pool_addr);
2830 
2831 	ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2832 	ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2833 	    (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2834 
2835 	return (qtd);
2836 }
2837 
2838 /*
2839  * ehci_allocate_tds_for_tw_resources:
2840  *
2841  * Allocate n Transfer Descriptors (TD) from the TD buffer pool and places it
2842  * into the TW.  Also chooses the correct alternate qtd when required.	It is
2843  * used for hardware short transfer support.  For more information on
2844  * alternate qtds look at section 3.5.2 in the EHCI spec.
2845  * Here is how each alternate qtd's are used:
2846  *
2847  * Bulk: used fully.
2848  * Intr: xfers only require 1 QTD, so alternate qtds are never used.
2849  * Ctrl: Should not use alternate QTD
2850  * Isoch: Doesn't support short_xfer nor does it use QTD
2851  *
2852  * Returns USB_NO_RESOURCES if it was not able to allocate all the requested TD
2853  * otherwise USB_SUCCESS.
2854  */
2855 int
2856 ehci_allocate_tds_for_tw(
2857 	ehci_state_t		*ehcip,
2858 	ehci_pipe_private_t	*pp,
2859 	ehci_trans_wrapper_t	*tw,
2860 	size_t			qtd_count)
2861 {
2862 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
2863 	uchar_t			attributes;
2864 	ehci_qtd_t		*qtd;
2865 	uint32_t		qtd_addr;
2866 	int			i;
2867 	int			error = USB_SUCCESS;
2868 
2869 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2870 
2871 	for (i = 0; i < qtd_count; i += 1) {
2872 		qtd = ehci_allocate_qtd_from_pool(ehcip);
2873 		if (qtd == NULL) {
2874 			error = USB_NO_RESOURCES;
2875 			USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2876 			    "ehci_allocate_qtds_for_tw: "
2877 			    "Unable to allocate %lu QTDs",
2878 			    qtd_count);
2879 			break;
2880 		}
2881 		if (i > 0) {
2882 			qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2883 			    tw->tw_qtd_free_list);
2884 			Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2885 		}
2886 		tw->tw_qtd_free_list = qtd;
2887 
2888 		/*
2889 		 * Save the second one as a pointer to the new dummy 1.
2890 		 * It is used later for the alt_qtd_ptr.  Xfers with only
2891 		 * one qtd do not need alt_qtd_ptr.
2892 		 * The tds's are allocated and put into a stack, that is
2893 		 * why the second qtd allocated will turn out to be the
2894 		 * new dummy 1.
2895 		 */
2896 		if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2897 			tw->tw_alt_qtd = qtd;
2898 		}
2899 	}
2900 
2901 	return (error);
2902 }
2903 
2904 /*
2905  * ehci_allocate_tw_resources:
2906  *
2907  * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD)
2908  * from the QTD buffer pool and places it into the TW.	It does an all
2909  * or nothing transaction.
2910  *
2911  * Returns NULL if there is insufficient resources otherwise TW.
2912  */
2913 static ehci_trans_wrapper_t *
2914 ehci_allocate_tw_resources(
2915 	ehci_state_t		*ehcip,
2916 	ehci_pipe_private_t	*pp,
2917 	size_t			tw_length,
2918 	usb_flags_t		usb_flags,
2919 	size_t			qtd_count)
2920 {
2921 	ehci_trans_wrapper_t	*tw;
2922 
2923 	tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2924 
2925 	if (tw == NULL) {
2926 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2927 		    "ehci_allocate_tw_resources: Unable to allocate TW");
2928 	} else {
2929 		if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2930 		    USB_SUCCESS) {
2931 			tw->tw_num_qtds = (uint_t)qtd_count;
2932 		} else {
2933 			ehci_deallocate_tw(ehcip, pp, tw);
2934 			tw = NULL;
2935 		}
2936 	}
2937 
2938 	return (tw);
2939 }
2940 
2941 
2942 /*
2943  * ehci_free_tw_td_resources:
2944  *
2945  * Free all allocated resources for Transaction Wrapper (TW).
2946  * Does not free the TW itself.
2947  *
2948  * Returns NULL if there is insufficient resources otherwise TW.
2949  */
2950 static void
2951 ehci_free_tw_td_resources(
2952 	ehci_state_t		*ehcip,
2953 	ehci_trans_wrapper_t	*tw)
2954 {
2955 	ehci_qtd_t		*qtd = NULL;
2956 	ehci_qtd_t		*temp_qtd = NULL;
2957 
2958 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2959 	    "ehci_free_tw_td_resources: tw = 0x%p", (void *)tw);
2960 
2961 	qtd = tw->tw_qtd_free_list;
2962 	while (qtd != NULL) {
2963 		/* Save the pointer to the next qtd before destroying it */
2964 		temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2965 		    Get_QTD(qtd->qtd_tw_next_qtd));
2966 		ehci_deallocate_qtd(ehcip, qtd);
2967 		qtd = temp_qtd;
2968 	}
2969 	tw->tw_qtd_free_list = NULL;
2970 }
2971 
2972 /*
2973  * Transfer Wrapper functions
2974  *
2975  * ehci_create_transfer_wrapper:
2976  *
2977  * Create a Transaction Wrapper (TW) and this involves the allocating of DMA
2978  * resources.
2979  */
2980 static ehci_trans_wrapper_t *
2981 ehci_create_transfer_wrapper(
2982 	ehci_state_t		*ehcip,
2983 	ehci_pipe_private_t	*pp,
2984 	size_t			length,
2985 	uint_t			usb_flags)
2986 {
2987 	ddi_device_acc_attr_t	dev_attr;
2988 	ddi_dma_attr_t		dma_attr;
2989 	int			result;
2990 	size_t			real_length;
2991 	ehci_trans_wrapper_t	*tw;
2992 	int			kmem_flag;
2993 	int			(*dmamem_wait)(caddr_t);
2994 
2995 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2996 	    "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
2997 	    length, usb_flags);
2998 
2999 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3000 
3001 	/* SLEEP flag should not be used while holding mutex */
3002 	kmem_flag = KM_NOSLEEP;
3003 	dmamem_wait = DDI_DMA_DONTWAIT;
3004 
3005 	/* Allocate space for the transfer wrapper */
3006 	tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);
3007 
3008 	if (tw == NULL) {
3009 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3010 		    "ehci_create_transfer_wrapper: kmem_zalloc failed");
3011 
3012 		return (NULL);
3013 	}
3014 
3015 	/* zero-length packet doesn't need to allocate dma memory */
3016 	if (length == 0) {
3017 
3018 		goto dmadone;
3019 	}
3020 
3021 	/* allow sg lists for transfer wrapper dma memory */
3022 	bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3023 	dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
3024 	dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
3025 
3026 	/* Allocate the DMA handle */
3027 	result = ddi_dma_alloc_handle(ehcip->ehci_dip,
3028 	    &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);
3029 
3030 	if (result != DDI_SUCCESS) {
3031 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3032 		    "ehci_create_transfer_wrapper: Alloc handle failed");
3033 
3034 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3035 
3036 		return (NULL);
3037 	}
3038 
3039 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
3040 
3041 	/* no need for swapping the raw data */
3042 	dev_attr.devacc_attr_endian_flags  = DDI_NEVERSWAP_ACC;
3043 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
3044 
3045 	/* Allocate the memory */
3046 	result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
3047 	    &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3048 	    (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);
3049 
3050 	if (result != DDI_SUCCESS) {
3051 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3052 		    "ehci_create_transfer_wrapper: dma_mem_alloc fail");
3053 
3054 		ddi_dma_free_handle(&tw->tw_dmahandle);
3055 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3056 
3057 		return (NULL);
3058 	}
3059 
3060 	ASSERT(real_length >= length);
3061 
3062 	/* Bind the handle */
3063 	result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
3064 	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
3065 	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);
3066 
3067 	if (result != DDI_DMA_MAPPED) {
3068 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
3069 
3070 		ddi_dma_mem_free(&tw->tw_accesshandle);
3071 		ddi_dma_free_handle(&tw->tw_dmahandle);
3072 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3073 
3074 		return (NULL);
3075 	}
3076 
3077 	tw->tw_cookie_idx = 0;
3078 	tw->tw_dma_offs = 0;
3079 
3080 dmadone:
3081 	/*
3082 	 * Only allow one wrapper to be added at a time. Insert the
3083 	 * new transaction wrapper into the list for this pipe.
3084 	 */
3085 	if (pp->pp_tw_head == NULL) {
3086 		pp->pp_tw_head = tw;
3087 		pp->pp_tw_tail = tw;
3088 	} else {
3089 		pp->pp_tw_tail->tw_next = tw;
3090 		pp->pp_tw_tail = tw;
3091 	}
3092 
3093 	/* Store the transfer length */
3094 	tw->tw_length = length;
3095 
3096 	/* Store a back pointer to the pipe private structure */
3097 	tw->tw_pipe_private = pp;
3098 
3099 	/* Store the transfer type - synchronous or asynchronous */
3100 	tw->tw_flags = usb_flags;
3101 
3102 	/* Get and Store 32bit ID */
3103 	tw->tw_id = EHCI_GET_ID((void *)tw);
3104 
3105 	ASSERT(tw->tw_id != NULL);
3106 
3107 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3108 	    "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
3109 	    (void *)tw, tw->tw_ncookies);
3110 
3111 	return (tw);
3112 }
3113 
3114 
3115 /*
3116  * ehci_start_xfer_timer:
3117  *
3118  * Start the timer for the control, bulk and for one time interrupt
3119  * transfers.
3120  */
3121 /* ARGSUSED */
3122 static void
3123 ehci_start_xfer_timer(
3124 	ehci_state_t		*ehcip,
3125 	ehci_pipe_private_t	*pp,
3126 	ehci_trans_wrapper_t	*tw)
3127 {
3128 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3129 	    "ehci_start_xfer_timer: tw = 0x%p", (void *)tw);
3130 
3131 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3132 
3133 	/*
3134 	 * The timeout handling is done only for control, bulk and for
3135 	 * one time Interrupt transfers.
3136 	 *
3137 	 * NOTE: If timeout is zero; Assume infinite timeout and don't
3138 	 * insert this transfer on the timeout list.
3139 	 */
3140 	if (tw->tw_timeout) {
3141 		/*
3142 		 * Add this transfer wrapper to the head of the pipe's
3143 		 * tw timeout list.
3144 		 */
3145 		if (pp->pp_timeout_list) {
3146 			tw->tw_timeout_next = pp->pp_timeout_list;
3147 		}
3148 
3149 		pp->pp_timeout_list = tw;
3150 		ehci_start_timer(ehcip, pp);
3151 	}
3152 }
3153 
3154 
3155 /*
3156  * ehci_stop_xfer_timer:
3157  *
3158  * Start the timer for the control, bulk and for one time interrupt
3159  * transfers.
3160  */
void
ehci_stop_xfer_timer(
	ehci_state_t		*ehcip,
	ehci_trans_wrapper_t	*tw,
	uint_t			flag)
{
	ehci_pipe_private_t	*pp;
	timeout_id_t		timer_id;

	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
	    "ehci_stop_xfer_timer: tw = 0x%p", (void *)tw);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Obtain the pipe private structure */
	pp = tw->tw_pipe_private;

	/* check if the timeout tw list is empty */
	if (pp->pp_timeout_list == NULL) {

		return;
	}

	switch (flag) {
	case EHCI_REMOVE_XFER_IFLAST:
		/*
		 * Remove the tw only when this is its last outstanding
		 * QTD; if head and tail differ, more QTDs are still
		 * pending and the timeout must keep running.
		 */
		if (tw->tw_qtd_head != tw->tw_qtd_tail) {
			break;
		}

		/* FALLTHRU */
	case EHCI_REMOVE_XFER_ALWAYS:
		ehci_remove_tw_from_timeout_list(ehcip, tw);

		/*
		 * If that was the last transfer on the timeout list,
		 * cancel the pipe's pending timeout as well.
		 */
		if ((pp->pp_timeout_list == NULL) &&
		    (pp->pp_timer_id)) {

			timer_id = pp->pp_timer_id;

			/* Reset the timer id to zero */
			pp->pp_timer_id = 0;

			/*
			 * untimeout() may wait for a concurrently running
			 * timeout handler, and that handler acquires
			 * ehci_int_mutex; drop the mutex around the call
			 * to avoid deadlock.
			 */
			mutex_exit(&ehcip->ehci_int_mutex);

			(void) untimeout(timer_id);

			mutex_enter(&ehcip->ehci_int_mutex);
		}
		break;
	default:
		break;
	}
}
3213 
3214 
3215 /*
3216  * ehci_xfer_timeout_handler:
3217  *
3218  * Control or bulk transfer timeout handler.
3219  */
static void
ehci_xfer_timeout_handler(void *arg)
{
	usba_pipe_handle_data_t	*ph = (usba_pipe_handle_data_t *)arg;
	ehci_state_t		*ehcip = ehci_obtain_state(
	    ph->p_usba_device->usb_root_hub_dip);
	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
	ehci_trans_wrapper_t	*tw, *next;
	ehci_trans_wrapper_t	*expire_xfer_list = NULL;
	ehci_qtd_t		*qtd;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
	    "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p",
	    (void *)ehcip, (void *)ph);

	mutex_enter(&ehcip->ehci_int_mutex);

	/*
	 * Check whether still timeout handler is valid.
	 * ehci_stop_xfer_timer() zeroes pp_timer_id before calling
	 * untimeout(); a zero id here means this invocation raced
	 * with a cancel and must do nothing.
	 */
	if (pp->pp_timer_id != 0) {

		/* Reset the timer id to zero */
		pp->pp_timer_id = 0;
	} else {
		mutex_exit(&ehcip->ehci_int_mutex);

		return;
	}

	/* Get the transfer timeout list head */
	tw = pp->pp_timeout_list;

	/*
	 * Tick every queued transfer down by one second; those that
	 * reach zero are unlinked and collected on expire_xfer_list.
	 */
	while (tw) {

		/* Get the transfer on the timeout list */
		next = tw->tw_timeout_next;

		tw->tw_timeout--;

		if (tw->tw_timeout <= 0) {

			/* remove the tw from the timeout list */
			ehci_remove_tw_from_timeout_list(ehcip, tw);

			/* remove QTDs from active QTD list */
			qtd = tw->tw_qtd_head;
			while (qtd) {
				ehci_remove_qtd_from_active_qtd_list(
				    ehcip, qtd);

				/* Get the next QTD from the wrapper */
				qtd = ehci_qtd_iommu_to_cpu(ehcip,
				    Get_QTD(qtd->qtd_tw_next_qtd));
			}

			/*
			 * Preserve the order to the requests
			 * started time sequence.
			 * (The timeout list is newest-first, so
			 * prepending here yields oldest-first.)
			 */
			tw->tw_timeout_next = expire_xfer_list;
			expire_xfer_list = tw;
		}

		tw = next;
	}

	/*
	 * The timer should be started before the callbacks.
	 * There is always a chance that ehci interrupts come
	 * in when we release the mutex while calling the tw back.
	 * To keep an accurate timeout it should be restarted
	 * as soon as possible.
	 */
	ehci_start_timer(ehcip, pp);

	/* Get the expired transfer timeout list head */
	tw = expire_xfer_list;

	/* Fail every expired transfer back to the client */
	while (tw) {

		/* Get the next tw on the expired transfer timeout list */
		next = tw->tw_timeout_next;

		/*
		 * The error handle routine will release the mutex when
		 * calling back to USBA. But this will not cause any race.
		 * We do the callback and are relying on ehci_pipe_cleanup()
		 * to halt the queue head and clean up since we should not
		 * block in timeout context.
		 */
		ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);

		tw = next;
	}
	mutex_exit(&ehcip->ehci_int_mutex);
}
3317 
3318 
3319 /*
3320  * ehci_remove_tw_from_timeout_list:
3321  *
3322  * Remove Control or bulk transfer from the timeout list.
3323  */
3324 static void
3325 ehci_remove_tw_from_timeout_list(
3326 	ehci_state_t		*ehcip,
3327 	ehci_trans_wrapper_t	*tw)
3328 {
3329 	ehci_pipe_private_t	*pp;
3330 	ehci_trans_wrapper_t	*prev, *next;
3331 
3332 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3333 	    "ehci_remove_tw_from_timeout_list: tw = 0x%p", (void *)tw);
3334 
3335 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3336 
3337 	/* Obtain the pipe private structure */
3338 	pp = tw->tw_pipe_private;
3339 
3340 	if (pp->pp_timeout_list) {
3341 		if (pp->pp_timeout_list == tw) {
3342 			pp->pp_timeout_list = tw->tw_timeout_next;
3343 
3344 			tw->tw_timeout_next = NULL;
3345 		} else {
3346 			prev = pp->pp_timeout_list;
3347 			next = prev->tw_timeout_next;
3348 
3349 			while (next && (next != tw)) {
3350 				prev = next;
3351 				next = next->tw_timeout_next;
3352 			}
3353 
3354 			if (next == tw) {
3355 				prev->tw_timeout_next =
3356 				    next->tw_timeout_next;
3357 				tw->tw_timeout_next = NULL;
3358 			}
3359 		}
3360 	}
3361 }
3362 
3363 
3364 /*
3365  * ehci_start_timer:
3366  *
3367  * Start the pipe's timer
3368  */
3369 static void
3370 ehci_start_timer(
3371 	ehci_state_t		*ehcip,
3372 	ehci_pipe_private_t	*pp)
3373 {
3374 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3375 	    "ehci_start_timer: ehcip = 0x%p, pp = 0x%p",
3376 	    (void *)ehcip, (void *)pp);
3377 
3378 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3379 
3380 	/*
3381 	 * Start the pipe's timer only if currently timer is not
3382 	 * running and if there are any transfers on the timeout
3383 	 * list. This timer will be per pipe.
3384 	 */
3385 	if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3386 		pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3387 		    (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3388 	}
3389 }
3390 
3391 /*
3392  * ehci_deallocate_tw:
3393  *
3394  * Deallocate of a Transaction Wrapper (TW) and this involves the freeing of
3395  * of DMA resources.
3396  */
3397 void
3398 ehci_deallocate_tw(
3399 	ehci_state_t		*ehcip,
3400 	ehci_pipe_private_t	*pp,
3401 	ehci_trans_wrapper_t	*tw)
3402 {
3403 	ehci_trans_wrapper_t	*prev, *next;
3404 
3405 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3406 	    "ehci_deallocate_tw: tw = 0x%p", (void *)tw);
3407 
3408 	/*
3409 	 * If the transfer wrapper has no Host Controller (HC)
3410 	 * Transfer Descriptors (QTD) associated with it,  then
3411 	 * remove the transfer wrapper.
3412 	 */
3413 	if (tw->tw_qtd_head) {
3414 		ASSERT(tw->tw_qtd_tail != NULL);
3415 
3416 		return;
3417 	}
3418 
3419 	ASSERT(tw->tw_qtd_tail == NULL);
3420 
3421 	/* Make sure we return all the unused qtd's to the pool as well */
3422 	ehci_free_tw_td_resources(ehcip, tw);
3423 
3424 	/*
3425 	 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to
3426 	 * given TW then set the head and  tail  equal to NULL.
3427 	 * Otherwise search for this TW in the linked TW's list
3428 	 * and then remove this TW from the list.
3429 	 */
3430 	if (pp->pp_tw_head == tw) {
3431 		if (pp->pp_tw_tail == tw) {
3432 			pp->pp_tw_head = NULL;
3433 			pp->pp_tw_tail = NULL;
3434 		} else {
3435 			pp->pp_tw_head = tw->tw_next;
3436 		}
3437 	} else {
3438 		prev = pp->pp_tw_head;
3439 		next = prev->tw_next;
3440 
3441 		while (next && (next != tw)) {
3442 			prev = next;
3443 			next = next->tw_next;
3444 		}
3445 
3446 		if (next == tw) {
3447 			prev->tw_next = next->tw_next;
3448 
3449 			if (pp->pp_tw_tail == tw) {
3450 				pp->pp_tw_tail = prev;
3451 			}
3452 		}
3453 	}
3454 
3455 	/*
3456 	 * Make sure that, this TW has been removed
3457 	 * from the timeout list.
3458 	 */
3459 	ehci_remove_tw_from_timeout_list(ehcip, tw);
3460 
3461 	/* Deallocate this TW */
3462 	ehci_free_tw(ehcip, pp, tw);
3463 }
3464 
3465 
3466 /*
3467  * ehci_free_dma_resources:
3468  *
3469  * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3470  *
3471  * NOTE: This function is also called from POLLED MODE.
3472  */
3473 void
3474 ehci_free_dma_resources(
3475 	ehci_state_t		*ehcip,
3476 	usba_pipe_handle_data_t	*ph)
3477 {
3478 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3479 	ehci_trans_wrapper_t	*head_tw = pp->pp_tw_head;
3480 	ehci_trans_wrapper_t	*next_tw, *tw;
3481 
3482 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3483 	    "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3484 
3485 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3486 
3487 	/* Process the Transfer Wrappers */
3488 	next_tw = head_tw;
3489 	while (next_tw) {
3490 		tw = next_tw;
3491 		next_tw = tw->tw_next;
3492 
3493 		USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3494 		    "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3495 
3496 		ehci_free_tw(ehcip, pp, tw);
3497 	}
3498 
3499 	/* Adjust the head and tail pointers */
3500 	pp->pp_tw_head = NULL;
3501 	pp->pp_tw_tail = NULL;
3502 }
3503 
3504 
3505 /*
3506  * ehci_free_tw:
3507  *
3508  * Free the Transfer Wrapper (TW).
3509  */
3510 /*ARGSUSED*/
3511 static void
3512 ehci_free_tw(
3513 	ehci_state_t		*ehcip,
3514 	ehci_pipe_private_t	*pp,
3515 	ehci_trans_wrapper_t	*tw)
3516 {
3517 	int	rval;
3518 
3519 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3520 	    "ehci_free_tw: tw = 0x%p", (void *)tw);
3521 
3522 	ASSERT(tw != NULL);
3523 	ASSERT(tw->tw_id != NULL);
3524 
3525 	/* Free 32bit ID */
3526 	EHCI_FREE_ID((uint32_t)tw->tw_id);
3527 
3528 	if (tw->tw_dmahandle != NULL) {
3529 		rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
3530 		ASSERT(rval == DDI_SUCCESS);
3531 
3532 		ddi_dma_mem_free(&tw->tw_accesshandle);
3533 		ddi_dma_free_handle(&tw->tw_dmahandle);
3534 	}
3535 
3536 	/* Free transfer wrapper */
3537 	kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3538 }
3539 
3540 
3541 /*
3542  * Miscellaneous functions
3543  */
3544 
3545 /*
3546  * ehci_allocate_intr_in_resource
3547  *
3548  * Allocate interrupt request structure for the interrupt IN transfer.
3549  */
3550 /*ARGSUSED*/
3551 int
3552 ehci_allocate_intr_in_resource(
3553 	ehci_state_t		*ehcip,
3554 	ehci_pipe_private_t	*pp,
3555 	ehci_trans_wrapper_t	*tw,
3556 	usb_flags_t		flags)
3557 {
3558 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3559 	usb_intr_req_t		*curr_intr_reqp;
3560 	usb_opaque_t		client_periodic_in_reqp;
3561 	size_t			length = 0;
3562 
3563 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3564 	    "ehci_allocate_intr_in_resource:"
3565 	    "pp = 0x%p tw = 0x%p flags = 0x%x", (void *)pp, (void *)tw, flags);
3566 
3567 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3568 	ASSERT(tw->tw_curr_xfer_reqp == NULL);
3569 
3570 	/* Get the client periodic in request pointer */
3571 	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
3572 
3573 	/*
3574 	 * If it a periodic IN request and periodic request is NULL,
3575 	 * allocate corresponding usb periodic IN request for the
3576 	 * current periodic polling request and copy the information
3577 	 * from the saved periodic request structure.
3578 	 */
3579 	if (client_periodic_in_reqp) {
3580 
3581 		/* Get the interrupt transfer length */
3582 		length = ((usb_intr_req_t *)
3583 		    client_periodic_in_reqp)->intr_len;
3584 
3585 		curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
3586 		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
3587 	} else {
3588 		curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
3589 	}
3590 
3591 	if (curr_intr_reqp == NULL) {
3592 
3593 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3594 		    "ehci_allocate_intr_in_resource: Interrupt"
3595 		    "request structure allocation failed");
3596 
3597 		return (USB_NO_RESOURCES);
3598 	}
3599 
3600 	/* For polled mode */
3601 	if (client_periodic_in_reqp == NULL) {
3602 		curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
3603 		curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
3604 	} else {
3605 		/* Check and save the timeout value */
3606 		tw->tw_timeout = (curr_intr_reqp->intr_attributes &
3607 		    USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
3608 	}
3609 
3610 	tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
3611 	tw->tw_length = curr_intr_reqp->intr_len;
3612 
3613 	mutex_enter(&ph->p_mutex);
3614 	ph->p_req_count++;
3615 	mutex_exit(&ph->p_mutex);
3616 
3617 	pp->pp_state = EHCI_PIPE_STATE_ACTIVE;
3618 
3619 	return (USB_SUCCESS);
3620 }
3621 
3622 /*
3623  * ehci_pipe_cleanup
3624  *
3625  * Cleanup ehci pipe.
3626  */
3627 void
3628 ehci_pipe_cleanup(
3629 	ehci_state_t		*ehcip,
3630 	usba_pipe_handle_data_t	*ph)
3631 {
3632 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3633 	uint_t			pipe_state = pp->pp_state;
3634 	usb_cr_t		completion_reason;
3635 	usb_ep_descr_t		*eptd = &ph->p_ep;
3636 
3637 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3638 	    "ehci_pipe_cleanup: ph = 0x%p", (void *)ph);
3639 
3640 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3641 
3642 	if (EHCI_ISOC_ENDPOINT(eptd)) {
3643 		ehci_isoc_pipe_cleanup(ehcip, ph);
3644 
3645 		return;
3646 	}
3647 
3648 	ASSERT(!servicing_interrupt());
3649 
3650 	/*
3651 	 * Set the QH's status to Halt condition.
3652 	 * If another thread is halting this function will automatically
3653 	 * wait. If a pipe close happens at this time
3654 	 * we will be in lots of trouble.
3655 	 * If we are in an interrupt thread, don't halt, because it may
3656 	 * do a wait_for_sof.
3657 	 */
3658 	ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3659 
3660 	/*
3661 	 * Wait for processing all completed transfers and
3662 	 * to send results to upstream.
3663 	 */
3664 	ehci_wait_for_transfers_completion(ehcip, pp);
3665 
3666 	/* Save the data toggle information */
3667 	ehci_save_data_toggle(ehcip, ph);
3668 
3669 	/*
3670 	 * Traverse the list of QTDs for this pipe using transfer
3671 	 * wrapper. Process these QTDs depending on their status.
3672 	 * And stop the timer of this pipe.
3673 	 */
3674 	ehci_traverse_qtds(ehcip, ph);
3675 
3676 	/* Make sure the timer is not running */
3677 	ASSERT(pp->pp_timer_id == 0);
3678 
3679 	/* Do callbacks for all unfinished requests */
3680 	ehci_handle_outstanding_requests(ehcip, pp);
3681 
3682 	/* Free DMA resources */
3683 	ehci_free_dma_resources(ehcip, ph);
3684 
3685 	switch (pipe_state) {
3686 	case EHCI_PIPE_STATE_CLOSE:
3687 		completion_reason = USB_CR_PIPE_CLOSING;
3688 		break;
3689 	case EHCI_PIPE_STATE_RESET:
3690 	case EHCI_PIPE_STATE_STOP_POLLING:
3691 		/* Set completion reason */
3692 		completion_reason = (pipe_state ==
3693 		    EHCI_PIPE_STATE_RESET) ?
3694 		    USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3695 
3696 		/* Restore the data toggle information */
3697 		ehci_restore_data_toggle(ehcip, ph);
3698 
3699 		/*
3700 		 * Clear the halt bit to restart all the
3701 		 * transactions on this pipe.
3702 		 */
3703 		ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3704 
3705 		/* Set pipe state to idle */
3706 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
3707 
3708 		break;
3709 	}
3710 
3711 	/*
3712 	 * Do the callback for the original client
3713 	 * periodic IN request.
3714 	 */
3715 	if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3716 	    ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3717 	    USB_EP_DIR_IN)) {
3718 
3719 		ehci_do_client_periodic_in_req_callback(
3720 		    ehcip, pp, completion_reason);
3721 	}
3722 }
3723 
3724 
3725 /*
3726  * ehci_wait_for_transfers_completion:
3727  *
3728  * Wait for processing all completed transfers and to send results
3729  * to upstream.
3730  */
3731 static void
3732 ehci_wait_for_transfers_completion(
3733 	ehci_state_t		*ehcip,
3734 	ehci_pipe_private_t	*pp)
3735 {
3736 	ehci_trans_wrapper_t	*next_tw = pp->pp_tw_head;
3737 	ehci_qtd_t		*qtd;
3738 
3739 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3740 	    ehcip->ehci_log_hdl,
3741 	    "ehci_wait_for_transfers_completion: pp = 0x%p", (void *)pp);
3742 
3743 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3744 
3745 	if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {
3746 
3747 		return;
3748 	}
3749 
3750 	pp->pp_count_done_qtds = 0;
3751 
3752 	/* Process the transfer wrappers for this pipe */
3753 	while (next_tw) {
3754 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
3755 
3756 		/*
3757 		 * Walk through each QTD for this transfer wrapper.
3758 		 * If a QTD still exists, then it is either on done
3759 		 * list or on the QH's list.
3760 		 */
3761 		while (qtd) {
3762 			if (!(Get_QTD(qtd->qtd_ctrl) &
3763 			    EHCI_QTD_CTRL_ACTIVE_XACT)) {
3764 				pp->pp_count_done_qtds++;
3765 			}
3766 
3767 			qtd = ehci_qtd_iommu_to_cpu(ehcip,
3768 			    Get_QTD(qtd->qtd_tw_next_qtd));
3769 		}
3770 
3771 		next_tw = next_tw->tw_next;
3772 	}
3773 
3774 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->