1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  * EHCI Host Controller Driver (EHCI)
29  *
30  * The EHCI driver is a software driver which interfaces to the Universal
31  * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
32  * the Host Controller is defined by the EHCI Host Controller Interface.
33  *
34  * This module contains the main EHCI driver code which handles all USB
35  * transfers, bandwidth allocations and other general functionalities.
36  */
37 
38 #include <sys/usb/hcd/ehci/ehcid.h>
39 #include <sys/usb/hcd/ehci/ehci_intr.h>
40 #include <sys/usb/hcd/ehci/ehci_util.h>
41 #include <sys/usb/hcd/ehci/ehci_isoch.h>
42 
43 /* Adjustable variables for the size of the pools */
44 extern int ehci_qh_pool_size;
45 extern int ehci_qtd_pool_size;
46 
47 
48 /* Endpoint Descriptor (QH) related functions */
49 ehci_qh_t	*ehci_alloc_qh(
50 				ehci_state_t		*ehcip,
51 				usba_pipe_handle_data_t	*ph,
52 				uint_t			flag);
53 static void	ehci_unpack_endpoint(
54 				ehci_state_t		*ehcip,
55 				usba_pipe_handle_data_t	*ph,
56 				ehci_qh_t		*qh);
57 void		ehci_insert_qh(
58 				ehci_state_t		*ehcip,
59 				usba_pipe_handle_data_t	*ph);
60 static void	ehci_insert_async_qh(
61 				ehci_state_t		*ehcip,
62 				ehci_pipe_private_t	*pp);
63 static void	ehci_insert_intr_qh(
64 				ehci_state_t		*ehcip,
65 				ehci_pipe_private_t	*pp);
66 static void	ehci_modify_qh_status_bit(
67 				ehci_state_t		*ehcip,
68 				ehci_pipe_private_t	*pp,
69 				halt_bit_t		action);
70 static void	ehci_halt_hs_qh(
71 				ehci_state_t		*ehcip,
72 				ehci_pipe_private_t	*pp,
73 				ehci_qh_t		*qh);
74 static void	ehci_halt_fls_ctrl_and_bulk_qh(
75 				ehci_state_t		*ehcip,
76 				ehci_pipe_private_t	*pp,
77 				ehci_qh_t		*qh);
78 static void	ehci_clear_tt_buffer(
79 				ehci_state_t		*ehcip,
80 				usba_pipe_handle_data_t	*ph,
81 				ehci_qh_t		*qh);
82 static void	ehci_halt_fls_intr_qh(
83 				ehci_state_t		*ehcip,
84 				ehci_qh_t		*qh);
85 void		ehci_remove_qh(
86 				ehci_state_t		*ehcip,
87 				ehci_pipe_private_t	*pp,
88 				boolean_t		reclaim);
89 static void	ehci_remove_async_qh(
90 				ehci_state_t		*ehcip,
91 				ehci_pipe_private_t	*pp,
92 				boolean_t		reclaim);
93 static void	ehci_remove_intr_qh(
94 				ehci_state_t		*ehcip,
95 				ehci_pipe_private_t	*pp,
96 				boolean_t		reclaim);
97 static void	ehci_insert_qh_on_reclaim_list(
98 				ehci_state_t		*ehcip,
99 				ehci_pipe_private_t	*pp);
100 void		ehci_deallocate_qh(
101 				ehci_state_t		*ehcip,
102 				ehci_qh_t		*old_qh);
103 uint32_t	ehci_qh_cpu_to_iommu(
104 				ehci_state_t		*ehcip,
105 				ehci_qh_t		*addr);
106 ehci_qh_t	*ehci_qh_iommu_to_cpu(
107 				ehci_state_t		*ehcip,
108 				uintptr_t		addr);
109 
110 /* Transfer Descriptor (QTD) related functions */
111 static int	ehci_initialize_dummy(
112 				ehci_state_t		*ehcip,
113 				ehci_qh_t		*qh);
114 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
115 				ehci_state_t		*ehcip,
116 				ehci_pipe_private_t	*pp,
117 				usb_ctrl_req_t		*ctrl_reqp,
118 				usb_flags_t		usb_flags);
119 void		ehci_insert_ctrl_req(
120 				ehci_state_t		*ehcip,
121 				usba_pipe_handle_data_t	*ph,
122 				usb_ctrl_req_t		*ctrl_reqp,
123 				ehci_trans_wrapper_t	*tw,
124 				usb_flags_t		usb_flags);
125 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
126 				ehci_state_t		*ehcip,
127 				ehci_pipe_private_t	*pp,
128 				usb_bulk_req_t		*bulk_reqp,
129 				usb_flags_t		usb_flags);
130 void		ehci_insert_bulk_req(
131 				ehci_state_t		*ehcip,
132 				usba_pipe_handle_data_t	*ph,
133 				usb_bulk_req_t		*bulk_reqp,
134 				ehci_trans_wrapper_t	*tw,
135 				usb_flags_t		flags);
136 int		ehci_start_periodic_pipe_polling(
137 				ehci_state_t		*ehcip,
138 				usba_pipe_handle_data_t	*ph,
139 				usb_opaque_t		periodic_in_reqp,
140 				usb_flags_t		flags);
141 static int	ehci_start_pipe_polling(
142 				ehci_state_t		*ehcip,
143 				usba_pipe_handle_data_t	*ph,
144 				usb_flags_t		flags);
145 static int	ehci_start_intr_polling(
146 				ehci_state_t		*ehcip,
147 				usba_pipe_handle_data_t	*ph,
148 				usb_flags_t		flags);
149 static void	ehci_set_periodic_pipe_polling(
150 				ehci_state_t		*ehcip,
151 				usba_pipe_handle_data_t	*ph);
152 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
153 				ehci_state_t		*ehcip,
154 				usba_pipe_handle_data_t	*ph,
155 				usb_intr_req_t		*intr_reqp,
156 				usb_flags_t		usb_flags);
157 void		ehci_insert_intr_req(
158 				ehci_state_t		*ehcip,
159 				ehci_pipe_private_t	*pp,
160 				ehci_trans_wrapper_t	*tw,
161 				usb_flags_t		flags);
162 int		ehci_stop_periodic_pipe_polling(
163 				ehci_state_t		*ehcip,
164 				usba_pipe_handle_data_t	*ph,
165 				usb_flags_t		flags);
166 int		ehci_insert_qtd(
167 				ehci_state_t		*ehcip,
168 				uint32_t		qtd_ctrl,
169 				size_t			qtd_dma_offs,
170 				size_t			qtd_length,
171 				uint32_t		qtd_ctrl_phase,
172 				ehci_pipe_private_t	*pp,
173 				ehci_trans_wrapper_t	*tw);
174 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
175 				ehci_state_t		*ehcip);
176 static void	ehci_fill_in_qtd(
177 				ehci_state_t		*ehcip,
178 				ehci_qtd_t		*qtd,
179 				uint32_t		qtd_ctrl,
180 				size_t			qtd_dma_offs,
181 				size_t			qtd_length,
182 				uint32_t		qtd_ctrl_phase,
183 				ehci_pipe_private_t	*pp,
184 				ehci_trans_wrapper_t	*tw);
185 static void	ehci_insert_qtd_on_tw(
186 				ehci_state_t		*ehcip,
187 				ehci_trans_wrapper_t	*tw,
188 				ehci_qtd_t		*qtd);
189 static void	ehci_insert_qtd_into_active_qtd_list(
190 				ehci_state_t		*ehcip,
191 				ehci_qtd_t		*curr_qtd);
192 void		ehci_remove_qtd_from_active_qtd_list(
193 				ehci_state_t		*ehcip,
194 				ehci_qtd_t		*curr_qtd);
195 static void	ehci_traverse_qtds(
196 				ehci_state_t		*ehcip,
197 				usba_pipe_handle_data_t	*ph);
198 void		ehci_deallocate_qtd(
199 				ehci_state_t		*ehcip,
200 				ehci_qtd_t		*old_qtd);
201 uint32_t	ehci_qtd_cpu_to_iommu(
202 				ehci_state_t		*ehcip,
203 				ehci_qtd_t		*addr);
204 ehci_qtd_t	*ehci_qtd_iommu_to_cpu(
205 				ehci_state_t		*ehcip,
206 				uintptr_t		addr);
207 
208 /* Transfer Wrapper (TW) functions */
209 static ehci_trans_wrapper_t  *ehci_create_transfer_wrapper(
210 				ehci_state_t		*ehcip,
211 				ehci_pipe_private_t	*pp,
212 				size_t			length,
213 				uint_t			usb_flags);
214 int		ehci_allocate_tds_for_tw(
215 				ehci_state_t		*ehcip,
216 				ehci_pipe_private_t	*pp,
217 				ehci_trans_wrapper_t	*tw,
218 				size_t			qtd_count);
219 static ehci_trans_wrapper_t  *ehci_allocate_tw_resources(
220 				ehci_state_t		*ehcip,
221 				ehci_pipe_private_t	*pp,
222 				size_t			length,
223 				usb_flags_t		usb_flags,
224 				size_t			td_count);
225 static void	ehci_free_tw_td_resources(
226 				ehci_state_t		*ehcip,
227 				ehci_trans_wrapper_t	*tw);
228 static void	ehci_start_xfer_timer(
229 				ehci_state_t		*ehcip,
230 				ehci_pipe_private_t	*pp,
231 				ehci_trans_wrapper_t	*tw);
232 void		ehci_stop_xfer_timer(
233 				ehci_state_t		*ehcip,
234 				ehci_trans_wrapper_t	*tw,
235 				uint_t			flag);
236 static void	ehci_xfer_timeout_handler(void		*arg);
237 static void	ehci_remove_tw_from_timeout_list(
238 				ehci_state_t		*ehcip,
239 				ehci_trans_wrapper_t	*tw);
240 static void	ehci_start_timer(ehci_state_t		*ehcip,
241 				ehci_pipe_private_t	*pp);
242 void		ehci_deallocate_tw(
243 				ehci_state_t		*ehcip,
244 				ehci_pipe_private_t	*pp,
245 				ehci_trans_wrapper_t	*tw);
246 void		ehci_free_dma_resources(
247 				ehci_state_t		*ehcip,
248 				usba_pipe_handle_data_t	*ph);
249 static void	ehci_free_tw(
250 				ehci_state_t		*ehcip,
251 				ehci_pipe_private_t	*pp,
252 				ehci_trans_wrapper_t	*tw);
253 
254 /* Miscellaneous functions */
255 int		ehci_allocate_intr_in_resource(
256 				ehci_state_t		*ehcip,
257 				ehci_pipe_private_t	*pp,
258 				ehci_trans_wrapper_t	*tw,
259 				usb_flags_t		flags);
260 void		ehci_pipe_cleanup(
261 				ehci_state_t		*ehcip,
262 				usba_pipe_handle_data_t	*ph);
263 static void	ehci_wait_for_transfers_completion(
264 				ehci_state_t		*ehcip,
265 				ehci_pipe_private_t	*pp);
266 void		ehci_check_for_transfers_completion(
267 				ehci_state_t		*ehcip,
268 				ehci_pipe_private_t	*pp);
269 static void	ehci_save_data_toggle(
270 				ehci_state_t		*ehcip,
271 				usba_pipe_handle_data_t	*ph);
272 void		ehci_restore_data_toggle(
273 				ehci_state_t		*ehcip,
274 				usba_pipe_handle_data_t	*ph);
275 void		ehci_handle_outstanding_requests(
276 				ehci_state_t		*ehcip,
277 				ehci_pipe_private_t	*pp);
278 void		ehci_deallocate_intr_in_resource(
279 				ehci_state_t		*ehcip,
280 				ehci_pipe_private_t	*pp,
281 				ehci_trans_wrapper_t	*tw);
282 void		ehci_do_client_periodic_in_req_callback(
283 				ehci_state_t		*ehcip,
284 				ehci_pipe_private_t	*pp,
285 				usb_cr_t		completion_reason);
286 void		ehci_hcdi_callback(
287 				usba_pipe_handle_data_t	*ph,
288 				ehci_trans_wrapper_t	*tw,
289 				usb_cr_t		completion_reason);
290 
291 
292 /*
 * Endpoint Descriptor (QH) manipulation functions
294  */
295 
296 /*
297  * ehci_alloc_qh:
298  *
299  * Allocate an endpoint descriptor (QH)
300  *
301  * NOTE: This function is also called from POLLED MODE.
302  */
303 ehci_qh_t *
304 ehci_alloc_qh(
305 	ehci_state_t		*ehcip,
306 	usba_pipe_handle_data_t	*ph,
307 	uint_t			flag)
308 {
309 	int			i, state;
310 	ehci_qh_t		*qh;
311 
312 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
313 	    "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
314 
315 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
316 
	/*
	 * If this is for an ISOC endpoint, return NULL.
	 * Isochronous transfers use ITDs placed directly onto the PFL.
	 */
321 	if (ph) {
322 		if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
323 
324 			return (NULL);
325 		}
326 	}
327 
328 	/*
329 	 * The first 63 endpoints in the Endpoint Descriptor (QH)
	 * buffer pool are reserved for building the interrupt lattice
	 * tree. Search for a free endpoint descriptor in the QH
332 	 * buffer pool.
333 	 */
	for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i++) {
335 		state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
336 
337 		if (state == EHCI_QH_FREE) {
338 			break;
339 		}
340 	}
341 
342 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
343 	    "ehci_alloc_qh: Allocated %d", i);
344 
345 	if (i == ehci_qh_pool_size) {
346 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  ehcip->ehci_log_hdl,
347 		    "ehci_alloc_qh: QH exhausted");
348 
349 		return (NULL);
350 	} else {
351 		qh = &ehcip->ehci_qh_pool_addr[i];
352 		bzero((void *)qh, sizeof (ehci_qh_t));
353 
354 		USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
355 		    "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
356 
357 		/* Check polled mode flag */
358 		if (flag == EHCI_POLLED_MODE_FLAG) {
359 			Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
360 			Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
361 		}
362 
363 		/* Unpack the endpoint descriptor into a control field */
364 		if (ph) {
365 			if ((ehci_initialize_dummy(ehcip,
366 			    qh)) == USB_NO_RESOURCES) {
367 
368 				Set_QH(qh->qh_state, EHCI_QH_FREE);
369 
370 				return (NULL);
371 			}
372 
373 			ehci_unpack_endpoint(ehcip, ph, qh);
374 
375 			Set_QH(qh->qh_curr_qtd, NULL);
376 			Set_QH(qh->qh_alt_next_qtd,
377 			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
378 
			/* Change QH's state to Active */
380 			Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
381 		} else {
382 			Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
383 
			/* Change QH's state to Static */
385 			Set_QH(qh->qh_state, EHCI_QH_STATIC);
386 		}
387 
388 		ehci_print_qh(ehcip, qh);
389 
390 		return (qh);
391 	}
392 }
393 
394 
395 /*
396  * ehci_unpack_endpoint:
397  *
 * Unpack the information in the pipe handle and create the control fields
 * of the Host Controller's (HC) Endpoint Descriptor (QH).
400  */
401 static void
402 ehci_unpack_endpoint(
403 	ehci_state_t		*ehcip,
404 	usba_pipe_handle_data_t	*ph,
405 	ehci_qh_t		*qh)
406 {
407 	usb_ep_descr_t		*endpoint = &ph->p_ep;
408 	uint_t			maxpacketsize, addr, xactions;
409 	uint_t			ctrl = 0, status = 0, split_ctrl = 0;
410 	usb_port_status_t	usb_port_status;
411 	usba_device_t		*usba_device = ph->p_usba_device;
412 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
413 
414 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
415 	    "ehci_unpack_endpoint:");
416 
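	/*
	 * The device address occupies bits 6:0 of the QH control word,
	 * so the USB address can be used directly as the initial value
	 * of ctrl below.
	 */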
417 	mutex_enter(&usba_device->usb_mutex);
418 	ctrl = usba_device->usb_addr;
419 	usb_port_status = usba_device->usb_port_status;
420 	mutex_exit(&usba_device->usb_mutex);
421 
422 	addr = endpoint->bEndpointAddress;
423 
424 	/* Assign the endpoint's address */
425 	ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
426 
427 	/* Assign the speed */
428 	switch (usb_port_status) {
429 	case USBA_LOW_SPEED_DEV:
430 		ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
431 		break;
432 	case USBA_FULL_SPEED_DEV:
433 		ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
434 		break;
435 	case USBA_HIGH_SPEED_DEV:
436 		ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
437 		break;
438 	}
439 
440 	switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
441 	case USB_EP_ATTR_CONTROL:
442 		/* Assign data toggle information */
443 		ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
444 
445 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
446 			ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
447 		}
448 		/* FALLTHRU */
449 	case USB_EP_ATTR_BULK:
450 		/* Maximum nak counter */
451 		ctrl |= EHCI_QH_CTRL_MAX_NC;
452 
453 		if (usb_port_status == USBA_HIGH_SPEED_DEV) {
454 			/*
455 			 * Perform ping before executing control
456 			 * and bulk transactions.
457 			 */
458 			status = EHCI_QH_STS_DO_PING;
459 		}
460 		break;
461 	case USB_EP_ATTR_INTR:
462 		/* Set start split mask */
463 		split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
464 
465 		/*
466 		 * Set complete split mask for low/full speed
467 		 * usb devices.
468 		 */
469 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
470 			split_ctrl |= ((pp->pp_cmask <<
471 			    EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
472 			    EHCI_QH_SPLIT_CTRL_COMP_MASK);
473 		}
474 		break;
475 	}
476 
477 	/* Get the max transactions per microframe */
478 	xactions = (endpoint->wMaxPacketSize &
479 	    USB_EP_MAX_XACTS_MASK) >>  USB_EP_MAX_XACTS_SHIFT;
480 
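	/*
	 * Bits 12:11 of wMaxPacketSize encode the transactions per
	 * microframe for high-bandwidth endpoints: 0 means one
	 * transaction, 1 means two, 2 means three; the reserved
	 * value 3 is treated as one transaction below.
	 */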
481 	switch (xactions) {
482 	case 0:
483 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
484 		break;
485 	case 1:
486 		split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
487 		break;
488 	case 2:
489 		split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
490 		break;
491 	default:
492 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
493 		break;
494 	}
495 
496 	/*
497 	 * For low/full speed devices, program high speed hub
498 	 * address and port number.
499 	 */
500 	if (usb_port_status != USBA_HIGH_SPEED_DEV) {
501 		mutex_enter(&usba_device->usb_mutex);
502 		split_ctrl |= ((usba_device->usb_hs_hub_addr
503 		    << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
504 		    EHCI_QH_SPLIT_CTRL_HUB_ADDR);
505 
506 		split_ctrl |= ((usba_device->usb_hs_hub_port
507 		    << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
508 		    EHCI_QH_SPLIT_CTRL_HUB_PORT);
509 
510 		mutex_exit(&usba_device->usb_mutex);
511 
512 		/* Set start split transaction state */
513 		status = EHCI_QH_STS_DO_START_SPLIT;
514 	}
515 
516 	/* Assign endpoint's maxpacketsize */
517 	maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
518 	maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
519 	ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
520 
521 	Set_QH(qh->qh_ctrl, ctrl);
522 	Set_QH(qh->qh_split_ctrl, split_ctrl);
523 	Set_QH(qh->qh_status, status);
524 }
525 
526 
527 /*
528  * ehci_insert_qh:
529  *
530  * Add the Endpoint Descriptor (QH) into the Host Controller's
531  * (HC) appropriate endpoint list.
532  */
533 void
534 ehci_insert_qh(
535 	ehci_state_t		*ehcip,
536 	usba_pipe_handle_data_t	*ph)
537 {
538 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
539 
540 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
541 	    "ehci_insert_qh: qh=0x%p", (void *)pp->pp_qh);
542 
543 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
544 
545 	switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
546 	case USB_EP_ATTR_CONTROL:
547 	case USB_EP_ATTR_BULK:
548 		ehci_insert_async_qh(ehcip, pp);
549 		ehcip->ehci_open_async_count++;
550 		break;
551 	case USB_EP_ATTR_INTR:
552 		ehci_insert_intr_qh(ehcip, pp);
553 		ehcip->ehci_open_periodic_count++;
554 		break;
555 	case USB_EP_ATTR_ISOCH:
556 		/* ISOCH does not use QH, don't do anything but update count */
557 		ehcip->ehci_open_periodic_count++;
558 		break;
559 	}
560 	ehci_toggle_scheduler(ehcip);
561 }
562 
563 
564 /*
565  * ehci_insert_async_qh:
566  *
567  * Insert a control/bulk endpoint into the Host Controller's (HC)
568  * Asynchronous schedule endpoint list.
569  */
570 static void
571 ehci_insert_async_qh(
572 	ehci_state_t		*ehcip,
573 	ehci_pipe_private_t	*pp)
574 {
575 	ehci_qh_t		*qh = pp->pp_qh;
576 	ehci_qh_t		*async_head_qh;
577 	ehci_qh_t		*next_qh;
578 	uintptr_t		qh_addr;
579 
580 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
581 	    "ehci_insert_async_qh:");
582 
583 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
584 
585 	/* Make sure this QH is not already in the list */
586 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
587 
588 	qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);
589 
590 	/* Obtain a ptr to the head of the Async schedule list */
591 	async_head_qh = ehcip->ehci_head_of_async_sched_list;
592 
593 	if (async_head_qh == NULL) {
594 		/* Set this QH to be the "head" of the circular list */
595 		Set_QH(qh->qh_ctrl,
596 		    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));
597 
598 		/* Set new QH's link and previous pointer to itself */
599 		Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
600 		Set_QH(qh->qh_prev, qh_addr);
601 
602 		ehcip->ehci_head_of_async_sched_list = qh;
603 
604 		/* Set the head ptr to the new endpoint */
605 		Set_OpReg(ehci_async_list_addr, qh_addr);
606 
607 		/*
608 		 * For some reason this register might get nulled out by
609 		 * the Uli M1575 South Bridge. To workaround the hardware
610 		 * problem, check the value after write and retry if the
611 		 * last write fails.
612 		 *
613 		 * If the ASYNCLISTADDR remains "stuck" after
614 		 * EHCI_MAX_RETRY retries, then the M1575 is broken
615 		 * and is stuck in an inconsistent state and is about
616 		 * to crash the machine with a trn_oor panic when it
617 		 * does a DMA read from 0x0.  It is better to panic
618 		 * now rather than wait for the trn_oor crash; this
619 		 * way Customer Service will have a clean signature
620 		 * that indicts the M1575 chip rather than a
621 		 * mysterious and hard-to-diagnose trn_oor panic.
622 		 */
623 		if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
624 		    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
625 		    (qh_addr != Get_OpReg(ehci_async_list_addr))) {
626 			int retry = 0;
627 
628 			Set_OpRegRetry(ehci_async_list_addr, qh_addr, retry);
629 			if (retry >= EHCI_MAX_RETRY)
630 				cmn_err(CE_PANIC, "ehci_insert_async_qh:"
631 				    " ASYNCLISTADDR write failed.");
632 
633 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
634 			    "ehci_insert_async_qh: ASYNCLISTADDR "
635 			    "write failed, retry=%d", retry);
636 		}
637 	} else {
638 		ASSERT(Get_QH(async_head_qh->qh_ctrl) &
639 		    EHCI_QH_CTRL_RECLAIM_HEAD);
640 
641 		/* Ensure this QH's "H" bit is not set */
642 		Set_QH(qh->qh_ctrl,
643 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));
644 
645 		next_qh = ehci_qh_iommu_to_cpu(ehcip,
646 		    Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
647 
648 		/* Set new QH's link and previous pointers */
649 		Set_QH(qh->qh_link_ptr,
650 		    Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
651 		Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));
652 
653 		/* Set next QH's prev pointer */
654 		Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));
655 
656 		/* Set QH Head's link pointer points to new QH */
657 		Set_QH(async_head_qh->qh_link_ptr,
658 		    qh_addr | EHCI_QH_LINK_REF_QH);
659 	}
660 }
661 
662 
663 /*
664  * ehci_insert_intr_qh:
665  *
 * Insert an interrupt endpoint into the Host Controller's (HC) interrupt
667  * lattice tree.
668  */
669 static void
670 ehci_insert_intr_qh(
671 	ehci_state_t		*ehcip,
672 	ehci_pipe_private_t	*pp)
673 {
674 	ehci_qh_t		*qh = pp->pp_qh;
675 	ehci_qh_t		*next_lattice_qh, *lattice_qh;
676 	uint_t			hnode;
677 
678 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
679 	    "ehci_insert_intr_qh:");
680 
681 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
682 
683 	/* Make sure this QH is not already in the list */
684 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
685 
686 	/*
687 	 * The appropriate high speed node was found
688 	 * during the opening of the pipe.
689 	 */
690 	hnode = pp->pp_pnode;
691 
692 	/* Find the lattice endpoint */
693 	lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];
694 
695 	/* Find the next lattice endpoint */
696 	next_lattice_qh = ehci_qh_iommu_to_cpu(
697 	    ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));
698 
699 	/* Update the previous pointer */
700 	Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));
701 
702 	/* Check next_lattice_qh value */
703 	if (next_lattice_qh) {
704 		/* Update this qh to point to the next one in the lattice */
705 		Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));
706 
707 		/* Update the previous pointer of qh->qh_link_ptr */
708 		if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
709 			Set_QH(next_lattice_qh->qh_prev,
710 			    ehci_qh_cpu_to_iommu(ehcip, qh));
711 		}
712 	} else {
713 		/* Update qh's link pointer to terminate periodic list */
714 		Set_QH(qh->qh_link_ptr,
715 		    (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
716 	}
717 
718 	/* Insert this endpoint into the lattice */
719 	Set_QH(lattice_qh->qh_link_ptr,
720 	    (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
721 }
722 
723 
724 /*
725  * ehci_modify_qh_status_bit:
726  *
727  * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
728  *
729  * If several threads try to halt the same pipe, they will need to wait on
730  * a condition variable.  Only one thread is allowed to halt or unhalt the
731  * pipe at a time.
732  *
733  * Usually after a halt pipe, an unhalt pipe will follow soon after.  There
734  * is an assumption that an Unhalt pipe will never occur without a halt pipe.
735  */
736 static void
737 ehci_modify_qh_status_bit(
738 	ehci_state_t		*ehcip,
739 	ehci_pipe_private_t	*pp,
740 	halt_bit_t		action)
741 {
742 	ehci_qh_t		*qh = pp->pp_qh;
743 	uint_t			smask, eps, split_intr_qh;
744 	uint_t			status;
745 
746 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
747 	    "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
748 	    action, (void *)qh);
749 
750 	ehci_print_qh(ehcip, qh);
751 
752 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
753 
	/*
	 * If this pipe is in the middle of halting, don't allow another
	 * thread to come in and modify the same pipe.
	 */
758 	while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {
759 
760 		cv_wait(&pp->pp_halt_cmpl_cv,
761 		    &ehcip->ehci_int_mutex);
762 	}
763 
764 	/* Sync the QH QTD pool to get up to date information */
765 	Sync_QH_QTD_Pool(ehcip);
766 
767 
768 	if (action == CLEAR_HALT) {
		/*
		 * If the halt bit is to be cleared, just clear it.
		 * There shouldn't be any race condition problems.
		 * If the host controller reads the bit before the
		 * driver has a chance to clear the bit, the bit will
		 * be reread on the next frame.
		 */
776 		Set_QH(qh->qh_ctrl,
777 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
778 		Set_QH(qh->qh_status,
779 		    Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));
780 
781 		goto success;
782 	}
783 
	/* Halt the QH, but first check whether it is already halted */
785 	status = Get_QH(qh->qh_status);
786 	if (!(status & EHCI_QH_STS_HALTED)) {
787 		/* Indicate that this pipe is in the middle of halting. */
788 		pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;
789 
		/*
		 * Find out if this is a full/low speed interrupt endpoint.
		 * A non-zero smask indicates that this QH is an interrupt
		 * endpoint.  Check the endpoint speed to see if it is
		 * either FULL or LOW.
		 */
796 		smask = Get_QH(qh->qh_split_ctrl) &
797 		    EHCI_QH_SPLIT_CTRL_INTR_MASK;
798 		eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
799 		split_intr_qh = ((smask != 0) &&
800 		    (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));
801 
802 		if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
803 			ehci_halt_hs_qh(ehcip, pp, qh);
804 		} else {
805 			if (split_intr_qh) {
806 				ehci_halt_fls_intr_qh(ehcip, qh);
807 			} else {
808 				ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
809 			}
810 		}
811 
812 		/* Indicate that this pipe is not in the middle of halting. */
813 		pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
814 	}
815 
816 	/* Sync the QH QTD pool again to get the most up to date information */
817 	Sync_QH_QTD_Pool(ehcip);
818 
819 	ehci_print_qh(ehcip, qh);
820 
821 	status = Get_QH(qh->qh_status);
822 	if (!(status & EHCI_QH_STS_HALTED)) {
823 		USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
824 		    "ehci_modify_qh_status_bit: Failed to halt qh=0x%p",
825 		    (void *)qh);
826 
827 		ehci_print_qh(ehcip, qh);
828 
829 		/* Set host controller soft state to error */
830 		ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
831 
832 		ASSERT(status & EHCI_QH_STS_HALTED);
833 	}
834 
835 success:
836 	/* Wake up threads waiting for this pipe to be halted. */
837 	cv_signal(&pp->pp_halt_cmpl_cv);
838 }
839 
840 
841 /*
842  * ehci_halt_hs_qh:
843  *
844  * Halts all types of HIGH SPEED QHs.
845  */
846 static void
847 ehci_halt_hs_qh(
848 	ehci_state_t		*ehcip,
849 	ehci_pipe_private_t	*pp,
850 	ehci_qh_t		*qh)
851 {
852 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
853 
854 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
855 	    "ehci_halt_hs_qh:");
856 
857 	/* Remove this qh from the HCD's view, but do not reclaim it */
858 	ehci_remove_qh(ehcip, pp, B_FALSE);
859 
	/*
	 * Wait for at least one SOF, just in case the HC is in the
	 * middle of accessing this QH.
	 */
864 	(void) ehci_wait_for_sof(ehcip);
865 
866 	/* Sync the QH QTD pool to get up to date information */
867 	Sync_QH_QTD_Pool(ehcip);
868 
869 	/* Modify the status bit and halt this QH. */
870 	Set_QH(qh->qh_status,
871 	    ((Get_QH(qh->qh_status) &
872 	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
873 
874 	/* Insert this QH back into the HCD's view */
875 	ehci_insert_qh(ehcip, ph);
876 }
877 
878 
879 /*
880  * ehci_halt_fls_ctrl_and_bulk_qh:
881  *
882  * Halts FULL/LOW Ctrl and Bulk QHs only.
883  */
884 static void
885 ehci_halt_fls_ctrl_and_bulk_qh(
886 	ehci_state_t		*ehcip,
887 	ehci_pipe_private_t	*pp,
888 	ehci_qh_t		*qh)
889 {
890 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
891 	uint_t			status, split_status, bytes_left;
892 
893 
894 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
895 	    "ehci_halt_fls_ctrl_and_bulk_qh:");
896 
897 	/* Remove this qh from the HCD's view, but do not reclaim it */
898 	ehci_remove_qh(ehcip, pp, B_FALSE);
899 
	/*
	 * Wait for at least one SOF, just in case the HC is in the
	 * middle of accessing this QH.
	 */
904 	(void) ehci_wait_for_sof(ehcip);
905 
906 	/* Sync the QH QTD pool to get up to date information */
907 	Sync_QH_QTD_Pool(ehcip);
908 
909 	/* Modify the status bit and halt this QH. */
910 	Set_QH(qh->qh_status,
911 	    ((Get_QH(qh->qh_status) &
912 	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
913 
914 	/* Check to see if the QH was in the middle of a transaction */
915 	status = Get_QH(qh->qh_status);
916 	split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
917 	bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
918 	if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
919 	    (bytes_left != 0)) {
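		/*
		 * The halt may have interrupted a split transaction between
		 * the start-split and complete-split phases, which can leave
		 * stale data in the parent hub's Transaction Translator
		 * buffer; that buffer must be flushed before this endpoint
		 * is used again.
		 */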
920 		/* send ClearTTBuffer to this device's parent 2.0 hub */
921 		ehci_clear_tt_buffer(ehcip, ph, qh);
922 	}
923 
924 	/* Insert this QH back into the HCD's view */
925 	ehci_insert_qh(ehcip, ph);
926 }
927 
928 
929 /*
 * ehci_clear_tt_buffer:
 *
 * This function will send a Clear_TT_Buffer request to the pipe's
933  * parent 2.0 hub.
934  */
935 static void
936 ehci_clear_tt_buffer(
937 	ehci_state_t		*ehcip,
938 	usba_pipe_handle_data_t	*ph,
939 	ehci_qh_t		*qh)
940 {
941 	usba_device_t		*usba_device;
942 	usba_device_t		*hub_usba_device;
943 	usb_pipe_handle_t	hub_def_ph;
944 	usb_ep_descr_t		*eptd;
945 	uchar_t			attributes;
946 	uint16_t		wValue;
947 	usb_ctrl_setup_t	setup;
948 	usb_cr_t		completion_reason;
949 	usb_cb_flags_t		cb_flags;
950 	int			retry;
951 
952 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
953 	    "ehci_clear_tt_buffer: ");
954 
955 	/* Get some information about the current pipe */
956 	usba_device = ph->p_usba_device;
957 	eptd = &ph->p_ep;
958 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
959 
960 	/*
	 * Create the wValue for this request (usb spec 11.24.2.3)
962 	 * 3..0		Endpoint Number
963 	 * 10..4	Device Address
964 	 * 12..11	Endpoint Type
965 	 * 14..13	Reserved (must be 0)
966 	 * 15		Direction 1 = IN, 0 = OUT
967 	 */
968 	wValue = 0;
969 	if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
970 		wValue |= 0x8000;
971 	}
972 	wValue |= attributes << 11;
973 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
	wValue |= (eptd->bEndpointAddress & USB_EP_NUM_MASK);
976 
977 	mutex_exit(&ehcip->ehci_int_mutex);
978 
979 	/* Manually fill in the request. */
980 	setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
981 	setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
982 	setup.wValue = wValue;
983 	setup.wIndex = 1;
984 	setup.wLength = 0;
985 	setup.attrs = USB_ATTRS_NONE;
986 
987 	/* Get the usba_device of the parent 2.0 hub. */
988 	mutex_enter(&usba_device->usb_mutex);
989 	hub_usba_device = usba_device->usb_hs_hub_usba_dev;
990 	mutex_exit(&usba_device->usb_mutex);
991 
992 	/* Get the default ctrl pipe for the parent 2.0 hub */
993 	mutex_enter(&hub_usba_device->usb_mutex);
994 	hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
995 	mutex_exit(&hub_usba_device->usb_mutex);
996 
997 	for (retry = 0; retry < 3; retry++) {
998 
999 		/* sync send the request to the default pipe */
1000 		if (usb_pipe_ctrl_xfer_wait(
1001 		    hub_def_ph,
1002 		    &setup,
1003 		    NULL,
1004 		    &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
1005 
1006 			break;
1007 		}
1008 
1009 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_clear_tt_buffer: Failed to clear tt buffer, "
		    "retry = %d, cr = %d, cb_flags = 0x%x",
1012 		    retry, completion_reason, cb_flags);
1013 	}
1014 
1015 	if (retry >= 3) {
1016 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1017 		dev_info_t *dip = hub_usba_device->usb_dip;
1018 
		/*
		 * Ask the user to hotplug the 2.0 hub, to make sure that
		 * the hub's TT buffer is back in sync, since this command
		 * has failed.
		 */
		USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "Error recovery failure: Please hotplug the 2.0 hub at "
		    "%s", ddi_pathname(dip, path));
1026 
1027 		kmem_free(path, MAXPATHLEN);
1028 	}
1029 
1030 	mutex_enter(&ehcip->ehci_int_mutex);
1031 }
1032 
1033 /*
1034  * ehci_halt_fls_intr_qh:
1035  *
1036  * Halts FULL/LOW speed Intr QHs.
1037  */
1038 static void
1039 ehci_halt_fls_intr_qh(
1040 	ehci_state_t		*ehcip,
1041 	ehci_qh_t		*qh)
1042 {
1043 	usb_frame_number_t	starting_frame;
1044 	usb_frame_number_t	frames_past;
1045 	uint_t			status, i;
1046 
1047 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1048 	    "ehci_halt_fls_intr_qh:");
1049 
1050 	/*
1051 	 * Ask the HC to deactivate the QH in a
1052 	 * full/low periodic QH.
1053 	 */
1054 	Set_QH(qh->qh_ctrl,
1055 	    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));
1056 
1057 	starting_frame = ehci_get_current_frame_number(ehcip);
1058 
1059 	/*
	 * Wait at least EHCI_NUM_INTR_QH_LISTS + 2 frames or until
1061 	 * the QH has been halted.
1062 	 */
1063 	Sync_QH_QTD_Pool(ehcip);
1064 	frames_past = 0;
1065 	status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1066 
1067 	while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
1068 	    (status != 0)) {
1069 
1070 		(void) ehci_wait_for_sof(ehcip);
1071 
1072 		Sync_QH_QTD_Pool(ehcip);
1073 		status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1074 		frames_past = ehci_get_current_frame_number(ehcip) -
1075 		    starting_frame;
1076 	}
1077 
1078 	/* Modify the status bit and halt this QH. */
1079 	Sync_QH_QTD_Pool(ehcip);
1080 
1081 	status = Get_QH(qh->qh_status);
1082 
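	/*
	 * This interrupt QH is still linked into the periodic schedule, so
	 * the HC may overwrite qh_status with its own write-back while it
	 * is processing the QH.  Set the halt bit, wait a frame, verify,
	 * and retry for up to EHCI_NUM_INTR_QH_LISTS frames.
	 */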
1083 	for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
1084 		Set_QH(qh->qh_status,
1085 		    ((Get_QH(qh->qh_status) &
1086 		    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
1087 
1088 		Sync_QH_QTD_Pool(ehcip);
1089 
1090 		(void) ehci_wait_for_sof(ehcip);
1091 		Sync_QH_QTD_Pool(ehcip);
1092 
1093 		if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {
1094 
1095 			break;
1096 		}
1097 	}
1098 
1099 	Sync_QH_QTD_Pool(ehcip);
1100 
1101 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1102 	    "ehci_halt_fls_intr_qh: qh=0x%p frames past=%llu,"
1103 	    " status=0x%x, 0x%x", (void *)qh,
1104 	    (unsigned long long)(ehci_get_current_frame_number(ehcip) -
1105 	    starting_frame), status, Get_QH(qh->qh_status));
1106 }
1107 
1108 
1109 /*
1110  * ehci_remove_qh:
1111  *
1112  * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1113  * endpoint list.
1114  */
1115 void
1116 ehci_remove_qh(
1117 	ehci_state_t		*ehcip,
1118 	ehci_pipe_private_t	*pp,
1119 	boolean_t		reclaim)
1120 {
1121 	uchar_t			attributes;
1122 
1123 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1124 
1125 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1126 	    "ehci_remove_qh: qh=0x%p", (void *)pp->pp_qh);
1127 
1128 	attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1129 
1130 	switch (attributes) {
1131 	case USB_EP_ATTR_CONTROL:
1132 	case USB_EP_ATTR_BULK:
1133 		ehci_remove_async_qh(ehcip, pp, reclaim);
1134 		ehcip->ehci_open_async_count--;
1135 		break;
1136 	case USB_EP_ATTR_INTR:
1137 		ehci_remove_intr_qh(ehcip, pp, reclaim);
1138 		ehcip->ehci_open_periodic_count--;
1139 		break;
1140 	case USB_EP_ATTR_ISOCH:
1141 		/* ISOCH does not use QH, don't do anything but update count */
1142 		ehcip->ehci_open_periodic_count--;
1143 		break;
1144 	}
1145 	ehci_toggle_scheduler(ehcip);
1146 }
1147 
1148 
1149 /*
1150  * ehci_remove_async_qh:
1151  *
 * Remove a control/bulk endpoint from the Host Controller's (HC)
1153  * Asynchronous schedule endpoint list.
1154  */
1155 static void
1156 ehci_remove_async_qh(
1157 	ehci_state_t		*ehcip,
1158 	ehci_pipe_private_t	*pp,
1159 	boolean_t		reclaim)
1160 {
1161 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1162 	ehci_qh_t		*prev_qh, *next_qh;
1163 
1164 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1165 	    "ehci_remove_async_qh:");
1166 
1167 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1168 
1169 	prev_qh = ehci_qh_iommu_to_cpu(ehcip,
1170 	    Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
1171 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1172 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1173 
1174 	/* Make sure this QH is in the list */
1175 	ASSERT(prev_qh != NULL);
1176 
1177 	/*
1178 	 * If next QH and current QH are the same, then this is the last
1179 	 * QH on the Asynchronous Schedule list.
1180 	 */
1181 	if (qh == next_qh) {
1182 		ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1183 		/*
1184 		 * Null our pointer to the async sched list, but do not
1185 		 * touch the host controller's list_addr.
1186 		 */
1187 		ehcip->ehci_head_of_async_sched_list = NULL;
1188 		ASSERT(ehcip->ehci_open_async_count == 1);
1189 	} else {
1190 		/* If this QH is the HEAD then find another one to replace it */
1191 		if (ehcip->ehci_head_of_async_sched_list == qh) {
1192 
1193 			ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1194 			ehcip->ehci_head_of_async_sched_list = next_qh;
1195 			Set_QH(next_qh->qh_ctrl,
1196 			    Get_QH(next_qh->qh_ctrl) |
1197 			    EHCI_QH_CTRL_RECLAIM_HEAD);
1198 		}
1199 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1200 		Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1201 	}
1202 
	/* Clear qh_prev to indicate it is no longer in the circular list */
1204 	Set_QH(qh->qh_prev, NULL);
1205 
1206 	if (reclaim) {
1207 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1208 	}
1209 }
1210 
1211 
1212 /*
1213  * ehci_remove_intr_qh:
1214  *
1215  * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1216  * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1217  * interrupt handler.
1218  */
1219 static void
1220 ehci_remove_intr_qh(
1221 	ehci_state_t		*ehcip,
1222 	ehci_pipe_private_t	*pp,
1223 	boolean_t		reclaim)
1224 {
1225 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1226 	ehci_qh_t		*prev_qh, *next_qh;
1227 
1228 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1229 	    "ehci_remove_intr_qh:");
1230 
1231 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1232 
1233 	prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
1234 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1235 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1236 
1237 	/* Make sure this QH is in the list */
1238 	ASSERT(prev_qh != NULL);
1239 
1240 	if (next_qh) {
1241 		/* Update previous qh's link pointer */
1242 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1243 
1244 		if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
1245 			/* Set the previous pointer of the next one */
1246 			Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1247 		}
1248 	} else {
1249 		/* Update previous qh's link pointer */
1250 		Set_QH(prev_qh->qh_link_ptr,
1251 		    (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
1252 	}
1253 
	/* Clear qh_prev to indicate it is no longer in the circular list */
1255 	Set_QH(qh->qh_prev, NULL);
1256 
1257 	if (reclaim) {
1258 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1259 	}
1260 }
1261 
1262 
1263 /*
1264  * ehci_insert_qh_on_reclaim_list:
1265  *
1266  * Insert Endpoint onto the reclaim list
1267  */
1268 static void
1269 ehci_insert_qh_on_reclaim_list(
1270 	ehci_state_t		*ehcip,
1271 	ehci_pipe_private_t	*pp)
1272 {
1273 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1274 	ehci_qh_t		*next_qh, *prev_qh;
1275 	usb_frame_number_t	frame_number;
1276 
1277 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1278 
	/*
	 * Read the current usb frame number and add the appropriate number
	 * of usb frames that must elapse before reclaiming this endpoint.
	 */
1283 	frame_number =
1284 	    ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;
1285 
1286 	/* Store 32-bit ID */
1287 	Set_QH(qh->qh_reclaim_frame,
1288 	    ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));
1289 
1290 	/* Insert the endpoint onto the reclamation list */
1291 	if (ehcip->ehci_reclaim_list) {
1292 		next_qh = ehcip->ehci_reclaim_list;
1293 
1294 		while (next_qh) {
1295 			prev_qh = next_qh;
1296 			next_qh = ehci_qh_iommu_to_cpu(ehcip,
1297 			    Get_QH(next_qh->qh_reclaim_next));
1298 		}
1299 
1300 		Set_QH(prev_qh->qh_reclaim_next,
1301 		    ehci_qh_cpu_to_iommu(ehcip, qh));
1302 	} else {
1303 		ehcip->ehci_reclaim_list = qh;
1304 	}
1305 
1306 	ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
1307 }
1308 
1309 
1310 /*
1311  * ehci_deallocate_qh:
1312  *
1313  * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1314  *
1315  * NOTE: This function is also called from POLLED MODE.
1316  */
1317 void
1318 ehci_deallocate_qh(
1319 	ehci_state_t	*ehcip,
1320 	ehci_qh_t	*old_qh)
1321 {
1322 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1323 
1324 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1325 	    "ehci_deallocate_qh:");
1326 
1327 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1328 
1329 	first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1330 	    (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));
1331 
1332 	if (first_dummy_qtd) {
1333 		ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);
1334 
1335 		second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1336 		    Get_QTD(first_dummy_qtd->qtd_next_qtd));
1337 
1338 		if (second_dummy_qtd) {
1339 			ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
1340 			    EHCI_QTD_DUMMY);
1341 
1342 			ehci_deallocate_qtd(ehcip, second_dummy_qtd);
1343 		}
1344 
1345 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1346 	}
1347 
1348 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1349 	    "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);
1350 
1351 	Set_QH(old_qh->qh_state, EHCI_QH_FREE);
1352 }
1353 
1354 
1355 /*
1356  * ehci_qh_cpu_to_iommu:
1357  *
 * This function converts the given Endpoint Descriptor (QH) CPU address
 * to an IO address.
1360  *
1361  * NOTE: This function is also called from POLLED MODE.
1362  */
1363 uint32_t
1364 ehci_qh_cpu_to_iommu(
1365 	ehci_state_t	*ehcip,
1366 	ehci_qh_t	*addr)
1367 {
1368 	uint32_t	qh;
1369 
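	/*
	 * The QH pool is allocated as a single physically contiguous
	 * chunk, so the IO address is the pool's DMA cookie address plus
	 * the byte offset of this QH within the pool.
	 */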
1370 	qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1371 	    (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1372 
1373 	ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1374 	ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1375 	    sizeof (ehci_qh_t) * ehci_qh_pool_size);
1376 
1377 	return (qh);
1378 }
1379 
1380 
1381 /*
1382  * ehci_qh_iommu_to_cpu:
1383  *
 * This function converts the given Endpoint Descriptor (QH) IO address
 * to a CPU address.
1386  */
1387 ehci_qh_t *
1388 ehci_qh_iommu_to_cpu(
1389 	ehci_state_t	*ehcip,
1390 	uintptr_t	addr)
1391 {
1392 	ehci_qh_t	*qh;
1393 
1394 	if (addr == NULL) {
1395 
1396 		return (NULL);
1397 	}
1398 
1399 	qh = (ehci_qh_t *)((uintptr_t)
1400 	    (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1401 	    (uintptr_t)ehcip->ehci_qh_pool_addr);
1402 
1403 	ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1404 	ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1405 	    (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1406 
1407 	return (qh);
1408 }
1409 
1410 
1411 /*
 * Transfer Descriptor manipulation functions
1413  */
1414 
1415 /*
1416  * ehci_initialize_dummy:
1417  *
 * An Endpoint Descriptor (QH) has a dummy Transfer Descriptor (QTD) on the
1419  * end of its QTD list. Initially, both the head and tail pointers of the QH
1420  * point to the dummy QTD.
1421  */
1422 static int
1423 ehci_initialize_dummy(
1424 	ehci_state_t	*ehcip,
1425 	ehci_qh_t	*qh)
1426 {
1427 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1428 
1429 	/* Allocate first dummy QTD */
1430 	first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1431 
1432 	if (first_dummy_qtd == NULL) {
1433 		return (USB_NO_RESOURCES);
1434 	}
1435 
1436 	/* Allocate second dummy QTD */
1437 	second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1438 
1439 	if (second_dummy_qtd == NULL) {
1440 		/* Deallocate first dummy QTD */
1441 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1442 
1443 		return (USB_NO_RESOURCES);
1444 	}
1445 
	/* The QH's next QTD pointer points to the first dummy QTD */
1447 	Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
1448 	    first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);
1449 
1450 	/* Set qh's dummy qtd field */
1451 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));
1452 
1453 	/* Set first_dummy's next qtd pointer */
1454 	Set_QTD(first_dummy_qtd->qtd_next_qtd,
1455 	    ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));
1456 
1457 	return (USB_SUCCESS);
1458 }
1459 
1460 /*
1461  * ehci_allocate_ctrl_resources:
1462  *
 * Calculates the number of QTDs necessary for a ctrl transfer, and allocates
 * all the resources necessary.
 *
 * Returns NULL if there are insufficient resources, otherwise the TW.
1467  */
1468 ehci_trans_wrapper_t *
1469 ehci_allocate_ctrl_resources(
1470 	ehci_state_t		*ehcip,
1471 	ehci_pipe_private_t	*pp,
1472 	usb_ctrl_req_t		*ctrl_reqp,
1473 	usb_flags_t		usb_flags)
1474 {
1475 	size_t			qtd_count = 2;
1476 	size_t			ctrl_buf_size;
1477 	ehci_trans_wrapper_t	*tw;
1478 
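	/*
	 * A control transfer always needs at least two QTDs: one for the
	 * setup phase and one for the status phase, hence the initial
	 * qtd_count of two.
	 */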
	/* Add one more QTD for the data phase */
1480 	if (ctrl_reqp->ctrl_wLength) {
1481 		qtd_count += 1;
1482 	}
1483 
1484 	/*
1485 	 * If we have a control data phase, the data buffer starts
1486 	 * on the next 4K page boundary. So the TW buffer is allocated
1487 	 * to be larger than required. The buffer in the range of
1488 	 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
1489 	 * and not to be transferred.
1490 	 */
1491 	if (ctrl_reqp->ctrl_wLength) {
1492 		ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
1493 		    ctrl_reqp->ctrl_wLength;
1494 	} else {
1495 		ctrl_buf_size = SETUP_SIZE;
1496 	}
1497 
1498 	tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
1499 	    usb_flags, qtd_count);
1500 
1501 	return (tw);
1502 }
1503 
1504 /*
1505  * ehci_insert_ctrl_req:
1506  *
1507  * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1508  */
1509 /* ARGSUSED */
1510 void
1511 ehci_insert_ctrl_req(
1512 	ehci_state_t		*ehcip,
1513 	usba_pipe_handle_data_t	*ph,
1514 	usb_ctrl_req_t		*ctrl_reqp,
1515 	ehci_trans_wrapper_t	*tw,
1516 	usb_flags_t		usb_flags)
1517 {
1518 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1519 	uchar_t			bmRequestType = ctrl_reqp->ctrl_bmRequestType;
1520 	uchar_t			bRequest = ctrl_reqp->ctrl_bRequest;
1521 	uint16_t		wValue = ctrl_reqp->ctrl_wValue;
1522 	uint16_t		wIndex = ctrl_reqp->ctrl_wIndex;
1523 	uint16_t		wLength = ctrl_reqp->ctrl_wLength;
1524 	mblk_t			*data = ctrl_reqp->ctrl_data;
1525 	uint32_t		ctrl = 0;
1526 	uint8_t			setup_packet[8];
1527 
1528 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1529 	    "ehci_insert_ctrl_req:");
1530 
1531 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1532 
1533 	/*
1534 	 * Save current control request pointer and timeout values
1535 	 * in transfer wrapper.
1536 	 */
1537 	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
1538 	tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
1539 	    ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;
1540 
1541 	/*
1542 	 * Initialize the callback and any callback data for when
1543 	 * the qtd completes.
1544 	 */
1545 	tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
1546 	tw->tw_handle_callback_value = NULL;
1547 
1548 	/*
1549 	 * swap the setup bytes where necessary since we specified
1550 	 * NEVERSWAP
1551 	 */
1552 	setup_packet[0] = bmRequestType;
1553 	setup_packet[1] = bRequest;
1554 	setup_packet[2] = (uint8_t)wValue;
1555 	setup_packet[3] = wValue >> 8;
1556 	setup_packet[4] = (uint8_t)wIndex;
1557 	setup_packet[5] = wIndex >> 8;
1558 	setup_packet[6] = (uint8_t)wLength;
1559 	setup_packet[7] = wLength >> 8;
1560 
1561 	bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);
1562 
1563 	Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);
1564 
1565 	ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);
1566 
1567 	/*
1568 	 * The QTD's are placed on the QH one at a time.
1569 	 * Once this QTD is placed on the done list, the
1570 	 * data or status phase QTD will be enqueued.
1571 	 */
1572 	(void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
1573 	    EHCI_CTRL_SETUP_PHASE, pp, tw);
1574 
1575 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1576 	    "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);
1577 
1578 	/*
1579 	 * If this control transfer has a data phase, record the
1580 	 * direction. If the data phase is an OUT transaction,
1581 	 * copy the data into the buffer of the transfer wrapper.
1582 	 */
1583 	if (wLength != 0) {
1584 		/* There is a data stage.  Find the direction */
1585 		if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
1586 			tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
1587 		} else {
1588 			tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
1589 
1590 			/* Copy the data into the message */
1591 			bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
1592 			    wLength);
1593 
1594 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1595 			    wLength + EHCI_MAX_QTD_BUF_SIZE);
1596 		}
1597 
1598 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);
1599 
1600 		/*
1601 		 * Create the QTD.  If this is an OUT transaction,
1602 		 * the data is already in the buffer of the TW.
1603 		 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
1604 		 * which is 4K aligned, though the ctrl phase only
1605 		 * transfers a length of SETUP_SIZE. The padding data
1606 		 * in the TW buffer are discarded.
1607 		 */
1608 		(void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
1609 		    tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
1610 		    EHCI_CTRL_DATA_PHASE, pp, tw);
1611 
1612 		/*
		 * The direction of the STATUS QTD depends on
1614 		 * the direction of the transfer.
1615 		 */
1616 		if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
1617 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1618 			    EHCI_QTD_CTRL_OUT_PID |
1619 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1620 		} else {
1621 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1622 			    EHCI_QTD_CTRL_IN_PID |
1623 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1624 		}
1625 	} else {
		/*
		 * There is no data stage, so initiate the
		 * status phase from the host.
		 */
1630 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
1631 		    EHCI_QTD_CTRL_IN_PID |
1632 		    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1633 	}
1634 
1635 
1636 	(void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
1637 	    EHCI_CTRL_STATUS_PHASE, pp,  tw);
1638 
1639 	/* Start the timer for this control transfer */
1640 	ehci_start_xfer_timer(ehcip, pp, tw);
1641 }
1642 
1643 
1644 /*
1645  * ehci_allocate_bulk_resources:
1646  *
 * Calculates the number of QTDs necessary for a bulk transfer, and allocates
 * all the resources necessary.
 *
 * Returns NULL if there are insufficient resources, otherwise the TW.
1651  */
1652 ehci_trans_wrapper_t *
1653 ehci_allocate_bulk_resources(
1654 	ehci_state_t		*ehcip,
1655 	ehci_pipe_private_t	*pp,
1656 	usb_bulk_req_t		*bulk_reqp,
1657 	usb_flags_t		usb_flags)
1658 {
1659 	size_t			qtd_count = 0;
1660 	ehci_trans_wrapper_t	*tw;
1661 
1662 	/* Check the size of bulk request */
1663 	if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1664 
1665 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1666 		    "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1667 		    "more than 0x%x", bulk_reqp->bulk_len,
1668 		    EHCI_MAX_BULK_XFER_SIZE);
1669 
1670 		return (NULL);
1671 	}
1672 
	/* Compute the number of QTDs required for this bulk request */
1674 	qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1675 	if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE ||
1676 	    bulk_reqp->bulk_len == 0) {
1677 		qtd_count += 1;
1678 	}
1679 
1680 	tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1681 	    usb_flags, qtd_count);
1682 
1683 	return (tw);
1684 }
1685 
1686 /*
1687  * ehci_insert_bulk_req:
1688  *
1689  * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1690  * endpoint.
1691  */
1692 /* ARGSUSED */
1693 void
1694 ehci_insert_bulk_req(
1695 	ehci_state_t		*ehcip,
1696 	usba_pipe_handle_data_t	*ph,
1697 	usb_bulk_req_t		*bulk_reqp,
1698 	ehci_trans_wrapper_t	*tw,
1699 	usb_flags_t		flags)
1700 {
1701 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1702 	uint_t			bulk_pkt_size, count;
1703 	size_t			residue = 0, len = 0;
1704 	uint32_t		ctrl = 0;
1705 	int			pipe_dir;
1706 
1707 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1708 	    "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
1709 	    (void *)bulk_reqp, flags);
1710 
1711 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1712 
1713 	/* Get the bulk pipe direction */
1714 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
1715 
1716 	/* Get the required bulk packet size */
1717 	bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);
1718 
1719 	if (bulk_pkt_size) {
1720 		residue = tw->tw_length % bulk_pkt_size;
1721 	}
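	/*
	 * residue is the size of the final, possibly short, QTD when the
	 * transfer length is not an exact multiple of bulk_pkt_size.
	 */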
1722 
1723 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1724 	    "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);
1725 
1726 	/*
1727 	 * Save current bulk request pointer and timeout values
1728 	 * in transfer wrapper.
1729 	 */
1730 	tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
1731 	tw->tw_timeout = bulk_reqp->bulk_timeout;
1732 
1733 	/*
1734 	 * Initialize the callback and any callback
1735 	 * data required when the qtd completes.
1736 	 */
1737 	tw->tw_handle_qtd = ehci_handle_bulk_qtd;
1738 	tw->tw_handle_callback_value = NULL;
1739 
1740 	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1741 	    EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;
1742 
1743 	if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {
1744 
1745 		if (bulk_reqp->bulk_len) {
1746 			ASSERT(bulk_reqp->bulk_data != NULL);
1747 
1748 			bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
1749 			    bulk_reqp->bulk_len);
1750 
1751 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1752 			    bulk_reqp->bulk_len);
1753 		}
1754 	}
1755 
1756 	ctrl = tw->tw_direction;
1757 
1758 	/* Insert all the bulk QTDs */
1759 	for (count = 0; count < tw->tw_num_qtds; count++) {
1760 
1761 		/* Check for last qtd */
1762 		if (count == (tw->tw_num_qtds - 1)) {
1763 
1764 			ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;
1765 
1766 			/* Check for inserting residue data */
1767 			if (residue) {
1768 				bulk_pkt_size = (uint_t)residue;
1769 			}
1770 		}
1771 
1772 		/* Insert the QTD onto the endpoint */
1773 		(void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
1774 		    0, pp, tw);
1775 
1776 		len = len + bulk_pkt_size;
1777 	}
1778 
1779 	/* Start the timer for this bulk transfer */
1780 	ehci_start_xfer_timer(ehcip, pp, tw);
1781 }
1782 
1783 
1784 /*
1785  * ehci_start_periodic_pipe_polling:
1786  *
1787  * NOTE: This function is also called from POLLED MODE.
1788  */
1789 int
1790 ehci_start_periodic_pipe_polling(
1791 	ehci_state_t		*ehcip,
1792 	usba_pipe_handle_data_t	*ph,
1793 	usb_opaque_t		periodic_in_reqp,
1794 	usb_flags_t		flags)
1795 {
1796 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1797 	usb_ep_descr_t		*eptd = &ph->p_ep;
1798 	int			error = USB_SUCCESS;
1799 
1800 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
1801 	    "ehci_start_periodic_pipe_polling: ep%d",
1802 	    ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);
1803 
1804 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1805 
1806 	/*
1807 	 * Check and handle start polling on root hub interrupt pipe.
1808 	 */
1809 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
1810 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
1811 	    USB_EP_ATTR_INTR)) {
1812 
1813 		error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
1814 		    (usb_intr_req_t *)periodic_in_reqp, flags);
1815 
1816 		return (error);
1817 	}
1818 
1819 	switch (pp->pp_state) {
1820 	case EHCI_PIPE_STATE_IDLE:
1821 		/* Save the Original client's Periodic IN request */
1822 		pp->pp_client_periodic_in_reqp = periodic_in_reqp;
1823 
		/*
		 * If this pipe is uninitialized or a valid QTD is
		 * not found, then insert a QTD on the interrupt IN
		 * endpoint.
		 */
1829 		error = ehci_start_pipe_polling(ehcip, ph, flags);
1830 
1831 		if (error != USB_SUCCESS) {
1832 			USB_DPRINTF_L2(PRINT_MASK_INTR,
1833 			    ehcip->ehci_log_hdl,
1834 			    "ehci_start_periodic_pipe_polling: "
1835 			    "Start polling failed");
1836 
1837 			pp->pp_client_periodic_in_reqp = NULL;
1838 
1839 			return (error);
1840 		}
1841 
1842 		USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
1843 		    "ehci_start_periodic_pipe_polling: PP = 0x%p", (void *)pp);
1844 
1845 #ifdef DEBUG
1846 		switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1847 		case USB_EP_ATTR_INTR:
1848 			ASSERT((pp->pp_tw_head != NULL) &&
1849 			    (pp->pp_tw_tail != NULL));
1850 			break;
1851 		case USB_EP_ATTR_ISOCH:
1852 			ASSERT((pp->pp_itw_head != NULL) &&
1853 			    (pp->pp_itw_tail != NULL));
1854 			break;
1855 		}
1856 #endif
1857 
1858 		break;
1859 	case EHCI_PIPE_STATE_ACTIVE:
1860 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1861 		    ehcip->ehci_log_hdl,
1862 		    "ehci_start_periodic_pipe_polling: "
1863 		    "Polling is already in progress");
1864 
1865 		error = USB_FAILURE;
1866 		break;
1867 	case EHCI_PIPE_STATE_ERROR:
1868 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1869 		    ehcip->ehci_log_hdl,
1870 		    "ehci_start_periodic_pipe_polling: "
		    "Pipe is halted; perform a reset "
		    "before restarting polling");
1873 
1874 		error = USB_FAILURE;
1875 		break;
1876 	default:
1877 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1878 		    ehcip->ehci_log_hdl,
1879 		    "ehci_start_periodic_pipe_polling: "
1880 		    "Undefined state");
1881 
1882 		error = USB_FAILURE;
1883 		break;
1884 	}
1885 
1886 	return (error);
1887 }
1888 
1889 
1890 /*
1891  * ehci_start_pipe_polling:
1892  *
1893  * Insert the number of periodic requests corresponding to polling
1894  * interval as calculated during pipe open.
1895  */
1896 static int
1897 ehci_start_pipe_polling(
1898 	ehci_state_t		*ehcip,
1899 	usba_pipe_handle_data_t	*ph,
1900 	usb_flags_t		flags)
1901 {
1902 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1903 	usb_ep_descr_t		*eptd = &ph->p_ep;
1904 	int			error = USB_FAILURE;
1905 
1906 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1907 	    "ehci_start_pipe_polling:");
1908 
1909 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1910 
1911 	/*
	 * For a start-polling request, pp_max_periodic_req_cnt will be
	 * zero; for a restart-polling request it will be non-zero.
	 *
	 * For a start-polling request, find the number of requests
	 * required for Interrupt IN endpoints from the endpoint polling
	 * interval. For Isochronous IN endpoints, the count is always
	 * fixed since their polling interval is one ms.
1919 	 */
1920 	if (pp->pp_max_periodic_req_cnt == 0) {
1921 
1922 		ehci_set_periodic_pipe_polling(ehcip, ph);
1923 	}
1924 
1925 	ASSERT(pp->pp_max_periodic_req_cnt != 0);
1926 
1927 	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1928 	case USB_EP_ATTR_INTR:
1929 		error = ehci_start_intr_polling(ehcip, ph, flags);
1930 		break;
1931 	case USB_EP_ATTR_ISOCH:
1932 		error = ehci_start_isoc_polling(ehcip, ph, flags);
1933 		break;
1934 	}
1935 
1936 	return (error);
1937 }
1938 
1939 static int
1940 ehci_start_intr_polling(
1941 	ehci_state_t		*ehcip,
1942 	usba_pipe_handle_data_t	*ph,
1943 	usb_flags_t		flags)
1944 {
1945 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1946 	ehci_trans_wrapper_t	*tw_list, *tw;
1947 	int			i, total_tws;
1948 	int			error = USB_SUCCESS;
1949 
1950 	/* Allocate all the necessary resources for the IN transfer */
1951 	tw_list = NULL;
1952 	total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
1953 	for (i = 0; i < total_tws; i += 1) {
1954 		tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
1955 		if (tw == NULL) {
1956 			error = USB_NO_RESOURCES;
1957 			/* There are not enough resources, deallocate the TWs */
1958 			tw = tw_list;
1959 			while (tw != NULL) {
1960 				tw_list = tw->tw_next;
1961 				ehci_deallocate_intr_in_resource(
1962 				    ehcip, pp, tw);
1963 				ehci_deallocate_tw(ehcip, pp, tw);
1964 				tw = tw_list;
1965 			}
1966 
1967 			return (error);
1968 		} else {
1969 			if (tw_list == NULL) {
1970 				tw_list = tw;
1971 			}
1972 		}
1973 	}
1974 
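	/*
	 * All TWs were allocated up front (all or nothing); now queue one
	 * interrupt request per TW until the pipe's maximum periodic
	 * request count is reached.
	 */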
1975 	while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {
1976 
1977 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_start_intr_polling: max = %d curr = %d tw = %p:",
1979 		    pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
1980 		    (void *)tw_list);
1981 
1982 		tw = tw_list;
1983 		tw_list = tw->tw_next;
1984 
1985 		ehci_insert_intr_req(ehcip, pp, tw, flags);
1986 
1987 		pp->pp_cur_periodic_req_cnt++;
1988 	}
1989 
1990 	return (error);
1991 }
1992 
1993 
1994 /*
1995  * ehci_set_periodic_pipe_polling:
1996  *
 * Calculate the number of periodic requests needed for an interrupt IN
 * endpoint, based on its polling interval. The table below gives the
 * number of periodic requests needed for each endpoint polling interval.
2001  *
2002  * Polling interval		Number of periodic requests
2003  *
2004  * 1ms				4
2005  * 2ms				2
2006  * 4ms to 32ms			1
2007  */
2008 static void
2009 ehci_set_periodic_pipe_polling(
2010 	ehci_state_t		*ehcip,
2011 	usba_pipe_handle_data_t	*ph)
2012 {
2013 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2014 	usb_ep_descr_t		*endpoint = &ph->p_ep;
2015 	uchar_t			ep_attr = endpoint->bmAttributes;
2016 	uint_t			interval;
2017 
2018 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2019 	    "ehci_set_periodic_pipe_polling:");
2020 
2021 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2022 
2023 	pp->pp_cur_periodic_req_cnt = 0;
2024 
2025 	/*
	 * If the client's interrupt request has the USB_ATTRS_ONE_XFER
	 * attribute set, only one transfer is needed, so limit
	 * pp->pp_max_periodic_req_cnt to EHCI_INTR_XMS_REQS.
2028 	 */
2029 	if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
2030 	    (pp->pp_client_periodic_in_reqp)) {
2031 		usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
2032 		    pp->pp_client_periodic_in_reqp;
2033 
2034 		if (intr_reqp->intr_attributes &
2035 		    USB_ATTRS_ONE_XFER) {
2036 
2037 			pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2038 
2039 			return;
2040 		}
2041 	}
2042 
2043 	mutex_enter(&ph->p_usba_device->usb_mutex);
2044 
2045 	/*
2046 	 * The ehci_adjust_polling_interval function will not fail
2047 	 * at this instance since bandwidth allocation is already
2048 	 * done. Here we are getting only the periodic interval.
2049 	 */
2050 	interval = ehci_adjust_polling_interval(ehcip, endpoint,
2051 	    ph->p_usba_device->usb_port_status);
2052 
2053 	mutex_exit(&ph->p_usba_device->usb_mutex);
2054 
2055 	switch (interval) {
2056 	case EHCI_INTR_1MS_POLL:
2057 		pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
2058 		break;
2059 	case EHCI_INTR_2MS_POLL:
2060 		pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
2061 		break;
2062 	default:
2063 		pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2064 		break;
2065 	}
2066 
2067 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2068 	    "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
2069 	    pp->pp_max_periodic_req_cnt);
2070 }
2071 
2072 /*
2073  * ehci_allocate_intr_resources:
2074  *
 * Calculates the number of QTDs necessary for an interrupt transfer, and
 * allocates all the necessary resources.
 *
 * Returns NULL if there are insufficient resources; otherwise returns the TW.
2079  */
2080 ehci_trans_wrapper_t *
2081 ehci_allocate_intr_resources(
2082 	ehci_state_t		*ehcip,
2083 	usba_pipe_handle_data_t	*ph,
2084 	usb_intr_req_t		*intr_reqp,
2085 	usb_flags_t		flags)
2086 {
2087 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2088 	int			pipe_dir;
2089 	size_t			qtd_count = 1;
2090 	size_t			tw_length;
2091 	ehci_trans_wrapper_t	*tw;
2092 
2093 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2094 	    "ehci_allocate_intr_resources:");
2095 
2096 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2097 
2098 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2099 
2100 	/* Get the length of interrupt transfer & alloc data */
2101 	if (intr_reqp) {
2102 		tw_length = intr_reqp->intr_len;
2103 	} else {
2104 		ASSERT(pipe_dir == USB_EP_DIR_IN);
2105 		tw_length = (pp->pp_client_periodic_in_reqp) ?
2106 		    (((usb_intr_req_t *)pp->
2107 		    pp_client_periodic_in_reqp)->intr_len) :
2108 		    ph->p_ep.wMaxPacketSize;
2109 	}
2110 
2111 	/* Check the size of interrupt request */
2112 	if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2113 
2114 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2115 		    "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2116 		    "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2117 
2118 		return (NULL);
2119 	}
2120 
2121 	if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2122 	    qtd_count)) == NULL) {
2123 
2124 		return (NULL);
2125 	}
2126 
	if (pipe_dir == USB_EP_DIR_IN) {
		if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
		    USB_SUCCESS) {
			ehci_deallocate_tw(ehcip, pp, tw);

			/* The TW has been deallocated; don't touch it */
			return (NULL);
		}
		tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2133 	} else {
2134 		if (tw_length) {
2135 			ASSERT(intr_reqp->intr_data != NULL);
2136 
2137 			/* Copy the data into the buffer */
2138 			bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2139 			    intr_reqp->intr_len);
2140 
2141 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2142 			    intr_reqp->intr_len);
2143 		}
2144 
2145 		tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2146 		tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2147 	}
2148 
2149 	if (intr_reqp) {
2150 		tw->tw_timeout = intr_reqp->intr_timeout;
2151 	}
2152 
2153 	/*
2154 	 * Initialize the callback and any callback
2155 	 * data required when the qtd completes.
2156 	 */
2157 	tw->tw_handle_qtd = ehci_handle_intr_qtd;
2158 	tw->tw_handle_callback_value = NULL;
2159 
2160 	return (tw);
2161 }
2162 
2163 
2164 /*
2165  * ehci_insert_intr_req:
2166  *
2167  * Insert an Interrupt request into the Host Controller's periodic list.
2168  */
2169 /* ARGSUSED */
2170 void
2171 ehci_insert_intr_req(
2172 	ehci_state_t		*ehcip,
2173 	ehci_pipe_private_t	*pp,
2174 	ehci_trans_wrapper_t	*tw,
2175 	usb_flags_t		flags)
2176 {
2177 	uint_t			ctrl = 0;
2178 
2179 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2180 
2181 	ASSERT(tw->tw_curr_xfer_reqp != NULL);
2182 
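	/*
	 * Each interrupt request uses a single QTD, so interrupt on
	 * completion is set on it unconditionally.
	 */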
2183 	ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2184 
2185 	/* Insert another interrupt QTD */
2186 	(void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw);
2187 
2188 	/* Start the timer for this Interrupt transfer */
2189 	ehci_start_xfer_timer(ehcip, pp, tw);
2190 }
2191 
2192 
2193 /*
2194  * ehci_stop_periodic_pipe_polling:
2195  */
2196 /* ARGSUSED */
2197 int
2198 ehci_stop_periodic_pipe_polling(
2199 	ehci_state_t		*ehcip,
2200 	usba_pipe_handle_data_t	*ph,
2201 	usb_flags_t		flags)
2202 {
2203 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2204 	usb_ep_descr_t		*eptd = &ph->p_ep;
2205 
2206 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2207 	    "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2208 
2209 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2210 
2211 	/*
2212 	 * Check and handle stop polling on root hub interrupt pipe.
2213 	 */
2214 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2215 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2216 	    USB_EP_ATTR_INTR)) {
2217 
2218 		ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2219 
2220 		return (USB_SUCCESS);
2221 	}
2222 
2223 	if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2224 
2225 		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2226 		    "ehci_stop_periodic_pipe_polling: "
2227 		    "Polling already stopped");
2228 
2229 		return (USB_SUCCESS);
2230 	}
2231 
2232 	/* Set pipe state to pipe stop polling */
2233 	pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2234 
2235 	ehci_pipe_cleanup(ehcip, ph);
2236 
2237 	return (USB_SUCCESS);
2238 }
2239 
2240 
2241 /*
2242  * ehci_insert_qtd:
2243  *
2244  * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2245  * Always returns USB_SUCCESS for now.	Once Isoch has been implemented,
2246  * it may return USB_FAILURE.
2247  */
2248 int
2249 ehci_insert_qtd(
2250 	ehci_state_t		*ehcip,
2251 	uint32_t		qtd_ctrl,
2252 	size_t			qtd_dma_offs,
2253 	size_t			qtd_length,
2254 	uint32_t		qtd_ctrl_phase,
2255 	ehci_pipe_private_t	*pp,
2256 	ehci_trans_wrapper_t	*tw)
2257 {
2258 	ehci_qtd_t		*curr_dummy_qtd, *next_dummy_qtd;
2259 	ehci_qtd_t		*new_dummy_qtd;
2260 	ehci_qh_t		*qh = pp->pp_qh;
2261 	int			error = USB_SUCCESS;
2262 
2263 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2264 
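	/*
	 * The QH always terminates in an inactive dummy QTD.  A transfer
	 * is queued by filling in the QH's current dummy with the real
	 * request and linking a fresh dummy onto the end of the chain,
	 * so the HC never sees a partially built descriptor.
	 */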
2265 	/* Allocate new dummy QTD */
2266 	new_dummy_qtd = tw->tw_qtd_free_list;
2267 
2268 	ASSERT(new_dummy_qtd != NULL);
2269 	tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
2270 	    Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
2271 	Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);
2272 
2273 	/* Get the current and next dummy QTDs */
2274 	curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2275 	    Get_QH(qh->qh_dummy_qtd));
2276 	next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2277 	    Get_QTD(curr_dummy_qtd->qtd_next_qtd));
2278 
2279 	/* Update QH's dummy qtd field */
2280 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));
2281 
2282 	/* Update next dummy's next qtd pointer */
2283 	Set_QTD(next_dummy_qtd->qtd_next_qtd,
2284 	    ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));
2285 
2286 	/*
2287 	 * Fill in the current dummy qtd and
2288 	 * add the new dummy to the end.
2289 	 */
2290 	ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
2291 	    qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);
2292 
2293 	/* Insert this qtd onto the tw */
2294 	ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);
2295 
2296 	/*
2297 	 * Insert this qtd onto active qtd list.
2298 	 * Don't insert polled mode qtd here.
2299 	 */
2300 	if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
2301 		/* Insert this qtd onto active qtd list */
2302 		ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
2303 	}
2304 
2305 	/* Print qh and qtd */
2306 	ehci_print_qh(ehcip, qh);
2307 	ehci_print_qtd(ehcip, curr_dummy_qtd);
2308 
2309 	return (error);
2310 }
2311 
2312 
2313 /*
2314  * ehci_allocate_qtd_from_pool:
2315  *
2316  * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2317  */
2318 static ehci_qtd_t *
2319 ehci_allocate_qtd_from_pool(ehci_state_t	*ehcip)
2320 {
2321 	int		i, ctrl;
2322 	ehci_qtd_t	*qtd;
2323 
2324 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2325 
2326 	/*
2327 	 * Search for a blank Transfer Descriptor (QTD)
2328 	 * in the QTD buffer pool.
2329 	 */
2330 	for (i = 0; i < ehci_qtd_pool_size; i ++) {
2331 		ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2332 		if (ctrl == EHCI_QTD_FREE) {
2333 			break;
2334 		}
2335 	}
2336 
2337 	if (i >= ehci_qtd_pool_size) {
2338 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2339 		    "ehci_allocate_qtd_from_pool: QTD exhausted");
2340 
2341 		return (NULL);
2342 	}
2343 
2344 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2345 	    "ehci_allocate_qtd_from_pool: Allocated %d", i);
2346 
2347 	/* Create a new dummy for the end of the QTD list */
2348 	qtd = &ehcip->ehci_qtd_pool_addr[i];
2349 
2350 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2351 	    "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2352 
2353 	/* Mark the newly allocated QTD as a dummy */
2354 	Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2355 
2356 	/* Mark the status of this new QTD to halted state */
2357 	Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2358 
2359 	/* Disable dummy QTD's next and alternate next pointers */
2360 	Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2361 	Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2362 
2363 	return (qtd);
2364 }
2365 
2366 
2367 /*
2368  * ehci_fill_in_qtd:
2369  *
2370  * Fill in the fields of a Transfer Descriptor (QTD).
2371  * The "Buffer Pointer" fields of a QTD are retrieved from the TW
2372  * it is associated with.
2373  *
2374  * Note:
2375  * qtd_dma_offs - the starting offset into the TW buffer, where the QTD
 *		  should transfer from. It should be 4K aligned, and when
 *		  a TW has more than one QTD, the QTDs must be filled in
 *		  increasing order.
2379  * qtd_length - the total bytes to transfer.
2380  */
2381 /*ARGSUSED*/
2382 static void
2383 ehci_fill_in_qtd(
2384 	ehci_state_t		*ehcip,
2385 	ehci_qtd_t		*qtd,
2386 	uint32_t		qtd_ctrl,
2387 	size_t			qtd_dma_offs,
2388 	size_t			qtd_length,
2389 	uint32_t		qtd_ctrl_phase,
2390 	ehci_pipe_private_t	*pp,
2391 	ehci_trans_wrapper_t	*tw)
2392 {
2393 	uint32_t		buf_addr;
2394 	size_t			buf_len = qtd_length;
2395 	uint32_t		ctrl = qtd_ctrl;
2396 	uint_t			i = 0;
2397 	int			rem_len;
2398 
2399 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2400 	    "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
2401 	    "len 0x%lx", (void *)qtd, qtd_ctrl, qtd_dma_offs, qtd_length);
2402 
2403 	/* Assert that the qtd to be filled in is a dummy */
2404 	ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);
2405 
	/* Change QTD's state to Active */
2407 	Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);
2408 
	/* Set the total length of the data transfer */
2410 	ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
2411 	    & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);
2412 
2413 	/*
2414 	 * QTDs must be filled in increasing DMA offset order.
2415 	 * tw_dma_offs is initialized to be 0 at TW creation and
2416 	 * is only increased in this function.
2417 	 */
2418 	ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);
2419 
2420 	/*
2421 	 * Save the starting dma buffer offset used and
	 * length of data that will be transferred in
2423 	 * the current QTD.
2424 	 */
2425 	Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
2426 	Set_QTD(qtd->qtd_xfer_len, buf_len);
2427 
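	/*
	 * Fill the QTD's buffer pointer slots (qtd_buf[]), one chunk of
	 * up to EHCI_MAX_QTD_BUF_SIZE bytes per slot, walking the TW's
	 * DMA cookies as needed.
	 */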
2428 	while (buf_len) {
2429 		/*
2430 		 * Advance to the next DMA cookie until finding the cookie
2431 		 * that qtd_dma_offs falls in.
2432 		 * It is very likely this loop will never repeat more than
2433 		 * once. It is here just to accommodate the case qtd_dma_offs
2434 		 * is increased by multiple cookies during two consecutive
2435 		 * calls into this function. In that case, the interim DMA
2436 		 * buffer is allowed to be skipped.
2437 		 */
2438 		while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2439 		    qtd_dma_offs) {
2440 			/*
2441 			 * tw_dma_offs always points to the starting offset
2442 			 * of a cookie
2443 			 */
2444 			tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2445 			ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2446 			tw->tw_cookie_idx++;
2447 			ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2448 		}
2449 
2450 		/*
		 * Count the remaining buffer length to be filled into
		 * the QTD for the current DMA cookie.
2453 		 */
2454 		rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2455 		    qtd_dma_offs;
2456 
2457 		/* Update the beginning of the buffer */
2458 		buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
2459 		    tw->tw_cookie.dmac_address;
2460 		ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
2461 		Set_QTD(qtd->qtd_buf[i], buf_addr);
2462 
2463 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2464 		    "ehci_fill_in_qtd: dmac_addr 0x%x dmac_size "
2465 		    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2466 		    tw->tw_cookie_idx);
2467 
2468 		if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
2469 			ASSERT(buf_len <= rem_len);
2470 			break;
2471 		} else {
2472 			ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
2473 			buf_len -= EHCI_MAX_QTD_BUF_SIZE;
2474 			qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
2475 		}
2476 
2477 		i++;
2478 	}
2479 
2480 	/*
2481 	 * Setup the alternate next qTD pointer if appropriate.  The alternate
2482 	 * qtd is currently pointing to a QTD that is not yet linked, but will
 * be in the very near future.	If a short xfer occurs in this
 * situation, the HC will automatically skip this QH.  Eventually
 * everything will be placed and the alternate_qtd will be a valid QTD.
2486 	 * For more information on alternate qtds look at section 3.5.2 in the
2487 	 * EHCI spec.
2488 	 */
2489 	if (tw->tw_alt_qtd != NULL) {
2490 		Set_QTD(qtd->qtd_alt_next_qtd,
2491 		    (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
2492 		    EHCI_QTD_ALT_NEXT_QTD_PTR));
2493 	}
2494 
2495 	/*
2496 	 * For control, bulk and interrupt QTD, now
2497 	 * enable current QTD by setting active bit.
2498 	 */
2499 	Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));
2500 
2501 	/*
	 * For Control Xfer, qtd_ctrl_phase is a valid field.
2503 	 */
2504 	if (qtd_ctrl_phase) {
2505 		Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
2506 	}
2507 
2508 	/* Set the transfer wrapper */
2509 	ASSERT(tw != NULL);
2510 	ASSERT(tw->tw_id != NULL);
2511 
2512 	Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
2513 }
2514 
2515 
2516 /*
2517  * ehci_insert_qtd_on_tw:
2518  *
2519  * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
 * are allocated for this transfer. Insert a QTD onto this list. The list
 * of QTDs does not include the dummy QTD that is at the end of the list of
 * QTDs for the endpoint.
2523  */
2524 static void
2525 ehci_insert_qtd_on_tw(
2526 	ehci_state_t		*ehcip,
2527 	ehci_trans_wrapper_t	*tw,
2528 	ehci_qtd_t		*qtd)
2529 {
2530 	/*
2531 	 * Set the next pointer to NULL because
	 * this is the last QTD on the list.
2533 	 */
2534 	Set_QTD(qtd->qtd_tw_next_qtd, NULL);
2535 
2536 	if (tw->tw_qtd_head == NULL) {
2537 		ASSERT(tw->tw_qtd_tail == NULL);
2538 		tw->tw_qtd_head = qtd;
2539 		tw->tw_qtd_tail = qtd;
2540 	} else {
2541 		ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2542 
2543 		ASSERT(dummy != NULL);
2544 		ASSERT(dummy != qtd);
2545 		ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2546 
2547 		/* Add the qtd to the end of the list */
2548 		Set_QTD(dummy->qtd_tw_next_qtd,
2549 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2550 
2551 		tw->tw_qtd_tail = qtd;
2552 
2553 		ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
2554 	}
2555 }
2556 
2557 
2558 /*
2559  * ehci_insert_qtd_into_active_qtd_list:
2560  *
2561  * Insert current QTD into active QTD list.
2562  */
2563 static void
2564 ehci_insert_qtd_into_active_qtd_list(
2565 	ehci_state_t		*ehcip,
2566 	ehci_qtd_t		*qtd)
2567 {
2568 	ehci_qtd_t		*curr_qtd, *next_qtd;
2569 
2570 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2571 
2572 	curr_qtd = ehcip->ehci_active_qtd_list;
2573 
2574 	/* Insert this QTD into QTD Active List */
2575 	if (curr_qtd) {
2576 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2577 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2578 
2579 		while (next_qtd) {
2580 			curr_qtd = next_qtd;
2581 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2582 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2583 		}
2584 
2585 		Set_QTD(qtd->qtd_active_qtd_prev,
2586 		    ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2587 
2588 		Set_QTD(curr_qtd->qtd_active_qtd_next,
2589 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2590 	} else {
2591 		ehcip->ehci_active_qtd_list = qtd;
2592 		Set_QTD(qtd->qtd_active_qtd_next, NULL);
2593 		Set_QTD(qtd->qtd_active_qtd_prev, NULL);
2594 	}
2595 }
2596 
2597 
2598 /*
2599  * ehci_remove_qtd_from_active_qtd_list:
2600  *
2601  * Remove current QTD from the active QTD list.
2602  *
2603  * NOTE: This function is also called from POLLED MODE.
2604  */
2605 void
2606 ehci_remove_qtd_from_active_qtd_list(
2607 	ehci_state_t		*ehcip,
2608 	ehci_qtd_t		*qtd)
2609 {
2610 	ehci_qtd_t		*curr_qtd, *prev_qtd, *next_qtd;
2611 
2612 	ASSERT(qtd != NULL);
2613 
2614 	curr_qtd = ehcip->ehci_active_qtd_list;
2615 
2616 	while ((curr_qtd) && (curr_qtd != qtd)) {
2617 		curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2618 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2619 	}
2620 
2621 	if ((curr_qtd) && (curr_qtd == qtd)) {
2622 		prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2623 		    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2624 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2625 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2626 
2627 		if (prev_qtd) {
2628 			Set_QTD(prev_qtd->qtd_active_qtd_next,
2629 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2630 		} else {
2631 			ehcip->ehci_active_qtd_list = next_qtd;
2632 		}
2633 
2634 		if (next_qtd) {
2635 			Set_QTD(next_qtd->qtd_active_qtd_prev,
2636 			    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2637 		}
2638 	} else {
2639 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2640 		    "ehci_remove_qtd_from_active_qtd_list: "
2641 		    "Unable to find QTD in active_qtd_list");
2642 	}
2643 }
2644 
2645 
2646 /*
2647  * ehci_traverse_qtds:
2648  *
 * Traverse the list of QTDs for the given pipe using its transfer wrappers.
 * Since the endpoint is marked as Halted, the Host Controller (HC) is no
 * longer accessing these QTDs. Remove all the QTDs attached to the endpoint.
2652  */
2653 static void
2654 ehci_traverse_qtds(
2655 	ehci_state_t		*ehcip,
2656 	usba_pipe_handle_data_t	*ph)
2657 {
2658 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2659 	ehci_trans_wrapper_t	*next_tw;
2660 	ehci_qtd_t		*qtd;
2661 	ehci_qtd_t		*next_qtd;
2662 
2663 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2664 
2665 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2666 	    "ehci_traverse_qtds:");
2667 
2668 	/* Process the transfer wrappers for this pipe */
2669 	next_tw = pp->pp_tw_head;
2670 
2671 	while (next_tw) {
		/* Stop the transfer timer */
2673 		ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);
2674 
2675 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
2676 
2677 		/* Walk through each QTD for this transfer wrapper */
2678 		while (qtd) {
2679 			/* Remove this QTD from active QTD list */
2680 			ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);
2681 
2682 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2683 			    Get_QTD(qtd->qtd_tw_next_qtd));
2684 
2685 			/* Deallocate this QTD */
2686 			ehci_deallocate_qtd(ehcip, qtd);
2687 
2688 			qtd = next_qtd;
2689 		}
2690 
2691 		next_tw = next_tw->tw_next;
2692 	}
2693 
2694 	/* Clear current qtd pointer */
2695 	Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);
2696 
2697 	/* Update the next qtd pointer in the QH */
2698 	Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
2699 }
2700 
2701 
2702 /*
2703  * ehci_deallocate_qtd:
2704  *
2705  * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2706  *
2707  * NOTE: This function is also called from POLLED MODE.
2708  */
2709 void
2710 ehci_deallocate_qtd(
2711 	ehci_state_t		*ehcip,
2712 	ehci_qtd_t		*old_qtd)
2713 {
2714 	ehci_trans_wrapper_t	*tw = NULL;
2715 
2716 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2717 	    "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);
2718 
2719 	/*
	 * Obtain the transaction wrapper; tw will be
	 * NULL for dummy QTDs.
2722 	 */
2723 	if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
2724 		tw = (ehci_trans_wrapper_t *)
2725 		    EHCI_LOOKUP_ID((uint32_t)
2726 		    Get_QTD(old_qtd->qtd_trans_wrapper));
2727 
2728 		ASSERT(tw != NULL);
2729 	}
2730 
2731 	/*
2732 	 * If QTD's transfer wrapper is NULL, don't access its TW.
2733 	 * Just free the QTD.
2734 	 */
2735 	if (tw) {
2736 		ehci_qtd_t	*qtd, *next_qtd;
2737 
2738 		qtd = tw->tw_qtd_head;
2739 
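		/*
		 * Unlink old_qtd from the TW's QTD list: either walk the
		 * list to find its predecessor, or simply advance the
		 * head pointer when old_qtd is at the head.
		 */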
2740 		if (old_qtd != qtd) {
2741 			next_qtd = ehci_qtd_iommu_to_cpu(
2742 			    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2743 
2744 			while (next_qtd != old_qtd) {
2745 				qtd = next_qtd;
2746 				next_qtd = ehci_qtd_iommu_to_cpu(
2747 				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2748 			}
2749 
2750 			Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);
2751 
2752 			if (qtd->qtd_tw_next_qtd == NULL) {
2753 				tw->tw_qtd_tail = qtd;
2754 			}
2755 		} else {
2756 			tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
2757 			    ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));
2758 
2759 			if (tw->tw_qtd_head == NULL) {
2760 				tw->tw_qtd_tail = NULL;
2761 			}
2762 		}
2763 	}
2764 
2765 	bzero((void *)old_qtd, sizeof (ehci_qtd_t));
2766 	Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);
2767 
2768 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2769 	    "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
2770 }
2771 
2772 
2773 /*
2774  * ehci_qtd_cpu_to_iommu:
2775  *
 * This function converts the given Transfer Descriptor (QTD) CPU address
 * to an IO address.
2778  *
2779  * NOTE: This function is also called from POLLED MODE.
2780  */
2781 uint32_t
2782 ehci_qtd_cpu_to_iommu(
2783 	ehci_state_t	*ehcip,
2784 	ehci_qtd_t	*addr)
2785 {
2786 	uint32_t	td;
2787 
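	/*
	 * The whole QTD pool is covered by a single DMA cookie
	 * (ehci_qtd_pool_cookie), so the CPU to IO translation is a
	 * constant offset from the pool base.
	 */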
2788 	td  = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2789 	    (uint32_t)((uintptr_t)addr -
2790 	    (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2791 
2792 	ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2793 	    (uint32_t) (sizeof (ehci_qtd_t) *
2794 	    (addr - ehcip->ehci_qtd_pool_addr))) ==
2795 	    (ehcip->ehci_qtd_pool_cookie.dmac_address +
2796 	    (uint32_t)((uintptr_t)addr - (uintptr_t)
2797 	    (ehcip->ehci_qtd_pool_addr))));
2798 
2799 	ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2800 	ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2801 	    sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2802 
2803 	return (td);
2804 }
2805 
2806 
2807 /*
2808  * ehci_qtd_iommu_to_cpu:
2809  *
 * This function converts the given Transfer Descriptor (QTD) IO address
 * to a CPU address.
2812  *
2813  * NOTE: This function is also called from POLLED MODE.
2814  */
2815 ehci_qtd_t *
2816 ehci_qtd_iommu_to_cpu(
2817 	ehci_state_t	*ehcip,
2818 	uintptr_t	addr)
2819 {
2820 	ehci_qtd_t	*qtd;
2821 
2822 	if (addr == NULL) {
2823 
2824 		return (NULL);
2825 	}
2826 
2827 	qtd = (ehci_qtd_t *)((uintptr_t)
2828 	    (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2829 	    (uintptr_t)ehcip->ehci_qtd_pool_addr);
2830 
2831 	ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2832 	ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2833 	    (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2834 
2835 	return (qtd);
2836 }
2837 
2838 /*
 * ehci_allocate_tds_for_tw:
 *
 * Allocate n Transfer Descriptors (QTDs) from the QTD buffer pool and place
 * them into the TW.  Also chooses the correct alternate qtd when required.
 * It is used for hardware short transfer support.  For more information on
 * alternate qtds look at section 3.5.2 in the EHCI spec.
 * Here is how the alternate qtds are used:
2846  *
2847  * Bulk: used fully.
2848  * Intr: xfers only require 1 QTD, so alternate qtds are never used.
2849  * Ctrl: Should not use alternate QTD
2850  * Isoch: Doesn't support short_xfer nor does it use QTD
2851  *
 * Returns USB_NO_RESOURCES if it was not able to allocate all the requested
 * QTDs; otherwise USB_SUCCESS.
2854  */
2855 int
2856 ehci_allocate_tds_for_tw(
2857 	ehci_state_t		*ehcip,
2858 	ehci_pipe_private_t	*pp,
2859 	ehci_trans_wrapper_t	*tw,
2860 	size_t			qtd_count)
2861 {
2862 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
2863 	uchar_t			attributes;
2864 	ehci_qtd_t		*qtd;
2865 	uint32_t		qtd_addr;
2866 	int			i;
2867 	int			error = USB_SUCCESS;
2868 
2869 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2870 
2871 	for (i = 0; i < qtd_count; i += 1) {
2872 		qtd = ehci_allocate_qtd_from_pool(ehcip);
2873 		if (qtd == NULL) {
2874 			error = USB_NO_RESOURCES;
2875 			USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
			    "ehci_allocate_tds_for_tw: "
2877 			    "Unable to allocate %lu QTDs",
2878 			    qtd_count);
2879 			break;
2880 		}
2881 		if (i > 0) {
2882 			qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2883 			    tw->tw_qtd_free_list);
2884 			Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2885 		}
2886 		tw->tw_qtd_free_list = qtd;
2887 
2888 		/*
2889 		 * Save the second one as a pointer to the new dummy 1.
2890 		 * It is used later for the alt_qtd_ptr.  Xfers with only
2891 		 * one qtd do not need alt_qtd_ptr.
		 * The QTDs are allocated and pushed onto a stack, which is
		 * why the second qtd allocated will turn out to be the
		 * new dummy 1.
2895 		 */
2896 		if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2897 			tw->tw_alt_qtd = qtd;
2898 		}
2899 	}
2900 
2901 	return (error);
2902 }
2903 
2904 /*
2905  * ehci_allocate_tw_resources:
2906  *
2907  * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD)
 * from the QTD buffer pool and place them into the TW.  It is an
 * all-or-nothing allocation.
2910  *
 * Returns NULL if there are insufficient resources; otherwise returns the TW.
2912  */
2913 static ehci_trans_wrapper_t *
2914 ehci_allocate_tw_resources(
2915 	ehci_state_t		*ehcip,
2916 	ehci_pipe_private_t	*pp,
2917 	size_t			tw_length,
2918 	usb_flags_t		usb_flags,
2919 	size_t			qtd_count)
2920 {
2921 	ehci_trans_wrapper_t	*tw;
2922 
2923 	tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2924 
2925 	if (tw == NULL) {
2926 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2927 		    "ehci_allocate_tw_resources: Unable to allocate TW");
2928 	} else {
2929 		if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2930 		    USB_SUCCESS) {
2931 			tw->tw_num_qtds = (uint_t)qtd_count;
2932 		} else {
2933 			ehci_deallocate_tw(ehcip, pp, tw);
2934 			tw = NULL;
2935 		}
2936 	}
2937 
2938 	return (tw);
2939 }
2940 
2941 
2942 /*
2943  * ehci_free_tw_td_resources:
2944  *
 * Free all QTD resources allocated to a Transaction Wrapper (TW).
 * Does not free the TW itself.
2949  */
2950 static void
2951 ehci_free_tw_td_resources(
2952 	ehci_state_t		*ehcip,
2953 	ehci_trans_wrapper_t	*tw)
2954 {
2955 	ehci_qtd_t		*qtd = NULL;
2956 	ehci_qtd_t		*temp_qtd = NULL;
2957 
2958 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2959 	    "ehci_free_tw_td_resources: tw = 0x%p", (void *)tw);
2960 
2961 	qtd = tw->tw_qtd_free_list;
2962 	while (qtd != NULL) {
2963 		/* Save the pointer to the next qtd before destroying it */
2964 		temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2965 		    Get_QTD(qtd->qtd_tw_next_qtd));
2966 		ehci_deallocate_qtd(ehcip, qtd);
2967 		qtd = temp_qtd;
2968 	}
2969 	tw->tw_qtd_free_list = NULL;
2970 }
2971 
2972 /*
2973  * Transfer Wrapper functions
2974  *
2975  * ehci_create_transfer_wrapper:
2976  *
 * Create a Transaction Wrapper (TW); this involves allocating DMA
 * resources.
2979  */
2980 static ehci_trans_wrapper_t *
2981 ehci_create_transfer_wrapper(
2982 	ehci_state_t		*ehcip,
2983 	ehci_pipe_private_t	*pp,
2984 	size_t			length,
2985 	uint_t			usb_flags)
2986 {
2987 	ddi_device_acc_attr_t	dev_attr;
2988 	ddi_dma_attr_t		dma_attr;
2989 	int			result;
2990 	size_t			real_length;
2991 	ehci_trans_wrapper_t	*tw;
2992 	int			kmem_flag;
2993 	int			(*dmamem_wait)(caddr_t);
2994 
2995 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2996 	    "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
2997 	    length, usb_flags);
2998 
2999 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3000 
3001 	/* SLEEP flag should not be used in interrupt context */
3002 	if (servicing_interrupt()) {
3003 		kmem_flag = KM_NOSLEEP;
3004 		dmamem_wait = DDI_DMA_DONTWAIT;
3005 	} else {
3006 		kmem_flag = KM_SLEEP;
3007 		dmamem_wait = DDI_DMA_SLEEP;
3008 	}
3009 
3010 	/* Allocate space for the transfer wrapper */
3011 	tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);
3012 
3013 	if (tw == NULL) {
3014 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3015 		    "ehci_create_transfer_wrapper: kmem_zalloc failed");
3016 
3017 		return (NULL);
3018 	}
3019 
3020 	/* zero-length packet doesn't need to allocate dma memory */
3021 	if (length == 0) {
3022 
3023 		goto dmadone;
3024 	}
3025 
3026 	/* allow sg lists for transfer wrapper dma memory */
3027 	bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3028 	dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
3029 	dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
3030 
3031 	/* Allocate the DMA handle */
3032 	result = ddi_dma_alloc_handle(ehcip->ehci_dip,
3033 	    &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);
3034 
3035 	if (result != DDI_SUCCESS) {
3036 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3037 		    "ehci_create_transfer_wrapper: Alloc handle failed");
3038 
3039 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3040 
3041 		return (NULL);
3042 	}
3043 
3044 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
3045 
3046 	/* no need for swapping the raw data */
3047 	dev_attr.devacc_attr_endian_flags  = DDI_NEVERSWAP_ACC;
3048 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
3049 
3050 	/* Allocate the memory */
3051 	result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
3052 	    &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3053 	    (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);
3054 
3055 	if (result != DDI_SUCCESS) {
3056 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3057 		    "ehci_create_transfer_wrapper: dma_mem_alloc fail");
3058 
3059 		ddi_dma_free_handle(&tw->tw_dmahandle);
3060 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3061 
3062 		return (NULL);
3063 	}
3064 
3065 	ASSERT(real_length >= length);
3066 
3067 	/* Bind the handle */
3068 	result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
3069 	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
3070 	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);
3071 
3072 	if (result != DDI_DMA_MAPPED) {
3073 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
3074 
3075 		ddi_dma_mem_free(&tw->tw_accesshandle);
3076 		ddi_dma_free_handle(&tw->tw_dmahandle);
3077 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3078 
3079 		return (NULL);
3080 	}
3081 
3082 	tw->tw_cookie_idx = 0;
3083 	tw->tw_dma_offs = 0;
3084 
3085 dmadone:
3086 	/*
3087 	 * Only allow one wrapper to be added at a time. Insert the
3088 	 * new transaction wrapper into the list for this pipe.
3089 	 */
3090 	if (pp->pp_tw_head == NULL) {
3091 		pp->pp_tw_head = tw;
3092 		pp->pp_tw_tail = tw;
3093 	} else {
3094 		pp->pp_tw_tail->tw_next = tw;
3095 		pp->pp_tw_tail = tw;
3096 	}
3097 
3098 	/* Store the transfer length */
3099 	tw->tw_length = length;
3100 
3101 	/* Store a back pointer to the pipe private structure */
3102 	tw->tw_pipe_private = pp;
3103 
3104 	/* Store the transfer type - synchronous or asynchronous */
3105 	tw->tw_flags = usb_flags;
3106 
3107 	/* Get and Store 32bit ID */
3108 	tw->tw_id = EHCI_GET_ID((void *)tw);
3109 
3110 	ASSERT(tw->tw_id != NULL);
3111 
3112 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3113 	    "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
3114 	    (void *)tw, tw->tw_ncookies);
3115 
3116 	return (tw);
3117 }
3118 
3119 
3120 /*
3121  * ehci_start_xfer_timer:
3122  *
3123  * Start the timer for the control, bulk and for one time interrupt
3124  * transfers.
3125  */
3126 /* ARGSUSED */
3127 static void
3128 ehci_start_xfer_timer(
3129 	ehci_state_t		*ehcip,
3130 	ehci_pipe_private_t	*pp,
3131 	ehci_trans_wrapper_t	*tw)
3132 {
3133 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3134 	    "ehci_start_xfer_timer: tw = 0x%p", (void *)tw);
3135 
3136 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3137 
3138 	/*
3139 	 * The timeout handling is done only for control, bulk and for
3140 	 * one time Interrupt transfers.
3141 	 *
	 * NOTE: If the timeout is zero, assume an infinite timeout and
	 * don't insert this transfer on the timeout list.
3144 	 */
3145 	if (tw->tw_timeout) {
3146 		/*
3147 		 * Add this transfer wrapper to the head of the pipe's
3148 		 * tw timeout list.
3149 		 */
3150 		if (pp->pp_timeout_list) {
3151 			tw->tw_timeout_next = pp->pp_timeout_list;
3152 		}
3153 
3154 		pp->pp_timeout_list = tw;
3155 		ehci_start_timer(ehcip, pp);
3156 	}
3157 }
3158 
3159 
3160 /*
3161  * ehci_stop_xfer_timer:
3162  *
 * Stop the timer for the control, bulk and one time interrupt
 * transfers.
3165  */
3166 void
3167 ehci_stop_xfer_timer(
3168 	ehci_state_t		*ehcip,
3169 	ehci_trans_wrapper_t	*tw,
3170 	uint_t			flag)
3171 {
3172 	ehci_pipe_private_t	*pp;
3173 	timeout_id_t		timer_id;
3174 
3175 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3176 	    "ehci_stop_xfer_timer: tw = 0x%p", (void *)tw);
3177 
3178 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3179 
3180 	/* Obtain the pipe private structure */
3181 	pp = tw->tw_pipe_private;
3182 
3183 	/* check if the timeout tw list is empty */
3184 	if (pp->pp_timeout_list == NULL) {
3185 
3186 		return;
3187 	}
3188 
3189 	switch (flag) {
3190 	case EHCI_REMOVE_XFER_IFLAST:
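		/*
		 * Remove the transfer only if this was its last
		 * outstanding QTD, i.e. the TW's QTD list has a
		 * single entry.
		 */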
3191 		if (tw->tw_qtd_head != tw->tw_qtd_tail) {
3192 			break;
3193 		}
3194 
3195 		/* FALLTHRU */
3196 	case EHCI_REMOVE_XFER_ALWAYS:
3197 		ehci_remove_tw_from_timeout_list(ehcip, tw);
3198 
3199 		if ((pp->pp_timeout_list == NULL) &&
3200 		    (pp->pp_timer_id)) {
3201 
3202 			timer_id = pp->pp_timer_id;
3203 
3204 			/* Reset the timer id to zero */
3205 			pp->pp_timer_id = 0;
3206 
3207 			mutex_exit(&ehcip->ehci_int_mutex);
3208 
3209 			(void) untimeout(timer_id);
3210 
3211 			mutex_enter(&ehcip->ehci_int_mutex);
3212 		}
3213 		break;
3214 	default:
3215 		break;
3216 	}
3217 }
3218 
3219 
3220 /*
3221  * ehci_xfer_timeout_handler:
3222  *
3223  * Control or bulk transfer timeout handler.
3224  */
3225 static void
3226 ehci_xfer_timeout_handler(void *arg)
3227 {
3228 	usba_pipe_handle_data_t	*ph = (usba_pipe_handle_data_t *)arg;
3229 	ehci_state_t		*ehcip = ehci_obtain_state(
3230 	    ph->p_usba_device->usb_root_hub_dip);
3231 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3232 	ehci_trans_wrapper_t	*tw, *next;
3233 	ehci_trans_wrapper_t	*expire_xfer_list = NULL;
3234 	ehci_qtd_t		*qtd;
3235 
3236 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3237 	    "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p",
3238 	    (void *)ehcip, (void *)ph);
3239 
3240 	mutex_enter(&ehcip->ehci_int_mutex);
3241 
3242 	/*
	 * Check whether the timeout handler is still valid.
3244 	 */
3245 	if (pp->pp_timer_id != 0) {
3246 
3247 		/* Reset the timer id to zero */
3248 		pp->pp_timer_id = 0;
3249 	} else {
3250 		mutex_exit(&ehcip->ehci_int_mutex);
3251 
3252 		return;
3253 	}
3254 
3255 	/* Get the transfer timeout list head */
3256 	tw = pp->pp_timeout_list;
3257 
3258 	while (tw) {
3259 
3260 		/* Get the transfer on the timeout list */
3261 		next = tw->tw_timeout_next;
3262 
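		/*
		 * The per-pipe timer fires once per second (see
		 * ehci_start_timer), so each tick decrements the
		 * remaining timeout of every transfer on the list
		 * by one second.
		 */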
3263 		tw->tw_timeout--;
3264 
3265 		if (tw->tw_timeout <= 0) {
3266 
3267 			/* remove the tw from the timeout list */
3268 			ehci_remove_tw_from_timeout_list(ehcip, tw);
3269 
3270 			/* remove QTDs from active QTD list */
3271 			qtd = tw->tw_qtd_head;
3272 			while (qtd) {
3273 				ehci_remove_qtd_from_active_qtd_list(
3274 				    ehcip, qtd);
3275 
3276 				/* Get the next QTD from the wrapper */
3277 				qtd = ehci_qtd_iommu_to_cpu(ehcip,
3278 				    Get_QTD(qtd->qtd_tw_next_qtd));
3279 			}
3280 
3281 			/*
			 * Preserve the order in which the requests
			 * were started.
3284 			 */
3285 			tw->tw_timeout_next = expire_xfer_list;
3286 			expire_xfer_list = tw;
3287 		}
3288 
3289 		tw = next;
3290 	}
3291 
3292 	/*
3293 	 * The timer should be started before the callbacks.
3294 	 * There is always a chance that ehci interrupts come
3295 	 * in when we release the mutex while calling the tw back.
3296 	 * To keep an accurate timeout it should be restarted
3297 	 * as soon as possible.
3298 	 */
3299 	ehci_start_timer(ehcip, pp);
3300 
3301 	/* Get the expired transfer timeout list head */
3302 	tw = expire_xfer_list;
3303 
3304 	while (tw) {
3305 
3306 		/* Get the next tw on the expired transfer timeout list */
3307 		next = tw->tw_timeout_next;
3308 
3309 		/*
3310 		 * The error handle routine will release the mutex when
3311 		 * calling back to USBA. But this will not cause any race.
3312 		 * We do the callback and are relying on ehci_pipe_cleanup()
3313 		 * to halt the queue head and clean up since we should not
3314 		 * block in timeout context.
3315 		 */
3316 		ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);
3317 
3318 		tw = next;
3319 	}
3320 	mutex_exit(&ehcip->ehci_int_mutex);
3321 }
3322 
3323 
3324 /*
3325  * ehci_remove_tw_from_timeout_list:
3326  *
3327  * Remove Control or bulk transfer from the timeout list.
3328  */
3329 static void
3330 ehci_remove_tw_from_timeout_list(
3331 	ehci_state_t		*ehcip,
3332 	ehci_trans_wrapper_t	*tw)
3333 {
3334 	ehci_pipe_private_t	*pp;
3335 	ehci_trans_wrapper_t	*prev, *next;
3336 
3337 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3338 	    "ehci_remove_tw_from_timeout_list: tw = 0x%p", (void *)tw);
3339 
3340 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3341 
3342 	/* Obtain the pipe private structure */
3343 	pp = tw->tw_pipe_private;
3344 
3345 	if (pp->pp_timeout_list) {
3346 		if (pp->pp_timeout_list == tw) {
3347 			pp->pp_timeout_list = tw->tw_timeout_next;
3348 
3349 			tw->tw_timeout_next = NULL;
3350 		} else {
3351 			prev = pp->pp_timeout_list;
3352 			next = prev->tw_timeout_next;
3353 
3354 			while (next && (next != tw)) {
3355 				prev = next;
3356 				next = next->tw_timeout_next;
3357 			}
3358 
3359 			if (next == tw) {
3360 				prev->tw_timeout_next =
3361 				    next->tw_timeout_next;
3362 				tw->tw_timeout_next = NULL;
3363 			}
3364 		}
3365 	}
3366 }
3367 
3368 
3369 /*
3370  * ehci_start_timer:
3371  *
3372  * Start the pipe's timer
3373  */
3374 static void
3375 ehci_start_timer(
3376 	ehci_state_t		*ehcip,
3377 	ehci_pipe_private_t	*pp)
3378 {
3379 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3380 	    "ehci_start_timer: ehcip = 0x%p, pp = 0x%p",
3381 	    (void *)ehcip, (void *)pp);
3382 
3383 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3384 
3385 	/*
	 * Start the pipe's timer only if the timer is not currently
	 * running and there are transfers on the timeout list.
	 * This timer is per pipe.
3389 	 */
3390 	if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3391 		pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3392 		    (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3393 	}
3394 }
3395 
3396 /*
3397  * ehci_deallocate_tw:
3398  *
 * Deallocate a Transaction Wrapper (TW); this involves freeing its
 * DMA resources.
3401  */
3402 void
3403 ehci_deallocate_tw(
3404 	ehci_state_t		*ehcip,
3405 	ehci_pipe_private_t	*pp,
3406 	ehci_trans_wrapper_t	*tw)
3407 {
3408 	ehci_trans_wrapper_t	*prev, *next;
3409 
3410 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3411 	    "ehci_deallocate_tw: tw = 0x%p", (void *)tw);
3412 
3413 	/*
	 * If the transfer wrapper has no Host Controller (HC)
	 * Transfer Descriptors (QTDs) associated with it, then
	 * remove the transfer wrapper.
3417 	 */
3418 	if (tw->tw_qtd_head) {
3419 		ASSERT(tw->tw_qtd_tail != NULL);
3420 
3421 		return;
3422 	}
3423 
3424 	ASSERT(tw->tw_qtd_tail == NULL);
3425 
3426 	/* Make sure we return all the unused qtd's to the pool as well */
3427 	ehci_free_tw_td_resources(ehcip, tw);
3428 
3429 	/*
3430 	 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to
3431 	 * given TW then set the head and  tail  equal to NULL.
3432 	 * Otherwise search for this TW in the linked TW's list
3433 	 * and then remove this TW from the list.
3434 	 */
3435 	if (pp->pp_tw_head == tw) {
3436 		if (pp->pp_tw_tail == tw) {
3437 			pp->pp_tw_head = NULL;
3438 			pp->pp_tw_tail = NULL;
3439 		} else {
3440 			pp->pp_tw_head = tw->tw_next;
3441 		}
3442 	} else {
3443 		prev = pp->pp_tw_head;
3444 		next = prev->tw_next;
3445 
3446 		while (next && (next != tw)) {
3447 			prev = next;
3448 			next = next->tw_next;
3449 		}
3450 
3451 		if (next == tw) {
3452 			prev->tw_next = next->tw_next;
3453 
3454 			if (pp->pp_tw_tail == tw) {
3455 				pp->pp_tw_tail = prev;
3456 			}
3457 		}
3458 	}
3459 
3460 	/*
3461 	 * Make sure that, this TW has been removed
3462 	 * from the timeout list.
3463 	 */
3464 	ehci_remove_tw_from_timeout_list(ehcip, tw);
3465 
3466 	/* Deallocate this TW */
3467 	ehci_free_tw(ehcip, pp, tw);
3468 }
3469 
3470 
3471 /*
3472  * ehci_free_dma_resources:
3473  *
3474  * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3475  *
3476  * NOTE: This function is also called from POLLED MODE.
3477  */
3478 void
3479 ehci_free_dma_resources(
3480 	ehci_state_t		*ehcip,
3481 	usba_pipe_handle_data_t	*ph)
3482 {
3483 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3484 	ehci_trans_wrapper_t	*head_tw = pp->pp_tw_head;
3485 	ehci_trans_wrapper_t	*next_tw, *tw;
3486 
3487 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3488 	    "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3489 
3490 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3491 
3492 	/* Process the Transfer Wrappers */
3493 	next_tw = head_tw;
3494 	while (next_tw) {
3495 		tw = next_tw;
3496 		next_tw = tw->tw_next;
3497 
3498 		USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3499 		    "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3500 
3501 		ehci_free_tw(ehcip, pp, tw);
3502 	}
3503 
3504 	/* Adjust the head and tail pointers */
3505 	pp->pp_tw_head = NULL;
3506 	pp->pp_tw_tail = NULL;
3507 }
3508 
3509 
3510 /*
3511  * ehci_free_tw:
3512  *
3513  * Free the Transfer Wrapper (TW).
3514  */
3515 /*ARGSUSED*/
3516 static void
3517 ehci_free_tw(
3518 	ehci_state_t		*ehcip,
3519 	ehci_pipe_private_t	*pp,
3520 	ehci_trans_wrapper_t	*tw)
3521 {
3522 	int	rval;
3523 
3524 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3525 	    "ehci_free_tw: tw = 0x%p", (void *)tw);
3526 
3527 	ASSERT(tw != NULL);
3528 	ASSERT(tw->tw_id != NULL);
3529 
3530 	/* Free 32bit ID */
3531 	EHCI_FREE_ID((uint32_t)tw->tw_id);
3532 
3533 	if (tw->tw_dmahandle != NULL) {
3534 		rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
3535 		ASSERT(rval == DDI_SUCCESS);
3536 
3537 		ddi_dma_mem_free(&tw->tw_accesshandle);
3538 		ddi_dma_free_handle(&tw->tw_dmahandle);
3539 	}
3540 
3541 	/* Free transfer wrapper */
3542 	kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3543 }
3544 
3545 
3546 /*
3547  * Miscellaneous functions
3548  */
3549 
3550 /*
3551  * ehci_allocate_intr_in_resource
3552  *
3553  * Allocate interrupt request structure for the interrupt IN transfer.
3554  */
3555 /*ARGSUSED*/
3556 int
3557 ehci_allocate_intr_in_resource(
3558 	ehci_state_t		*ehcip,
3559 	ehci_pipe_private_t	*pp,
3560 	ehci_trans_wrapper_t	*tw,
3561 	usb_flags_t		flags)
3562 {
3563 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3564 	usb_intr_req_t		*curr_intr_reqp;
3565 	usb_opaque_t		client_periodic_in_reqp;
3566 	size_t			length = 0;
3567 
3568 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_allocate_intr_in_resource: "
3570 	    "pp = 0x%p tw = 0x%p flags = 0x%x", (void *)pp, (void *)tw, flags);
3571 
3572 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3573 	ASSERT(tw->tw_curr_xfer_reqp == NULL);
3574 
3575 	/* Get the client periodic in request pointer */
3576 	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
3577 
3578 	/*
	 * If the client's original periodic IN request has been saved,
	 * duplicate it for the current polling request, copying the
	 * information from the saved request structure. Otherwise
	 * allocate a fresh interrupt request.
3583 	 */
3584 	if (client_periodic_in_reqp) {
3585 
3586 		/* Get the interrupt transfer length */
3587 		length = ((usb_intr_req_t *)
3588 		    client_periodic_in_reqp)->intr_len;
3589 
3590 		curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
3591 		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
3592 	} else {
3593 		curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
3594 	}
3595 
3596 	if (curr_intr_reqp == NULL) {
3597 
3598 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_allocate_intr_in_resource: Interrupt "
3600 		    "request structure allocation failed");
3601 
3602 		return (USB_NO_RESOURCES);
3603 	}
3604 
3605 	/* For polled mode */
3606 	if (client_periodic_in_reqp == NULL) {
3607 		curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
3608 		curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
3609 	} else {
3610 		/* Check and save the timeout value */
3611 		tw->tw_timeout = (curr_intr_reqp->intr_attributes &
3612 		    USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
3613 	}
3614 
3615 	tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
3616 	tw->tw_length = curr_intr_reqp->intr_len;
3617 
3618 	mutex_enter(&ph->p_mutex);
3619 	ph->p_req_count++;
3620 	mutex_exit(&ph->p_mutex);
3621 
3622 	pp->pp_state = EHCI_PIPE_STATE_ACTIVE;
3623 
3624 	return (USB_SUCCESS);
3625 }
3626 
3627 /*
3628  * ehci_pipe_cleanup
3629  *
3630  * Cleanup ehci pipe.
3631  */
3632 void
3633 ehci_pipe_cleanup(
3634 	ehci_state_t		*ehcip,
3635 	usba_pipe_handle_data_t	*ph)
3636 {
3637 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3638 	uint_t			pipe_state = pp->pp_state;
3639 	usb_cr_t		completion_reason;
3640 	usb_ep_descr_t		*eptd = &ph->p_ep;
3641 
3642 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3643 	    "ehci_pipe_cleanup: ph = 0x%p", (void *)ph);
3644 
3645 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3646 
3647 	if (EHCI_ISOC_ENDPOINT(eptd)) {
3648 		ehci_isoc_pipe_cleanup(ehcip, ph);
3649 
3650 		return;
3651 	}
3652 
3653 	ASSERT(!servicing_interrupt());
3654 
3655 	/*
3656 	 * Set the QH's status to Halt condition.
	 * If another thread is halting, this function will automatically
3658 	 * wait. If a pipe close happens at this time
3659 	 * we will be in lots of trouble.
3660 	 * If we are in an interrupt thread, don't halt, because it may
3661 	 * do a wait_for_sof.
3662 	 */
3663 	ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3664 
3665 	/*
	 * Wait for all completed transfers to be processed and
	 * their results sent upstream.
3668 	 */
3669 	ehci_wait_for_transfers_completion(ehcip, pp);
3670 
3671 	/* Save the data toggle information */
3672 	ehci_save_data_toggle(ehcip, ph);
3673 
3674 	/*
3675 	 * Traverse the list of QTDs for this pipe using transfer
	 * wrapper. Process these QTDs depending on their status,
	 * and stop this pipe's timer.
3678 	 */
3679 	ehci_traverse_qtds(ehcip, ph);
3680 
3681 	/* Make sure the timer is not running */
3682 	ASSERT(pp->pp_timer_id == 0);
3683 
3684 	/* Do callbacks for all unfinished requests */
3685 	ehci_handle_outstanding_requests(ehcip, pp);
3686 
3687 	/* Free DMA resources */
3688 	ehci_free_dma_resources(ehcip, ph);
3689 
3690 	switch (pipe_state) {
3691 	case EHCI_PIPE_STATE_CLOSE:
3692 		completion_reason = USB_CR_PIPE_CLOSING;
3693 		break;
3694 	case EHCI_PIPE_STATE_RESET:
3695 	case EHCI_PIPE_STATE_STOP_POLLING:
3696 		/* Set completion reason */
3697 		completion_reason = (pipe_state ==
3698 		    EHCI_PIPE_STATE_RESET) ?
3699 		    USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3700 
3701 		/* Restore the data toggle information */
3702 		ehci_restore_data_toggle(ehcip, ph);
3703 
3704 		/*
3705 		 * Clear the halt bit to restart all the
3706 		 * transactions on this pipe.
3707 		 */
3708 		ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3709 
3710 		/* Set pipe state to idle */
3711 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
3712 
3713 		break;
3714 	}
3715 
3716 	/*
3717 	 * Do the callback for the original client
3718 	 * periodic IN request.
3719 	 */
3720 	if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3721 	    ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3722 	    USB_EP_DIR_IN)) {
3723 
3724 		ehci_do_client_periodic_in_req_callback(
3725 		    ehcip, pp, completion_reason);
3726 	}
3727 }
3728 
3729 
3730 /*
3731  * ehci_wait_for_transfers_completion:
3732  *
 * Wait for all completed transfers to be processed and their results
 * sent upstream.
3735  */
3736 static void
3737 ehci_wait_for_transfers_completion(
3738 	ehci_state_t		*ehcip,
3739 	ehci_pipe_private_t	*pp)
3740 {
3741 	ehci_trans_wrapper_t	*next_tw = pp->pp_tw_head;
3742 	clock_t			xfer_cmpl_time_wait;
3743 	ehci_qtd_t		*qtd;
3744 
3745 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3746 	    ehcip->ehci_log_hdl,
3747 	    "ehci_wait_for_transfers_completion: pp = 0x%p", (void *)pp);
3748 
3749 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3750 
3751 	if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {
3752 
3753 		return;
3754 	}
3755 
3756 	pp->pp_count_done_qtds = 0;
3757 
3758 	/* Process the transfer wrappers for this pipe */
3759 	while (next_tw) {
3760 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
3761 
3762 		/*
3763 		 * Walk through each QTD for this transfer wrapper.
3764 		 * If a QTD still exists, then it is either on the done
3765 		 * list or on the QH's list.
3766 		 */
3767 		while (qtd) {
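			/*
			 * A QTD whose ACTIVE bit is clear has been retired
			 * by the host controller but not yet handed back by
			 * the interrupt handler; count it so we know how
			 * many completions to wait for below.
			 */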
3768 			if (!(Get_QTD(qtd->qtd_ctrl) &
3769 			    EHCI_QTD_CTRL_ACTIVE_XACT)) {
3770 				pp->pp_count_done_qtds++;
3771 			}
3772 
3773 			qtd = ehci_qtd_iommu_to_cpu(ehcip,
3774 			    Get_QTD(qtd->qtd_tw_next_qtd));
3775 		}
3776 
3777 		next_tw = next_tw->tw_next;
3778 	}
3779 
3780 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3781 	    "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
3782 	    pp->pp_count_done_qtds);
3783 
3784 	if (!pp->pp_count_done_qtds) {
3785 
3786 		return;
3787 	}
3788 
3789 	/* Convert the transfer-completion wait time (in seconds) to clock ticks */
3790 	xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);
3791 
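	/*
	 * Wait for ehci_check_for_transfers_completion() to signal that all
	 * of the counted QTDs have been handled, or give up when the
	 * timeout expires.
	 */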
3792 	(void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
3793 	    &ehcip->ehci_int_mutex,
3794 	    ddi_get_lbolt() + xfer_cmpl_time_wait);
3795 
3796 	if (pp->pp_count_done_qtds) {
3797 
3798 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3799 		    "ehci_wait_for_transfers_completion:"
3800 		    "No transfers completion confirmation received");
3801 	}
3802 }
3803 
3804 /*
3805  * ehci_check_for_transfers_completion:
3806  *
3807  * Check whether anybody is waiting for the transfers completion event. If so,
3808  * send this event and also stop initiating any new transfers on this pipe.
3809  */
3810 void
3811 ehci_check_for_transfers_completion(
3812 	ehci_state_t		*ehcip,
3813 	ehci_pipe_private_t	*pp)
3814 {
3815 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3816 	    ehcip->ehci_log_hdl,
3817 	    "ehci_check_for_transfers_completion: pp = 0x%p", (void *)pp);
3818 
3819 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3820 
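	/*
	 * If polling stopped because interrupt IN resources could not be
	 * allocated and all outstanding periodic requests have drained,
	 * complete the client's original periodic IN request now.
	 */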
3821 	if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
3822 	    (pp->pp_error == USB_CR_NO_RESOURCES) &&
3823 	    (pp->pp_cur_periodic_req_cnt == 0)) {
3824 
3825 		/* Reset pipe error to zero */
3826 		pp->pp_error = 0;
3827 
3828 		/* Do callback for original request */
3829 		ehci_do_client_periodic_in_req_callback(
3830 		    ehcip, pp, USB_CR_NO_RESOURCES);
3831 	}
3832 
3833 	if (pp->pp_count_done_qtds) {
3834 
3835 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3836 		    "ehci_check_for_transfers_completion:"
3837 		    "count_done_qtds = 0x%x", pp->pp_count_done_qtds);
3838 
3839 		/* Decrement the done qtd count */
3840 		pp->pp_count_done_qtds--;
3841 
3842 		if (!pp->pp_count_done_qtds) {
3843 
3844 			USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3845 			    "ehci_check_for_transfers_completion:"
3846 			    "Sent transfers completion event pp = 0x%p",
3847 			    (void *)pp);
3848 
3849 			/* Send the transfer completion signal */
3850 			cv_signal(&pp->pp_xfer_cmpl_cv);
3851 		}
3852 	}
3853 }
3854 
3855 
3856 /*
3857  * ehci_save_data_toggle:
3858  *
3859  * Save the data toggle information.
3860  */
3861 static void
3862 ehci_save_data_toggle(
3863 	ehci_state_t		*ehcip,
3864 	usba_pipe_handle_data_t	*ph)
3865 {
3866 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3867 	usb_ep_descr_t		*eptd = &ph->p_ep;
3868 	uint_t			data_toggle;
3869 	usb_cr_t		error = pp->pp_error;
3870 	ehci_qh_t		*qh = pp->pp_qh;
3871 
3872 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3873 	    ehcip->ehci_log_hdl,
3874 	    "ehci_save_data_toggle: ph = 0x%p", (void *)ph);
3875 
3876 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3877 
3878 	/* Reset the pipe error value */
3879 	pp->pp_error = USB_CR_OK;
3880 
3881 	/* Return immediately if it is a control pipe */
3882 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3883 	    USB_EP_ATTR_CONTROL) {
3884 
3885 		return;
3886 	}
3887 
3888 	/* Get the data toggle information from the endpoint (QH) */
3889 	data_toggle = (Get_QH(qh->qh_status) &
3890 	    EHCI_QH_STS_DATA_TOGGLE)? DATA1:DATA0;
3891 
3892 	/*
3893 	 * If the pipe was stalled, reset the data toggle to DATA0 to
3894 	 * match the device, whose toggle resets when the stall clears.
3895 	 */
3896 	if (error == USB_CR_STALL) {
3897 		data_toggle = DATA0;
3898 	}
3899 
3900 	/*
3901 	 * Save the data toggle information
3902 	 * in the usb device structure.
3903 	 */
3904 	mutex_enter(&ph->p_mutex);
3905 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3906 	    data_toggle);
3907 	mutex_exit(&ph->p_mutex);
3908 }
3909 
3910 
3911 /*
3912  * ehci_restore_data_toggle:
3913  *
3914  * Restore the data toggle information.
3915  */
3916 void
3917 ehci_restore_data_toggle(
3918 	ehci_state_t		*ehcip,
3919 	usba_pipe_handle_data_t	*ph)
3920 {
3921 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3922 	usb_ep_descr_t		*eptd = &ph->p_ep;
3923 	uint_t			data_toggle = 0;
3924 
3925 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3926 	    ehcip->ehci_log_hdl,
3927 	    "ehci_restore_data_toggle: ph = 0x%p", (void *)ph);
3928 
3929 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3930 
3931 	/* Return immediately if it is a control pipe */
3932 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3933 	    USB_EP_ATTR_CONTROL) {
3934 
3935 		return;
3936 	}
3937 
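	/*
	 * Fetch the toggle saved by ehci_save_data_toggle() and clear the
	 * saved copy now that it is being consumed.
	 */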
3938 	mutex_enter(&ph->p_mutex);
3939 
3940 	data_toggle = usba_hcdi_get_data_toggle(ph->p_usba_device,
3941 	    ph->p_ep.bEndpointAddress);
3942 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3943 	    0);
3944 
3945 	mutex_exit(&ph->p_mutex);
3946 
3947 	/*
3948 	 * Restore the data toggle bit depending on the
3949 	 * previous data toggle information.
3950 	 */
3951 	if (data_toggle) {
3952 		Set_QH(pp->pp_qh->qh_status,
3953 		    Get_QH(pp->pp_qh->qh_status) | EHCI_QH_STS_DATA_TOGGLE);
3954 	} else {
3955 		Set_QH(pp->pp_qh->qh_status,
3956 		    Get_QH(pp->pp_qh->qh_status) & (~EHCI_QH_STS_DATA_TOGGLE));
3957 	}
3958 }
3959 
3960 
3961 /*
3962  * ehci_handle_outstanding_requests
3963  *
3964  * Deallocate the pre-allocated request structures for periodic IN
3965  * transfers and do the callbacks for all other unfinished requests.
3966  *
3967  * NOTE: This function is also called from POLLED MODE.
3968  */
3969 void
3970 ehci_handle_outstanding_requests(
3971 	ehci_state_t		*ehcip,
3972 	ehci_pipe_private_t	*pp)
3973 {
3974 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3975 	usb_ep_descr_t		*eptd = &ph->p_ep;
3976 	ehci_trans_wrapper_t	*curr_tw;
3977 	ehci_trans_wrapper_t	*next_tw;
3978 	usb_opaque_t		curr_xfer_reqp;
3979 
3980 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3981 	    ehcip->ehci_log_hdl,
3982 	    "ehci_handle_outstanding_requests: pp = 0x%p", (void *)pp);
3983 
3984 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3985 
3986 	/* Deallocate all pre-allocated interrupt requests */
3987 	next_tw = pp->pp_tw_head;
3988 
3989 	while (next_tw) {
3990 		curr_tw = next_tw;
3991 		next_tw = curr_tw->tw_next;
3992 
3993 		curr_xfer_reqp = curr_tw->tw_curr_xfer_reqp;
3994 
3995 		/* Deallocate current interrupt request */
3996 		if (curr_xfer_reqp) {
3997 
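			/*
			 * Periodic IN requests were pre-allocated by this
			 * driver and are simply freed; any other request is
			 * returned to the client as USB_CR_FLUSHED.
			 */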
3998 			if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3999 			    (curr_tw->tw_direction == EHCI_QTD_CTRL_IN_PID)) {
4000 
4001 				/* Decrement periodic in request count */
4002 				pp->pp_cur_periodic_req_cnt--;
4003 
4004 				ehci_deallocate_intr_in_resource(
4005 				    ehcip, pp, curr_tw);
4006 			} else {
4007 				ehci_hcdi_callback(ph, curr_tw, USB_CR_FLUSHED);
4008 			}
4009 		}
4010 	}
4011 }
4012 
4013 
4014 /*
4015  * ehci_deallocate_intr_in_resource
4016  *
4017  * Deallocate interrupt request structure for the interrupt IN transfer.
4018  */
4019 void
4020 ehci_deallocate_intr_in_resource(
4021 	ehci_state_t		*ehcip,
4022 	ehci_pipe_private_t	*pp,
4023 	ehci_trans_wrapper_t	*tw)
4024 {
4025 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4026 	uchar_t			ep_attr = ph->p_ep.bmAttributes;
4027 	usb_opaque_t		curr_xfer_reqp;
4028 
4029 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4030 	    ehcip->ehci_log_hdl,
4031 	    "ehci_deallocate_intr_in_resource: "
4032 	    "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);
4033 
4034 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4035 	ASSERT((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR);
4036 
4037 	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4038 
4039 	/* Check the current periodic in request pointer */
4040 	if (curr_xfer_reqp) {
4041 
4042 		tw->tw_curr_xfer_reqp = NULL;
4043 
4044 		mutex_enter(&ph->p_mutex);
4045 		ph->p_req_count--;
4046 		mutex_exit(&ph->p_mutex);
4047 
4048 		/* Free pre-allocated interrupt requests */
4049 		usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4050 
4051 		/* Set periodic in pipe state to idle */
4052 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
4053 	}
4054 }
4055 
4056 
4057 /*
4058  * ehci_do_client_periodic_in_req_callback
4059  *
4060  * Do callback for the original client periodic IN request.
4061  */
4062 void
4063 ehci_do_client_periodic_in_req_callback(
4064 	ehci_state_t		*ehcip,
4065 	ehci_pipe_private_t	*pp,
4066 	usb_cr_t		completion_reason)
4067 {
4068 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4069 	usb_ep_descr_t		*eptd = &ph->p_ep;
4070 
4071 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4072 	    ehcip->ehci_log_hdl,
4073 	    "ehci_do_client_periodic_in_req_callback: "
4074 	    "pp = 0x%p cc = 0x%x", (void *)pp, completion_reason);
4075 
4076 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4077 
4078 	/*
4079 	 * For interrupt/isochronous IN pipes, check whether we need to do
4080 	 * a callback for the original client's periodic IN request.
4081 	 */
4082 	if (pp->pp_client_periodic_in_reqp) {
4083 		ASSERT(pp->pp_cur_periodic_req_cnt == 0);
4084 		if (EHCI_ISOC_ENDPOINT(eptd)) {
4085 			ehci_hcdi_isoc_callback(ph, NULL, completion_reason);
4086 		} else {
4087 			ehci_hcdi_callback(ph, NULL, completion_reason);
4088 		}
4089 	}
4090 }
4091 
4092 
4093 /*
4094  * ehci_hcdi_callback()
4095  *
4096  * Convenience wrapper around usba_hcdi_cb() for non-root-hub pipes.
4097  */
4098 void
4099 ehci_hcdi_callback(
4100 	usba_pipe_handle_data_t	*ph,
4101 	ehci_trans_wrapper_t	*tw,
4102 	usb_cr_t		completion_reason)
4103 {
4104 	ehci_state_t		*ehcip = ehci_obtain_state(
4105 	    ph->p_usba_device->usb_root_hub_dip);
4106 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
4107 	usb_opaque_t		curr_xfer_reqp;
4108 	uint_t			pipe_state = 0;
4109 
4110 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
4111 	    "ehci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4112 	    (void *)ph, (void *)tw, completion_reason);
4113 
4114 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4115 
4116 	/* Set the pipe state as per completion reason */
4117 	switch (completion_reason) {
4118 	case USB_CR_OK:
4119 		pipe_state = pp->pp_state;
4120 		break;
4121 	case USB_CR_NO_RESOURCES:
4122 	case USB_CR_NOT_SUPPORTED:
4123 	case USB_CR_PIPE_RESET:
4124 	case USB_CR_STOPPED_POLLING:
4125 		pipe_state = EHCI_PIPE_STATE_IDLE;
4126 		break;
4127 	case USB_CR_PIPE_CLOSING:
4128 		break;
4129 	default:
4130 		/* Set the pipe state to error */
4131 		pipe_state = EHCI_PIPE_STATE_ERROR;
4132 		pp->pp_error = completion_reason;
4133 		break;
4135 	}
4136 
4137 	pp->pp_state = pipe_state;
4138 
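	/*
	 * Use the transfer wrapper's current request if one is attached;
	 * otherwise this callback completes the client's original periodic
	 * IN request.
	 */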
4139 	if (tw && tw->tw_curr_xfer_reqp) {
4140 		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4141 		tw->tw_curr_xfer_reqp = NULL;
4142 	} else {
4143 		ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4144 
4145 		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4146 		pp->pp_client_periodic_in_reqp = NULL;
4147 	}
4148 
4149 	ASSERT(curr_xfer_reqp != NULL);
4150 
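	/*
	 * Drop the driver mutex while calling into the USBA framework so a
	 * client that re-enters the driver from its callback cannot
	 * deadlock on it.
	 */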
4151 	mutex_exit(&ehcip->ehci_int_mutex);
4152 
4153 	usba_hcdi_cb(ph, curr_xfer_reqp, completion_reason);
4154 
4155 	mutex_enter(&ehcip->ehci_int_mutex);
4156 }
4157