1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2018, Joyent, Inc.
25 */
26
27 /*
28 * EHCI Host Controller Driver (EHCI)
29 *
30 * The EHCI driver is a software driver which interfaces to the Universal
31 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
32 * the Host Controller is defined by the EHCI Host Controller Interface.
33 *
34 * This module contains the main EHCI driver code which handles all USB
35 * transfers, bandwidth allocations and other general functionalities.
36 */
37
38 #include <sys/usb/hcd/ehci/ehcid.h>
39 #include <sys/usb/hcd/ehci/ehci_isoch.h>
40 #include <sys/usb/hcd/ehci/ehci_xfer.h>
41
42 /*
43 * EHCI MSI tunable:
44 *
45 * By default MSI is enabled on all supported platforms except for the
46  * EHCI controller of the ULI1575 southbridge.
47 */
48 boolean_t ehci_enable_msi = B_TRUE;
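/*
 * As an illustrative example (a local-configuration assumption, not a
 * requirement), MSI could be disabled by overriding this tunable in
 * /etc/system and rebooting:
 *
 *	set ehci:ehci_enable_msi = 0
 */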
49
50 /* Pointer to the state structure */
51 extern void *ehci_statep;
52
53 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
54
55 extern uint_t ehci_vt62x2_workaround;
56 extern int force_ehci_off;
57
58 /* Adjustable variables for the size of the pools */
59 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
60 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;
61
62 /*
63  * Initialize the values which determine the order in which the 32ms
64  * interrupt QH lists are executed by the host controller in the lattice tree.
65 */
66 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
67 {0x00, 0x10, 0x08, 0x18,
68 0x04, 0x14, 0x0c, 0x1c,
69 0x02, 0x12, 0x0a, 0x1a,
70 0x06, 0x16, 0x0e, 0x1e,
71 0x01, 0x11, 0x09, 0x19,
72 0x05, 0x15, 0x0d, 0x1d,
73 0x03, 0x13, 0x0b, 0x1b,
74 0x07, 0x17, 0x0f, 0x1f};
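/*
 * The table above is simply the 5-bit bit-reversal of the indices 0 through
 * 31, which spreads the 32ms interrupt lists evenly across consecutive
 * frames. As an illustrative sketch only (not how this driver builds or
 * consumes the table), the same values could be generated like this:
 *
 *	for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
 *		uchar_t rev = 0;
 *		for (bit = 0; bit < 5; bit++) {
 *			if (i & (1 << bit))
 *				rev |= 1 << (4 - bit);
 *		}
 *		table[i] = rev;
 *	}
 *
 * e.g. i = 1 (00001b) maps to 0x10 (10000b) and i = 3 (00011b) to 0x18.
 */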
75
76 /*
77 * Initialize the values which are used to calculate start split mask
78 * for the low/full/high speed interrupt and isochronous endpoints.
79 */
80 static uint_t ehci_start_split_mask[15] = {
81 /*
82  * For high/full/low speed USB devices. For high speed
83  * devices this applies when the polling interval is greater
84  * than or equal to 8 micro-frames (1 micro-frame = 125us).
85 */
86 0x01, /* 00000001 */
87 0x02, /* 00000010 */
88 0x04, /* 00000100 */
89 0x08, /* 00001000 */
90 0x10, /* 00010000 */
91 0x20, /* 00100000 */
92 0x40, /* 01000000 */
93 0x80, /* 10000000 */
94
95  /* Only for high speed devices with a polling interval of 4 micro-frames */
96 0x11, /* 00010001 */
97 0x22, /* 00100010 */
98 0x44, /* 01000100 */
99 0x88, /* 10001000 */
100
101  /* Only for high speed devices with a polling interval of 2 micro-frames */
102 0x55, /* 01010101 */
103 0xaa, /* 10101010 */
104
105  /* Only for high speed devices with a polling interval of 1 micro-frame */
106 0xff /* 11111111 */
107 };
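/*
 * Each set bit n in a mask above selects micro-frame n (0-7) of the 1ms
 * frame in which a transaction or start-split may be issued, so the number
 * of set bits is 8 divided by the polling interval in micro-frames. As a
 * rough guide to the table layout (an illustration, not the driver's actual
 * lookup logic):
 *
 *	interval >= 8 micro-frames (and low/full speed):	entries 0-7
 *	interval 4 micro-frames:				entries 8-11
 *	interval 2 micro-frames:				entries 12-13
 *	interval 1 micro-frame:					entry 14
 */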
108
109 /*
110 * Initialize the values which are used to calculate complete split mask
111 * for the low/full speed interrupt and isochronous endpoints.
112 */
113 static uint_t ehci_intr_complete_split_mask[7] = {
114 /* Only full/low speed devices */
115 0x1c, /* 00011100 */
116 0x38, /* 00111000 */
117 0x70, /* 01110000 */
118 0xe0, /* 11100000 */
119 0x00, /* Need FSTN feature */
120 0x00, /* Need FSTN feature */
121 0x00 /* Need FSTN feature */
122 };
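/*
 * Each value above is derived from the corresponding start-split mask:
 * the complete-splits for a low/full speed endpoint are scheduled in the
 * second, third and fourth micro-frames after the start-split, i.e. the
 * start bit shifted left by 2, 3 and 4 and OR-ed together (start 0x01 ->
 * complete 0x1c, start 0x02 -> 0x38, and so on). Start-splits issued late
 * enough in the frame would need complete-splits in the following frame,
 * which requires the FSTN feature this driver does not use, hence the
 * zero entries.
 */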
123
124
125 /*
126 * EHCI Internal Function Prototypes
127 */
128
129 /* Host Controller Driver (HCD) initialization functions */
130 void ehci_set_dma_attributes(ehci_state_t *ehcip);
131 int ehci_allocate_pools(ehci_state_t *ehcip);
132 void ehci_decode_ddi_dma_addr_bind_handle_result(
133 ehci_state_t *ehcip,
134 int result);
135 int ehci_map_regs(ehci_state_t *ehcip);
136 int ehci_register_intrs_and_init_mutex(
137 ehci_state_t *ehcip);
138 static int ehci_add_intrs(ehci_state_t *ehcip,
139 int intr_type);
140 int ehci_init_ctlr(ehci_state_t *ehcip,
141 int init_type);
142 static int ehci_take_control(ehci_state_t *ehcip);
143 static int ehci_init_periodic_frame_lst_table(
144 ehci_state_t *ehcip);
145 static void ehci_build_interrupt_lattice(
146 ehci_state_t *ehcip);
147 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip);
148
149 /* Host Controller Driver (HCD) deinitialization functions */
150 int ehci_cleanup(ehci_state_t *ehcip);
151 static void ehci_rem_intrs(ehci_state_t *ehcip);
152 int ehci_cpr_suspend(ehci_state_t *ehcip);
153 int ehci_cpr_resume(ehci_state_t *ehcip);
154
155 /* Bandwidth Allocation functions */
156 int ehci_allocate_bandwidth(ehci_state_t *ehcip,
157 usba_pipe_handle_data_t *ph,
158 uint_t *pnode,
159 uchar_t *smask,
160 uchar_t *cmask);
161 static int ehci_allocate_high_speed_bandwidth(
162 ehci_state_t *ehcip,
163 usba_pipe_handle_data_t *ph,
164 uint_t *hnode,
165 uchar_t *smask,
166 uchar_t *cmask);
167 static int ehci_allocate_classic_tt_bandwidth(
168 ehci_state_t *ehcip,
169 usba_pipe_handle_data_t *ph,
170 uint_t pnode);
171 void ehci_deallocate_bandwidth(ehci_state_t *ehcip,
172 usba_pipe_handle_data_t *ph,
173 uint_t pnode,
174 uchar_t smask,
175 uchar_t cmask);
176 static void ehci_deallocate_high_speed_bandwidth(
177 ehci_state_t *ehcip,
178 usba_pipe_handle_data_t *ph,
179 uint_t hnode,
180 uchar_t smask,
181 uchar_t cmask);
182 static void ehci_deallocate_classic_tt_bandwidth(
183 ehci_state_t *ehcip,
184 usba_pipe_handle_data_t *ph,
185 uint_t pnode);
186 static int ehci_compute_high_speed_bandwidth(
187 ehci_state_t *ehcip,
188 usb_ep_descr_t *endpoint,
189 usb_port_status_t port_status,
190 uint_t *sbandwidth,
191 uint_t *cbandwidth);
192 static int ehci_compute_classic_bandwidth(
193 usb_ep_descr_t *endpoint,
194 usb_port_status_t port_status,
195 uint_t *bandwidth);
196 int ehci_adjust_polling_interval(
197 ehci_state_t *ehcip,
198 usb_ep_descr_t *endpoint,
199 usb_port_status_t port_status);
200 static int ehci_adjust_high_speed_polling_interval(
201 ehci_state_t *ehcip,
202 usb_ep_descr_t *endpoint);
203 static uint_t ehci_lattice_height(uint_t interval);
204 static uint_t ehci_lattice_parent(uint_t node);
205 static uint_t ehci_find_periodic_node(
206 uint_t leaf,
207 int interval);
208 static uint_t ehci_leftmost_leaf(uint_t node,
209 uint_t height);
210 static uint_t ehci_pow_2(uint_t x);
211 static uint_t ehci_log_2(uint_t x);
212 static int ehci_find_bestfit_hs_mask(
213 ehci_state_t *ehcip,
214 uchar_t *smask,
215 uint_t *pnode,
216 usb_ep_descr_t *endpoint,
217 uint_t bandwidth,
218 int interval);
219 static int ehci_find_bestfit_ls_intr_mask(
220 ehci_state_t *ehcip,
221 uchar_t *smask,
222 uchar_t *cmask,
223 uint_t *pnode,
224 uint_t sbandwidth,
225 uint_t cbandwidth,
226 int interval);
227 static int ehci_find_bestfit_sitd_in_mask(
228 ehci_state_t *ehcip,
229 uchar_t *smask,
230 uchar_t *cmask,
231 uint_t *pnode,
232 uint_t sbandwidth,
233 uint_t cbandwidth,
234 int interval);
235 static int ehci_find_bestfit_sitd_out_mask(
236 ehci_state_t *ehcip,
237 uchar_t *smask,
238 uint_t *pnode,
239 uint_t sbandwidth,
240 int interval);
241 static uint_t ehci_calculate_bw_availability_mask(
242 ehci_state_t *ehcip,
243 uint_t bandwidth,
244 int leaf,
245 int leaf_count,
246 uchar_t *bw_mask);
247 static void ehci_update_bw_availability(
248 ehci_state_t *ehcip,
249 int bandwidth,
250 int leftmost_leaf,
251 int leaf_count,
252 uchar_t mask);
253
254 /* Miscellaneous functions */
255 ehci_state_t *ehci_obtain_state(
256 dev_info_t *dip);
257 int ehci_state_is_operational(
258 ehci_state_t *ehcip);
259 int ehci_do_soft_reset(
260 ehci_state_t *ehcip);
261 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip,
262 ehci_pipe_private_t *pp,
263 ehci_trans_wrapper_t *tw);
264 usb_frame_number_t ehci_get_current_frame_number(
265 ehci_state_t *ehcip);
266 static void ehci_cpr_cleanup(
267 ehci_state_t *ehcip);
268 int ehci_wait_for_sof(
269 ehci_state_t *ehcip);
270 void ehci_toggle_scheduler(
271 ehci_state_t *ehcip);
272 void ehci_print_caps(ehci_state_t *ehcip);
273 void ehci_print_regs(ehci_state_t *ehcip);
274 void ehci_print_qh(ehci_state_t *ehcip,
275 ehci_qh_t *qh);
276 void ehci_print_qtd(ehci_state_t *ehcip,
277 ehci_qtd_t *qtd);
278 void ehci_create_stats(ehci_state_t *ehcip);
279 void ehci_destroy_stats(ehci_state_t *ehcip);
280 void ehci_do_intrs_stats(ehci_state_t *ehcip,
281 int val);
282 void ehci_do_byte_stats(ehci_state_t *ehcip,
283 size_t len,
284 uint8_t attr,
285 uint8_t addr);
286
287 /*
288 * check if this ehci controller can support PM
289 */
290 int
291 ehci_hcdi_pm_support(dev_info_t *dip)
292 {
293 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
294 ddi_get_instance(dip));
295
296 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
297 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||
298
299 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
300 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||
301
302 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {
303
304 return (USB_SUCCESS);
305 }
306
307 return (USB_FAILURE);
308 }
309
310 void
311 ehci_dma_attr_workaround(ehci_state_t *ehcip)
312 {
313 /*
314	 * Some Nvidia chips cannot handle a QH dma address above 2G.
315	 * Bit 31 of the dma address might be dropped, which can cause
316	 * a system crash or other unpredictable results. So force the
317	 * dma addresses to be allocated below 2G to make ehci work.
318 */
319 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
320 switch (ehcip->ehci_device_id) {
321 case PCI_DEVICE_NVIDIA_CK804:
322 case PCI_DEVICE_NVIDIA_MCP04:
323 USB_DPRINTF_L2(PRINT_MASK_ATTA,
324 ehcip->ehci_log_hdl,
325 "ehci_dma_attr_workaround: NVIDIA dma "
326 "workaround enabled, force dma address "
327 "to be allocated below 2G");
328 ehcip->ehci_dma_attr.dma_attr_addr_hi =
329 0x7fffffffull;
330 break;
331 default:
332 break;
333
334 }
335 }
336 }
337
338 /*
339 * Host Controller Driver (HCD) initialization functions
340 */
341
342 /*
343 * ehci_set_dma_attributes:
344 *
345 * Set the limits in the DMA attributes structure. Most of the values used
346 * in the DMA limit structures are the default values as specified by the
347 * Writing PCI device drivers document.
348 */
349 void
350 ehci_set_dma_attributes(ehci_state_t *ehcip)
351 {
352 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
353 "ehci_set_dma_attributes:");
354
355 /* Initialize the DMA attributes */
356 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
357 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
358 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;
359
360 /* 32 bit addressing */
361 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;
362
363 /* Byte alignment */
364 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
365
366 /*
367	 * Since the PCI specification requires only byte alignment, the
368	 * burst size field should be set to 1 for PCI devices.
369 */
370 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;
371
372 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
373 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
374 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
375 ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
376 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
377 ehcip->ehci_dma_attr.dma_attr_flags = 0;
378 ehci_dma_attr_workaround(ehcip);
379 }
380
381
382 /*
383 * ehci_allocate_pools:
384 *
385 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
386  * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be
387  * aligned as required by the EHCI specification (EHCI_DMA_ATTR_TD_QH_ALIGNMENT).
388 */
389 int
390 ehci_allocate_pools(ehci_state_t *ehcip)
391 {
392 ddi_device_acc_attr_t dev_attr;
393 size_t real_length;
394 int result;
395 uint_t ccount;
396 int i;
397
398 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
399 "ehci_allocate_pools:");
400
401 /* The host controller will be little endian */
402 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
403 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
404 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
405
406	/* Set the alignment required for the QH and QTD structures */
407 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;
408
409 /* Allocate the QTD pool DMA handle */
410 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
411 DDI_DMA_SLEEP, 0,
412 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {
413
414 goto failure;
415 }
416
417 /* Allocate the memory for the QTD pool */
418 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
419 ehci_qtd_pool_size * sizeof (ehci_qtd_t),
420 &dev_attr,
421 DDI_DMA_CONSISTENT,
422 DDI_DMA_SLEEP,
423 0,
424 (caddr_t *)&ehcip->ehci_qtd_pool_addr,
425 &real_length,
426 &ehcip->ehci_qtd_pool_mem_handle)) {
427
428 goto failure;
429 }
430
431 /* Map the QTD pool into the I/O address space */
432 result = ddi_dma_addr_bind_handle(
433 ehcip->ehci_qtd_pool_dma_handle,
434 NULL,
435 (caddr_t)ehcip->ehci_qtd_pool_addr,
436 real_length,
437 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
438 DDI_DMA_SLEEP,
439 NULL,
440 &ehcip->ehci_qtd_pool_cookie,
441 &ccount);
442
443 bzero((void *)ehcip->ehci_qtd_pool_addr,
444 ehci_qtd_pool_size * sizeof (ehci_qtd_t));
445
446 /* Process the result */
447 if (result == DDI_DMA_MAPPED) {
448 /* The cookie count should be 1 */
449 if (ccount != 1) {
450 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
451 "ehci_allocate_pools: More than 1 cookie");
452
453 goto failure;
454 }
455 } else {
456 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
457 "ehci_allocate_pools: Result = %d", result);
458
459 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
460
461 goto failure;
462 }
463
464 /*
465 * DMA addresses for QTD pools are bound
466 */
467 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;
468
469 /* Initialize the QTD pool */
470 for (i = 0; i < ehci_qtd_pool_size; i ++) {
471 Set_QTD(ehcip->ehci_qtd_pool_addr[i].
472 qtd_state, EHCI_QTD_FREE);
473 }
474
475	/* Allocate the QH pool DMA handle */
476 if (ddi_dma_alloc_handle(ehcip->ehci_dip,
477 &ehcip->ehci_dma_attr,
478 DDI_DMA_SLEEP,
479 0,
480 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
481 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
482 "ehci_allocate_pools: ddi_dma_alloc_handle failed");
483
484 goto failure;
485 }
486
487 /* Allocate the memory for the QH pool */
488 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
489 ehci_qh_pool_size * sizeof (ehci_qh_t),
490 &dev_attr,
491 DDI_DMA_CONSISTENT,
492 DDI_DMA_SLEEP,
493 0,
494 (caddr_t *)&ehcip->ehci_qh_pool_addr,
495 &real_length,
496 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
497 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
498 "ehci_allocate_pools: ddi_dma_mem_alloc failed");
499
500 goto failure;
501 }
502
503 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
504 NULL,
505 (caddr_t)ehcip->ehci_qh_pool_addr,
506 real_length,
507 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
508 DDI_DMA_SLEEP,
509 NULL,
510 &ehcip->ehci_qh_pool_cookie,
511 &ccount);
512
513 bzero((void *)ehcip->ehci_qh_pool_addr,
514 ehci_qh_pool_size * sizeof (ehci_qh_t));
515
516 /* Process the result */
517 if (result == DDI_DMA_MAPPED) {
518 /* The cookie count should be 1 */
519 if (ccount != 1) {
520 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
521 "ehci_allocate_pools: More than 1 cookie");
522
523 goto failure;
524 }
525 } else {
526 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
527
528 goto failure;
529 }
530
531 /*
532 * DMA addresses for QH pools are bound
533 */
534 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;
535
536 /* Initialize the QH pool */
537 for (i = 0; i < ehci_qh_pool_size; i ++) {
538 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
539 }
540
541 /* Byte alignment */
542 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
543
544 return (DDI_SUCCESS);
545
546 failure:
547 /* Byte alignment */
548 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
549
550 return (DDI_FAILURE);
551 }
552
553
554 /*
555 * ehci_decode_ddi_dma_addr_bind_handle_result:
556 *
557 * Process the return values of ddi_dma_addr_bind_handle()
558 */
559 void
560 ehci_decode_ddi_dma_addr_bind_handle_result(
561 ehci_state_t *ehcip,
562 int result)
563 {
564 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
565 "ehci_decode_ddi_dma_addr_bind_handle_result:");
566
567 switch (result) {
568 case DDI_DMA_PARTIAL_MAP:
569 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
570 "Partial transfers not allowed");
571 break;
572 case DDI_DMA_INUSE:
573 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
574 "Handle is in use");
575 break;
576 case DDI_DMA_NORESOURCES:
577 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
578 "No resources");
579 break;
580 case DDI_DMA_NOMAPPING:
581 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
582 "No mapping");
583 break;
584 case DDI_DMA_TOOBIG:
585 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
586 "Object is too big");
587 break;
588 default:
589 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
590 "Unknown dma error");
591 }
592 }
593
594
595 /*
596 * ehci_map_regs:
597 *
598  * The Host Controller (HC) contains a set of on-chip operational
599  * registers which should be mapped into a non-cacheable portion of
600  * the system addressable space.
601 */
602 int
603 ehci_map_regs(ehci_state_t *ehcip)
604 {
605 ddi_device_acc_attr_t attr;
606 uint16_t cmd_reg;
607 uint_t length;
608
609 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");
610
611 /* Check to make sure we have memory access */
612 if (pci_config_setup(ehcip->ehci_dip,
613 &ehcip->ehci_config_handle) != DDI_SUCCESS) {
614
615 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
616 "ehci_map_regs: Config error");
617
618 return (DDI_FAILURE);
619 }
620
621 /* Make sure Memory Access Enable is set */
622 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
623
624 if (!(cmd_reg & PCI_COMM_MAE)) {
625
626 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
627 "ehci_map_regs: Memory base address access disabled");
628
629 return (DDI_FAILURE);
630 }
631
632 /* The host controller will be little endian */
633 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
634 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
635 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
636
637 /* Map in EHCI Capability registers */
638 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
639 (caddr_t *)&ehcip->ehci_capsp, 0,
640 sizeof (ehci_caps_t), &attr,
641 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
642
643 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
644 "ehci_map_regs: Map setup error");
645
646 return (DDI_FAILURE);
647 }
648
649 length = ddi_get8(ehcip->ehci_caps_handle,
650 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);
651
652 /* Free the original mapping */
653 ddi_regs_map_free(&ehcip->ehci_caps_handle);
654
655 /* Re-map in EHCI Capability and Operational registers */
656 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
657 (caddr_t *)&ehcip->ehci_capsp, 0,
658 length + sizeof (ehci_regs_t), &attr,
659 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
660
661 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
662 "ehci_map_regs: Map setup error");
663
664 return (DDI_FAILURE);
665 }
666
667 /* Get the pointer to EHCI Operational Register */
668 ehcip->ehci_regsp = (ehci_regs_t *)
669 ((uintptr_t)ehcip->ehci_capsp + length);
670
671 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
672 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
673 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);
674
675 return (DDI_SUCCESS);
676 }
677
678 /*
679 * The following simulated polling is for debugging purposes only.
680 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
681 */
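/*
 * For example (illustrative only; adapt to the local boot setup), the
 * property can be set in ehci.conf as:
 *
 *	usb-polling="true";
 *
 * or supplied as a boot property, e.g. by appending "-B usb-polling=true"
 * to the kernel boot line, followed by a reboot.
 */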
682 static int
683 ehci_is_polled(dev_info_t *dip)
684 {
685 int ret;
686 char *propval;
687
688 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
689 "usb-polling", &propval) != DDI_SUCCESS)
690
691 return (0);
692
693 ret = (strcmp(propval, "true") == 0);
694 ddi_prop_free(propval);
695
696 return (ret);
697 }
698
699 static void
700 ehci_poll_intr(void *arg)
701 {
702 /* poll every msec */
703 for (;;) {
704 (void) ehci_intr(arg, NULL);
705 delay(drv_usectohz(1000));
706 }
707 }
708
709 /*
710 * ehci_register_intrs_and_init_mutex:
711 *
712  * Register interrupts and initialize the mutex and condition variables.
713 */
714 int
715 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip)
716 {
717 int intr_types;
718
719 #if defined(__x86)
720 uint8_t iline;
721 #endif
722
723 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
724 "ehci_register_intrs_and_init_mutex:");
725
726 /*
727 * There is a known MSI hardware bug with the EHCI controller
728	 * of the ULI1575 southbridge. Hence MSI is disabled for this chip.
729 */
730 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
731 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
732 ehcip->ehci_msi_enabled = B_FALSE;
733 } else {
734 /* Set the MSI enable flag from the global EHCI MSI tunable */
735 ehcip->ehci_msi_enabled = ehci_enable_msi;
736 }
737
738 /* launch polling thread instead of enabling pci interrupt */
739 if (ehci_is_polled(ehcip->ehci_dip)) {
740 extern pri_t maxclsyspri;
741
742 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
743 "ehci_register_intrs_and_init_mutex: "
744 "running in simulated polled mode");
745
746 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
747 TS_RUN, maxclsyspri);
748
749 goto skip_intr;
750 }
751
752 #if defined(__x86)
753 /*
754 * Make sure that the interrupt pin is connected to the
755 * interrupt controller on x86. Interrupt line 255 means
756 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
757	 * If we returned failure when the interrupt line equals 255, then
758	 * high speed devices would be routed to the companion host controllers.
759	 * However, it is not necessary to return failure here; the
760	 * ohci/uhci drivers don't check the interrupt line either.
761	 * It is still worth logging a message here for debug purposes.
762 */
763 iline = pci_config_get8(ehcip->ehci_config_handle,
764 PCI_CONF_ILINE);
765
766 if (iline == 255) {
767 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
768 "ehci_register_intrs_and_init_mutex: "
769 "interrupt line value out of range (%d)",
770 iline);
771 }
772 #endif /* __x86 */
773
774 /* Get supported interrupt types */
775 if (ddi_intr_get_supported_types(ehcip->ehci_dip,
776 &intr_types) != DDI_SUCCESS) {
777 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
778 "ehci_register_intrs_and_init_mutex: "
779 "ddi_intr_get_supported_types failed");
780
781 return (DDI_FAILURE);
782 }
783
784 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
785 "ehci_register_intrs_and_init_mutex: "
786 "supported interrupt types 0x%x", intr_types);
787
788 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
789 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
790 != DDI_SUCCESS) {
791 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
792 "ehci_register_intrs_and_init_mutex: MSI "
793 "registration failed, trying FIXED interrupt \n");
794 } else {
795 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
796 "ehci_register_intrs_and_init_mutex: "
797 "Using MSI interrupt type\n");
798
799 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
800 ehcip->ehci_flags |= EHCI_INTR;
801 }
802 }
803
804 if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
805 (intr_types & DDI_INTR_TYPE_FIXED)) {
806 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
807 != DDI_SUCCESS) {
808 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
809 "ehci_register_intrs_and_init_mutex: "
810 "FIXED interrupt registration failed\n");
811
812 return (DDI_FAILURE);
813 }
814
815 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
816 "ehci_register_intrs_and_init_mutex: "
817 "Using FIXED interrupt type\n");
818
819 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
820 ehcip->ehci_flags |= EHCI_INTR;
821 }
822
823 skip_intr:
824	/* Create the cv used to wait for async schedule advance */
825 cv_init(&ehcip->ehci_async_schedule_advance_cv,
826 NULL, CV_DRIVER, NULL);
827
828 return (DDI_SUCCESS);
829 }
830
831
832 /*
833 * ehci_add_intrs:
834 *
835 * Register FIXED or MSI interrupts.
836 */
837 static int
838 ehci_add_intrs(ehci_state_t *ehcip, int intr_type)
839 {
840 int actual, avail, intr_size, count = 0;
841 int i, flag, ret;
842
843 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
844 "ehci_add_intrs: interrupt type 0x%x", intr_type);
845
846 /* Get number of interrupts */
847 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
848 if ((ret != DDI_SUCCESS) || (count == 0)) {
849 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
850 "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
851 "ret: %d, count: %d", ret, count);
852
853 return (DDI_FAILURE);
854 }
855
856 /* Get number of available interrupts */
857 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
858 if ((ret != DDI_SUCCESS) || (avail == 0)) {
859 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
860 "ehci_add_intrs: ddi_intr_get_navail() failure, "
861 "ret: %d, count: %d", ret, count);
862
863 return (DDI_FAILURE);
864 }
865
866 if (avail < count) {
867 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
868		    "ehci_add_intrs: nintrs() "
869 "returned %d, navail returned %d\n", count, avail);
870 }
871
872 /* Allocate an array of interrupt handles */
873 intr_size = count * sizeof (ddi_intr_handle_t);
874 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);
875
876 flag = (intr_type == DDI_INTR_TYPE_MSI) ?
877 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
878
879 /* call ddi_intr_alloc() */
880 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
881 intr_type, 0, count, &actual, flag);
882
883 if ((ret != DDI_SUCCESS) || (actual == 0)) {
884 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
885 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);
886
887 kmem_free(ehcip->ehci_htable, intr_size);
888
889 return (DDI_FAILURE);
890 }
891
892 if (actual < count) {
893 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
894 "ehci_add_intrs: Requested: %d, Received: %d\n",
895 count, actual);
896
897 for (i = 0; i < actual; i++)
898 (void) ddi_intr_free(ehcip->ehci_htable[i]);
899
900 kmem_free(ehcip->ehci_htable, intr_size);
901
902 return (DDI_FAILURE);
903 }
904
905 ehcip->ehci_intr_cnt = actual;
906
907 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
908 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
909 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
910 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);
911
912 for (i = 0; i < actual; i++)
913 (void) ddi_intr_free(ehcip->ehci_htable[i]);
914
915 kmem_free(ehcip->ehci_htable, intr_size);
916
917 return (DDI_FAILURE);
918 }
919
920 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
921 "ehci_add_intrs: Supported Interrupt priority 0x%x",
922 ehcip->ehci_intr_pri);
923
924 /* Test for high level mutex */
925 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
926 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
927 "ehci_add_intrs: Hi level interrupt not supported");
928
929 for (i = 0; i < actual; i++)
930 (void) ddi_intr_free(ehcip->ehci_htable[i]);
931
932 kmem_free(ehcip->ehci_htable, intr_size);
933
934 return (DDI_FAILURE);
935 }
936
937 /* Initialize the mutex */
938 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
939 DDI_INTR_PRI(ehcip->ehci_intr_pri));
940
941 /* Call ddi_intr_add_handler() */
942 for (i = 0; i < actual; i++) {
943 if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
944 ehci_intr, (caddr_t)ehcip,
945 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
946 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
947 "ehci_add_intrs:ddi_intr_add_handler() "
948 "failed %d", ret);
949
950 for (i = 0; i < actual; i++)
951 (void) ddi_intr_free(ehcip->ehci_htable[i]);
952
953 mutex_destroy(&ehcip->ehci_int_mutex);
954 kmem_free(ehcip->ehci_htable, intr_size);
955
956 return (DDI_FAILURE);
957 }
958 }
959
960 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
961 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
962 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
963 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);
964
965 for (i = 0; i < actual; i++) {
966 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
967 (void) ddi_intr_free(ehcip->ehci_htable[i]);
968 }
969
970 mutex_destroy(&ehcip->ehci_int_mutex);
971 kmem_free(ehcip->ehci_htable, intr_size);
972
973 return (DDI_FAILURE);
974 }
975
976 /* Enable all interrupts */
977 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
978 /* Call ddi_intr_block_enable() for MSI interrupts */
979 (void) ddi_intr_block_enable(ehcip->ehci_htable,
980 ehcip->ehci_intr_cnt);
981 } else {
982 /* Call ddi_intr_enable for MSI or FIXED interrupts */
983 for (i = 0; i < ehcip->ehci_intr_cnt; i++)
984 (void) ddi_intr_enable(ehcip->ehci_htable[i]);
985 }
986
987 return (DDI_SUCCESS);
988 }
989
990
991 /*
992 * ehci_init_hardware
993 *
994 * take control from BIOS, reset EHCI host controller, and check version, etc.
995 */
996 int
997 ehci_init_hardware(ehci_state_t *ehcip)
998 {
999 int revision;
1000 uint16_t cmd_reg;
1001 int abort_on_BIOS_take_over_failure;
1002
1003 /* Take control from the BIOS */
1004 if (ehci_take_control(ehcip) != USB_SUCCESS) {
1005
1006 /* read .conf file properties */
1007 abort_on_BIOS_take_over_failure =
1008 ddi_prop_get_int(DDI_DEV_T_ANY,
1009 ehcip->ehci_dip, DDI_PROP_DONTPASS,
1010 "abort-on-BIOS-take-over-failure", 0);
1011
1012 if (abort_on_BIOS_take_over_failure) {
1013
1014 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1015 "Unable to take control from BIOS.");
1016
1017 return (DDI_FAILURE);
1018 }
1019
1020 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1021 "Unable to take control from BIOS. Failure is ignored.");
1022 }
1023
1024	/* Set Memory Access Enable and Bus Master Enable */
1025 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
1026 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
1027 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);
1028
1029 /* Reset the EHCI host controller */
1030 Set_OpReg(ehci_command,
1031 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);
1032
1033 /* Wait 10ms for reset to complete */
1034 drv_usecwait(EHCI_RESET_TIMEWAIT);
1035
1036 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);
1037
1038 /* Verify the version number */
1039 revision = Get_16Cap(ehci_version);
1040
1041 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1042 "ehci_init_hardware: Revision 0x%x", revision);
1043
1044 /*
1045	 * The EHCI driver supports host controllers compliant with revision
1046	 * 0.95 and higher of the EHCI specification.
1047 */
1048 if (revision < EHCI_REVISION_0_95) {
1049
1050 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1051 "Revision 0x%x is not supported", revision);
1052
1053 return (DDI_FAILURE);
1054 }
1055
1056 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {
1057
1058 /* Initialize the Frame list base address area */
1059 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {
1060
1061 return (DDI_FAILURE);
1062 }
1063
1064 /*
1065 * For performance reasons, do not insert anything into the
1066 * asynchronous list or activate the asynch list schedule until
1067 * there is a valid QH.
1068 */
1069 ehcip->ehci_head_of_async_sched_list = NULL;
1070
1071 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
1072 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
1073 /*
1074 * The driver is unable to reliably stop the asynch
1075 * list schedule on VIA VT6202 controllers, so we
1076 * always keep a dummy QH on the list.
1077 */
1078 ehci_qh_t *dummy_async_qh =
1079 ehci_alloc_qh(ehcip, NULL,
1080 EHCI_INTERRUPT_MODE_FLAG);
1081
1082 Set_QH(dummy_async_qh->qh_link_ptr,
1083 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
1084 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));
1085
1086 /* Set this QH to be the "head" of the circular list */
1087 Set_QH(dummy_async_qh->qh_ctrl,
1088 Get_QH(dummy_async_qh->qh_ctrl) |
1089 EHCI_QH_CTRL_RECLAIM_HEAD);
1090
1091 Set_QH(dummy_async_qh->qh_next_qtd,
1092 EHCI_QH_NEXT_QTD_PTR_VALID);
1093 Set_QH(dummy_async_qh->qh_alt_next_qtd,
1094 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1095
1096 ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
1097 ehcip->ehci_open_async_count++;
1098 ehcip->ehci_async_req_count++;
1099 }
1100 }
1101
1102 return (DDI_SUCCESS);
1103 }
1104
1105
1106 /*
1107 * ehci_init_workaround
1108 *
1109  * Apply some workarounds while initializing the EHCI controller.
1110 */
1111 int
1112 ehci_init_workaround(ehci_state_t *ehcip)
1113 {
1114 /*
1115 * Acer Labs Inc. M5273 EHCI controller does not send
1116 * interrupts unless the Root hub ports are routed to the EHCI
1117 * host controller; so route the ports now, before we test for
1118	 * the presence of SOF interrupts.
1119 */
1120 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1121 /* Route all Root hub ports to EHCI host controller */
1122 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1123 }
1124
1125 /*
1126 * VIA chips have some issues and may not work reliably.
1127 * Revisions >= 0x80 are part of a southbridge and appear
1128 * to be reliable with the workaround.
1129	 * For revisions < 0x80, complain if we were bound using the
1130	 * class compatible name, else proceed. This allows the user to
1131	 * bind ehci specifically to this chip and not get the
1132	 * warnings.
1133 */
1134 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {
1135
1136 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {
1137
1138 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1139 "ehci_init_workaround: Applying VIA workarounds "
1140 "for the 6212 chip.");
1141
1142 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
1143 "pciclass,0c0320") == 0) {
1144
1145 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1146 "Due to recently discovered incompatibilities");
1147 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1148 "with this USB controller, USB2.x transfer");
1149 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1150 "support has been disabled. This device will");
1151 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1152 "continue to function as a USB1.x controller.");
1153 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1154 "If you are interested in enabling USB2.x");
1155 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1156 "support please, refer to the ehci(7D) man page.");
1157 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1158 "Please also refer to www.sun.com/io for");
1159 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1160 "Solaris Ready products and to");
1161 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1162 "www.sun.com/bigadmin/hcl for additional");
1163 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1164 "compatible USB products.");
1165
1166 return (DDI_FAILURE);
1167
1168 } else if (ehci_vt62x2_workaround) {
1169
1170 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1171 "Applying VIA workarounds");
1172 }
1173 }
1174
1175 return (DDI_SUCCESS);
1176 }
1177
1178
1179 /*
1180 * ehci_init_check_status
1181 *
1182 * Check if EHCI host controller is running
1183 */
1184 int
1185 ehci_init_check_status(ehci_state_t *ehcip)
1186 {
1187 clock_t sof_time_wait;
1188
1189 /*
1190 * Get the number of clock ticks to wait.
1191 * This is based on the maximum time it takes for a frame list rollover
1192	 * and the maximum time to wait for SOFs to begin.
1193 */
1194 sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
1195 EHCI_SOF_TIMEWAIT);
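	/*
	 * With the standard 1024-entry periodic frame list and one frame
	 * per millisecond, the first term above works out to roughly one
	 * second (1024 * 1000us); EHCI_SOF_TIMEWAIT only adds extra margin.
	 * The exact figure depends on how these constants are defined in
	 * ehcid.h.
	 */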
1196
1197 /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
1198 ehcip->ehci_flags |= EHCI_CV_INTR;
1199
1200 /* We need to add a delay to allow the chip time to start running */
1201 (void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
1202 &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
1203
1204 /*
1205	 * Check whether the EHCI host controller is running; otherwise return failure.
1206 */
1207 if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
1208 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
1209
1210 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1211		    "No SOF interrupts have been received, this USB EHCI host "
1212 "controller is unusable");
1213
1214 /*
1215 * Route all Root hub ports to Classic host
1216 * controller, in case this is an unusable ALI M5273
1217 * EHCI controller.
1218 */
1219 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1220 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1221 }
1222
1223 return (DDI_FAILURE);
1224 }
1225
1226 return (DDI_SUCCESS);
1227 }
1228
1229
1230 /*
1231 * ehci_init_ctlr:
1232 *
1233 * Initialize the Host Controller (HC).
1234 */
1235 int
1236 ehci_init_ctlr(ehci_state_t *ehcip, int init_type)
1237 {
1238 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");
1239
1240 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1241
1242 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {
1243
1244 return (DDI_FAILURE);
1245 }
1246 }
1247
1248 /*
1249	 * Check for the Asynchronous Schedule Park capability. If this
1250	 * feature is supported, then program the ehci command register
1251	 * with the appropriate values.
1252 */
1253 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {
1254
1255 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1256 "ehci_init_ctlr: Async park mode is supported");
1257
1258 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1259 (EHCI_CMD_ASYNC_PARK_ENABLE |
1260 EHCI_CMD_ASYNC_PARK_COUNT_3)));
1261 }
1262
1263 /*
1264	 * Check for the programmable periodic frame list feature. If this
1265	 * feature is supported, then program the ehci command register
1266	 * with the 1024 element frame list size.
1267 */
1268 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {
1269
1270 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1271 "ehci_init_ctlr: Variable programmable periodic "
1272 "frame list is supported");
1273
1274 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1275 EHCI_CMD_FRAME_1024_SIZE));
1276 }
1277
1278 /*
1279	 * Currently the EHCI driver doesn't support 64 bit addressing.
1280	 *
1281	 * If we were using the 64 bit addressing capability, we would program
1282	 * the ehci_ctrl_segment register with the 4 Gigabyte segment where
1283	 * all of the interface data structures are allocated.
1284 */
1285 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {
1286
1287 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1288 "ehci_init_ctlr: EHCI driver doesn't support "
1289 "64 bit addressing");
1290 }
1291
1292	/* 64 bit addressing is not supported */
1293 Set_OpReg(ehci_ctrl_segment, 0x00000000);
1294
1295 /* Turn on/off the schedulers */
1296 ehci_toggle_scheduler(ehcip);
1297
1298 /* Set host controller soft state to operational */
1299 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;
1300
1301 /*
1302 * Set the Periodic Frame List Base Address register with the
1303 * starting physical address of the Periodic Frame List.
1304 */
1305 Set_OpReg(ehci_periodic_list_base,
1306 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
1307 EHCI_PERIODIC_LIST_BASE));
1308
1309 /*
1310 * Set ehci_interrupt to enable all interrupts except Root
1311 * Hub Status change interrupt.
1312 */
1313 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
1314 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
1315 EHCI_INTR_USB);
1316
1317 /*
1318	 * Set the desired interrupt threshold and turn on the EHCI host controller.
1319 */
1320 Set_OpReg(ehci_command,
1321 ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
1322 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
1323
1324 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);
1325
1326 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1327
1328 if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {
1329
1330 /* Set host controller soft state to error */
1331 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1332
1333 return (DDI_FAILURE);
1334 }
1335
1336 if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {
1337
1338 /* Set host controller soft state to error */
1339 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1340
1341 return (DDI_FAILURE);
1342 }
1343
1344 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1345 "ehci_init_ctlr: SOF's have started");
1346 }
1347
1348 /* Route all Root hub ports to EHCI host controller */
1349 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1350
1351 return (DDI_SUCCESS);
1352 }
1353
1354 /*
1355 * ehci_take_control:
1356 *
1357  * Handshake to take EHCI control from the BIOS if necessary. It is only
1358  * valid for x86 machines, because sparc doesn't have a BIOS.
1359  * On x86 machines, the take-control process includes:
1360  * o get the base address of the extended capability list
1361  * o find the capability for handoff synchronization in the list.
1362  * o check whether the BIOS owns the host controller.
1363  * o set the OS Owned semaphore bit to ask the BIOS to release ownership.
1364  * o wait for a fixed time and check if the BIOS has relinquished control.
1365 */
1366 /* ARGSUSED */
1367 static int
1368 ehci_take_control(ehci_state_t *ehcip)
1369 {
1370 #if defined(__x86)
1371 uint32_t extended_cap;
1372 uint32_t extended_cap_offset;
1373 uint32_t extended_cap_id;
1374 uint_t retry;
1375
1376 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1377 "ehci_take_control:");
1378
1379 /*
1380	 * According to EHCI Spec 2.2.4, get the EECP base address from the HCCPARAMS
1381 * register.
1382 */
1383 extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
1384 EHCI_HCC_EECP_SHIFT;
1385
1386 /*
1387	 * According to EHCI Spec 2.2.4, if the extended capability offset is
1388	 * less than 40h then it is not valid. This means we don't need to
1389 * worry about BIOS handoff.
1390 */
1391 if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {
1392
1393 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1394 "ehci_take_control: Hardware doesn't support legacy.");
1395
1396 goto success;
1397 }
1398
1399 /*
1400	 * According to EHCI Spec 2.1.7, a zero offset indicates the
1401 * end of the extended capability list.
1402 */
1403 while (extended_cap_offset) {
1404
1405 /* Get the extended capability value. */
1406 extended_cap = pci_config_get32(ehcip->ehci_config_handle,
1407 extended_cap_offset);
1408
1409 /*
1410 * It's possible that we'll receive an invalid PCI read here due
1411	 * to something going wrong with the platform firmware. This has
1412 * been observed in the wild depending on the version of ACPI in
1413 * use. If this happens, we'll assume that the capability does
1414 * not exist and that we do not need to take control from the
1415 * BIOS.
1416 */
1417 if (extended_cap == PCI_EINVAL32) {
1418 extended_cap_id = EHCI_EX_CAP_ID_RESERVED;
1419 break;
1420 }
1421
1422 /* Get the capability ID */
1423 extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
1424 EHCI_EX_CAP_ID_SHIFT;
1425
1426		/* Check if the card supports legacy */
1427 if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1428 break;
1429 }
1430
1431 /* Get the offset of the next capability */
1432 extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
1433 EHCI_EX_CAP_NEXT_PTR_SHIFT;
1434
1435 }
1436
1437 /*
1438 * Unable to find legacy support in hardware's extended capability list.
1439 * This means we don't need to worry about BIOS handoff.
1440 */
1441 if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1442
1443 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1444 "ehci_take_control: Hardware doesn't support legacy");
1445
1446 goto success;
1447 }
1448
1449	/* Check whether the BIOS owns the controller. */
1450 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1451
1452 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1453 "ehci_take_control: BIOS does not own EHCI");
1454
1455 goto success;
1456 }
1457
1458 /*
1459	 * According to EHCI Spec 5.1, the OS driver initiates an ownership
1460	 * request by setting the OS Owned semaphore to a one. The OS
1461	 * waits for the BIOS Owned bit to go to a zero before attempting
1462	 * to use the EHCI controller. The time that the OS must wait for
1463	 * the BIOS to respond to the request for ownership is beyond the
1464	 * scope of the specification.
1465	 * Here we wait up to EHCI_TAKEOVER_WAIT_COUNT * EHCI_TAKEOVER_DELAY
1466	 * for the BIOS to release ownership.
1467 */
1468 extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
1469 pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
1470 extended_cap);
1471
1472 for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {
1473
1474		/* wait for a short interval */
1475 #ifndef __lock_lint
1476 delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
1477 #endif
1478 /* Check to see if the BIOS has released the ownership */
1479 extended_cap = pci_config_get32(
1480 ehcip->ehci_config_handle, extended_cap_offset);
1481
1482 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1483
1484 USB_DPRINTF_L3(PRINT_MASK_ATTA,
1485 ehcip->ehci_log_hdl,
1486 "ehci_take_control: BIOS has released "
1487 "the ownership. retry = %d", retry);
1488
1489 goto success;
1490 }
1491
1492 }
1493
1494 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1495 "ehci_take_control: take control from BIOS failed.");
1496
1497 return (USB_FAILURE);
1498
1499 success:
1500
1501 #endif /* __x86 */
1502 return (USB_SUCCESS);
1503 }
1504
1505
1506 /*
1507  * ehci_init_periodic_frame_lst_table:
1508 *
1509 * Allocate the system memory and initialize Host Controller
1510  * Periodic Frame List table area. The start of the Periodic
1511 * Frame List Table area must be 4096 byte aligned.
1512 */
1513 static int
1514 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
1515 {
1516 ddi_device_acc_attr_t dev_attr;
1517 size_t real_length;
1518 uint_t ccount;
1519 int result;
1520
1521 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1522
1523 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1524 "ehci_init_periodic_frame_lst_table:");
1525
1526 /* The host controller will be little endian */
1527 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1528 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1529 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1530
1531 /* Force the required 4K restrictive alignment */
1532 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;
1533
1534 /* Create space for the Periodic Frame List */
1535 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
1536 DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {
1537
1538 goto failure;
1539 }
1540
1541 if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
1542 sizeof (ehci_periodic_frame_list_t),
1543 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
1544 0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
1545 &real_length, &ehcip->ehci_pflt_mem_handle)) {
1546
1547 goto failure;
1548 }
1549
1550 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1551 "ehci_init_periodic_frame_lst_table: "
1552 "Real length %lu", real_length);
1553
1554 /* Map the whole Periodic Frame List into the I/O address space */
1555 result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
1556 NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
1557 real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1558 DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);
1559
1560 if (result == DDI_DMA_MAPPED) {
1561 /* The cookie count should be 1 */
1562 if (ccount != 1) {
1563 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1564 "ehci_init_periodic_frame_lst_table: "
1565 "More than 1 cookie");
1566
1567 goto failure;
1568 }
1569 } else {
1570 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
1571
1572 goto failure;
1573 }
1574
1575 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1576 "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
1577 (void *)ehcip->ehci_periodic_frame_list_tablep,
1578 ehcip->ehci_pflt_cookie.dmac_address);
1579
1580 /*
1581 * DMA addresses for Periodic Frame List are bound.
1582 */
1583 ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;
1584
1585 bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);
1586
1587 /* Initialize the Periodic Frame List */
1588 ehci_build_interrupt_lattice(ehcip);
1589
1590 /* Reset Byte Alignment to Default */
1591 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1592
1593 return (DDI_SUCCESS);
1594 failure:
1595 /* Byte alignment */
1596 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1597
1598 return (DDI_FAILURE);
1599 }
1600
1601
1602 /*
1603 * ehci_build_interrupt_lattice:
1604 *
1605 * Construct the interrupt lattice tree using static Endpoint Descriptors
1606  * (QH). This interrupt lattice tree will have a total of 32 interrupt QH
1607 * lists and the Host Controller (HC) processes one interrupt QH list in
1608 * every frame. The Host Controller traverses the periodic schedule by
1609 * constructing an array offset reference from the Periodic List Base Address
1610  * register and bits 12 to 3 of the Frame Index register. It fetches the element
1611 * and begins traversing the graph of linked schedule data structures.
1612 */
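/*
 * Conceptually, on every micro-frame the hardware fetches the frame list
 * element at the offset below (an illustrative sketch only; the controller
 * does this itself, and FRINDEX bits 2:0 merely select the micro-frame
 * within the current 1ms frame):
 *
 *	entry = periodic_list_base +
 *	    (((frame_index >> 3) % EHCI_NUM_PERIODIC_FRAME_LISTS) *
 *	    sizeof (uint32_t));
 */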
1613 static void
1614 ehci_build_interrupt_lattice(ehci_state_t *ehcip)
1615 {
1616 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr;
1617 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
1618 ehci_periodic_frame_list_t *periodic_frame_list =
1619 ehcip->ehci_periodic_frame_list_tablep;
1620 ushort_t *temp, num_of_nodes;
1621 uintptr_t addr;
1622 int i, j, k;
1623
1624 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1625 "ehci_build_interrupt_lattice:");
1626
1627 /*
1628 * Reserve the first 63 Endpoint Descriptor (QH) structures
1629	 * in the pool as static endpoints; these are required for
1630	 * constructing the interrupt lattice tree.
1631 */
1632 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
1633 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
1634 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
1635 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
1636 Set_QH(list_array[i].qh_alt_next_qtd,
1637 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1638 }
1639
1640 /*
1641	 * Make sure that the last endpoint on the periodic frame list
1642	 * terminates the periodic schedule.
1643 */
1644 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
1645
1646 /* Build the interrupt lattice tree */
1647 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
1648 /*
1649 * The next pointer in the host controller endpoint
1650 * descriptor must contain an iommu address. Calculate
1651 * the offset into the cpu address and add this to the
1652 * starting iommu address.
1653 */
1654 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);
1655
1656 Set_QH(list_array[2*i + 1].qh_link_ptr,
1657 addr | EHCI_QH_LINK_REF_QH);
1658 Set_QH(list_array[2*i + 2].qh_link_ptr,
1659 addr | EHCI_QH_LINK_REF_QH);
1660 }
1661
1662 /* Build the tree bottom */
1663 temp = (unsigned short *)
1664 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);
1665
1666 num_of_nodes = 1;
1667
1668 /*
1669 * Initialize the values which are used for setting up head pointers
1670 * for the 32ms scheduling lists which starts from the Periodic Frame
1671 * List.
1672 */
1673 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
1674 for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
1675 ehci_index[j++] = temp[k];
1676 ehci_index[j] = temp[k] + ehci_pow_2(i);
1677 }
1678
1679 num_of_nodes *= 2;
1680 for (k = 0; k < num_of_nodes; k++)
1681 temp[k] = ehci_index[k];
1682 }
1683
1684 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
1685
1686 /*
1687 * Initialize the interrupt list in the Periodic Frame List Table
1688 * so that it points to the bottom of the tree.
1689 */
1690 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
1691 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
1692 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));
1693
1694 ASSERT(addr);
1695
1696 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
1697 Set_PFLT(periodic_frame_list->
1698 ehci_periodic_frame_list_table[ehci_index[j++]],
1699 (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
1700 }
1701 }
1702 }
1703
1704
1705 /*
1706 * ehci_alloc_hcdi_ops:
1707 *
1708 * The HCDI interfaces or entry points are the software interfaces used by
1709 * the Universal Serial Bus Driver (USBA) to access the services of the
1710 * Host Controller Driver (HCD). During HCD initialization, inform USBA
1711 * about all available HCDI interfaces or entry points.
1712 */
1713 usba_hcdi_ops_t *
1714 ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
1715 {
1716 usba_hcdi_ops_t *usba_hcdi_ops;
1717
1718 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1719 "ehci_alloc_hcdi_ops:");
1720
1721 usba_hcdi_ops = usba_alloc_hcdi_ops();
1722
1723 usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;
1724
1725 usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
1726 usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
1727 usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;
1728
1729 usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
1730 usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
1731 ehci_hcdi_pipe_reset_data_toggle;
1732
1733 usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
1734 usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
1735 usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
1736 usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;
1737
1738 usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
1739 ehci_hcdi_bulk_transfer_size;
1740
1741 usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
1742 ehci_hcdi_pipe_stop_intr_polling;
1743 usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
1744 ehci_hcdi_pipe_stop_isoc_polling;
1745
1746 usba_hcdi_ops->usba_hcdi_get_current_frame_number =
1747 ehci_hcdi_get_current_frame_number;
1748 usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
1749 ehci_hcdi_get_max_isoc_pkts;
1750
1751 usba_hcdi_ops->usba_hcdi_console_input_init =
1752 ehci_hcdi_polled_input_init;
1753 usba_hcdi_ops->usba_hcdi_console_input_enter =
1754 ehci_hcdi_polled_input_enter;
1755 usba_hcdi_ops->usba_hcdi_console_read =
1756 ehci_hcdi_polled_read;
1757 usba_hcdi_ops->usba_hcdi_console_input_exit =
1758 ehci_hcdi_polled_input_exit;
1759 usba_hcdi_ops->usba_hcdi_console_input_fini =
1760 ehci_hcdi_polled_input_fini;
1761
1762 usba_hcdi_ops->usba_hcdi_console_output_init =
1763 ehci_hcdi_polled_output_init;
1764 usba_hcdi_ops->usba_hcdi_console_output_enter =
1765 ehci_hcdi_polled_output_enter;
1766 usba_hcdi_ops->usba_hcdi_console_write =
1767 ehci_hcdi_polled_write;
1768 usba_hcdi_ops->usba_hcdi_console_output_exit =
1769 ehci_hcdi_polled_output_exit;
1770 usba_hcdi_ops->usba_hcdi_console_output_fini =
1771 ehci_hcdi_polled_output_fini;
1772 return (usba_hcdi_ops);
1773 }
1774
1775
1776 /*
1777 * Host Controller Driver (HCD) deinitialization functions
1778 */
1779
1780 /*
1781 * ehci_cleanup:
1782 *
1783 * Cleanup on attach failure or detach
1784 */
1785 int
1786 ehci_cleanup(ehci_state_t *ehcip)
1787 {
1788 ehci_trans_wrapper_t *tw;
1789 ehci_pipe_private_t *pp;
1790 ehci_qtd_t *qtd;
1791 int i, ctrl, rval;
1792 int flags = ehcip->ehci_flags;
1793
1794 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");
1795
1796 if (flags & EHCI_RHREG) {
1797 /* Unload the root hub driver */
1798 if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {
1799
1800 return (DDI_FAILURE);
1801 }
1802 }
1803
1804 if (flags & EHCI_USBAREG) {
1805 /* Unregister this HCD instance with USBA */
1806 usba_hcdi_unregister(ehcip->ehci_dip);
1807 }
1808
1809 if (flags & EHCI_INTR) {
1810
1811 mutex_enter(&ehcip->ehci_int_mutex);
1812
1813 /* Disable all EHCI QH list processing */
1814 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1815 ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
1816 EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1817
1818 /* Disable all EHCI interrupts */
1819 Set_OpReg(ehci_interrupt, 0);
1820
1821 /* wait for the next SOF */
1822 (void) ehci_wait_for_sof(ehcip);
1823
1824 /* Route all Root hub ports to Classic host controller */
1825 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1826
1827 /* Stop the EHCI host controller */
1828 Set_OpReg(ehci_command,
1829 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
1830
1831 mutex_exit(&ehcip->ehci_int_mutex);
1832
1833 		/* Wait for some time */
1834 delay(drv_usectohz(EHCI_TIMEWAIT));
1835
1836 ehci_rem_intrs(ehcip);
1837 }
1838
1839 /* Unmap the EHCI registers */
1840 if (ehcip->ehci_caps_handle) {
1841 ddi_regs_map_free(&ehcip->ehci_caps_handle);
1842 }
1843
1844 if (ehcip->ehci_config_handle) {
1845 pci_config_teardown(&ehcip->ehci_config_handle);
1846 }
1847
1848 /* Free all the buffers */
1849 if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
1850 		for (i = 0; i < ehci_qtd_pool_size; i++) {
1851 qtd = &ehcip->ehci_qtd_pool_addr[i];
1852 ctrl = Get_QTD(ehcip->
1853 ehci_qtd_pool_addr[i].qtd_state);
1854
1855 if ((ctrl != EHCI_QTD_FREE) &&
1856 (ctrl != EHCI_QTD_DUMMY) &&
1857 (qtd->qtd_trans_wrapper)) {
1858
1859 mutex_enter(&ehcip->ehci_int_mutex);
1860
1861 tw = (ehci_trans_wrapper_t *)
1862 EHCI_LOOKUP_ID((uint32_t)
1863 Get_QTD(qtd->qtd_trans_wrapper));
1864
1865 /* Obtain the pipe private structure */
1866 pp = tw->tw_pipe_private;
1867
1868 				/* Stop the transfer timer */
1869 ehci_stop_xfer_timer(ehcip, tw,
1870 EHCI_REMOVE_XFER_ALWAYS);
1871
1872 ehci_deallocate_tw(ehcip, pp, tw);
1873
1874 mutex_exit(&ehcip->ehci_int_mutex);
1875 }
1876 }
1877
1878 /*
1879 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
1880 * the handle for QTD pools.
1881 */
1882 if ((ehcip->ehci_dma_addr_bind_flag &
1883 EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {
1884
1885 rval = ddi_dma_unbind_handle(
1886 ehcip->ehci_qtd_pool_dma_handle);
1887
1888 ASSERT(rval == DDI_SUCCESS);
1889 }
1890 ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
1891 }
1892
1893 /* Free the QTD pool */
1894 if (ehcip->ehci_qtd_pool_dma_handle) {
1895 ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
1896 }
1897
1898 if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
1899 /*
1900 * If EHCI_QH_POOL_BOUND flag is set, then unbind
1901 * the handle for QH pools.
1902 */
1903 if ((ehcip->ehci_dma_addr_bind_flag &
1904 EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {
1905
1906 rval = ddi_dma_unbind_handle(
1907 ehcip->ehci_qh_pool_dma_handle);
1908
1909 ASSERT(rval == DDI_SUCCESS);
1910 }
1911
1912 ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
1913 }
1914
1915 /* Free the QH pool */
1916 if (ehcip->ehci_qh_pool_dma_handle) {
1917 ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
1918 }
1919
1920 /* Free the Periodic frame list table (PFLT) area */
1921 if (ehcip->ehci_periodic_frame_list_tablep &&
1922 ehcip->ehci_pflt_mem_handle) {
1923 /*
1924 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
1925 * the handle for PFLT.
1926 */
1927 if ((ehcip->ehci_dma_addr_bind_flag &
1928 EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {
1929
1930 rval = ddi_dma_unbind_handle(
1931 ehcip->ehci_pflt_dma_handle);
1932
1933 ASSERT(rval == DDI_SUCCESS);
1934 }
1935
1936 ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
1937 }
1938
1939 (void) ehci_isoc_cleanup(ehcip);
1940
1941 if (ehcip->ehci_pflt_dma_handle) {
1942 ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
1943 }
1944
1945 if (flags & EHCI_INTR) {
1946 /* Destroy the mutex */
1947 mutex_destroy(&ehcip->ehci_int_mutex);
1948
1949 /* Destroy the async schedule advance condition variable */
1950 cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
1951 }
1952
1953 /* clean up kstat structs */
1954 ehci_destroy_stats(ehcip);
1955
1956 /* Free ehci hcdi ops */
1957 if (ehcip->ehci_hcdi_ops) {
1958 usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
1959 }
1960
1961 if (flags & EHCI_ZALLOC) {
1962
1963 usb_free_log_hdl(ehcip->ehci_log_hdl);
1964
1965 /* Remove all properties that might have been created */
1966 ddi_prop_remove_all(ehcip->ehci_dip);
1967
1968 /* Free the soft state */
1969 ddi_soft_state_free(ehci_statep,
1970 ddi_get_instance(ehcip->ehci_dip));
1971 }
1972
1973 return (DDI_SUCCESS);
1974 }
1975
1976
1977 /*
1978 * ehci_rem_intrs:
1979 *
1980 * Unregister FIXED or MSI interrupts
1981 */
1982 static void
1983 ehci_rem_intrs(ehci_state_t *ehcip)
1984 {
1985 int i;
1986
1987 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1988 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
1989
1990 /* Disable all interrupts */
1991 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
1992 (void) ddi_intr_block_disable(ehcip->ehci_htable,
1993 ehcip->ehci_intr_cnt);
1994 } else {
1995 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1996 (void) ddi_intr_disable(ehcip->ehci_htable[i]);
1997 }
1998 }
1999
2000 /* Call ddi_intr_remove_handler() */
2001 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
2002 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
2003 (void) ddi_intr_free(ehcip->ehci_htable[i]);
2004 }
2005
2006 kmem_free(ehcip->ehci_htable,
2007 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
2008 }
2009
2010
2011 /*
2012 * ehci_cpr_suspend
2013 */
2014 int
2015 ehci_cpr_suspend(ehci_state_t *ehcip)
2016 {
2017 int i;
2018
2019 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2020 "ehci_cpr_suspend:");
2021
2022 /* Call into the root hub and suspend it */
2023 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
2024
2025 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2026 "ehci_cpr_suspend: root hub fails to suspend");
2027
2028 return (DDI_FAILURE);
2029 }
2030
2031 /* Only root hub's intr pipe should be open at this time */
2032 mutex_enter(&ehcip->ehci_int_mutex);
2033
2034 ASSERT(ehcip->ehci_open_pipe_count == 0);
2035
2036 /* Just wait till all resources are reclaimed */
2037 i = 0;
2038 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
2039 ehci_handle_endpoint_reclaimation(ehcip);
2040 (void) ehci_wait_for_sof(ehcip);
2041 }
2042 ASSERT(ehcip->ehci_reclaim_list == NULL);
2043
2044 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2045 "ehci_cpr_suspend: Disable HC QH list processing");
2046
2047 /* Disable all EHCI QH list processing */
2048 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
2049 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
2050
2051 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2052 "ehci_cpr_suspend: Disable HC interrupts");
2053
2054 /* Disable all EHCI interrupts */
2055 Set_OpReg(ehci_interrupt, 0);
2056
2057 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2058 "ehci_cpr_suspend: Wait for the next SOF");
2059
2060 /* Wait for the next SOF */
2061 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
2062
2063 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2064 "ehci_cpr_suspend: ehci host controller suspend failed");
2065
2066 mutex_exit(&ehcip->ehci_int_mutex);
2067 return (DDI_FAILURE);
2068 }
2069
2070 /*
2071 	 * Stop the ehci host controller if no usb keyboard is
2072 	 * connected or if force_ehci_off is set.
2073 */
2074 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2075 Set_OpReg(ehci_command,
2076 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2077
2078 }
2079
2080 /* Set host controller soft state to suspend */
2081 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2082
2083 mutex_exit(&ehcip->ehci_int_mutex);
2084
2085 return (DDI_SUCCESS);
2086 }
2087
2088
2089 /*
2090 * ehci_cpr_resume
2091 */
2092 int
2093 ehci_cpr_resume(ehci_state_t *ehcip)
2094 {
2095 mutex_enter(&ehcip->ehci_int_mutex);
2096
2097 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2098 "ehci_cpr_resume: Restart the controller");
2099
2100 /* Cleanup ehci specific information across cpr */
2101 ehci_cpr_cleanup(ehcip);
2102
2103 /* Restart the controller */
2104 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2105
2106 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2107 "ehci_cpr_resume: ehci host controller resume failed ");
2108
2109 mutex_exit(&ehcip->ehci_int_mutex);
2110
2111 return (DDI_FAILURE);
2112 }
2113
2114 mutex_exit(&ehcip->ehci_int_mutex);
2115
2116 /* Now resume the root hub */
2117 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2118
2119 return (DDI_FAILURE);
2120 }
2121
2122 return (DDI_SUCCESS);
2123 }
2124
2125
2126 /*
2127 * Bandwidth Allocation functions
2128 */
2129
2130 /*
2131 * ehci_allocate_bandwidth:
2132 *
2133 * Figure out whether or not this interval may be supported. Return the index
2134 * into the lattice if it can be supported. Return allocation failure if it
2135 * can not be supported.
2136 */
2137 int
2138 ehci_allocate_bandwidth(
2139 ehci_state_t *ehcip,
2140 usba_pipe_handle_data_t *ph,
2141 uint_t *pnode,
2142 uchar_t *smask,
2143 uchar_t *cmask)
2144 {
2145 int error = USB_SUCCESS;
2146
2147 /* This routine is protected by the ehci_int_mutex */
2148 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2149
2150 /* Reset the pnode to the last checked pnode */
2151 *pnode = 0;
2152
2153 /* Allocate high speed bandwidth */
2154 if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2155 ph, pnode, smask, cmask)) != USB_SUCCESS) {
2156
2157 return (error);
2158 }
2159
2160 /*
2161 * For low/full speed usb devices, allocate classic TT bandwidth
2162 	 * in addition to high speed bandwidth.
2163 */
2164 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2165
2166 /* Allocate classic TT bandwidth */
2167 if ((error = ehci_allocate_classic_tt_bandwidth(
2168 ehcip, ph, *pnode)) != USB_SUCCESS) {
2169
2170 /* Deallocate high speed bandwidth */
2171 ehci_deallocate_high_speed_bandwidth(
2172 ehcip, ph, *pnode, *smask, *cmask);
2173 }
2174 }
2175
2176 return (error);
2177 }
2178
2179
2180 /*
2181 * ehci_allocate_high_speed_bandwidth:
2182 *
2183 * Allocate high speed bandwidth for the low/full/high speed interrupt and
2184 * isochronous endpoints.
2185 */
2186 static int
2187 ehci_allocate_high_speed_bandwidth(
2188 ehci_state_t *ehcip,
2189 usba_pipe_handle_data_t *ph,
2190 uint_t *pnode,
2191 uchar_t *smask,
2192 uchar_t *cmask)
2193 {
2194 uint_t sbandwidth, cbandwidth;
2195 int interval;
2196 usb_ep_descr_t *endpoint = &ph->p_ep;
2197 usba_device_t *child_ud;
2198 usb_port_status_t port_status;
2199 int error;
2200
2201 /* This routine is protected by the ehci_int_mutex */
2202 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2203
2204 /* Get child's usba device structure */
2205 child_ud = ph->p_usba_device;
2206
2207 mutex_enter(&child_ud->usb_mutex);
2208
2209 /* Get the current usb device's port status */
2210 port_status = ph->p_usba_device->usb_port_status;
2211
2212 mutex_exit(&child_ud->usb_mutex);
2213
2214 /*
2215 * Calculate the length in bytes of a transaction on this
2216 * periodic endpoint. Return failure if maximum packet is
2217 * zero.
2218 */
2219 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2220 port_status, &sbandwidth, &cbandwidth);
2221 if (error != USB_SUCCESS) {
2222
2223 return (error);
2224 }
2225
2226 /*
2227 * Adjust polling interval to be a power of 2.
2228 * If this interval can't be supported, return
2229 * allocation failure.
2230 */
2231 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2232 if (interval == USB_FAILURE) {
2233
2234 return (USB_FAILURE);
2235 }
2236
2237 if (port_status == USBA_HIGH_SPEED_DEV) {
2238 /* Allocate bandwidth for high speed devices */
2239 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2240 USB_EP_ATTR_ISOCH) {
2241 error = USB_SUCCESS;
2242 } else {
2243
2244 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2245 endpoint, sbandwidth, interval);
2246 }
2247
2248 *cmask = 0x00;
2249
2250 } else {
2251 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2252 USB_EP_ATTR_INTR) {
2253
2254 /* Allocate bandwidth for low speed interrupt */
2255 error = ehci_find_bestfit_ls_intr_mask(ehcip,
2256 smask, cmask, pnode, sbandwidth, cbandwidth,
2257 interval);
2258 } else {
2259 if ((endpoint->bEndpointAddress &
2260 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2261
2262 /* Allocate bandwidth for sitd in */
2263 error = ehci_find_bestfit_sitd_in_mask(ehcip,
2264 smask, cmask, pnode, sbandwidth, cbandwidth,
2265 interval);
2266 } else {
2267
2268 /* Allocate bandwidth for sitd out */
2269 error = ehci_find_bestfit_sitd_out_mask(ehcip,
2270 smask, pnode, sbandwidth, interval);
2271 *cmask = 0x00;
2272 }
2273 }
2274 }
2275
2276 if (error != USB_SUCCESS) {
2277 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2278 "ehci_allocate_high_speed_bandwidth: Reached maximum "
2279 "bandwidth value and cannot allocate bandwidth for a "
2280 "given high-speed periodic endpoint");
2281
2282 return (USB_NO_BANDWIDTH);
2283 }
2284
2285 return (error);
2286 }
2287
2288
2289 /*
2290  * ehci_allocate_classic_tt_bandwidth:
2291 *
2292 * Allocate classic TT bandwidth for the low/full speed interrupt and
2293 * isochronous endpoints.
2294 */
2295 static int
2296 ehci_allocate_classic_tt_bandwidth(
2297 ehci_state_t *ehcip,
2298 usba_pipe_handle_data_t *ph,
2299 uint_t pnode)
2300 {
2301 uint_t bandwidth, min;
2302 uint_t height, leftmost, list;
2303 usb_ep_descr_t *endpoint = &ph->p_ep;
2304 usba_device_t *child_ud, *parent_ud;
2305 usb_port_status_t port_status;
2306 int i, interval;
2307
2308 /* This routine is protected by the ehci_int_mutex */
2309 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2310
2311 /* Get child's usba device structure */
2312 child_ud = ph->p_usba_device;
2313
2314 mutex_enter(&child_ud->usb_mutex);
2315
2316 /* Get the current usb device's port status */
2317 port_status = child_ud->usb_port_status;
2318
2319 /* Get the parent high speed hub's usba device structure */
2320 parent_ud = child_ud->usb_hs_hub_usba_dev;
2321
2322 mutex_exit(&child_ud->usb_mutex);
2323
2324 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2325 "ehci_allocate_classic_tt_bandwidth: "
2326 "child_ud 0x%p parent_ud 0x%p",
2327 (void *)child_ud, (void *)parent_ud);
2328
2329 /*
2330 * Calculate the length in bytes of a transaction on this
2331 * periodic endpoint. Return failure if maximum packet is
2332 * zero.
2333 */
2334 if (ehci_compute_classic_bandwidth(endpoint,
2335 port_status, &bandwidth) != USB_SUCCESS) {
2336
2337 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2338 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2339 "with zero endpoint maximum packet size is not supported");
2340
2341 return (USB_NOT_SUPPORTED);
2342 }
2343
2344 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2345 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2346
2347 mutex_enter(&parent_ud->usb_mutex);
2348
2349 /*
2350 * If the length in bytes plus the allocated bandwidth exceeds
2351 * the maximum, return bandwidth allocation failure.
2352 */
2353 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2354 FS_PERIODIC_BANDWIDTH) {
2355
2356 mutex_exit(&parent_ud->usb_mutex);
2357
2358 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2359 "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2360 "bandwidth value and cannot allocate bandwidth for a "
2361 "given low/full speed periodic endpoint");
2362
2363 return (USB_NO_BANDWIDTH);
2364 }
2365
2366 mutex_exit(&parent_ud->usb_mutex);
2367
2368 /* Adjust polling interval to be a power of 2 */
2369 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2370
2371 /* Find the height in the tree */
2372 height = ehci_lattice_height(interval);
2373
2374 /* Find the leftmost leaf in the subtree specified by the node. */
2375 leftmost = ehci_leftmost_leaf(pnode, height);
2376
2377 mutex_enter(&parent_ud->usb_mutex);
2378
2379 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2380 list = ehci_index[leftmost + i];
2381
2382 if ((parent_ud->usb_hs_hub_bandwidth[list] +
2383 bandwidth) > FS_PERIODIC_BANDWIDTH) {
2384
2385 mutex_exit(&parent_ud->usb_mutex);
2386
2387 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2388 "ehci_allocate_classic_tt_bandwidth: Reached "
2389 "maximum bandwidth value and cannot allocate "
2390 "bandwidth for low/full periodic endpoint");
2391
2392 return (USB_NO_BANDWIDTH);
2393 }
2394 }
2395
2396 /*
2397 * All the leaves for this node must be updated with the bandwidth.
2398 */
2399 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2400 list = ehci_index[leftmost + i];
2401 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2402 }
2403
2404 /* Find the leaf with the smallest allocated bandwidth */
2405 min = parent_ud->usb_hs_hub_bandwidth[0];
2406
2407 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2408 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2409 min = parent_ud->usb_hs_hub_bandwidth[i];
2410 }
2411 }
2412
2413 /* Save the minimum for later use */
2414 parent_ud->usb_hs_hub_min_bandwidth = min;
2415
2416 mutex_exit(&parent_ud->usb_mutex);
2417
2418 return (USB_SUCCESS);
2419 }
2420
2421
2422 /*
2423 * ehci_deallocate_bandwidth:
2424 *
2425 * Deallocate bandwidth for the given node in the lattice and the length
2426 * of transfer.
2427 */
2428 void
2429 ehci_deallocate_bandwidth(
2430 ehci_state_t *ehcip,
2431 usba_pipe_handle_data_t *ph,
2432 uint_t pnode,
2433 uchar_t smask,
2434 uchar_t cmask)
2435 {
2436 /* This routine is protected by the ehci_int_mutex */
2437 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2438
2439 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2440
2441 /*
2442 * For low/full speed usb devices, deallocate classic TT bandwidth
2443 	 * in addition to high speed bandwidth.
2444 */
2445 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2446
2447 /* Deallocate classic TT bandwidth */
2448 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2449 }
2450 }
2451
2452
2453 /*
2454 * ehci_deallocate_high_speed_bandwidth:
2455 *
2456  * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2457 */
2458 static void
2459 ehci_deallocate_high_speed_bandwidth(
2460 ehci_state_t *ehcip,
2461 usba_pipe_handle_data_t *ph,
2462 uint_t pnode,
2463 uchar_t smask,
2464 uchar_t cmask)
2465 {
2466 uint_t height, leftmost;
2467 uint_t list_count;
2468 uint_t sbandwidth, cbandwidth;
2469 int interval;
2470 usb_ep_descr_t *endpoint = &ph->p_ep;
2471 usba_device_t *child_ud;
2472 usb_port_status_t port_status;
2473
2474 /* This routine is protected by the ehci_int_mutex */
2475 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2476
2477 /* Get child's usba device structure */
2478 child_ud = ph->p_usba_device;
2479
2480 mutex_enter(&child_ud->usb_mutex);
2481
2482 /* Get the current usb device's port status */
2483 port_status = ph->p_usba_device->usb_port_status;
2484
2485 mutex_exit(&child_ud->usb_mutex);
2486
2487 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2488 port_status, &sbandwidth, &cbandwidth);
2489
2490 /* Adjust polling interval to be a power of 2 */
2491 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2492
2493 /* Find the height in the tree */
2494 height = ehci_lattice_height(interval);
2495
2496 /*
2497 * Find the leftmost leaf in the subtree specified by the node
2498 */
2499 leftmost = ehci_leftmost_leaf(pnode, height);
2500
2501 list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2502
2503 /* Delete the bandwidth from the appropriate lists */
2504 if (port_status == USBA_HIGH_SPEED_DEV) {
2505
2506 ehci_update_bw_availability(ehcip, -sbandwidth,
2507 leftmost, list_count, smask);
2508 } else {
2509 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2510 USB_EP_ATTR_INTR) {
2511
2512 ehci_update_bw_availability(ehcip, -sbandwidth,
2513 leftmost, list_count, smask);
2514 ehci_update_bw_availability(ehcip, -cbandwidth,
2515 leftmost, list_count, cmask);
2516 } else {
2517 if ((endpoint->bEndpointAddress &
2518 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2519
2520 ehci_update_bw_availability(ehcip, -sbandwidth,
2521 leftmost, list_count, smask);
2522 ehci_update_bw_availability(ehcip,
2523 -MAX_UFRAME_SITD_XFER, leftmost,
2524 list_count, cmask);
2525 } else {
2526
2527 ehci_update_bw_availability(ehcip,
2528 -MAX_UFRAME_SITD_XFER, leftmost,
2529 list_count, smask);
2530 }
2531 }
2532 }
2533 }
2534
2535 /*
2536 * ehci_deallocate_classic_tt_bandwidth:
2537 *
2538  * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2539 */
2540 static void
2541 ehci_deallocate_classic_tt_bandwidth(
2542 ehci_state_t *ehcip,
2543 usba_pipe_handle_data_t *ph,
2544 uint_t pnode)
2545 {
2546 uint_t bandwidth, height, leftmost, list, min;
2547 int i, interval;
2548 usb_ep_descr_t *endpoint = &ph->p_ep;
2549 usba_device_t *child_ud, *parent_ud;
2550 usb_port_status_t port_status;
2551
2552 /* This routine is protected by the ehci_int_mutex */
2553 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2554
2555 /* Get child's usba device structure */
2556 child_ud = ph->p_usba_device;
2557
2558 mutex_enter(&child_ud->usb_mutex);
2559
2560 /* Get the current usb device's port status */
2561 port_status = child_ud->usb_port_status;
2562
2563 /* Get the parent high speed hub's usba device structure */
2564 parent_ud = child_ud->usb_hs_hub_usba_dev;
2565
2566 mutex_exit(&child_ud->usb_mutex);
2567
2568 /* Obtain the bandwidth */
2569 (void) ehci_compute_classic_bandwidth(endpoint,
2570 port_status, &bandwidth);
2571
2572 /* Adjust polling interval to be a power of 2 */
2573 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2574
2575 /* Find the height in the tree */
2576 height = ehci_lattice_height(interval);
2577
2578 /* Find the leftmost leaf in the subtree specified by the node */
2579 leftmost = ehci_leftmost_leaf(pnode, height);
2580
2581 mutex_enter(&parent_ud->usb_mutex);
2582
2583 /* Delete the bandwidth from the appropriate lists */
2584 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2585 list = ehci_index[leftmost + i];
2586 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2587 }
2588
2589 /* Find the leaf with the smallest allocated bandwidth */
2590 min = parent_ud->usb_hs_hub_bandwidth[0];
2591
2592 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2593 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2594 min = parent_ud->usb_hs_hub_bandwidth[i];
2595 }
2596 }
2597
2598 /* Save the minimum for later use */
2599 parent_ud->usb_hs_hub_min_bandwidth = min;
2600
2601 mutex_exit(&parent_ud->usb_mutex);
2602 }
2603
2604
2605 /*
2606 * ehci_compute_high_speed_bandwidth:
2607 *
2608 * Given a periodic endpoint (interrupt or isochronous) determine the total
2609 * bandwidth for one transaction. The EHCI host controller traverses the
2610 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2611 * services an endpoint, only a single transaction attempt is made. The HC
2612 * moves to the next Endpoint Descriptor after the first transaction attempt
2613 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2614 * Transfer Descriptor is inserted into the lattice, we will only count the
2615 * number of bytes for one transaction.
2616 *
2617  * The following are the formulas used for calculating bandwidth in terms
2618  * of bytes for a single USB high speed transaction. The protocol
2619  * overheads differ for each type of USB transfer, and these formulas
2620  * and protocol overheads are derived from section 5.11.3 of the
2621  * USB 2.0 Specification.
2622 *
2623 * High-Speed:
2624 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2625 *
2626 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
2627 *
2628 * Protocol overhead + Split transaction overhead +
2629 * ((MaxPktSz * 7)/6) + Host_Delay;
2630 */
2631 /* ARGSUSED */
2632 static int
2633 ehci_compute_high_speed_bandwidth(
2634 ehci_state_t *ehcip,
2635 usb_ep_descr_t *endpoint,
2636 usb_port_status_t port_status,
2637 uint_t *sbandwidth,
2638 uint_t *cbandwidth)
2639 {
2640 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2641
2642 /* Return failure if endpoint maximum packet is zero */
2643 if (maxpacketsize == 0) {
2644 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2645 		    "ehci_compute_high_speed_bandwidth: Periodic endpoint "
2646 "with zero endpoint maximum packet size is not supported");
2647
2648 return (USB_NOT_SUPPORTED);
2649 }
2650
2651 /* Add bit-stuffing overhead */
2652 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2653
2654 /* Add Host Controller specific delay to required bandwidth */
2655 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2656
2657 /* Add xfer specific protocol overheads */
2658 if ((endpoint->bmAttributes &
2659 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2660 /* High speed interrupt transaction */
2661 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2662 } else {
2663 /* Isochronous transaction */
2664 *sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2665 }
2666
2667 /*
2668 * For low/full speed devices, add split transaction specific
2669 * overheads.
2670 */
2671 if (port_status != USBA_HIGH_SPEED_DEV) {
2672 /*
2673 * Add start and complete split transaction
2674 * tokens overheads.
2675 */
2676 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2677 *sbandwidth += START_SPLIT_OVERHEAD;
2678
2679 /* Add data overhead depending on data direction */
2680 if ((endpoint->bEndpointAddress &
2681 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2682 *cbandwidth += maxpacketsize;
2683 } else {
2684 if ((endpoint->bmAttributes &
2685 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
2686 				/* There are no complete splits for isoch out */
2687 *cbandwidth = 0;
2688 }
2689 *sbandwidth += maxpacketsize;
2690 }
2691 } else {
2692 uint_t xactions;
2693
2694 /* Get the max transactions per microframe */
2695 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2696 USB_EP_MAX_XACTS_SHIFT) + 1;
2697
2698 /* High speed transaction */
2699 *sbandwidth += maxpacketsize;
2700
2701 /* Calculate bandwidth per micro-frame */
2702 *sbandwidth *= xactions;
2703
2704 *cbandwidth = 0;
2705 }
2706
2707 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2708 	    "ehci_compute_high_speed_bandwidth: "
2709 "Start split bandwidth %d Complete split bandwidth %d",
2710 *sbandwidth, *cbandwidth);
2711
2712 return (USB_SUCCESS);
2713 }
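
/*
 * Worked example for the high speed formula above. This is an
 * illustrative sketch only: the helper name and its standalone form are
 * not part of this driver, and the block is kept under #if 0 so it is
 * never compiled. It simply restates
 * "Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay" for a single
 * high speed interrupt transaction.
 */
#if 0
static uint_t
ehci_example_hs_intr_bandwidth(uint_t maxpktsz)
{
	/* Bit-stuffing: up to 7 bits on the wire for every 6 data bits */
	uint_t stuffed = (maxpktsz * 7) / 6;

	return (HS_NON_ISOC_PROTO_OVERHEAD + stuffed +
	    EHCI_HOST_CONTROLLER_DELAY);
}
#endif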
2714
2715
2716 /*
2717 * ehci_compute_classic_bandwidth:
2718 *
2719 * Given a periodic endpoint (interrupt or isochronous) determine the total
2720 * bandwidth for one transaction. The EHCI host controller traverses the
2721 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2722 * services an endpoint, only a single transaction attempt is made. The HC
2723 * moves to the next Endpoint Descriptor after the first transaction attempt
2724 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2725 * Transfer Descriptor is inserted into the lattice, we will only count the
2726 * number of bytes for one transaction.
2727 *
2728  * The following are the formulas used for calculating bandwidth in terms
2729  * of bytes for a single low/full speed USB transaction. The protocol
2730  * overheads differ for each type of USB transfer, and these formulas
2731  * and protocol overheads are derived from section 5.11.3 of the
2732  * USB 2.0 Specification.
2733 *
2734 * Low-Speed:
2735 * Protocol overhead + Hub LS overhead +
2736 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2737 *
2738 * Full-Speed:
2739 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
2740 */
2741 /* ARGSUSED */
2742 static int
2743 ehci_compute_classic_bandwidth(
2744 usb_ep_descr_t *endpoint,
2745 usb_port_status_t port_status,
2746 uint_t *bandwidth)
2747 {
2748 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2749
2750 /*
2751 * If endpoint maximum packet is zero, then return immediately.
2752 */
2753 if (maxpacketsize == 0) {
2754
2755 return (USB_NOT_SUPPORTED);
2756 }
2757
2758 /* Add TT delay to required bandwidth */
2759 *bandwidth = TT_DELAY;
2760
2761 /* Add bit-stuffing overhead */
2762 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2763
2764 switch (port_status) {
2765 case USBA_LOW_SPEED_DEV:
2766 /* Low speed interrupt transaction */
2767 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2768 HUB_LOW_SPEED_PROTO_OVERHEAD +
2769 (LOW_SPEED_CLOCK * maxpacketsize));
2770 break;
2771 case USBA_FULL_SPEED_DEV:
2772 /* Full speed transaction */
2773 *bandwidth += maxpacketsize;
2774
2775 /* Add xfer specific protocol overheads */
2776 if ((endpoint->bmAttributes &
2777 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2778 /* Full speed interrupt transaction */
2779 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2780 } else {
2781 /* Isochronous and input transaction */
2782 if ((endpoint->bEndpointAddress &
2783 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2784 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2785 } else {
2786 /* Isochronous and output transaction */
2787 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2788 }
2789 }
2790 break;
2791 }
2792
2793 return (USB_SUCCESS);
2794 }
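
/*
 * Worked example for the full speed branch above, illustrative only and
 * assuming a wMaxPacketSize of 8: bit-stuffing gives (8 * 7) / 6 = 9
 * bytes on the wire, so the classic TT bandwidth of a full speed
 * interrupt endpoint comes to TT_DELAY + 9 + FS_NON_ISOC_PROTO_OVERHEAD
 * bytes per frame. The low speed branch additionally multiplies the
 * bit-stuffed payload by LOW_SPEED_CLOCK and adds the hub's low speed
 * signalling overhead.
 */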
2795
2796
2797 /*
2798 * ehci_adjust_polling_interval:
2799 *
2800  * Adjust the polling interval according to the usb device speed.
2801 */
2802 /* ARGSUSED */
2803 int
2804 ehci_adjust_polling_interval(
2805 ehci_state_t *ehcip,
2806 usb_ep_descr_t *endpoint,
2807 usb_port_status_t port_status)
2808 {
2809 uint_t interval;
2810 int i = 0;
2811
2812 /* Get the polling interval */
2813 interval = endpoint->bInterval;
2814
2815 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2816 "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2817
2818 /*
2819 	 * According to the USB 2.0 Specification, a high-speed endpoint's
2820 	 * polling interval is specified in terms of 125us units
2821 	 * (micro-frames), whereas a full/low speed endpoint's polling
2822 	 * interval is specified in milliseconds.
2823 	 *
2824 	 * A high speed interrupt/isochronous endpoint can specify a
2825 	 * desired bInterval between 1 and 16, giving a period of
2826 	 * 2^(bInterval - 1) micro-frames, whereas full/low speed
2827 	 * endpoints can specify between 1 and 255 milliseconds.
2828 */
2829 switch (port_status) {
2830 case USBA_LOW_SPEED_DEV:
2831 /*
2832 * Low speed endpoints are limited to specifying
2833 * only 8ms to 255ms in this driver. If a device
2834 * reports a polling interval that is less than 8ms,
2835 * it will use 8 ms instead.
2836 */
2837 if (interval < LS_MIN_POLL_INTERVAL) {
2838
2839 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2840 "Low speed endpoint's poll interval of %d ms "
2841 "is below threshold. Rounding up to %d ms",
2842 interval, LS_MIN_POLL_INTERVAL);
2843
2844 interval = LS_MIN_POLL_INTERVAL;
2845 }
2846
2847 /*
2848 * Return an error if the polling interval is greater
2849 * than 255ms.
2850 */
2851 if (interval > LS_MAX_POLL_INTERVAL) {
2852
2853 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2854 "Low speed endpoint's poll interval is "
2855 "greater than %d ms", LS_MAX_POLL_INTERVAL);
2856
2857 return (USB_FAILURE);
2858 }
2859 break;
2860
2861 case USBA_FULL_SPEED_DEV:
2862 /*
2863 * Return an error if the polling interval is less
2864 		 * than 1ms or greater than 255ms.
2865 */
2866 		if ((interval < FS_MIN_POLL_INTERVAL) ||
2867 (interval > FS_MAX_POLL_INTERVAL)) {
2868
2869 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2870 "Full speed endpoint's poll interval must "
2871 "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2872 FS_MAX_POLL_INTERVAL);
2873
2874 return (USB_FAILURE);
2875 }
2876 break;
2877 case USBA_HIGH_SPEED_DEV:
2878 /*
2879 		 * Return an error if the polling interval is less
2880 		 * than 1 or greater than 16. Convert this value to
2881 		 * 125us units using 2^(bInterval - 1); refer to the
2882 		 * USB 2.0 spec, page 51, for details.
2883 */
2884 		if ((interval < HS_MIN_POLL_INTERVAL) ||
2885 (interval > HS_MAX_POLL_INTERVAL)) {
2886
2887 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2888 "High speed endpoint's poll interval "
2889 "must be between %d and %d units",
2890 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2891
2892 return (USB_FAILURE);
2893 }
2894
2895 /* Adjust high speed device polling interval */
2896 interval =
2897 ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2898
2899 break;
2900 }
2901
2902 /*
2903 	 * If the polling interval is greater than 32ms,
2904 	 * clamp it to 32ms.
2905 */
2906 if (interval > EHCI_NUM_INTR_QH_LISTS) {
2907 interval = EHCI_NUM_INTR_QH_LISTS;
2908 }
2909
2910 /*
2911 	 * Round the interval down to the nearest
2912 	 * power of 2.
2913 */
2914 while ((ehci_pow_2(i)) <= interval) {
2915 i++;
2916 }
2917
2918 return (ehci_pow_2((i - 1)));
2919 }
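
/*
 * Worked example for the clamping and rounding above, illustrative
 * only: a full speed endpoint reporting bInterval = 20 passes the range
 * checks and is rounded down to ehci_pow_2(4) = 16, i.e. the 16ms
 * lattice level, while a request of 60ms is first clamped to
 * EHCI_NUM_INTR_QH_LISTS (32) and then returned as 32ms.
 */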
2920
2921
2922 /*
2923 * ehci_adjust_high_speed_polling_interval:
2924 */
2925 /* ARGSUSED */
2926 static int
2927 ehci_adjust_high_speed_polling_interval(
2928 ehci_state_t *ehcip,
2929 usb_ep_descr_t *endpoint)
2930 {
2931 uint_t interval;
2932
2933 /* Get the polling interval */
2934 interval = ehci_pow_2(endpoint->bInterval - 1);
2935
2936 /*
2937 	 * Convert the polling interval from micro-frames
2938 	 * (125us units) to frames (milliseconds).
2939 */
2940 if (interval <= EHCI_MAX_UFRAMES) {
2941 interval = 1;
2942 } else {
2943 interval = interval/EHCI_MAX_UFRAMES;
2944 }
2945
2946 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2947 "ehci_adjust_high_speed_polling_interval: "
2948 "High speed adjusted interval 0x%x", interval);
2949
2950 return (interval);
2951 }
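
/*
 * Worked example, illustrative only: a high speed endpoint with
 * bInterval = 4 polls every 2^(4 - 1) = 8 micro-frames, which the
 * routine above converts to 8 / EHCI_MAX_UFRAMES = 1ms; bInterval = 6
 * gives 32 micro-frames, i.e. 4ms. Any period of one frame or less
 * (bInterval 1 through 4) collapses to 1ms.
 */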
2952
2953
2954 /*
2955 * ehci_lattice_height:
2956 *
2957  * Given the requested interval, find the height in the tree at which the
2958  * nodes for this interval fall. The height is measured as the number of
2959  * nodes from the leaf to the level specified by the interval. The root of
2960  * the tree is at height TREE_HEIGHT.
2961 */
2962 static uint_t
2963 ehci_lattice_height(uint_t interval)
2964 {
2965 return (TREE_HEIGHT - (ehci_log_2(interval)));
2966 }
2967
2968
2969 /*
2970 * ehci_lattice_parent:
2971 *
2972 * Given a node in the lattice, find the index of the parent node
2973 */
2974 static uint_t
2975 ehci_lattice_parent(uint_t node)
2976 {
2977 if ((node % 2) == 0) {
2978
2979 return ((node/2) - 1);
2980 } else {
2981
2982 return ((node + 1)/2 - 1);
2983 }
2984 }
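
/*
 * Illustrative note: in this zero based layout node k has children
 * 2k + 1 and 2k + 2, so, for example, both node 5 (odd) and node 6
 * (even) map back to parent 2 through the formulas above.
 */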
2985
2986
2987 /*
2988 * ehci_find_periodic_node:
2989 *
2990 * Based on the "real" array leaf node and interval, get the periodic node.
2991 */
2992 static uint_t
2993 ehci_find_periodic_node(uint_t leaf, int interval)
2994 {
2995 uint_t lattice_leaf;
2996 uint_t height = ehci_lattice_height(interval);
2997 uint_t pnode;
2998 int i;
2999
3000 /* Get the leaf number in the lattice */
3001 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
3002
3003 /* Get the node in the lattice based on the height and leaf */
3004 pnode = lattice_leaf;
3005 for (i = 0; i < height; i++) {
3006 pnode = ehci_lattice_parent(pnode);
3007 }
3008
3009 return (pnode);
3010 }
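
/*
 * Worked example, illustrative only: array leaf 0 with a 1 frame
 * interval gives lattice_leaf = 0 + 32 - 1 = 31 and a height of
 * TREE_HEIGHT, so the walk climbs 31 -> 15 -> 7 -> 3 -> 1 -> 0 and the
 * periodic node is the root; the same leaf with a 32ms interval has
 * height 0 and simply stays at lattice node 31.
 */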
3011
3012
3013 /*
3014 * ehci_leftmost_leaf:
3015 *
3016 * Find the leftmost leaf in the subtree specified by the node. Height refers
3017 * to number of nodes from the bottom of the tree to the node, including the
3018 * node.
3019 *
3020 * The formula for a zero based tree is:
3021 * 2^H * Node + 2^H - 1
3022  * The leaves of the tree form an array; convert the lattice number to an
3023  * array index by subtracting the number of nodes that are not in the array:
3024 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
3025 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
3026 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
3027 * 0
3028 * 1 2
3029 * 0 1 2 3
3030 */
3031 static uint_t
3032 ehci_leftmost_leaf(
3033 uint_t node,
3034 uint_t height)
3035 {
3036 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
3037 }
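
/*
 * Worked example, illustrative only: with EHCI_NUM_INTR_QH_LISTS = 32
 * (and hence a TREE_HEIGHT of 5), the root (node 0) at height 5 maps to
 * array leaf 2^5 * (0 + 1) - 32 = 0, while node 2 at height 4 maps to
 * 2^4 * (2 + 1) - 32 = 16, the left edge of the right half of the leaf
 * array.
 */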
3038
3039
3040 /*
3041 * ehci_pow_2:
3042 *
3043 * Compute 2 to the power
3044 */
3045 static uint_t
3046 ehci_pow_2(uint_t x)
3047 {
3048 if (x == 0) {
3049
3050 return (1);
3051 } else {
3052
3053 return (2 << (x - 1));
3054 }
3055 }
3056
3057
3058 /*
3059 * ehci_log_2:
3060 *
3061 * Compute log base 2 of x
3062 */
3063 static uint_t
3064 ehci_log_2(uint_t x)
3065 {
3066 int i = 0;
3067
3068 while (x != 1) {
3069 x = x >> 1;
3070 i++;
3071 }
3072
3073 return (i);
3074 }
3075
3076
3077 /*
3078 * ehci_find_bestfit_hs_mask:
3079 *
3080  * Find the smask in the bandwidth allocation for a high speed endpoint,
3081  * and update the bandwidth allocation.
3082 */
3083 static int
3084 ehci_find_bestfit_hs_mask(
3085 ehci_state_t *ehcip,
3086 uchar_t *smask,
3087 uint_t *pnode,
3088 usb_ep_descr_t *endpoint,
3089 uint_t bandwidth,
3090 int interval)
3091 {
3092 int i;
3093 uint_t elements, index;
3094 int array_leaf, best_array_leaf;
3095 uint_t node_bandwidth, best_node_bandwidth;
3096 uint_t leaf_count;
3097 uchar_t bw_mask;
3098 uchar_t best_smask;
3099
3100 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3101 "ehci_find_bestfit_hs_mask: ");
3102
3103 /* Get all the valid smasks */
3104 switch (ehci_pow_2(endpoint->bInterval - 1)) {
3105 case EHCI_INTR_1US_POLL:
3106 index = EHCI_1US_MASK_INDEX;
3107 elements = EHCI_INTR_1US_POLL;
3108 break;
3109 case EHCI_INTR_2US_POLL:
3110 index = EHCI_2US_MASK_INDEX;
3111 elements = EHCI_INTR_2US_POLL;
3112 break;
3113 case EHCI_INTR_4US_POLL:
3114 index = EHCI_4US_MASK_INDEX;
3115 elements = EHCI_INTR_4US_POLL;
3116 break;
3117 case EHCI_INTR_XUS_POLL:
3118 default:
3119 index = EHCI_XUS_MASK_INDEX;
3120 elements = EHCI_INTR_XUS_POLL;
3121 break;
3122 }
3123
3124 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3125
3126 /*
3127 	 * Because of the way the leaves are set up, we will automatically
3128 * hit the leftmost leaf of every possible node with this interval.
3129 */
3130 best_smask = 0x00;
3131 best_node_bandwidth = 0;
3132 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3133 /* Find the bandwidth mask */
3134 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3135 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3136
3137 /*
3138 * If this node cannot support our requirements skip to the
3139 * next leaf.
3140 */
3141 if (bw_mask == 0x00) {
3142 continue;
3143 }
3144
3145 /*
3146 * Now make sure our bandwidth requirements can be
3147 * satisfied with one of smasks in this node.
3148 */
3149 *smask = 0x00;
3150 for (i = index; i < (index + elements); i++) {
3151 /* Check the start split mask value */
3152 			if (ehci_start_split_mask[i] & bw_mask) {
3153 				*smask = ehci_start_split_mask[i];
3154 break;
3155 }
3156 }
3157
3158 /*
3159 * If an appropriate smask is found save the information if:
3160 * o best_smask has not been found yet.
3161 * - or -
3162 * o This is the node with the least amount of bandwidth
3163 */
3164 if ((*smask != 0x00) &&
3165 ((best_smask == 0x00) ||
3166 (best_node_bandwidth > node_bandwidth))) {
3167
3168 best_node_bandwidth = node_bandwidth;
3169 best_array_leaf = array_leaf;
3170 best_smask = *smask;
3171 }
3172 }
3173
3174 /*
3175 	 * If we find a node that can handle the bandwidth, populate the
3176 	 * appropriate variables and return success.
3177 */
3178 if (best_smask) {
3179 *smask = best_smask;
3180 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3181 interval);
3182 ehci_update_bw_availability(ehcip, bandwidth,
3183 ehci_index[best_array_leaf], leaf_count, best_smask);
3184
3185 return (USB_SUCCESS);
3186 }
3187
3188 return (USB_FAILURE);
3189 }
3190
3191
3192 /*
3193 * ehci_find_bestfit_ls_intr_mask:
3194 *
3195 * Find the smask and cmask in the bandwidth allocation.
3196 */
3197 static int
3198 ehci_find_bestfit_ls_intr_mask(
3199 ehci_state_t *ehcip,
3200 uchar_t *smask,
3201 uchar_t *cmask,
3202 uint_t *pnode,
3203 uint_t sbandwidth,
3204 uint_t cbandwidth,
3205 int interval)
3206 {
3207 int i;
3208 uint_t elements, index;
3209 int array_leaf, best_array_leaf;
3210 uint_t node_sbandwidth, node_cbandwidth;
3211 uint_t best_node_bandwidth;
3212 uint_t leaf_count;
3213 uchar_t bw_smask, bw_cmask;
3214 uchar_t best_smask, best_cmask;
3215
3216 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3217 "ehci_find_bestfit_ls_intr_mask: ");
3218
3219 /* For low and full speed devices */
3220 index = EHCI_XUS_MASK_INDEX;
3221 elements = EHCI_INTR_4MS_POLL;
3222
3223 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3224
3225 /*
3226 	 * Because of the way the leaves are set up, we will automatically
3227 * hit the leftmost leaf of every possible node with this interval.
3228 */
3229 best_smask = 0x00;
3230 best_node_bandwidth = 0;
3231 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3232 /* Find the bandwidth mask */
3233 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3234 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3235 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3236 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3237
3238 /*
3239 * If this node cannot support our requirements skip to the
3240 * next leaf.
3241 */
3242 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3243 continue;
3244 }
3245
3246 /*
3247 * Now make sure our bandwidth requirements can be
3248 * satisfied with one of smasks in this node.
3249 */
3250 *smask = 0x00;
3251 *cmask = 0x00;
3252 for (i = index; i < (index + elements); i++) {
3253 /* Check the start split mask value */
3254 			if ((ehci_start_split_mask[i] & bw_smask) &&
3255 			    (ehci_intr_complete_split_mask[i] & bw_cmask)) {
3256 				*smask = ehci_start_split_mask[i];
3257 				*cmask = ehci_intr_complete_split_mask[i];
3258 break;
3259 }
3260 }
3261
3262 /*
3263 * If an appropriate smask is found save the information if:
3264 * o best_smask has not been found yet.
3265 * - or -
3266 * o This is the node with the least amount of bandwidth
3267 */
3268 if ((*smask != 0x00) &&
3269 ((best_smask == 0x00) ||
3270 (best_node_bandwidth >
3271 (node_sbandwidth + node_cbandwidth)))) {
3272 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3273 best_array_leaf = array_leaf;
3274 best_smask = *smask;
3275 best_cmask = *cmask;
3276 }
3277 }
3278
3279 /*
3280 	 * If we find a node that can handle the bandwidth, populate the
3281 	 * appropriate variables and return success.
3282 */
3283 if (best_smask) {
3284 *smask = best_smask;
3285 *cmask = best_cmask;
3286 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3287 interval);
3288 ehci_update_bw_availability(ehcip, sbandwidth,
3289 ehci_index[best_array_leaf], leaf_count, best_smask);
3290 ehci_update_bw_availability(ehcip, cbandwidth,
3291 ehci_index[best_array_leaf], leaf_count, best_cmask);
3292
3293 return (USB_SUCCESS);
3294 }
3295
3296 return (USB_FAILURE);
3297 }
3298
3299
3300 /*
3301 * ehci_find_bestfit_sitd_in_mask:
3302 *
3303 * Find the smask and cmask in the bandwidth allocation.
3304 */
3305 static int
3306 ehci_find_bestfit_sitd_in_mask(
3307 ehci_state_t *ehcip,
3308 uchar_t *smask,
3309 uchar_t *cmask,
3310 uint_t *pnode,
3311 uint_t sbandwidth,
3312 uint_t cbandwidth,
3313 int interval)
3314 {
3315 int i, uFrames, found;
3316 int array_leaf, best_array_leaf;
3317 uint_t node_sbandwidth, node_cbandwidth;
3318 uint_t best_node_bandwidth;
3319 uint_t leaf_count;
3320 uchar_t bw_smask, bw_cmask;
3321 uchar_t best_smask, best_cmask;
3322
3323 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3324 "ehci_find_bestfit_sitd_in_mask: ");
3325
3326 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3327
3328 /*
3329 	 * Because of the way the leaves are set up, we will automatically
3330 * hit the leftmost leaf of every possible node with this interval.
3331 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3332 */
3333 /*
3334 * Need to add an additional 2 uFrames, if the "L"ast
3335 * complete split is before uFrame 6. See section
3336 	 * 11.8.4 in the USB 2.0 Spec. Currently we do not support
3337 	 * the "Back Ptr", which means we support an IN of at most
3338 	 * ~4*MAX_UFRAME_SITD_XFER bandwidth.
3339 */
3340 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3341 if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3342 uFrames++;
3343 }
3344 if (uFrames > 6) {
3345
3346 return (USB_FAILURE);
3347 }
3348 *smask = 0x1;
3349 *cmask = 0x00;
3350 for (i = 0; i < uFrames; i++) {
3351 *cmask = *cmask << 1;
3352 *cmask |= 0x1;
3353 }
3354 	/* cmask must start 2 micro-frames after the smask */
3355 *cmask = *cmask << 2;
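
	/*
	 * Worked example, illustrative only: a complete split bandwidth of
	 * one MAX_UFRAME_SITD_XFER or less yields uFrames = 0 + 2 + 1 = 3,
	 * so the loop above builds cmask = 0x07, which the shift turns into
	 * 0x1c (complete splits in micro-frames 2, 3 and 4) while smask
	 * stays 0x01; both masks are then shifted toward later micro-frames
	 * below until a fit is found.
	 */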
3356
3357 found = 0;
3358 best_smask = 0x00;
3359 best_node_bandwidth = 0;
3360 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3361 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3362 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3363 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3364 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3365 &bw_cmask);
3366
3367 /*
3368 * If this node cannot support our requirements skip to the
3369 * next leaf.
3370 */
3371 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3372 continue;
3373 }
3374
3375 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3376 if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3377 found = 1;
3378 break;
3379 }
3380 *smask = *smask << 1;
3381 *cmask = *cmask << 1;
3382 }
3383
3384 /*
3385 * If an appropriate smask is found save the information if:
3386 * o best_smask has not been found yet.
3387 * - or -
3388 * o This is the node with the least amount of bandwidth
3389 */
3390 if (found &&
3391 ((best_smask == 0x00) ||
3392 (best_node_bandwidth >
3393 (node_sbandwidth + node_cbandwidth)))) {
3394 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3395 best_array_leaf = array_leaf;
3396 best_smask = *smask;
3397 best_cmask = *cmask;
3398 }
3399 }
3400
3401 /*
3402 	 * If we find a node that can handle the bandwidth, populate the
3403 	 * appropriate variables and return success.
3404 */
3405 if (best_smask) {
3406 *smask = best_smask;
3407 *cmask = best_cmask;
3408 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3409 interval);
3410 ehci_update_bw_availability(ehcip, sbandwidth,
3411 ehci_index[best_array_leaf], leaf_count, best_smask);
3412 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3413 ehci_index[best_array_leaf], leaf_count, best_cmask);
3414
3415 return (USB_SUCCESS);
3416 }
3417
3418 return (USB_FAILURE);
3419 }
3420
3421
3422 /*
3423 * ehci_find_bestfit_sitd_out_mask:
3424 *
3425 * Find the smask in the bandwidth allocation.
3426 */
3427 static int
3428 ehci_find_bestfit_sitd_out_mask(
3429 ehci_state_t *ehcip,
3430 uchar_t *smask,
3431 uint_t *pnode,
3432 uint_t sbandwidth,
3433 int interval)
3434 {
3435 int i, uFrames, found;
3436 int array_leaf, best_array_leaf;
3437 uint_t node_sbandwidth;
3438 uint_t best_node_bandwidth;
3439 uint_t leaf_count;
3440 uchar_t bw_smask;
3441 uchar_t best_smask;
3442
3443 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3444 "ehci_find_bestfit_sitd_out_mask: ");
3445
3446 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3447
3448 /*
3449 	 * Because of the way the leaves are set up, we will automatically
3450 * hit the leftmost leaf of every possible node with this interval.
3451 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3452 */
3453 *smask = 0x00;
3454 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3455 if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3456 uFrames++;
3457 }
3458 for (i = 0; i < uFrames; i++) {
3459 *smask = *smask << 1;
3460 *smask |= 0x1;
3461 }
3462
3463 found = 0;
3464 best_smask = 0x00;
3465 best_node_bandwidth = 0;
3466 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3467 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3468 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3469 &bw_smask);
3470
3471 /*
3472 * If this node cannot support our requirements skip to the
3473 * next leaf.
3474 */
3475 if (bw_smask == 0x00) {
3476 continue;
3477 }
3478
3479 /* You cannot have a start split on the 8th uFrame */
3480 for (i = 0; (*smask & 0x80) == 0; i++) {
3481 if (*smask & bw_smask) {
3482 found = 1;
3483 break;
3484 }
3485 *smask = *smask << 1;
3486 }
3487
3488 /*
3489 * If an appropriate smask is found save the information if:
3490 * o best_smask has not been found yet.
3491 * - or -
3492 * o This is the node with the least amount of bandwidth
3493 */
3494 if (found &&
3495 ((best_smask == 0x00) ||
3496 (best_node_bandwidth > node_sbandwidth))) {
3497 best_node_bandwidth = node_sbandwidth;
3498 best_array_leaf = array_leaf;
3499 best_smask = *smask;
3500 }
3501 }
3502
3503 /*
3504 	 * If we find a node that can handle the bandwidth, populate the
3505 	 * appropriate variables and return success.
3506 */
3507 if (best_smask) {
3508 *smask = best_smask;
3509 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3510 interval);
3511 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3512 ehci_index[best_array_leaf], leaf_count, best_smask);
3513
3514 return (USB_SUCCESS);
3515 }
3516
3517 return (USB_FAILURE);
3518 }
3519
3520
3521 /*
3522 * ehci_calculate_bw_availability_mask:
3523 *
3524 * Returns the "total bandwidth used" in this node.
3525 * Populates bw_mask with the uFrames that can support the bandwidth.
3526 *
3527  * If none of the uFrames can support this bandwidth, then bw_mask
3528  * is returned as 0x00 and the "total bandwidth used" is invalid.
3529 */
3530 static uint_t
3531 ehci_calculate_bw_availability_mask(
3532 ehci_state_t *ehcip,
3533 uint_t bandwidth,
3534 int leaf,
3535 int leaf_count,
3536 uchar_t *bw_mask)
3537 {
3538 int i, j;
3539 uchar_t bw_uframe;
3540 int uframe_total;
3541 ehci_frame_bandwidth_t *fbp;
3542 uint_t total_bandwidth = 0;
3543
3544 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3545 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3546 leaf, leaf_count);
3547
3548 /* Start by saying all uFrames are available */
3549 *bw_mask = 0xFF;
3550
3551 	for (i = 0; (i < leaf_count) && (*bw_mask != 0x00); i++) {
3552 fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3553
3554 total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3555
3556 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3557 /*
3558 * If the uFrame in bw_mask is available check to see if
3559 * it can support the additional bandwidth.
3560 */
3561 bw_uframe = (*bw_mask & (0x1 << j));
3562 uframe_total =
3563 fbp->ehci_micro_frame_bandwidth[j] +
3564 bandwidth;
3565 if ((bw_uframe) &&
3566 (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3567 *bw_mask = *bw_mask & ~bw_uframe;
3568 }
3569 }
3570 }
3571
3572 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3573 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3574 *bw_mask);
3575
3576 return (total_bandwidth);
3577 }
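
/*
 * Illustrative note: if every micro-frame of every examined leaf can
 * absorb the extra bandwidth without exceeding HS_PERIODIC_BANDWIDTH,
 * bw_mask comes back as 0xFF; each micro-frame that would overflow in
 * any leaf has its bit cleared, and a mask of 0x00 tells the
 * ehci_find_bestfit_*() callers to skip this node entirely.
 */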
3578
3579
3580 /*
3581 * ehci_update_bw_availability:
3582 *
3583 * The leftmost leaf needs to be in terms of array position and
3584 * not the actual lattice position.
3585 */
3586 static void
3587 ehci_update_bw_availability(
3588 ehci_state_t *ehcip,
3589 int bandwidth,
3590 int leftmost_leaf,
3591 int leaf_count,
3592 uchar_t mask)
3593 {
3594 int i, j;
3595 ehci_frame_bandwidth_t *fbp;
3596 int uFrame_bandwidth[8];
3597
3598 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3599 "ehci_update_bw_availability: "
3600 "leaf %d count %d bandwidth 0x%x mask 0x%x",
3601 leftmost_leaf, leaf_count, bandwidth, mask);
3602
3603 ASSERT(leftmost_leaf < 32);
3604 ASSERT(leftmost_leaf >= 0);
3605
3606 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3607 if (mask & 0x1) {
3608 uFrame_bandwidth[j] = bandwidth;
3609 } else {
3610 uFrame_bandwidth[j] = 0;
3611 }
3612
3613 mask = mask >> 1;
3614 }
3615
3616 	/* Update all the affected leaves with the bandwidth */
3617 for (i = 0; i < leaf_count; i++) {
3618 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3619
3620 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3621 fbp->ehci_micro_frame_bandwidth[j] +=
3622 uFrame_bandwidth[j];
3623 fbp->ehci_allocated_frame_bandwidth +=
3624 uFrame_bandwidth[j];
3625 }
3626 }
3627 }
3628
3629 /*
3630 * Miscellaneous functions
3631 */
3632
3633 /*
3634 * ehci_obtain_state:
3635 *
3636 * NOTE: This function is also called from POLLED MODE.
3637 */
3638 ehci_state_t *
3639 ehci_obtain_state(dev_info_t *dip)
3640 {
3641 int instance = ddi_get_instance(dip);
3642
3643 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3644
3645 ASSERT(state != NULL);
3646
3647 return (state);
3648 }
3649
3650
3651 /*
3652 * ehci_state_is_operational:
3653 *
3654 * Check the Host controller state and return proper values.
3655 */
3656 int
3657 ehci_state_is_operational(ehci_state_t *ehcip)
3658 {
3659 int val;
3660
3661 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3662
3663 switch (ehcip->ehci_hc_soft_state) {
3664 case EHCI_CTLR_INIT_STATE:
3665 case EHCI_CTLR_SUSPEND_STATE:
3666 val = USB_FAILURE;
3667 break;
3668 case EHCI_CTLR_OPERATIONAL_STATE:
3669 val = USB_SUCCESS;
3670 break;
3671 case EHCI_CTLR_ERROR_STATE:
3672 val = USB_HC_HARDWARE_ERROR;
3673 break;
3674 default:
3675 val = USB_FAILURE;
3676 break;
3677 }
3678
3679 return (val);
3680 }
3681
3682
3683 /*
3684 * ehci_do_soft_reset
3685 *
3686 * Do soft reset of ehci host controller.
3687 */
3688 int
3689 ehci_do_soft_reset(ehci_state_t *ehcip)
3690 {
3691 usb_frame_number_t before_frame_number, after_frame_number;
3692 ehci_regs_t *ehci_save_regs;
3693
3694 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3695
3696 /* Increment host controller error count */
3697 ehcip->ehci_hc_error++;
3698
3699 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3700 	    "ehci_do_soft_reset: "
3701 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3702
3703 /*
3704 * Allocate space for saving current Host Controller
3705 * registers. Don't do any recovery if allocation
3706 * fails.
3707 */
3708 ehci_save_regs = (ehci_regs_t *)
3709 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3710
3711 if (ehci_save_regs == NULL) {
3712 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3713 "ehci_do_soft_reset: kmem_zalloc failed");
3714
3715 return (USB_FAILURE);
3716 }
3717
3718 /* Save current ehci registers */
3719 ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3720 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3721 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3722 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3723 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3724 ehci_save_regs->ehci_periodic_list_base =
3725 Get_OpReg(ehci_periodic_list_base);
3726
3727 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3728 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3729
3730 /* Disable all list processing and interrupts */
3731 Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3732 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3733
3734 /* Disable all EHCI interrupts */
3735 Set_OpReg(ehci_interrupt, 0);
3736
3737 /* Wait for a few milliseconds */
3738 drv_usecwait(EHCI_SOF_TIMEWAIT);
3739
3740 /* Do light soft reset of ehci host controller */
3741 Set_OpReg(ehci_command,
3742 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3743
3744 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3745 "ehci_do_soft_reset: Reset in progress");
3746
3747 /* Wait for reset to complete */
3748 drv_usecwait(EHCI_RESET_TIMEWAIT);
3749
3750 /*
3751 * Restore the previously saved EHCI register values
3752 * into the current EHCI registers.
3753 */
3754 Set_OpReg(ehci_ctrl_segment, (uint32_t)
3755 ehci_save_regs->ehci_ctrl_segment);
3756
3757 Set_OpReg(ehci_periodic_list_base, (uint32_t)
3758 ehci_save_regs->ehci_periodic_list_base);
3759
3760 Set_OpReg(ehci_async_list_addr, (uint32_t)
3761 ehci_save_regs->ehci_async_list_addr);
3762
3763 /*
3764 * For some reason this register might get nulled out by
3765 * the Uli M1575 South Bridge. To work around the hardware
3766 * problem, check the value after the write and retry if the
3767 * last write failed.
3768 */
3769 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3770 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3771 (ehci_save_regs->ehci_async_list_addr !=
3772 Get_OpReg(ehci_async_list_addr))) {
3773 int retry = 0;
3774
3775 Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3776 ehci_save_regs->ehci_async_list_addr, retry);
3777 if (retry >= EHCI_MAX_RETRY) {
3778 USB_DPRINTF_L2(PRINT_MASK_ATTA,
3779 ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3780 " ASYNCLISTADDR write failed.");
3781
3782 return (USB_FAILURE);
3783 }
3784 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3785 "ehci_do_soft_reset: ASYNCLISTADDR "
3786 "write failed, retry=%d", retry);
3787 }
3788
3789 Set_OpReg(ehci_config_flag, (uint32_t)
3790 ehci_save_regs->ehci_config_flag);
3791
3792 /* Enable both Asynchronous and Periodic Schedule if necessary */
3793 ehci_toggle_scheduler(ehcip);
3794
3795 /*
3796 * Set ehci_interrupt to enable all interrupts except the
3797 * Root Hub Status change interrupt.
3798 */
3799 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3800 EHCI_INTR_FRAME_LIST_ROLLOVER |
3801 EHCI_INTR_USB_ERROR |
3802 EHCI_INTR_USB);
3803
3804 /*
3805 * Deallocate the space that was allocated for saving the
3806 * HC registers.
3807 */
3808 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3809
3810 /*
3811 * Set the desired interrupt threshold, frame list size (if
3812 * applicable) and turn on the EHCI host controller.
3813 */
3814 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3815 ~EHCI_CMD_INTR_THRESHOLD) |
3816 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
3817
3818 /* Wait 10ms for EHCI to start sending SOF */
3819 drv_usecwait(EHCI_RESET_TIMEWAIT);
3820
3821 /*
3822 * Get the current usb frame number before waiting for
3823 * a few milliseconds.
3824 */
3825 before_frame_number = ehci_get_current_frame_number(ehcip);
3826
3827 /* Wait for a few milliseconds */
3828 drv_usecwait(EHCI_SOF_TIMEWAIT);
3829
3830 /*
3831 * Get the current usb frame number after waiting for
3832 * a few milliseconds.
3833 */
3834 after_frame_number = ehci_get_current_frame_number(ehcip);
3835
3836 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3837 "ehci_do_soft_reset: Before Frame Number 0x%llx "
3838 "After Frame Number 0x%llx",
3839 (unsigned long long)before_frame_number,
3840 (unsigned long long)after_frame_number);
3841
3842 if ((after_frame_number <= before_frame_number) &&
3843 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3844
3845 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3846 "ehci_do_soft_reset: Soft reset failed");
3847
3848 return (USB_FAILURE);
3849 }
3850
3851 return (USB_SUCCESS);
3852 }
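/*
 * Standalone sketch (not driver code) of the liveness test applied at the
 * end of ehci_do_soft_reset() above: the reset is considered successful
 * only if the frame counter advanced across the wait or the controller is
 * no longer reporting HCHalted.
 */
static int
demo_soft_reset_succeeded(usb_frame_number_t before,
    usb_frame_number_t after, boolean_t hc_halted)
{
	if ((after <= before) && hc_halted) {

		return (0);	/* no SOFs and controller still halted */
	}

	return (1);		/* controller is generating SOFs again */
}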
3853
3854
3855 /*
3856 * ehci_get_xfer_attrs:
3857 *
3858 * Get the attributes of a particular xfer.
3859 *
3860 * NOTE: This function is also called from POLLED MODE.
3861 */
3862 usb_req_attrs_t
3863 ehci_get_xfer_attrs(
3864 ehci_state_t *ehcip,
3865 ehci_pipe_private_t *pp,
3866 ehci_trans_wrapper_t *tw)
3867 {
3868 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3869 usb_req_attrs_t attrs = USB_ATTRS_NONE;
3870
3871 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3872 "ehci_get_xfer_attrs:");
3873
3874 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3875 case USB_EP_ATTR_CONTROL:
3876 attrs = ((usb_ctrl_req_t *)
3877 tw->tw_curr_xfer_reqp)->ctrl_attributes;
3878 break;
3879 case USB_EP_ATTR_BULK:
3880 attrs = ((usb_bulk_req_t *)
3881 tw->tw_curr_xfer_reqp)->bulk_attributes;
3882 break;
3883 case USB_EP_ATTR_INTR:
3884 attrs = ((usb_intr_req_t *)
3885 tw->tw_curr_xfer_reqp)->intr_attributes;
3886 break;
3887 }
3888
3889 return (attrs);
3890 }
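/*
 * Hypothetical caller sketch (not driver code): completion paths typically
 * fetch the request attributes this way and branch on individual flags,
 * e.g. USB_ATTRS_SHORT_XFER_OK.
 */
static boolean_t
demo_short_xfer_ok(ehci_state_t *ehcip, ehci_pipe_private_t *pp,
    ehci_trans_wrapper_t *tw)
{
	usb_req_attrs_t attrs = ehci_get_xfer_attrs(ehcip, pp, tw);

	return ((attrs & USB_ATTRS_SHORT_XFER_OK) ? B_TRUE : B_FALSE);
}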
3891
3892
3893 /*
3894 * ehci_get_current_frame_number:
3895 *
3896 * Get the current software-based usb frame number.
3897 */
3898 usb_frame_number_t
3899 ehci_get_current_frame_number(ehci_state_t *ehcip)
3900 {
3901 usb_frame_number_t usb_frame_number;
3902 usb_frame_number_t ehci_fno, micro_frame_number;
3903
3904 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3905
3906 ehci_fno = ehcip->ehci_fno;
3907 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3908
3909 /*
3910 * Calculate the current software-based usb frame number.
3911 *
3912 * This code accounts for the fact that the frame number is
3913 * updated by the Host Controller before the ehci driver
3914 * gets a FrameListRollover interrupt that will adjust the
3915 * higher part of the frame number.
3916 *
3917 * Refer to the ehci specification 1.0, section 2.3.2, page 21.
3918 */
3919 micro_frame_number = ((micro_frame_number & 0x1FFF) |
3920 ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3921 ehci_fno) & 0x2000);
3922
3923 /*
3924 * A micro-frame is equivalent to 125 usec. Eight
3925 * micro-frames are equivalent to one millisecond,
3926 * or one usb frame.
3927 */
3928 usb_frame_number = micro_frame_number >>
3929 EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
3930
3931 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3932 "ehci_get_current_frame_number: "
3933 "Current usb uframe number = 0x%llx "
3934 "Current usb frame number = 0x%llx",
3935 (unsigned long long)micro_frame_number,
3936 (unsigned long long)usb_frame_number);
3937
3938 return (usb_frame_number);
3939 }
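/*
 * Standalone sketch (not part of the driver) of the rollover compensation
 * done above.  The lower 13 bits always come from the hardware FRINDEX;
 * when bit 13 of the hardware value disagrees with the software copy, the
 * hardware has crossed a rollover boundary that the FrameListRollover
 * interrupt has not yet recorded, so 0x2000 is added.
 */
static usb_frame_number_t
demo_current_uframe(usb_frame_number_t soft_fno, usb_frame_number_t frindex)
{
	frindex &= 0x3FFF;	/* FRINDEX is a 14-bit micro-frame count */

	return (((frindex & 0x1FFF) | soft_fno) +
	    ((frindex ^ soft_fno) & 0x2000));
}

/*
 * Example: soft_fno = 0x2000 and a hardware FRINDEX of 0x0005 (bit 13 has
 * already wrapped back to zero) yield 0x4005 micro-frames, i.e. usb frame
 * 0x800 after shifting right by EHCI_uFRAMES_PER_USB_FRAME_SHIFT (eight
 * micro-frames per frame).
 */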
3940
3941
3942 /*
3943 * ehci_cpr_cleanup:
3944 *
3945 * Clean up ehci state and other ehci-specific information across
3946 * CheckPoint Resume (CPR).
3947 */
3948 static void
3949 ehci_cpr_cleanup(ehci_state_t *ehcip)
3950 {
3951 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3952
3953 /* Reset software part of usb frame number */
3954 ehcip->ehci_fno = 0;
3955 }
3956
3957
3958 /*
3959 * ehci_wait_for_sof:
3960 *
3961 * Wait for a couple of SOF interrupts.
3962 */
3963 int
3964 ehci_wait_for_sof(ehci_state_t *ehcip)
3965 {
3966 usb_frame_number_t before_frame_number, after_frame_number;
3967 int error = USB_SUCCESS;
3968
3969 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3970 ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3971
3972 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3973
3974 error = ehci_state_is_operational(ehcip);
3975
3976 if (error != USB_SUCCESS) {
3977
3978 return (error);
3979 }
3980
3981 /* Get the current usb frame number before waiting for two SOFs */
3982 before_frame_number = ehci_get_current_frame_number(ehcip);
3983
3984 mutex_exit(&ehcip->ehci_int_mutex);
3985
3986 /* Wait for a few milliseconds */
3987 delay(